(.*?)(<\\/)?i>', itl).group(2)\n# description = description.replace(itl, f'*{txt}*')\n\n# # replace br tags with newline\n# description = description.replace('<br/>', '\\n')\n\n# desc = BeautifulSoup(description).get_text('\\n')\n# return desc\n", "repo_name": "srezasm/FeedWatcherTelegramBot", "sub_path": "src/formathandler.py", "file_name": "formathandler.py", "file_ext": "py", "file_size_in_byte": 4222, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "config.get_feed_category", "line_number": 10, "usage_type": "call"}, {"api_name": "config.get_format_by_category", "line_number": 11, "usage_type": "call"}, {"api_name": "utils.get_entry", "line_number": 13, "usage_type": "call"}, {"api_name": "utils.get_entry", "line_number": 14, "usage_type": "call"}, {"api_name": "utils.get_entry", "line_number": 15, "usage_type": "call"}, {"api_name": "utils.get_entry", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.get_entry", "line_number": 17, "usage_type": "call"}, {"api_name": "utils.get_entry", "line_number": 21, "usage_type": "call"}, {"api_name": "utils.format_str", "line_number": 23, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 29, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 29, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 30, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 35, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 36, "usage_type": "call"}, {"api_name": "regex.match", "line_number": 41, "usage_type": "call"}, {"api_name": "regex.sub", "line_number": 72, "usage_type": "call"}]}
+{"seq_id": "70815543286", "text": "# region Example -1\r\n\r\nsayac = 0\r\ntoplam = 0\r\n\r\nwhile sayac <= 10:\r\n x = int(input(\"Lütfen bir sayı giriniz: \"))\r\n toplam += x\r\n sayac += 1\r\nprint(f'Sayıların toplamı: {toplam}')\r\n\r\n# endregion\r\n\r\n# region Example-2\r\n\r\nsayac = 0\r\nwhile sayac <= 100:\r\n print(sayac)\r\n sayac += 1\r\n\r\n# endregion\r\n\r\n# region Example -3\r\n\r\nsayac = 1\r\nc_s = 0\r\nt_s = 0\r\nwhile sayac <= 101:\r\n if sayac % 2 == 0:\r\n c_s += 1\r\n else:\r\n t_s += 1\r\n sayac += 1\r\nprint(f'Çift sayılar: {c_s} \\nTek sayılar:{t_s}')\r\n\r\n\r\n# endregion\r\n\r\n# region Example -4\r\n\r\nsayac = 1\r\ncs = 0\r\nts = 0\r\n\r\nwhile sayac <= 100:\r\n if sayac % 2 == 0:\r\n cs += 1\r\n sayac += 1\r\n else:\r\n ts += 1\r\n sayac += 1\r\nprint(f'{cs} tane çift sayı vardır')\r\nprint(f'{ts} tane tek sayı vardır')\r\n\r\n# endregion\r\n\r\n# region Example -5\r\n\r\nsayac = 1\r\ncst = 0\r\ntst = 0\r\n\r\nwhile sayac <= 100:\r\n if sayac % 2 == 0:\r\n cst += sayac\r\n else:\r\n tst += sayac\r\n sayac += 1\r\nprint(f'Çift sayıların toplamı: {cst}')\r\nprint(f'Tek sayıların toplamı: {tst}')\r\n\r\n# endregion\r\n\r\n# region Example -6\r\n\r\np_t_l = ['+', '-', '*', '/', 'e']\r\nwhile True:\r\n i = input(\"İşlem işareti giriniz: \")\r\n if i in p_t_l:\r\n \r\n if i == (\"e\"):\r\n break\r\n else:\r\n x = int(input(\"Sayı giriniz: \"))\r\n y = int(input(\"Sayı giriniz: \"))\r\n if i == (\"+\"):\r\n print(x+y)\r\n elif i == (\"-\"):\r\n print(x-y)\r\n elif i == (\"*\"):\r\n print(x*y)\r\n elif i == (\"/\"):\r\n print(x/y)\r\n else:\r\n print(\"Lütfen doğru bir işlem türü giriniz..'\")\r\n\r\n# endregion\r\n\r\n# region Example -7\r\n\r\nsayac = 0\r\ntst = 0\r\nwhile sayac <= 101:\r\n sayac += 1\r\n if sayac % 2 == 0:\r\n continue\r\n tst += sayac\r\nprint(f'Tek sayıların toplamı: {tst}')\r\n\r\n# endregion\r\n\r\n# region Example -8\r\n\r\nfrom datetime import datetime\r\nsayac = 1950\r\ny = int(input(\"Yıl giriniz: \"))\r\nin_exist = False\r\nwhile sayac <= datetime.now().year:\r\n if y == sayac:\r\n print(\"Bulundu\")\r\n in_exist = True\r\n break\r\n sayac += 1\r\nif not in_exist:\r\n print(\"Bulunamadı\")\r\n\r\n# endregion\r\n\r\n# region Example -9\r\nx = int(input(\"Bir sayı giriniz: \"))\r\ny = int(input(\"Bir sayı giriniz: \"))\r\nz = int(input(\"Bir sayı giriniz: \"))\r\nst = 0\r\nwhile x < y:\r\n st = st + x + z\r\n x += z\r\nprint(st)\r\n# endregion\r\n\r\n# region Example -10\r\nimport random\r\nx =random.randint(1, 100)\r\nhak = 3\r\n\r\nwhile x > 0:\r\n hak -= 1\r\n t = int(input(\"Tahminizi Griniz: \"))\r\n if hak == 0:\r\n print(f'Bilemediniz cevap:{x}')\r\n break\r\n if x == t:\r\n print(\"Bildiniz\")\r\n break\r\n elif x < t:\r\n print(\"Bilemediniz daha küçük\")\r\n elif x > t:\r\n print(\"Bilemediniz daha büyük\")\r\n\r\n# endregionn\r\n\r\n# region Example -11\r\n\r\nx = int(input(\"Bir sayı giriniz: \"))\r\nb = 2\r\nis_prime = True\r\nif x == 1 or x <= 0:\r\n is_prime = False\r\n print(\"Sıfır ve Negatif sayılar asal değildir...\")\r\nelse:\r\n while b < x:\r\n if x % b == 0:\r\n is_prime = False\r\n else:\r\n b += 1\r\n if is_prime == True:\r\n print(\"Asal sayı.\")\r\n break\r\n else:\r\n print(\"Asal sayı değil\")\r\n break\r\n\r\n# endregion\r\n", "repo_name": "Gufran99/Python", "sub_path": "01_introduution/while-loop.py", "file_name": "while-loop.py", "file_ext": "py", "file_size_in_byte": 3369, "program_lang": "python", "lang": "tr", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": 
"datetime.datetime.now", "line_number": 118, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 118, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 142, "usage_type": "call"}]}
+{"seq_id": "36306972315", "text": "# -*- coding: utf-8 -*-\r\n# author:Super.Shen\r\nimport pandas as pd\r\n\r\npd.set_option('expand_frame_repr', False) # 当列太多时不换行\r\npd.set_option('display.max_rows', 1000)\r\n\r\nfrom pyecharts import Line\r\n\r\nattr = [\"衬衫\", \"羊毛衫\", \"雪纺衫\", \"裤子\", \"高跟鞋\", \"袜子\"]\r\nv1 = [5, 20, 36, 10, 10, 100]\r\nv2 = [55, 60, 16, 20, 15, 80]\r\nline = Line(\"折线图示例\")\r\nline.add(\"商家A\", attr, v1, mark_point=[\"average\"])\r\nline.add(\"商家B\", attr, v2, is_smooth=True, mark_line=[\"max\", \"average\"])\r\nline.show_config()\r\nline.render()\r\n\r\n\r\n", "repo_name": "SuperShen9/work_by_zc", "sub_path": "备用代码/老板风控周报/text.py", "file_name": "text.py", "file_ext": "py", "file_size_in_byte": 558, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "pandas.set_option", "line_number": 5, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 6, "usage_type": "call"}, {"api_name": "pyecharts.Line", "line_number": 13, "usage_type": "call"}]}
+{"seq_id": "42205631210", "text": "import os\nimport importlib\n\n\nfrom ariadne import ObjectType\n\n\n# Global variables\nRESOLVER_FILENAME = \"resolver\"\nRESOLVER_PATH = os.path.join(os.getcwd(), RESOLVER_FILENAME)\n\n\nresolvers = []\n\n\ndef get_modules(path, is_dir=False):\n \"\"\" Get modules names from a path \"\"\"\n modules = []\n for dir in os.listdir(path):\n dir_path = os.path.join(path, dir)\n filename = os.path.splitext(dir)[0]\n\n if not filename.isalpha():\n continue\n\n if is_dir and os.path.isdir(dir_path):\n modules.append(dir)\n\n elif dir_path.endswith(\"py\"):\n modules.append(filename)\n\n return modules\n\n\n# Resolvers automatization sets\nfor dir_module in get_modules(RESOLVER_PATH, is_dir=True):\n resolver_path = os.path.join(RESOLVER_PATH, dir_module)\n obj_type = ObjectType(dir_module.title())\n for resolver in get_modules(resolver_path):\n module = importlib.import_module(\n f\"{RESOLVER_FILENAME}.{dir_module}.{resolver}\")\n fun = module.__getattribute__(resolver)\n obj_type.set_field(resolver, fun)\n\n resolvers.append(obj_type)\n", "repo_name": "Camilo-Camargo/iptv-api", "sub_path": "src/resolver/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1112, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 10, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "ariadne.ObjectType", "line_number": 38, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "27670076257", "text": "from django.http import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom .forms import VehicleForm\nfrom .models import Vehicle\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.core.mail import send_mail\nfrom vehicle_manag import settings\nfrom django.contrib.auth import authenticate,login,logout\nfrom django.contrib.auth.decorators import login_required\n\n\n\n\n\n\ndef remove_spaces(string):\n strg = string.replace(\" \", \"\")\n # also convert vh_number to uppercase\n return strg.upper()\n\n\n\ndef home(request):\n if request.user.is_authenticated:\n if request.user.is_superuser :\n return redirect(\"sadminH\")\n elif request.user.is_staff:\n return redirect(\"adminH\")\n return redirect(\"userH\") \n \n return render(request,'index.html')\n\n\n@login_required(login_url='/login')\ndef add_vehicle(request):\n if request.user.is_superuser:\n if request.method == \"GET\":\n form_obj = VehicleForm()\n context = {}\n context['form'] = form_obj\n\n return render(request,\"super_admin/add_vehicle.html\",context)\n elif request.method == \"POST\":\n\n # removing the white spaces from vehicle number\n mutable_query_set = request.POST.copy()\n mutable_query_set['vh_number'] = remove_spaces(mutable_query_set['vh_number'])\n\n vh_num = mutable_query_set['vh_number']\n\n form_data = VehicleForm(mutable_query_set)\n\n if Vehicle.objects.filter(vh_number=vh_num):\n messages.error(request,\"Vehicle Already Exists\")\n return redirect(\"addV\")\n if form_data.is_valid():\n form_data.save()\n messages.success(request,\"Vehicle Added Successfully!!\")\n return redirect(\"addV\")\n messages.error(request,VehicleForm.errors) \n return redirect(\"addV\") \n return HttpResponse (\"You dont have permission to accces this page\") \n\ndef sgup(request):\n if request.method == \"GET\":\n return render(request,'signup.html')\n elif request.method == \"POST\":\n un = request.POST['uname'] \n fn = request.POST['fname'] \n pw1 = request.POST['pwd1']\n pw2 = request.POST['pwd2']\n em = request.POST['em']\n\n\n if User.objects.filter(username=un):\n messages.error(request,\"Username alredy taken,try another one\")\n return redirect('sgup')\n\n if User.objects.filter(email=em):\n messages.error(request,\"email alredy exists !!\")\n return redirect('sgup')\n\n\n if pw1 != pw2:\n messages.error(request,\"Confirm password didn't match !\")\n return redirect('sgup')\n\n if len(un)>10:\n messages.error(request,\"Username must be less than 10\")\n return redirect('sgup')\n\n if not un.isalnum():\n messages.error(request,\"Username must be alpha numeric ( 'A to Z and 0 to 9') \")\n return redirect('sgup')\n\n new_user = User.objects.create_user(username=un,first_name=fn,password=pw1,email=em)\n new_user.save()\n\n messages.success(request,\"Your account has been successfully created!\")\n\n # welcome mail \n\n subject = \"Welcome to Vehicle Management\"\n message = \"Hello\" + new_user.first_name + \"\\n\" + \"Thankyou for Registering, Your Username is \"+new_user.username \n from_addr = settings.EMAIL_HOST_USER\n to_list = [new_user.email,]\n send_mail(from_email=from_addr,subject=subject,message=message,recipient_list=to_list)\n \n\n \n return redirect('lgin')\n\n\ndef lgin(request):\n if request.user.is_authenticated:\n return home(request)\n if request.method == 'GET': \n return render(request,'login.html')\n elif request.method == 'POST':\n un = request.POST['un']\n pw = request.POST['pw']\n\n user = 
authenticate(username=un,password=pw)\n\n        if user:\n            login(request,user)\n            if user.is_superuser:\n                return redirect('sadminH')\n            elif user.is_staff:\n                return redirect(\"adminH\") \n            else:\n                return redirect(\"userH\")\n        else:\n            messages.error(request,\"Incorrect Credentials\") \n            return redirect(\"lgin\") \n\n@login_required(login_url='/login')\ndef view_vehicle(request):\n    data = Vehicle.objects.all()\n    return render(request,\"view_vehicle.html\",{'data':data}) \n\ndef lgout(request):\n    logout(request) \n    return redirect(\"home\") \n\n@login_required(login_url='/login')\ndef s_admin_home(request):\n    if request.user.is_superuser:\n        return render(request,'super_admin/super_admin_home.html')\n    else:\n        return HttpResponse(\"You don't have access to this page\") \n\n@login_required(login_url='/login')\ndef admin_home(request):\n    if request.user.is_staff:\n        return render(request,'admin/admin_home.html')\n    else:\n        return HttpResponse(\"You don't have access to this page\") \n\n\n@login_required(login_url='/login')\ndef user_home(request):\n    if request.user.is_superuser:\n        return redirect(\"sadminH\")\n    elif request.user.is_staff:\n        return redirect(\"adminH\") \n    else: \n        return render(request,'user/user_home.html')\n\n\n    \n@login_required(login_url='/login')\ndef edit_vehicle(request,vid):\n    if request.user.has_perm('vehicle.change_vehicle'):\n        if request.method == \"GET\":\n            record = Vehicle.objects.get(id = vid)\n            form_ob = VehicleForm(instance=record)\n            return render (request,\"edit_vehicle.html\",{'form':form_ob})\n        elif request.method == \"POST\":\n            # getting the old record \n            record = Vehicle.objects.get(id = vid)\n            form_ob = VehicleForm(instance=record)\n\n\n            v_no = remove_spaces(request.POST['vh_number'])\n            # Checking if the user edited the vh number. 
then\n            # checking if the new number exists in the table; if present\n            # returning error\n            if record.vh_number != v_no and Vehicle.objects.filter(vh_number=v_no):\n                messages.error(request,\"Vehicle Number already exists!!\")\n                return render (request,\"edit_vehicle.html\",{'form':form_ob})\n            \n            \n            v_type = request.POST['vh_type']\n            v_model = request.POST['vh_model']\n            v_disc = request.POST['vh_disc']\n            Vehicle.objects.filter(id=vid).update(vh_number=v_no,vh_type=v_type,vh_model=v_model,vh_disc=v_disc)\n            \n            msg = \"Vehicle \"+ v_no +\" Edited successfully\"\n            messages.success(request,message=msg)\n\n            return redirect(\"viewV\")\n    return HttpResponse('Permission denied for the user') \n\n\n    \n\n\n\n\n    \n@login_required(login_url='/login')\ndef del_vehicle(request,vid):\n    if request.user.has_perm('vehicle.delete_vehicle'):\n        Vehicle.objects.filter(id = vid).delete()\n        messages.success(request,\"Vehicle deleted Successfully\")\n        return redirect(\"viewV\")\n    return HttpResponse('Permission denied for the user') \n    \n    \n\n\n\n\n\n\n\n    \n\n    \n\n\n    \n\n\n    ", "repo_name": "anurag-6/Vehicle_management", "sub_path": "vehicle/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7249, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.shortcuts.redirect", "line_number": 27, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 32, "usage_type": "call"}, {"api_name": "forms.VehicleForm", "line_number": 39, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 43, "usage_type": "call"}, {"api_name": "forms.VehicleForm", "line_number": 52, "usage_type": "call"}, {"api_name": "models.Vehicle.objects.filter", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Vehicle.objects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "models.Vehicle", "line_number": 54, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 55, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 56, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 59, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 59, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 60, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 61, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 61, "usage_type": "name"}, {"api_name": "forms.VehicleForm.errors", "line_number": 61, "usage_type": "attribute"}, {"api_name": "forms.VehicleForm", "line_number": 61, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 62, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 63, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 35, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 67, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 76, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 76, "usage_type": "attribute"}, 
{"api_name": "django.contrib.auth.models.User", "line_number": 76, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 77, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 77, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 78, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 80, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 80, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 81, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 81, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 86, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 86, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 87, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 90, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 90, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 91, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 94, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 94, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 95, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 97, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 97, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 100, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 100, "usage_type": "name"}, {"api_name": "vehicle_manag.settings.EMAIL_HOST_USER", "line_number": 106, "usage_type": "attribute"}, {"api_name": "vehicle_manag.settings", "line_number": 106, "usage_type": "name"}, {"api_name": "django.core.mail.send_mail", "line_number": 108, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 112, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 119, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 124, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 127, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 129, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 131, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 133, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 135, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 135, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 136, "usage_type": "call"}, {"api_name": "models.Vehicle.objects.all", "line_number": 140, "usage_type": "call"}, {"api_name": "models.Vehicle.objects", "line_number": 140, "usage_type": "attribute"}, {"api_name": "models.Vehicle", "line_number": 140, "usage_type": "name"}, {"api_name": "django.shortcuts.render", 
"line_number": 141, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 138, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 144, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 145, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 150, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 152, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 147, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 157, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 159, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 154, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 165, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 167, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 169, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 162, "usage_type": "call"}, {"api_name": "models.Vehicle.objects.get", "line_number": 177, "usage_type": "call"}, {"api_name": "models.Vehicle.objects", "line_number": 177, "usage_type": "attribute"}, {"api_name": "models.Vehicle", "line_number": 177, "usage_type": "name"}, {"api_name": "forms.VehicleForm", "line_number": 178, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 179, "usage_type": "call"}, {"api_name": "models.Vehicle.objects.get", "line_number": 182, "usage_type": "call"}, {"api_name": "models.Vehicle.objects", "line_number": 182, "usage_type": "attribute"}, {"api_name": "models.Vehicle", "line_number": 182, "usage_type": "name"}, {"api_name": "forms.VehicleForm", "line_number": 183, "usage_type": "call"}, {"api_name": "models.Vehicle.objects.filter", "line_number": 190, "usage_type": "call"}, {"api_name": "models.Vehicle.objects", "line_number": 190, "usage_type": "attribute"}, {"api_name": "models.Vehicle", "line_number": 190, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 191, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 191, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 192, "usage_type": "call"}, {"api_name": "models.Vehicle.objects.filter", "line_number": 198, "usage_type": "call"}, {"api_name": "models.Vehicle.objects", "line_number": 198, "usage_type": "attribute"}, {"api_name": "models.Vehicle", "line_number": 198, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 201, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 201, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 203, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 204, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 173, "usage_type": "call"}, {"api_name": "models.Vehicle.objects.filter", "line_number": 216, "usage_type": "call"}, {"api_name": "models.Vehicle.objects", "line_number": 216, "usage_type": "attribute"}, {"api_name": "models.Vehicle", "line_number": 216, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 217, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 
217, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 218, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 219, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 213, "usage_type": "call"}]}
+{"seq_id": "33615032749", "text": "from preprocessors.variables import *\nfrom preprocessors import multiprocess as mp\nfrom multiprocessing import cpu_count\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom keybert import KeyBERT\nfrom math import nan\nimport preprocessors.helper as help\nimport pandas as pd\nimport numpy as np\nimport re\nimport spacy\nimport swifter\nimport itertools\n# import cudf\n\ndef cleanse_text(dataframe):\n def extract_pos(text, nlp):\n try:\n text_nlp = nlp(text)\n extracted_text = \" \".join([token.lemma_ for token in text_nlp if bool(re.match(pat_pos, token.tag_)) == True])\n except ValueError: \n extracted_text = \"\"\n return extracted_text\n\n def remove_stopwords(text):\n stopword_removed_text = \" \".join([token for token in text.split(\" \") if bool(re.match(pat_stopwords, token)) == False])\n return stopword_removed_text\n \n title_text_cleansed = dataframe.title_text\n spacy.require_gpu()\n nlp = spacy.load(\"en_core_web_md\")\n\n title_text_cleansed = title_text_cleansed.swifter.apply(lambda x: x.lower())\n title_text_cleansed = title_text_cleansed.swifter.apply(lambda x: re.sub(pat_punc_replace, \" \", x))\n title_text_cleansed = title_text_cleansed.swifter.apply(lambda x: re.sub(pat_stopwords, \"\", x))\n title_text_cleansed = title_text_cleansed.swifter.apply(lambda x: re.sub(pat_tags, \"\", x))\n title_text_cleansed = title_text_cleansed.swifter.apply(lambda x: re.sub(pat_paths, \"\", x))\n title_text_cleansed = title_text_cleansed.swifter.apply(lambda x: re.sub(pat_emails, \"\", x))\n title_text_cleansed = title_text_cleansed.swifter.apply(lambda x: extract_pos(x, nlp))\n title_text_cleansed = title_text_cleansed.swifter.apply(lambda x: re.sub(pat_punc_remove, \" \", x))\n title_text_cleansed = title_text_cleansed.swifter.apply(lambda x: re.sub(pat_whitespaces, \"\", x))\n dataframe[\"title_text_cleansed\"] = title_text_cleansed\n return dataframe\n\ndef get_bert_keyword(dataframe):\n kw_model = KeyBERT(\"all-mpnet-base-v2\")\n # title_text_pooled = \" \".join(dataframe.title_text_cleansed)\n # keywords_list = kw_model.extract_keywords(title_text_pooled, keyphrase_ngram_range=(1,3), top_n = 500, stop_words=\"english\")\n # keywords_df = pd.DataFrame(keywords_list, columns=[\"word\", \"score\"]).sort_values(by=[\"score\"], ascending=False).drop_duplicates(subset=['word']).reset_index(drop=True)\n title_text_keywords = dataframe.title_text_cleansed.swifter.apply(lambda x: kw_model.extract_keywords(x, keyphrase_ngram_range=(1,3), stop_words=\"english\", top_n=10))\n keywords_list = help.concat_list(title_text_keywords)\n keywords_df = pd.DataFrame(keywords_list, columns=[\"word\", \"score\"]).sort_values(by=[\"score\"], ascending=False).drop_duplicates(subset=['word']).reset_index(drop=True)\n return keywords_df\n\ndef replace_keywords(dataframe, user_dict):\n title_text = dataframe.title_text_cleansed.copy()\n title_text_replaced = mp.multiprocess_keyword_replace(title_text, user_dict, mp.replace_df)\n # title_text_cudf = cudf.Series.from_pandas(title_text)\n # title_text_replaced = title_text_cudf.replace(dict, regex=True)\n dataframe[\"title_text_replaced\"] = title_text_replaced\n return dataframe\n\ndef tokenize(dataframe):\n try:\n title_text = dataframe.title_text_replaced\n except AttributeError:\n title_text = dataframe.title_text_cleansed\n title_text_tokenized = title_text.swifter.apply(lambda x: x.split() if type(x) != nan else nan)\n dataframe['title_text_tokenized'] = title_text_tokenized\n return dataframe", 
"repo_name": "InjeKang/SnT_Interaction", "sub_path": "preprocessors/internal.py", "file_name": "internal.py", "file_ext": "py", "file_size_in_byte": 3581, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "re.match", "line_number": 20, "usage_type": "call"}, {"api_name": "re.match", "line_number": 26, "usage_type": "call"}, {"api_name": "spacy.require_gpu", "line_number": 30, "usage_type": "call"}, {"api_name": "spacy.load", "line_number": 31, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 34, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 35, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 36, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 37, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 38, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 40, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 41, "usage_type": "call"}, {"api_name": "keybert.KeyBERT", "line_number": 46, "usage_type": "call"}, {"api_name": "preprocessors.helper.concat_list", "line_number": 51, "usage_type": "call"}, {"api_name": "preprocessors.helper", "line_number": 51, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 52, "usage_type": "call"}, {"api_name": "preprocessors.multiprocess.multiprocess_keyword_replace", "line_number": 57, "usage_type": "call"}, {"api_name": "preprocessors.multiprocess", "line_number": 57, "usage_type": "name"}, {"api_name": "preprocessors.multiprocess.replace_df", "line_number": 57, "usage_type": "attribute"}, {"api_name": "math.nan", "line_number": 68, "usage_type": "name"}]}
+{"seq_id": "72924338484", "text": "#! /usr/bin/env python\n\nfrom __future__ import print_function\nimport tensorflow as tf\nimport numpy as np\nimport re\nimport os\nimport time\nimport datetime\nimport gc\nfrom input_helpers import InputHelper\nfrom siamese_network import SiameseLSTM\nfrom siamese_network_semantic import SiameseLSTMw2v\nfrom random import random\nimport sqlite3 as lite\nimport sys\nimport math\nimport pickle\n\n# Parameters\n# ==================================================\n\ntf.flags.DEFINE_boolean(\"is_char_based\", False, \"is character based syntactic similarity. \"\n \"if false then word embedding based semantic similarity is used.\"\n \"(default: False)\")\n\ntf.flags.DEFINE_string(\"word2vec_model\", \"enwiki_20180420_100d.txt\", \"word2vec pre-trained embeddings file (default: enwiki_20180420_100d.txt)\")\ntf.flags.DEFINE_string(\"word2vec_format\", \"text\", \"word2vec pre-trained embeddings file format (bin/text/textgz)(default: text)\")\n\ntf.flags.DEFINE_integer(\"embedding_dim\", 100, \"Dimensionality of character embedding (default: 100)\")\ntf.flags.DEFINE_float(\"dropout_keep_prob\", 1.0, \"Dropout keep probability (default: 1.0)\")\ntf.flags.DEFINE_float(\"l2_reg_lambda\", 0.0, \"L2 regularization lambda (default: 0.0)\")\ntf.flags.DEFINE_string(\"database\", \"../plag.db\", \"training file (default: ../plag.db)\")\ntf.flags.DEFINE_string(\"training_folder\", 'ds', \"path to folder containing dataset (default: ds)\")\ntf.flags.DEFINE_integer(\"hidden_units\", 50, \"Number of hidden units (default:50)\")\n\n# Training parameters\ntf.flags.DEFINE_integer(\"batch_size\", 32, \"Batch Size (default: 32)\")\ntf.flags.DEFINE_integer(\"num_epochs\", 300, \"Number of training epochs (default: 300)\")\ntf.flags.DEFINE_integer(\"evaluate_every\", 1, \"Evaluate model on dev set after this many steps (default: 1)\")\ntf.flags.DEFINE_integer(\"checkpoint_every\", 50, \"Save model after this many steps (default: 50)\")\ntf.flags.DEFINE_integer(\"patience\", 20, \"Patience for early stopping (default: 20)\")\ntf.flags.DEFINE_integer(\"log_every\", 1000, \"Log results every X steps (default: 100000)\")\n# Misc Parameters\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n\nFLAGS = tf.flags.FLAGS\n\nbatch_size = FLAGS.batch_size\nnum_epochs = FLAGS.num_epochs\n\nprint(\"\\nParameters:\")\nfor attr, value in sorted(FLAGS.flag_values_dict().iteritems()):\n print(\"{}={}\".format(attr.upper(), value))\nprint(\"\")\n\nif FLAGS.database==None:\n print(\"Input Files List is empty. 
use -database argument.\")\n exit()\n\nmax_document_length=15\n#max_document_length=sys.maxint # attempt to read all words in a document\ninpH = InputHelper()\n#train_set, dev_set, vocab_processor,sum_no_of_batches = inpH.getDataSets(FLAGS.database,max_document_length, 10,\n# FLAGS.batch_size, FLAGS.is_char_based)\n\nnum_docs = inpH.get_num_docs(FLAGS.training_folder)\n\ndb = lite.connect(FLAGS.database)\ncursor = db.cursor()\nemb_map, vocab_processor = inpH.getEmbeddingsMap(cursor, max_document_length, num_docs)\ntrain_count, dev_count = inpH.get_counts(FLAGS.training_folder)[0:2]\ntotal_count = train_count + dev_count\n\nsum_no_of_batches = int(math.ceil(float(train_count) / batch_size))\ndev_no_of_batches = int(math.ceil(float(dev_count) / batch_size))\n\ntrain_set = inpH.my_train_batch(emb_map, train_count, FLAGS.batch_size, num_epochs)\n\ndev_set = inpH.my_dev_batch(emb_map, dev_count, FLAGS.batch_size, num_epochs)\n\n# train_set, dev_set, sum_no_of_batches = inpH.myGetDataSets(cursor ,max_document_length, 10,\n# FLAGS.batch_size, FLAGS.is_char_based, 1000)\n\ntrainableEmbeddings=False\nif FLAGS.is_char_based==True:\n FLAGS.word2vec_model = False\nelse:\n if FLAGS.word2vec_model==None:\n trainableEmbeddings=True\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\\n\"\n \"You are using word embedding based semantic similarity but \"\n \"word2vec model path is empty. It is Recommended to use --word2vec_model argument. \"\n \"Otherwise now the code is automatically trying to learn embedding values (may not help in accuracy)\"\n \"\\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\\n\")\n else:\n inpH.loadW2V(FLAGS.word2vec_model, FLAGS.word2vec_format)\n\n# Training\n# ==================================================\nprint(\"starting graph def\")\nwith tf.Graph().as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n sess = tf.Session(config=session_conf)\n print(\"started session\")\n with sess.as_default():\n if FLAGS.is_char_based:\n siameseModel = SiameseLSTM(\n sequence_length=max_document_length,\n vocab_size=len(vocab_processor.vocabulary_),\n embedding_size=FLAGS.embedding_dim,\n hidden_units=FLAGS.hidden_units,\n l2_reg_lambda=FLAGS.l2_reg_lambda,\n batch_size=FLAGS.batch_size\n )\n else:\n siameseModel = SiameseLSTMw2v(\n sequence_length=max_document_length,\n vocab_size=len(vocab_processor.vocabulary_),\n embedding_size=FLAGS.embedding_dim,\n hidden_units=FLAGS.hidden_units,\n l2_reg_lambda=FLAGS.l2_reg_lambda,\n batch_size=FLAGS.batch_size,\n trainableEmbeddings=trainableEmbeddings\n )\n # Define Training procedure\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n print(siameseModel.accuracy)\n optimizer = tf.train.AdamOptimizer(1e-3)\n print(\"initialized siameseModel object\")\n \n grads_and_vars=optimizer.compute_gradients(siameseModel.loss)\n tr_op_set = optimizer.apply_gradients(grads_and_vars, global_step=global_step)\n print(\"defined training_ops\")\n # Keep track of gradient values and sparsity (optional)\n # grad_summaries = []\n # for g, v in grads_and_vars:\n # if g is not None:\n # grad_hist_summary = tf.summary.histogram(\"{}/grad/hist\".format(v.name), g)\n # sparsity_summary = tf.summary.scalar(\"{}/grad/sparsity\".format(v.name), tf.nn.zero_fraction(g))\n # grad_summaries.append(grad_hist_summary)\n # grad_summaries.append(sparsity_summary)\n # grad_summaries_merged = tf.summary.merge(grad_summaries)\n # print(\"defined gradient 
summaries\")\n # Output directory for models and summaries\n timestamp = str(int(time.time()))\n out_dir = os.path.abspath(os.path.join(os.path.curdir, \"runs\", timestamp))\n print(\"Writing to {}\\n\".format(out_dir))\n\n # Summaries for loss and accuracy\n loss_summary = tf.summary.scalar(\"loss\", siameseModel.loss)\n acc_summary = tf.summary.scalar(\"accuracy\", siameseModel.accuracy)\n\n # Train Summaries\n # train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])\n # train_summary_dir = os.path.join(out_dir, \"summaries\", \"train\")\n # train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\n\n # Dev summaries\n # dev_summary_op = tf.summary.merge([loss_summary, acc_summary])\n # dev_summary_dir = os.path.join(out_dir, \"summaries\", \"dev\")\n # dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)\n\n # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it\n checkpoint_dir = os.path.abspath(os.path.join(out_dir, \"checkpoints\"))\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)\n \n # my summary dir\n summary_dir = os.path.join(out_dir, \"summaries\")\n if not os.path.exists(summary_dir):\n os.makedirs(summary_dir)\n\n # Write vocabulary\n vocab_processor.save(os.path.join(checkpoint_dir, \"vocab\"))\n\n # Write ids_mapping\n pickle.dump(emb_map, open(os.path.join(checkpoint_dir + '/ids_mapping'), 'w'))\n\n # Initialize all variables\n sess.run(tf.global_variables_initializer())\n \n print(\"init all variables\")\n graph_def = tf.get_default_graph().as_graph_def()\n graphpb_txt = str(graph_def)\n with open(os.path.join(checkpoint_dir, \"graphpb.txt\"), 'w') as f:\n f.write(graphpb_txt)\n\n if FLAGS.word2vec_model :\n # initial matrix with random uniform\n initW = np.random.uniform(-0.25,0.25,(len(vocab_processor.vocabulary_), FLAGS.embedding_dim))\n #initW = np.zeros(shape=(len(vocab_processor.vocabulary_), FLAGS.embedding_dim))\n # load any vectors from the word2vec\n print(\"initializing initW with pre-trained word2vec embeddings\")\n for w in vocab_processor.vocabulary_._mapping:\n arr=[]\n s = re.sub('[^0-9a-zA-Z]+', '', w)\n if w in inpH.pre_emb:\n arr=inpH.pre_emb[w]\n elif w.lower() in inpH.pre_emb:\n arr=inpH.pre_emb[w.lower()]\n elif s in inpH.pre_emb:\n arr=inpH.pre_emb[s]\n elif s.isdigit():\n arr=inpH.pre_emb[\"zero\"]\n if len(arr)>0:\n idx = vocab_processor.vocabulary_.get(w)\n initW[idx]=np.asarray(arr).astype(np.float32)\n print(\"Done assigning intiW. 
len=\"+str(len(initW)))\n inpH.deletePreEmb()\n gc.collect()\n sess.run(siameseModel.W.assign(initW))\n\n def train_step(x1_batch, x2_batch, y_batch, epoch, batch):\n \"\"\"\n A single training step\n \"\"\"\n if random()>0.5:\n feed_dict = {\n siameseModel.input_x1: x1_batch,\n siameseModel.input_x2: x2_batch,\n siameseModel.input_y: y_batch,\n siameseModel.dropout_keep_prob: FLAGS.dropout_keep_prob,\n }\n else:\n feed_dict = {\n siameseModel.input_x1: x2_batch,\n siameseModel.input_x2: x1_batch,\n siameseModel.input_y: y_batch,\n siameseModel.dropout_keep_prob: FLAGS.dropout_keep_prob,\n }\n _, step, loss, accuracy, dist, sim = sess.run([tr_op_set, global_step, siameseModel.loss, siameseModel.accuracy, siameseModel.distance, siameseModel.temp_sim], feed_dict)\n time_str = datetime.datetime.now().isoformat()\n if batch*(epoch+1) % FLAGS.log_every == 0:\n print(\"TRAIN {}: epoch/step {}/{}, loss {:g}, f1 {:g}\".format(time_str, epoch, batch, loss, accuracy))\n #train_summary_writer.add_summary(summaries, step)\n # print(y_batch, dist, sim)\n return loss, accuracy\n\n def dev_step(x1_batch, x2_batch, y_batch, epoch, batch):\n \"\"\"\n A single training step\n \"\"\" \n if random()>0.5:\n feed_dict = {\n siameseModel.input_x1: x1_batch,\n siameseModel.input_x2: x2_batch,\n siameseModel.input_y: y_batch,\n siameseModel.dropout_keep_prob: 1.0,\n }\n else:\n feed_dict = {\n siameseModel.input_x1: x2_batch,\n siameseModel.input_x2: x1_batch,\n siameseModel.input_y: y_batch,\n siameseModel.dropout_keep_prob: 1.0,\n }\n step, loss, accuracy, sim = sess.run([global_step, siameseModel.loss, siameseModel.accuracy, siameseModel.temp_sim], feed_dict)\n time_str = datetime.datetime.now().isoformat()\n if batch*(epoch+1) % FLAGS.log_every == 0:\n print(\"DEV {}: epoch/batch {}/{}, loss {:g}, f1 {:g}\".format(time_str, epoch, batch, loss, accuracy))\n #dev_summary_writer.add_summary(summaries, step)\n return loss, accuracy\n\n # Generate batches\n # batches=inpH.batch_batch_iter(\n # list(zip(train_set[0], train_set[1], train_set[2])), 128, FLAGS.batch_size, FLAGS.num_epochs)\n\n train_batches = train_set\n dev_batches = dev_set\n ptr=0\n max_validation_f1=0.0\n stopping_step = 0\n best_loss = sys.float_info.max\n\n for epoch in xrange(FLAGS.num_epochs):\n start_time = time.time()\n\n current_step = tf.train.global_step(sess, global_step)\n losses = []\n f1s = []\n\n for nn in xrange(sum_no_of_batches):\n train_batch = train_batches.next()\n if len(train_batch)<1:\n continue\n x1_batch,x2_batch, y_batch = zip(*train_batch)\n if len(y_batch)<1:\n continue\n loss, f1 = train_step(x1_batch, x2_batch, y_batch, epoch, nn)\n losses.append(loss)\n f1s.append(f1)\n \n epoch_f1 = np.mean(np.nan_to_num(f1s))\n epoch_loss = np.mean(np.nan_to_num(losses))\n \n with open(os.path.join(summary_dir, 'train_summary'), 'a') as f:\n f.write('\\t'.join((str(epoch), str(epoch_loss), str(epoch_f1))) + '\\n')\n \n \n\n if epoch % FLAGS.evaluate_every == 0:\n losses = []\n f1s = []\n \n print(\"\\nEvaluation:\")\n for _ in xrange(dev_no_of_batches):\n dev_batch = dev_batches.next()\n if len(dev_batch)<1:\n continue\n x1_dev_b,x2_dev_b,y_dev_b = zip(*dev_batch)\n if len(y_dev_b)<1:\n continue\n loss, f1 = dev_step(x1_dev_b, x2_dev_b, y_dev_b, epoch, nn)\n losses.append(loss)\n f1s.append(f1)\n\n train_epoch_f1 = np.mean(np.nan_to_num(f1s))\n train_epoch_loss = np.mean(np.nan_to_num(losses))\n\n with open(os.path.join(summary_dir, 'dev_summary'), 'a') as f:\n f.write('\\t'.join((str(epoch), str(train_epoch_loss), 
str(train_epoch_f1))) + '\\n')\n \n if epoch % FLAGS.checkpoint_every == 0:\n if epoch_f1 >= max_validation_f1:\n max_validation_f1 = epoch_f1\n saver.save(sess, checkpoint_prefix, global_step=current_step)\n tf.train.write_graph(sess.graph.as_graph_def(), checkpoint_prefix, \"graph\"+str(epoch)+\".pb\", as_text=False)\n print(\"Saved model {} with sum_accuracy={} checkpoint to {}\\n\".format(nn, max_validation_f1, checkpoint_prefix))\n\n # early stopping\n if epoch_loss < best_loss:\n stopping_step = 0\n best_loss = epoch_loss\n else:\n stopping_step += 1\n if stopping_step >= FLAGS.patience:\n print(\"Early stopping is trigger at epoch: {} loss:{}\".format(epoch, epoch_loss))\n saver.save(sess, checkpoint_prefix, global_step=current_step)\n tf.train.write_graph(sess.graph.as_graph_def(), checkpoint_prefix, \"graph\"+str(epoch)+\".pb\", as_text=False)\n print(\"Saved model {} with sum_accuracy={} checkpoint to {}\\n\".format(epoch, max_validation_f1, checkpoint_prefix))\n exit(0)\n\n end_time = time.time()\n print('Time spent on epoch {}: {:.2f} seconds. TRAIN - loss: {:f}, f1: {:f} | DEV - loss: {:f}, f1: {:f}'.format(epoch, end_time-start_time, train_epoch_loss, train_epoch_f1, epoch_loss, epoch_f1))\n\n print(\"End of training.\")\n saver.save(sess, checkpoint_prefix, global_step=current_step)\n tf.train.write_graph(sess.graph.as_graph_def(), checkpoint_prefix, \"graph\" + str(epoch) + \".pb\", as_text=False)\n print(\"Saved model {} with sum_accuracy={} checkpoint to {}\\n\".format(nn, max_validation_f1, checkpoint_prefix))\n", "repo_name": "MLRG-CEFET-RJ/plagdetect", "sub_path": "lstm/my_train.py", "file_name": "my_train.py", "file_ext": "py", "file_size_in_byte": 15717, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "76", "api": [{"api_name": "tensorflow.flags.DEFINE_boolean", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_string", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_string", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_integer", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_float", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_float", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_string", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_string", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_integer", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_integer", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 38, "usage_type": "attribute"}, {"api_name": 
"tensorflow.flags.DEFINE_integer", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_integer", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_integer", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_integer", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_integer", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_boolean", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_boolean", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 46, "usage_type": "attribute"}, {"api_name": "tensorflow.flags", "line_number": 48, "usage_type": "attribute"}, {"api_name": "input_helpers.InputHelper", "line_number": 64, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 70, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 76, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.Graph", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 107, "usage_type": "call"}, {"api_name": "siamese_network.SiameseLSTM", "line_number": 111, "usage_type": "call"}, {"api_name": "siamese_network_semantic.SiameseLSTMw2v", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 130, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 132, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 132, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 150, "usage_type": "call"}, {"api_name": "tensorflow.summary.scalar", "line_number": 154, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 154, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 155, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 155, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path", "line_number": 169, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 171, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.train", 
"line_number": 172, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path", "line_number": 175, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path", "line_number": 180, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path", "line_number": 183, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 186, "usage_type": "call"}, {"api_name": "tensorflow.get_default_graph", "line_number": 189, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path", "line_number": 191, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 196, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 213, "usage_type": "attribute"}, {"api_name": "gc.collect", "line_number": 216, "usage_type": "call"}, {"api_name": "random.random", "line_number": 223, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 238, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 238, "usage_type": "attribute"}, {"api_name": "random.random", "line_number": 249, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 264, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 264, "usage_type": "attribute"}, {"api_name": "sys.float_info", "line_number": 279, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 282, "usage_type": "call"}, {"api_name": "tensorflow.train.global_step", "line_number": 284, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 284, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 300, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 302, "usage_type": "call"}, {"api_name": "os.path", "line_number": 302, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 323, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 323, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 324, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 324, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 326, "usage_type": "call"}, {"api_name": "os.path", "line_number": 326, "usage_type": "attribute"}, {"api_name": "tensorflow.train.write_graph", "line_number": 333, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 333, "usage_type": "attribute"}, {"api_name": "tensorflow.train.write_graph", "line_number": 345, "usage_type": "call"}, 
{"api_name": "tensorflow.train", "line_number": 345, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 349, "usage_type": "call"}, {"api_name": "tensorflow.train.write_graph", "line_number": 354, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 354, "usage_type": "attribute"}]}
+{"seq_id": "11656476814", "text": "from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom blog.forms import *\nfrom django.http import HttpResponseRedirect\nfrom blog.models import Post, Comment\n# Create your views here.\n@login_required\ndef create_post(request):\n if request.method == 'POST':\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.save()\n return HttpResponseRedirect('/')\n else:\n form = PostForm()\n return render(request,\n 'post.html',\n {'form': form}\n )\n\n@login_required\ndef post_page(request, id):\n try:\n post = Post.objects.get(pk=id)\n comments = post.comments.all()\n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.com = post\n comment.save()\n\n else:\n form = CommentForm()\n return render(request,\n 'post_page.html',\n {'form': form, 'post': post, 'comments':comments}\n )\n\n except Exception as e:\n print(e)\n return render(request,\n 'post_page.html',\n {'alert': \"Такого поста нет\"}\n )\n\n\n@login_required\ndef feed(request):\n posts = Post.objects.all()\n print(posts)\n return render(request, 'posts.html', {'posts':posts})\n", "repo_name": "Archelunch/tasks", "sub_path": "blog/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1513, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.http.HttpResponseRedirect", "line_number": 15, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 7, "usage_type": "name"}, {"api_name": "blog.models.Post.objects.get", "line_number": 26, "usage_type": "call"}, {"api_name": "blog.models.Post.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "blog.models.Post", "line_number": 26, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 45, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 23, "usage_type": "name"}, {"api_name": "blog.models.Post.objects.all", "line_number": 53, "usage_type": "call"}, {"api_name": "blog.models.Post.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "blog.models.Post", "line_number": 53, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 51, "usage_type": "name"}]}
+{"seq_id": "31261896066", "text": "import signal, time\n\nfrom qiskit_ibm_runtime import Sampler, Estimator, Session\nfrom qiskit.providers import JobStatus\n\ndef timeout_handler(signum, frame):\n raise Exception('Iteration timed out')\n\nclass RetryPrimitiveMixin:\n \"\"\"RetryPrimitive class.\n \n This class inherits from Qiskit IBM Runtime's Primitives and overwrites its run method such that it retries calling it\n a maximum of 'max_retries' consecutive times, if it encounters one of the following randomly occuring errors:\n \n * A Primitive error (in this case \"Job.ERROR\" is printed, and the job is cancelled automatically)\n * A timeout error where the job either remains running or completes but does not return anything, for a time larger \n than 'timeout' (in this case the job is cancelled by the patch and \"Job.CANCELLED\" is printed)\n * A creation error, where the job fails to be created because connection is lost between the runtime server and the\n quantum computer (in this case \"Failed to create job.\" is printed). If this error occurs, the patch connects the user\n to a new Session (to be handled with care! also, this will unfortunately put the next job in the queue). \n \"\"\"\n \n def __init__(self, *args, max_retries: int = 5, timeout: int = 3600, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.max_retries = max_retries\n self.timeout = timeout\n self.backend = super().session._backend\n signal.signal(signal.SIGALRM, timeout_handler)\n \n def run(self, *args, **kwargs):\n result = None\n for i in range(self.max_retries):\n try:\n job = super().run(*args, **kwargs)\n while job.status() in [JobStatus.INITIALIZING, JobStatus.QUEUED, JobStatus.VALIDATING]:\n time.sleep(5) # Check every 5 seconds whether job status has changed\n signal.alarm(self.timeout) # Once job starts running, set timeout to 1 hour by default\n result = job.result()\n if result is not None:\n signal.alarm(0) # Reset timer\n return job\n except Exception as e:\n signal.alarm(0) # Reset timer\n print(\"\\nSomething went wrong...\")\n print(f\"\\n\\nERROR MESSAGE:\\n{e}\\n\\n\")\n if 'job' in locals(): # Sometimes job fails to create\n print(f\"Job ID: {job.job_id}. Job status: {job.status()}.\")\n if job.status() not in [JobStatus.DONE, JobStatus.ERROR, JobStatus.CANCELLED]:\n job.cancel()\n else:\n print(\"Failed to create job.\")\n try:\n super().session.close()\n print(\"Current session was closed.\")\n except:\n print(\"Current session could not be closed. Will leave it to close automatically.\")\n print(f\"Creating new session...\\n\")\n self._session = Session(backend=self.backend)\n print(f\"Starting trial number {i+2}...\\n\")\n signal.alarm(0) # Reset timer\n if result is None:\n raise RuntimeError(f\"Program failed! 
Maximum number of retries ({self.max_retries}) exceeded\")\n \nclass RetrySampler(RetryPrimitiveMixin, Sampler):\n pass\n\nclass RetryEstimator(RetryPrimitiveMixin, Estimator):\n pass", "repo_name": "MarcoBarroca/q4c-team", "sub_path": "Mg+H2O/real_hw_cairo/retry_primitives.py", "file_name": "retry_primitives.py", "file_ext": "py", "file_size_in_byte": 3445, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "signal.signal", "line_number": 28, "usage_type": "call"}, {"api_name": "signal.SIGALRM", "line_number": 28, "usage_type": "attribute"}, {"api_name": "qiskit.providers.JobStatus.INITIALIZING", "line_number": 35, "usage_type": "attribute"}, {"api_name": "qiskit.providers.JobStatus", "line_number": 35, "usage_type": "name"}, {"api_name": "qiskit.providers.JobStatus.QUEUED", "line_number": 35, "usage_type": "attribute"}, {"api_name": "qiskit.providers.JobStatus.VALIDATING", "line_number": 35, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "signal.alarm", "line_number": 37, "usage_type": "call"}, {"api_name": "signal.alarm", "line_number": 40, "usage_type": "call"}, {"api_name": "signal.alarm", "line_number": 43, "usage_type": "call"}, {"api_name": "qiskit.providers.JobStatus.DONE", "line_number": 48, "usage_type": "attribute"}, {"api_name": "qiskit.providers.JobStatus", "line_number": 48, "usage_type": "name"}, {"api_name": "qiskit.providers.JobStatus.ERROR", "line_number": 48, "usage_type": "attribute"}, {"api_name": "qiskit.providers.JobStatus.CANCELLED", "line_number": 48, "usage_type": "attribute"}, {"api_name": "qiskit_ibm_runtime.Session", "line_number": 58, "usage_type": "call"}, {"api_name": "signal.alarm", "line_number": 60, "usage_type": "call"}, {"api_name": "qiskit_ibm_runtime.Sampler", "line_number": 64, "usage_type": "name"}, {"api_name": "qiskit_ibm_runtime.Estimator", "line_number": 67, "usage_type": "name"}]}
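A usage sketch for the mixin above, assuming the qiskit-ibm-runtime version this module was written against (where primitives accept a `session` keyword); the backend name and retry settings are placeholders. Note that the SIGALRM-based timeout confines the patch to Unix-like systems.

# Hypothetical usage; backend name and retry settings are placeholders.
from qiskit import QuantumCircuit
from qiskit_ibm_runtime import QiskitRuntimeService, Session

service = QiskitRuntimeService()
with Session(service=service, backend="ibmq_qasm_simulator") as session:
    sampler = RetrySampler(session=session, max_retries=3, timeout=1800)
    qc = QuantumCircuit(2)
    qc.h(0)
    qc.cx(0, 1)
    qc.measure_all()
    job = sampler.run(qc)  # retried or cancelled automatically on the failure modes above
    print(job.result())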
+{"seq_id": "3628233543", "text": "import pya\nimport re\nimport os\t\n\nWIDTH = 2048\nHEIGHT = 2048\n\napp = pya.Application.instance()\nwin = app.main_window()\n\n# Load technology file\ntech = pya.Technology()\ntech.load(tech_file)\nlayoutOptions = tech.load_layout_options\nlayoutOptions.text_enabled = False\n\n# Load def file in the main window\ncell_view = win.load_layout(input_layout, layoutOptions, 0)\nlayout_view = cell_view.view()\nlayout_view.grid_visible = False\n\nlayout_view.max_hier()\n# layout_view.clear_layers()\n\n# Hide layers with these purposes\nhidden_purposes = [0, 4, 5]\n\nli = layout_view.begin_layers()\nwhile not li.at_end():\n lp = li.current()\n if lp.source_datatype in hidden_purposes:\n new_lp = lp.dup()\n new_lp.visible = False\n layout_view.set_layer_properties(li, new_lp)\n\n li.next()\n\nlayout_view.save_image(os.path.splitext(input_layout)[0]+'.png', WIDTH, HEIGHT)\n\napp.exit(0)\n", "repo_name": "efabless/foss-asic-tools", "sub_path": "images/foss-asic-tools/addons/sak/klayout/scrotLayout.py", "file_name": "scrotLayout.py", "file_ext": "py", "file_size_in_byte": 882, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 43, "dataset": "github-code", "pt": "76", "api": [{"api_name": "pya.Application.instance", "line_number": 8, "usage_type": "call"}, {"api_name": "pya.Application", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pya.Technology", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}]}
+{"seq_id": "21110395587", "text": "import pandas as pd\nfrom pathlib import Path\n\ndataDir = Path(__file__).parent.absolute()\n\nfake_c = pd.read_csv(f\"{dataDir}/fake_counterfactuals.csv\", header=None)\nfake_d_c = pd.read_csv(f\"{dataDir}/fake_delta_counterfactuals.csv\", header=None)\n\np_c = fake_c.iloc[1:,:]\np_c = p_c / 100\npos = p_c[(p_c[0]>= 0.5) & (p_c[1]>= 0.5) & (p_c[2]>= 0.5) & (p_c[3]>= 0.5)].count()\nneg = p_c[(p_c[0]<= -0.5) & (p_c[1]<= -0.5) & (p_c[2]<= -0.5) & (p_c[3]<= -0.5)].count()\npos_pos = p_c[(p_c[0]<= 0.5) & (p_c[1]<= 0.5) & (p_c[2]<= 0.5) & (p_c[3]<= 0.5)].count()\nneg_neg = p_c[(p_c[0]>= -0.5) & (p_c[1]>= -0.5) & (p_c[2]>= -0.5) & (p_c[3]>= -0.5)].count()\nprint(pos)\nprint(neg)\nprint(pos_pos)\nprint(neg_neg)\nprint(len(p_c))\n\n# pos = 0\n# neg = 0\n# for v in p_c[0]:\n# if v >= 0.5:\n# # print(\"if\", v)\n# pos += 1\n# pass\n# elif v <= -0.5:\n# # print(\"elif\", v)\n# neg += 1\n# pass\n# else:\n# # print(\"else\", v)\n# pass\n\n# print(pos)\n# print(neg)\n", "repo_name": "Paalar/friendly-enigma", "sub_path": "data/fake/cchvae_revert.py", "file_name": "cchvae_revert.py", "file_ext": "py", "file_size_in_byte": 997, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "76", "api": [{"api_name": "pathlib.Path", "line_number": 4, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}]}
+{"seq_id": "71580540726", "text": "import torch\nimport torch.nn as nn\n\n\nclass Network(nn.Module):\n def __init__(self, nx, nu, n_joints):\n super(Network, self).__init__()\n\n self.nx = nx\n self.nu = nu\n self.n_joints = n_joints\n\n self.layer1 = nn.Sequential(\n nn.Linear(\n in_features=nx,\n out_features=16,\n ),\n nn.ReLU(),\n )\n\n self.layer2 = nn.Sequential(\n nn.Linear(\n in_features=16,\n out_features=32,\n ),\n nn.ReLU(),\n )\n\n self.layer3 = nn.Sequential(\n nn.Linear(\n in_features=32,\n out_features=64,\n ),\n nn.ReLU(),\n )\n\n self.layer4 = nn.Sequential(\n nn.Linear(\n in_features=64,\n out_features=64,\n ),\n nn.ReLU(),\n )\n\n # # Three jointed pendulum:\n\n # self.layer5 = nn.Sequential(\n # nn.Linear(\n # in_features=64,\n # out_features=128,\n # ),\n # nn.ReLU(),\n # )\n\n # Split into n_joints heads\n self.last_layers = nn.ModuleList()\n for _ in range(self.n_joints):\n self.last_layers.append(\n nn.Linear(\n in_features=64,\n out_features=nu,\n ),\n )\n\n self.init_weights()\n\n def init_weights(self):\n # Recommended weights initialization if using ReLU activation functions\n nn.init.kaiming_normal_(self.layer1[0].weight)\n nn.init.kaiming_normal_(self.layer2[0].weight)\n nn.init.kaiming_normal_(self.layer3[0].weight)\n nn.init.kaiming_normal_(self.layer4[0].weight)\n\n # # Three jointed pendulum:\n # nn.init.kaiming_normal_(self.layer5[0].weight)\n\n for _, layer in enumerate(self.last_layers):\n nn.init.kaiming_normal_(layer.weight)\n\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n # # Three jointed pendulum:\n # x = self.layer5(x)\n\n # Get data from each (n_joints) head layer\n y = []\n for _, layer in enumerate(self.last_layers):\n y.append(layer(x))\n\n # Dimension is: n_joints, batch_size, nu\n \n # Reshape dimension to get: batch_size, j_joints, nu\n y = torch.stack(y, dim=1)\n\n return y\n", "repo_name": "lorenzinigiovanni/orc-project", "sub_path": "network.py", "file_name": "network.py", "file_ext": "py", "file_size_in_byte": 2512, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "torch.nn.Module", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 5, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 34, "usage_type": "call"}, {"api_name": 
"torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 71, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 71, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 78, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 97, "usage_type": "call"}]}
+{"seq_id": "16968761299", "text": "import tensorflow as tf\nimport numpy as np\nfrom numpy import *\nimport yaml\nimport os\n\nimport matplotlib.pyplot as plt\nplt.rcParams.update({'font.size': 22})\n\nfrom matplotlib.patches import FancyArrowPatch\nfrom mpl_toolkits.mplot3d import proj3d\n\nimport scipy.signal\nfrom scipy.spatial.transform import Rotation as R\n\ndtype = tf.float32\n#dtype = tf.float64\nnpdtype = np.float32\n#npdtype = np.float64\n\ncontrol_items: dict = {}\ncontrol_step = 0\n\n\nclass Arrow3D(FancyArrowPatch):\n def __init__(self, xs, ys, zs, *args, **kwargs):\n FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)\n self._verts3d = xs, ys, zs\n\n def draw(self, renderer):\n xs3d, ys3d, zs3d = self._verts3d\n xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))\n FancyArrowPatch.draw(self, renderer)\n\n\ndef assert_shape(array, shape):\n ashape = array.shape\n if len(ashape) != len(shape):\n return False\n for i, j in zip(ashape, shape):\n if j != -1 and i != j:\n return False\n return True\n\n\ndef parse_config(file):\n with open(file) as file:\n conf = yaml.load(file, Loader=yaml.FullLoader)\n return conf\n\n\ndef list_files(log_dir):\n for file in os.listdir(log_dir):\n if os.path.isfile(os.path.join(log_dir, file)):\n yield file\n\n\ndef parse_dir(log_dir):\n for file in list_files(log_dir):\n if file == \"config.yaml\":\n config_file = os.path.join(log_dir, file)\n elif file == \"task.yaml\":\n task_file = os.path.join(log_dir, file)\n return parse_config(config_file), task_file\n\n\ndef plt_sgf(action_seq):\n print(action_seq.numpy()[:, :, 0].shape)\n _ = plt.figure()\n ax1 = plt.subplot(121)\n ax2 = plt.subplot(122)\n\n for i in np.arange(9, 49, 4):\n y = scipy.signal.savgol_filter(action_seq.numpy()[:, :, 0], i, 7,\n deriv=0, delta=1.0, axis=0)\n ax1.plot(y[:, 0], label=\"{}\".format(i))\n ax2.plot(y[:, 1], label=\"{}\".format(i))\n plt.legend()\n plt.show()\n\n\ndef plt_paths(paths, weights, noises, action_seq, cost):\n global control_step\n n_bins = 100\n best_idx = np.argmax(weights)\n # todo extract a_sape from tensor\n noises = noises.numpy().reshape(-1, 2)\n\n _ = plt.figure()\n ax1 = plt.subplot(333)\n ax2 = plt.subplot(336)\n ax3 = plt.subplot(221)\n ax4 = plt.subplot(337)\n ax5 = plt.subplot(338)\n ax6 = plt.subplot(339)\n # We can set the number of bins with the `bins` kwarg\n ax1.set_xlim(-1.5, 1.5)\n ax1.set_ylim(-0.1, 2)\n ax1.hist(noises[:, 0], bins=n_bins, density=True)\n\n ax2.set_xlim(-1.5, 1.5)\n ax2.set_ylim(-0.1, 2)\n ax2.hist(noises[:, 1], bins=n_bins, density=True)\n\n ax3.set_ylim(-5, 5)\n ax3.set_xlim(-5, 5)\n for _, sample in enumerate(paths):\n ax3.plot(sample[:, 0], sample[:, 2], \"-b\")\n ax3.plot(paths[best_idx, :, 0], paths[best_idx, :, 2], \"-r\")\n\n gx, gy = cost.draw_goal()\n ax3.scatter(gx, gy, c=\"k\")\n\n ax4.set_xlim(-1, 60)\n ax4.set_ylim(-0.3, 0.3)\n ax4.plot(action_seq[:, 0])\n\n ax5.set_xlim(-1, 60)\n ax5.set_ylim(-0.3, 0.3)\n ax5.plot(action_seq[:, 1])\n\n # ax6.set_xlim(-0.1, 1.1)\n ax6.plot(weights.numpy().reshape(-1))\n\n plt.savefig('/tmp/mppi_{}.png'.format(control_step-1))\n plt.close(\"all\")\n\n\ndef push_to_tensor(tensor, element):\n tmp = tf.expand_dims(element, axis=1) # shape [k, 1, dim, 1]\n return tf.concat([tensor[:, 1:], tmp], axis=1)\n\n\ndef plot_traj(trajs, seq=None, plotStateCols=None, plotActionCols=None, title=\"Traj\", dir=\".\", filename=None):\n '''\n Plot trajectories and action sequence.\n inputs:\n -------\n - trajs: dict 
with model name as key and trajectories entry. If key is \"gt\" then it is assumed to be\n    the ground truth trajectory.\n    - seq: Action Sequence associated to the generated trajectories. If not None, plots the \n    action sequence.\n    - histories: list of histories used for the different models, ignored when model entry is \"gt\".\n    - frequencies: list of frequencies used for the different models, ignored when model entry is \"gt\".\n    - plotStateCols: Dict containing the state axis name as key and index as entry\n    - plotActionCols: Dict containing the action axis name as key and index as entry.\n    - title: String, the name of the figure.\n    - filename: The name stem used for the saved images.\n    - dir: The saving directory for the generated images.\n    '''\n    maxS = len(plotStateCols)\n    maxA = len(plotActionCols)\n    # fig_state = plt.figure(figsize=(50, 50))\n    fig, axes = plt.subplots(6, 2, figsize=(50, 50))\n    fig.suptitle(title)\n    for k in trajs:\n        t, h, freq, tau = trajs[k]\n        for i, name in enumerate(plotStateCols):\n            m, n = np.unravel_index(i, (2, 6))\n            #idx = 1*m + 2*n + 1\n            axes[n, m].set_ylabel(f'{name}')\n            if k == \"gt\":\n                time_steps = np.linspace(0., freq*tau, tau)\n                axes[n, m].plot(time_steps, t[:tau, plotStateCols[name]],\n                                marker='.', zorder=-10, label=k)\n            else:\n                time_steps = np.linspace(0, freq*(tau+h), (tau+h))\n                axes[n, m].plot(time_steps, t[:, plotStateCols[name]],\n                                marker='X', label=k\n                )\n    plt.legend()\n    #plt.tight_layout()\n    if dir is not None:\n        name = os.path.join(dir, f\"{filename}.png\")\n        plt.savefig(name)\n        plt.close()\n\n    if seq is not None:\n        fig_act = plt.figure(figsize=(30, 30))\n        for i, name in enumerate(plotActionCols):\n            plt.subplot(maxA, 1, i+1)\n            plt.ylabel(f'{name}')\n            plt.plot(seq[0, :, plotActionCols[name]])\n\n        #plt.tight_layout()\n        if dir is not None:\n            name = os.path.join(dir, f\"{filename}-actions.png\")\n            plt.savefig(name)\n            plt.close()\n    \n    plt.show()\n\n\ndef plot_6d(trajs, ColNames=None, title=\"Foo\", dir=\".\", filename=None):\n    '''\n    Plot trajectories and action sequence.\n    inputs:\n    -------\n    - trajs: dict with model name as key and entries being [traj, delta t, steps]. If key is \"gt\" then it is assumed to be\n    the ground truth trajectory.\n    - seq: Action Sequence associated to the generated trajectories. 
If not None, plots the \n action seqence.\n - plotStateCols: Dict containing the state axis name as key and index as entry\n - plotAcitonCols: Dict containing the action axis name as key and index as entry.\n - dir: The saving directory for the generated images.\n '''\n maxS = len(ColNames)\n #fig_state = plt.figure(figsize=(50, 50))\n fig, axes = plt.subplots(3, 2, figsize=(50, 50))\n fig.suptitle(title)\n for k in trajs:\n t, freq, tau = trajs[k]\n for i, name in enumerate(ColNames):\n m, n = np.unravel_index(i, (2, 3))\n axes[n, m].set_ylabel(f'{name}')\n x = np.linspace(0, freq*tau, tau)\n axes[n, m].plot(\n x, t[:, ColNames[name]],\n marker='X', label=k\n )\n plt.legend()\n #plt.tight_layout()\n if dir is not None:\n name = os.path.join(dir, f\"{filename}.png\")\n plt.savefig(name)\n plt.close()\n \n plt.show()\n\n\ndef traj_to_euler(traj, rep=\"rot\"):\n if rep == \"rot\":\n rot = traj[:, 3:3+9].reshape((-1, 3, 3))\n r = R.from_matrix(rot)\n elif rep == \"quat\":\n quat = traj[:, 3:3+4]\n r = R.from_quat(quat)\n else:\n raise NotImplementedError\n pos = traj[:, :3]\n euler = r.as_euler('XYZ', degrees=True)\n vel = traj[:, -6:]\n\n traj = np.concatenate([pos, euler, vel], axis=-1)\n return traj\n\n\ndef traj_to_forces(model, traj, rep=\"rot\", dt=0.1):\n '''\n Takes as input a trajectory composed of\n pose and velocity. Using a AUV model, it\n computes the different forces acting on the\n vehicle and returns them\n\n inputs:\n -------\n traj: trajectory compose of [pose, vel], shape [tau, sDim, 1]\n rep: the representation used for the rotation\n \n outputs:\n --------\n - Cv: the coriolis component. Shape [tau, 6]\n - Dv: the damping component. Shape [tau, 6]\n - g: the restoring forces. Shape [tau, 6]\n - tau: the control input. Shape [tau, 6]\n '''\n\n # First step: we need to compute the acceleration of the\n # auv a each steps.\n if rep == \"euler\":\n angle_len = 3\n elif rep == \"quat\":\n angle_len = 4\n elif rep == \"rot\":\n angle_len = 9\n\n traj = traj[..., None]\n\n pose = traj[:, :3 + angle_len]\n vel = traj[:, 3+angle_len:]\n\n acc = (vel[2: ] - vel[:-2])/dt\n pose = pose[1:-1]\n vel = vel[1:-1]\n\n cvs = []\n dvs = []\n gs = []\n fs = []\n # Use the acceleration together with the state to\n # compute the different values.\n\n for p, v, a in zip(pose, vel, acc):\n c, cv, d, dv, g, f = model.get_forces(p[None], v[None], a[None])\n cvs.append(cv)\n dvs.append(dv)\n gs.append(g)\n fs.append(f)\n \n cvs = np.concatenate(cvs, axis=0)\n dvs = np.concatenate(dvs, axis=0)\n gs = np.concatenate(gs, axis=0)\n fs = np.concatenate(fs, axis=0)\n\n return cvs, dvs, gs, fs", "repo_name": "NicolayP/mppi-rexrov2", "sub_path": "scripts/mppi_tf/scripts/src/misc/utile.py", "file_name": "utile.py", "file_ext": "py", "file_size_in_byte": 9514, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 8, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "tensorflow.float32", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 18, "usage_type": "attribute"}, {"api_name": "matplotlib.patches.FancyArrowPatch", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.patches.FancyArrowPatch.__init__", "line_number": 27, "usage_type": "call"}, {"api_name": 
"matplotlib.patches.FancyArrowPatch", "line_number": 27, "usage_type": "name"}, {"api_name": "mpl_toolkits.mplot3d.proj3d.proj_transform", "line_number": 32, "usage_type": "call"}, {"api_name": "mpl_toolkits.mplot3d.proj3d", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.patches.FancyArrowPatch.draw", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.patches.FancyArrowPatch", "line_number": 34, "usage_type": "name"}, {"api_name": "yaml.load", "line_number": 49, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 74, "usage_type": "call"}, {"api_name": "scipy.signal.signal.savgol_filter", "line_number": 75, "usage_type": "call"}, {"api_name": "scipy.signal.signal", "line_number": 75, "usage_type": "attribute"}, {"api_name": "scipy.signal", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 126, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "tensorflow.expand_dims", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.concat", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "numpy.unravel_index", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path", "line_number": 175, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path", "line_number": 188, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "numpy.unravel_index", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 222, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path", "line_number": 225, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 226, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 227, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "scipy.spatial.transform.Rotation.from_matrix", "line_number": 235, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation", "line_number": 235, "usage_type": "name"}, {"api_name": "scipy.spatial.transform.Rotation.from_quat", "line_number": 238, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation", "line_number": 238, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 301, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 302, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 304, "usage_type": "call"}]}
+{"seq_id": "34230467228", "text": "from typing import List\n\n\ndef check_shift(a: str, b: str) -> bool:\n offset = None\n for ca, cb in zip(a, b):\n if offset is None:\n offset = ord(ca) - ord(cb)\n else:\n if offset != ord(ca) - ord(cb):\n return False\n\n return True\n\n\nclass Solution:\n def groupStrings(self, strings: List[str]) -> List[List[str]]:\n\n if len(strings) == 0:\n return []\n\n groups = dict()\n for elem in strings:\n group_list = groups.get(len(elem), [])\n group_list.append(elem)\n groups[len(elem)] = group_list\n\n sol = []\n for str_group_len, group_list in groups.items():\n group_sol = dict()\n for elem in group_list:\n if len(group_sol) == 0:\n group_sol[elem] = [elem]\n else:\n group = elem\n for possible_group, group_list in group_sol.items():\n if check_shift(possible_group, elem):\n group = possible_group\n\n the_group = group_sol.get(group, [])\n the_group.append(elem)\n group_sol[group] = the_group\n\n sol.extend(list(group_sol.values()))\n\n return sol\n\n\ndin = [\n [\"abc\",\"bcd\",\"acef\",\"xyz\",\"az\",\"ba\",\"a\",\"z\"]\n]\n\nexpected_out = [\n [[\"acef\"],[\"a\",\"z\"],[\"abc\",\"bcd\",\"xyz\"],[\"az\",\"ba\"]]\n]\n\nfor i, expected in zip(din, expected_out):\n s = Solution()\n actual = s.groupStrings(i)\n print(actual)\n print(expected)\n assert actual == expected", "repo_name": "DarioBernardo/hackerrank_exercises", "sub_path": "strings/group_shifted_strings.py", "file_name": "group_shifted_strings.py", "file_ext": "py", "file_size_in_byte": 1593, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "typing.List", "line_number": 17, "usage_type": "name"}]}
+{"seq_id": "34811902518", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 13 15:19:47 2018\n\n@author: loganwu\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\n\nx = np.arange(0, 100)\n\nmu = [35, 60]\nsd = [5, 3]\n\nvar1 = sd[0]**2\nvar2 = sd[1]**2\nprec1=1/var1\nprec2=1/var2\nprec3=prec1+prec2\nvar3=1/prec3\nmu3=(mu[0]*prec1+mu[1]*prec2)/(prec1+prec2)\nsd3=np.sqrt(var3)\n\ndprior = norm.pdf(x, mu[0], sd[0])\ndlikelihood = norm.pdf(x, mu[1], sd[1])\ndposterior = norm.pdf(x, mu3, sd3)\n\n\nfig, ax = plt.subplots(figsize=[2, 1])\nplt.fill_between(x, dprior, alpha=0.5, label=\"Prior\")\nplt.fill_between(x, dlikelihood, alpha=0.5, label=\"Likelihood\")\nplt.fill_between(x, dposterior, alpha=0.5, label=\"Posterior\")\n#plt.xlabel(\"Parameter estimate\")\n#plt.legend()\n\n# Hide the right and top spines\nax.spines['left'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.spines['bottom'].set_visible(False)\n\nplt.yticks([])\nplt.xticks([])\n\nplt.savefig(\"../media/bayesexample.png\", bbox_inches=\"tight\", dpi=144, transparent=True)\n", "repo_name": "loganbwu/geothermal", "sub_path": "src/presentation_plots.py", "file_name": "presentation_plots.py", "file_ext": "py", "file_size_in_byte": 1076, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "numpy.arange", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 25, "usage_type": "call"}, {"api_name": "scipy.stats.norm.pdf", "line_number": 27, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 27, "usage_type": "name"}, {"api_name": "scipy.stats.norm.pdf", "line_number": 28, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 28, "usage_type": "name"}, {"api_name": "scipy.stats.norm.pdf", "line_number": 29, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}]}
+{"seq_id": "19905773241", "text": "# importing required packages\n'''\nHow to initiate bokeh server using this file:\n\ntype bokeh serve --port 5002 widgets_part2.py in your terminal\nThen you may go to the FirstStop landing page to click the bidding price link\n\n'''\n\n\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import layout\nfrom bokeh.models.widgets import Button, TextInput, Paragraph, Div\nimport numpy as np\n\nLOGO = Div(text=\"\"\"
\"\"\")\n\n# Create Input Controls\nLISTPRICE = TextInput(title=\"enter list price/predict price here ($)\")\nZIPCODE = TextInput(title=\"enter zipcode here\")\n\nBUTTON_1 = Button(label=\"Submit\")\nBUTTON_2 = Button(label=\"Reset\")\nOUTPUT = Paragraph(width=600, height=300) #or use pretext, for a tag in html\n\nHOTTEST = [98004, 98006, 98007, 98008, 98112, 98033, 98034, 98039, 98040,\n 98052, 98053, 98074, 98075, 98077, 98103, 98112, 98177, 98115, 98117]\nMEDIUM_HOT = [98001, 98005, 98023, 98027, 98028, 98029, 98056, 98059, 98105,\n 98107, 98116, 98118, 98119, 98122, 98125, 98133, 98155, 98199]\n\ndef bidding_price(zipcode, list_price):\n \"\"\"\n This function implements a mathematical model to calculate bidding price of a house\n :param zipcode: Zipcode of house entered by user\n :param list_price: List price of house entered by user\n :return: returns the estimated bidding price\n \"\"\"\n if zipcode in HOTTEST:\n add_price = (np.random.randint(12, 18, None, int)/100)*list_price\n bid_price = list_price + add_price\n elif zipcode in MEDIUM_HOT:\n add_price = (np.random.randint(5, 10, None, int)/100)*list_price\n bid_price = list_price + add_price\n else:\n add_price = (np.random.randint(5, 10, None, int) / 100) * list_price\n bid_price = list_price - add_price\n\n return bid_price\n\ndef submit():\n \"\"\"\n these are made up coefficients for now\n \"\"\"\n value = bidding_price(float(ZIPCODE.value), float(LISTPRICE.value))\n OUTPUT.text = 'Your suggested bidding price is: ' + str(int(value)) + ' $'\n\ndef reset():\n \"\"\"\n This function resets the output\n \"\"\"\n OUTPUT.text = None\n\nBUTTON_1.on_click(submit)\nBUTTON_2.on_click(reset)\n\nLAY_OUT = layout(children=[[LOGO], [LISTPRICE, ZIPCODE], [BUTTON_1], [BUTTON_2], [OUTPUT]],\n sizing_mode='fixed')\ncurdoc().add_root(LAY_OUT)\ncurdoc().title = \"Predict the bidding price of your first home\"\n", "repo_name": "sliwhu/UWHousingTeam", "sub_path": "UWHousingTeam/Scripts/part2_bid_price.py", "file_name": "part2_bid_price.py", "file_ext": "py", "file_size_in_byte": 2434, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "76", "api": [{"api_name": "bokeh.models.widgets.Div", "line_number": 16, "usage_type": "call"}, {"api_name": "bokeh.models.widgets.TextInput", "line_number": 20, "usage_type": "call"}, {"api_name": "bokeh.models.widgets.TextInput", "line_number": 21, "usage_type": "call"}, {"api_name": "bokeh.models.widgets.Button", "line_number": 23, "usage_type": "call"}, {"api_name": "bokeh.models.widgets.Button", "line_number": 24, "usage_type": "call"}, {"api_name": "bokeh.models.widgets.Paragraph", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 46, "usage_type": "attribute"}, {"api_name": "bokeh.layouts.layout", "line_number": 67, "usage_type": "call"}, {"api_name": "bokeh.io.curdoc", "line_number": 69, "usage_type": "call"}, {"api_name": "bokeh.io.curdoc", "line_number": 70, "usage_type": "call"}]}
+{"seq_id": "17960203838", "text": "import cv2 \nimport numpy as np\nimport time\nbasePath = \"Computer-Vision-py/DATA/\"\n\n\"\"\"\nUsing an existing video file\n\"\"\"\n\n# create a cap obj but just provide the file path instead of a 0.... for the capture device \ncap = cv2.VideoCapture(basePath + \"hand_move.mp4\")\nframeRate = 0x00000014\n\n# print(f\"{frameRate} fps\\n{cv2.CAP_PROP_FRAME_COUNT // 20}\")\n\n# check if you where able to open the file \nif ( not cap.isOpened() ):\n print(\"Unable to open video file\")\n\nwhile ( cap.isOpened() ):\n ret, frame = cap.read()\n\n # 20 fps || dont delay unless you want to view the frames\n time.sleep(1/frameRate)\n if ( ret ):\n cv2.imshow(\"frame\", frame)\n\n if ( cv2.waitKey(1) & 0xFF == 27 ):\n break\n\n else:\n break\n\ncap.release()\ncv2.destroyAllWindows()", "repo_name": "Abukar-1000/myCompVision", "sub_path": "videoBasics/vidFile.py", "file_name": "vidFile.py", "file_ext": "py", "file_size_in_byte": 784, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "cv2.VideoCapture", "line_number": 11, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "23217628608", "text": "import csv\nfrom datetime import datetime\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.core.exceptions import ValidationError\nfrom measure_snow import models\n\nclass Command(BaseCommand):\n args = ' '\n help = '''Imports snowfall measures from a CSV file.\n\nEach row of CSV data should be:\n,\n\nFor example:\nmm/dd/yy,inches with one decimal place\n11/12/11,4.5'''\n\n def handle(self, *args, **options):\n season_name = args[0]\n try:\n season = models.SnowSeason.objects.get(name=season_name)\n except models.SnowSeason.DoesNotExist:\n raise CommandError(\"SnowSeason with name %s does not exist\" % season_name)\n\n csv_filename = args[1]\n try:\n csv_fh = open(csv_filename, 'rb')\n except IOError:\n raise CommandError(\"Could not open %s\" % csv_filename)\n\n csv_reader = csv.reader(csv_fh)\n for row in csv_reader:\n try:\n timestamp = datetime.strptime(row[0], \"%m/%d/%y\")\n except ValueError:\n self.stderr.write('The row(%r) has a date that cannot be parsed. Skipping this row...\\n' % row)\n continue\n\n measure = models.SnowfallMeasure(timestamp=timestamp, season=season, inches=row[1])\n try:\n measure.save()\n self.stdout.write('Successfully created SnowfallMeasure.\\n')\n except ValidationError:\n self.stderr.write('The row(%r) likely has a value for inches that cannot be parsed. Skipping this row...\\n' % row)\n continue\n\n csv_fh.close()\n", "repo_name": "jpwoodbu/measure_snow", "sub_path": "management/commands/importsnowcsv.py", "file_name": "importsnowcsv.py", "file_ext": "py", "file_size_in_byte": 1671, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 7, "usage_type": "name"}, {"api_name": "measure_snow.models.SnowSeason.objects.get", "line_number": 21, "usage_type": "call"}, {"api_name": "measure_snow.models.SnowSeason", "line_number": 21, "usage_type": "attribute"}, {"api_name": "measure_snow.models", "line_number": 21, "usage_type": "name"}, {"api_name": "measure_snow.models.SnowSeason", "line_number": 22, "usage_type": "attribute"}, {"api_name": "measure_snow.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.core.management.base.CommandError", "line_number": 23, "usage_type": "call"}, {"api_name": "django.core.management.base.CommandError", "line_number": 29, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 34, "usage_type": "name"}, {"api_name": "measure_snow.models.SnowfallMeasure", "line_number": 39, "usage_type": "call"}, {"api_name": "measure_snow.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 43, "usage_type": "name"}]}
+{"seq_id": "127349178", "text": "from fides.api.schemas.masking.masking_configuration import NullMaskingConfiguration\nfrom fides.api.service.masking.strategy.masking_strategy_nullify import (\n NullMaskingStrategy,\n)\n\n\ndef test_mask_with_value():\n request_id = \"123\"\n config = NullMaskingConfiguration()\n masker = NullMaskingStrategy(configuration=config)\n assert masker.mask([\"something else\"], request_id)[0] is None\n\n\ndef test_mask_with_multi_value():\n request_id = \"123\"\n config = NullMaskingConfiguration()\n masker = NullMaskingStrategy(configuration=config)\n masked = masker.mask([\"something else\", \"some more\"], request_id)\n assert masked[0] is None\n assert masked[1] is None\n\n\ndef test_mask_no_value():\n request_id = \"123\"\n config = NullMaskingConfiguration()\n masker = NullMaskingStrategy(configuration=config)\n assert masker.mask(None, request_id) is None\n", "repo_name": "ethyca/fides", "sub_path": "tests/ops/service/masking/strategy/test_masking_strategy_null.py", "file_name": "test_masking_strategy_null.py", "file_ext": "py", "file_size_in_byte": 877, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 302, "dataset": "github-code", "pt": "76", "api": [{"api_name": "fides.api.schemas.masking.masking_configuration.NullMaskingConfiguration", "line_number": 9, "usage_type": "call"}, {"api_name": "fides.api.service.masking.strategy.masking_strategy_nullify.NullMaskingStrategy", "line_number": 10, "usage_type": "call"}, {"api_name": "fides.api.schemas.masking.masking_configuration.NullMaskingConfiguration", "line_number": 16, "usage_type": "call"}, {"api_name": "fides.api.service.masking.strategy.masking_strategy_nullify.NullMaskingStrategy", "line_number": 17, "usage_type": "call"}, {"api_name": "fides.api.schemas.masking.masking_configuration.NullMaskingConfiguration", "line_number": 25, "usage_type": "call"}, {"api_name": "fides.api.service.masking.strategy.masking_strategy_nullify.NullMaskingStrategy", "line_number": 26, "usage_type": "call"}]}
+{"seq_id": "33675057547", "text": "from django.test import TestCase\nfrom polls.models import Product\n\nclass ProductTestCase(TestCase):\n def create_product(self, name, price, desciption, category):\n p=Product() \n p.name = name\n p.price =price\n p.desciption = desciption\n p.category= category\n return p\n\n def test_category_fail(self):\n w = self.create_product(name=\"computer system\", price=100, desciption=\"nothing\", category=\"shoes\")\n w.addProduct()\n self.assertFalse(Product.objects.filter(name=\"computer system\").exists())\n\n def test_category_fail2(self):\n p = Product()\n p.name = \"test_category\"\n p.category = \"test_category\"\n p= p.addProduct()\n self.assertEqual(p, None)\n\n\n \n\n \n", "repo_name": "rondogency/HotPot", "sub_path": "polls/test2/tests_product_category.py", "file_name": "tests_product_category.py", "file_ext": "py", "file_size_in_byte": 765, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.test.TestCase", "line_number": 4, "usage_type": "name"}, {"api_name": "polls.models.Product", "line_number": 6, "usage_type": "call"}, {"api_name": "polls.models.Product.objects.filter", "line_number": 16, "usage_type": "call"}, {"api_name": "polls.models.Product.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "polls.models.Product", "line_number": 16, "usage_type": "name"}, {"api_name": "polls.models.Product", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "33292918768", "text": "from app import app\nfrom mongoengine.queryset import DoesNotExist\ndb = app.extensions['mongoengine']\n\nclass RefreshToken(db.Document):\n meta = {'collection': 'applications',\n 'indexes': [\n {'fields': ['client_id', 'refresh_token'], 'unique': True},\n ]\n }\n\n client_id = db.StringField(required=True)\n refresh_token = db.StringField(required=True)\n data = db.StringField(required=True)\n\n @classmethod\n def delete(cls, client_id, refresh_token):\n cls.objects(db.Q(client_id = client_id) & db.Q(refresh_token = refresh_token)).remove()\n\n @classmethod\n def find(cls, client_id, refresh_token):\n return RefreshToken.objects.get(db.Q(client_id=client_id) & db.Q(refresh_token=refresh_token))\n\n @classmethod\n def save(cls, client_id, refresh_token, data):\n token, created = RefreshToken.objects.get_or_create(client_id=client_id, refresh_token=refresh_token)\n token.data = data\n\n token.save()\n\nclass AccessKey(db.Document):\n meta = {'collection': 'applications',\n 'indexes': [\n {'fields': ['client_id', 'user_id'], 'unique': False},\n ]\n }\n\n client_id = db.StringField(required=True)\n user_id = db.StringField(required=True)\n access_key = db.StringField(required=True)\n token = db.ReferenceField(RefreshToken, required=True)\n\n @classmethod\n def has_access(cls, client_id, user_id):\n try:\n return cls.objects.get(db.Q(client_id = client_id) & db.Q(user_id = user_id)) != None\n except DoesNotExist:\n return None\n\n @classmethod\n def delete(cls, client_id, user_id):\n cls.objects(db.Q(client_id = client_id) & db.Q(user_id = user_id)).remove()\n\n @classmethod\n def save(cls, client_id, user_id, access_key, token):\n access, created = AccessKey.objects.get_or_create(client_id=client_id, user_id=user_id)\n access.access_key = access_key\n access.token = token\n\n access.save()", "repo_name": "Labgoo/redshift-goog-datasource", "sub_path": "models/refresh_token.py", "file_name": "refresh_token.py", "file_ext": "py", "file_size_in_byte": 2008, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "76", "api": [{"api_name": "app.app.extensions", "line_number": 3, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 3, "usage_type": "name"}, {"api_name": "mongoengine.queryset.DoesNotExist", "line_number": 47, "usage_type": "name"}]}
+{"seq_id": "3195536282", "text": "#!/usr/bin/env python\n\"\"\"\nDBSCAN Project - M2 SSI - Istic, Univ. Rennes 1.\n\nAndriamilanto Tompoariniaina \n\nThis module is an implementation of K-mean algorithm to confront it with our\nimplementation of the DBSCAN one.\n\"\"\"\n\n# -- Imports\nimport sys\nimport random\nimport operator\nfrom pandas import DataFrame\nfrom pathlib import Path\nfrom datas import (read_dataset, dataframe_to_points, display_clusters, Center,\n Cluster)\n\n\n# -- Classes\nclass Kmean(object):\n \"\"\"The class representation of our implementation of Kmean.\"\"\"\n\n def __init__(self, dataset, k, precision=1):\n \"\"\"Initialization function, called when creating a new object.\"\"\"\n # Type checking the dataset\n if not isinstance(dataset, DataFrame) or dataset.empty:\n raise TypeError(\n 'Dataset given to Kmean class has to be a non empty',\n 'pandas.DataFrame instance'\n )\n\n # If asking more clusters than the number of points\n if k > dataset.size:\n raise ValueError(\n 'k cannot be superior than dataset size (> %d)' % dataset.size\n )\n\n # Initialize private attributes\n self._k = k\n self._precision = precision\n self._points = []\n self._clusters = []\n self._neighbour_counter = {}\n\n # Create the Point objects from the DataFrame one\n self._points = dataframe_to_points(dataset)\n\n # Initialize the neighbour counter\n for point in self._points:\n self._neighbour_counter[point] = 0\n\n # DEBUG: Display initial state of the algorithm\n # display_clusters(self._clusters, self._points)\n\n def _turn(self):\n \"\"\"Run a turn of the algorithm till we reach the convergence point.\"\"\"\n # Varible put to False only to enter the first time into the loop\n converged = False\n nb_loop = 0\n\n # While we still haven't reached the point of convergence\n while not converged:\n\n # DEBUG: Display the state at each loop\n # display_clusters(self._clusters)\n\n # Put the converged value back to True, if a point changes its\n # cluster, we will know that we still haven't converged\n converged = True\n\n # For every point (we assume that they are already into a cluster)\n for p in self._points:\n\n # The closest is the current cluster of the point\n closest = p.cluster\n curr_dist = p.dist(closest.center)\n\n # Parse all the other clusters\n for cluster in self._clusters:\n\n # If one is closest than the current one\n if p.dist(cluster.center) < curr_dist:\n closest = cluster\n curr_dist = p.dist(closest.center)\n\n # If the closest cluster is different than the current one,\n # assign this point to this cluster and we know that we still\n # haven't converged\n if p.cluster != closest:\n closest.assign(p)\n converged = False\n\n # Reassign the center of the clusters\n self._update_cluster_center()\n\n # Simple counter\n nb_loop += 1\n\n # Return the number of loops that this turn took\n return nb_loop\n\n def run(self):\n \"\"\"Run the algorithm a precision number of times.\"\"\"\n # Do a precision number of turns\n nb_loop = 0\n for turn in range(self._precision):\n\n # Initialization with random centers\n self._initialization()\n\n # Execute the turn and counting its number of loops\n nb_loop += self._turn()\n\n # Count the number of neighbour points of each points\n self._count_neighbours()\n\n # Execute the last turn with optimized centers\n opt_loop = self._optimized_turn()\n\n # At the end, print the final convergence time\n print('%d, %d, %d' % (self._k, nb_loop/self._precision, opt_loop))\n\n # Display the final state of the clusters\n 
display_clusters(self._clusters)\n # for c in self._clusters:\n # print(c)\n\n def _optimized_turn(self):\n \"\"\"Optimized turn to get the 'best' centers for clusters.\"\"\"\n # Get k points with the max neighbours which will make better centers\n best_centers = []\n for i in range(self._k):\n\n # Get the id of the point with maximum neighbours (better center)\n new_max_point = max(\n self._neighbour_counter.items(),\n key=operator.itemgetter(1)\n )[0]\n\n # For every point into the cluster of the maximum one, remove them\n # in order to not select two centers into the same cluster\n cluster = new_max_point.cluster\n # closest = cluster.points[0]\n closest = new_max_point\n for point in cluster.points:\n # if point.dist(cluster.center) < closest.dist(cluster.center):\n # closest = point\n self._neighbour_counter[point] = 0\n\n # Just add the created center into the center list\n best_centers.append(Center(i, closest.x, closest.y))\n\n # Clear the clusters\n self._clear_clusters()\n\n # Create the clusters with their optimized centers\n for center in best_centers:\n c = Cluster()\n c.center = center\n self._clusters.append(c)\n\n # Assign each point to its closest cluster\n self._assign_point_to_closest_cluster()\n\n # Reassign the center of the clusters\n self._update_cluster_center()\n\n # Execute the final and optimized turn and counting its number of loops\n return self._turn()\n\n def _count_neighbours(self):\n \"\"\"Count the number of neighbours of each point.\"\"\"\n for point in self._points:\n self._neighbour_counter[point] += len(point.cluster.points)\n\n def _initialization(self):\n \"\"\"Initialization part of the algorithm.\n\n Note that the points will be assigned to their nearest cluster and the\n center points of the clusters are scattered on the diagonal going from\n left bottom to top right.\n \"\"\"\n # Clear the clusters\n self._clear_clusters()\n\n # Initialize the clusters\n self._init_clusters()\n\n # Assign each point to its closest cluster\n self._assign_point_to_closest_cluster()\n\n # Reassign the center of the clusters\n self._update_cluster_center()\n\n def _update_cluster_center(self):\n \"\"\"Update the cluster's center.\"\"\"\n # Update the center of each cluster if there are points into it\n for cluster in self._clusters:\n\n # Get the number of points into this cluster\n nb_points = len(cluster.points)\n if nb_points > 0:\n\n # Update the way of getting sums and centers for 3D points\n\n # Add all x and y values of each point of this cluster\n x_sum, y_sum = 0, 0\n for point in cluster.points:\n x_sum += point.x\n y_sum += point.y\n\n # Reassign the center of this cluster by getting the mean\n cluster.center.x = x_sum / nb_points\n cluster.center.y = y_sum / nb_points\n\n # DEBUG: Display the new centers approximations\n # print(\n # 'center.x=%s and center.y=%s' %\n # (cluster.center.x, cluster.center.y)\n # )\n\n def _clear_clusters(self):\n \"\"\"Clear the clusters between each turn.\"\"\"\n for point in self._points:\n point.cluster = None\n self._clusters.clear()\n\n def _init_clusters(self):\n \"\"\"Initialize the clusters.\"\"\"\n # Select randomly k points and put them as cluster centers\n for index in range(self._k):\n\n # Select a random point\n random_point = random.choice(self._points)\n\n # Update what is needed for 3D centers using 3D points\n\n # Create a new cluster with this a random point as its center\n c = Cluster()\n c.center = Center(index, random_point.x, random_point.y)\n self._clusters.append(c)\n\n def 
_assign_point_to_closest_cluster(self):\n \"\"\"Assign each point to its closes cluster.\"\"\"\n for p in self._points:\n\n # The closest is the first cluster in the list (for the moment)\n closest = self._clusters[0]\n curr_dist = p.dist(closest.center)\n\n # Parse all the other clusters\n for cluster in self._clusters[1:]:\n\n # If one is closest than the current one\n if p.dist(cluster.center) < curr_dist:\n closest = cluster\n curr_dist = p.dist(closest.center)\n\n # Assign this point to its closest cluster\n closest.assign(p)\n\n\n# -- Private functions\ndef __get_params(argv):\n \"\"\"Function to manage input parameters.\"\"\"\n # Correct syntax\n syntax = '%s filename k [precision]' % argv[0]\n\n # Not enough parameters\n if len(argv) not in (3, 4):\n print('Usage: %s' % syntax)\n exit()\n\n # Get the parameter k\n try:\n k = int(argv[2])\n if k < 1:\n raise ValueError\n except ValueError:\n print(\n 'Parameter k as %s is invalid, must be a positive integer'\n % argv[2]\n )\n exit()\n\n # Get the filename after checking that the file exists and is a .csv\n f = Path(argv[1])\n if not f.is_file() or f.suffix != '.csv':\n print('The file %s was not found' % argv[1])\n exit()\n\n # Get the precision value\n try:\n precision = int(argv[3])\n if precision < 1:\n raise ValueError\n except IndexError:\n precision = 1\n except ValueError:\n print(\n 'Parameter precision as %s is invalid, must be a positive integer'\n % argv[3]\n )\n exit()\n\n # Return the parameters\n return argv[1], k, precision\n\n\nif __name__ == \"__main__\":\n \"\"\"Main function to be launched when this script is called \"\"\"\n\n # -- Normal functionment\n # Get parameters and execute K-mean algorithm\n dataset, k, precision = __get_params(sys.argv)\n Kmean(read_dataset(dataset), k, precision).run()\n\n # -- Convergence measurement gives 3 columns csv file\n # => (k | normal convergence time | optimized version convergence time)\n # datasets = [\n # 'carnet2.csv',\n # 'carnet_bis.csv',\n # 'circles.csv',\n # 'density_gap.csv',\n # 'example.csv',\n # 'stats_reseaux_ping_download.csv'\n # ]\n #\n # from contextlib import redirect_stdout\n # for ds in datasets:\n # with open('../Report/convergences/' + ds, 'w') as f:\n # with redirect_stdout(f):\n # print('k, convergence_time')\n # try:\n # for k in range(1, 100):\n # Kmean(read_dataset('../datasets/' + ds), k).run()\n # except ValueError:\n # pass\n", "repo_name": "tandriamil/clustering-based-ids", "sub_path": "src/kmean.py", "file_name": "kmean.py", "file_ext": "py", "file_size_in_byte": 11302, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "pandas.DataFrame", "line_number": 28, "usage_type": "argument"}, {"api_name": "datas.dataframe_to_points", "line_number": 48, "usage_type": "call"}, {"api_name": "datas.display_clusters", "line_number": 126, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 139, "usage_type": "call"}, {"api_name": "datas.Center", "line_number": 153, "usage_type": "call"}, {"api_name": "datas.Cluster", "line_number": 160, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 236, "usage_type": "call"}, {"api_name": "datas.Cluster", "line_number": 241, "usage_type": "call"}, {"api_name": "datas.Center", "line_number": 242, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 289, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 317, "usage_type": "attribute"}, {"api_name": "datas.read_dataset", 
"line_number": 318, "usage_type": "call"}]}
+{"seq_id": "14511120954", "text": "from sys import argv, exit\nfrom time import time\n\nfrom cv2 import VideoCapture, LINE_4, putText, CAP_DSHOW\nfrom torch.backends import cudnn\nfrom torch.cuda import synchronize\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\nfrom globals import TRAFFIC_NAMES, TRAFFIC_COLOR, GPU_DEVICE, TRAFFIC_MODEL\nfrom scripts import cv2_to_pil, opencvPaint, yoloPaint, FONT, CONFI_THRES, IOU_THRES, tensorShape, signPredict\nfrom predict import img_resize, img_transform, non_max_suppression\nfrom multiprocessing import Queue, Process\n\ncudnn.benchmark = True\n\n\n# noinspection PyAttributeOutsideInit,DuplicatedCode\nclass TrafficSystemGUI(QWidget):\n\n # noinspection PyArgumentList\n def __init__(self):\n super().__init__()\n self.initMainWindow()\n self.initImageGroup()\n self.initButtons()\n self.initReadMe()\n self.threadPool = QThreadPool()\n\n def initMainWindow(self):\n self.__height = 898\n self.__width = 1536\n self.__top = int(1080 / 2 - self.__height / 2)\n self.__left = int(1920 / 2 - self.__width / 2)\n self.__title = \"交通路况系统\"\n self.setWindowTitle(self.__title)\n self.setGeometry(self.__left, self.__top, self.__width, self.__height)\n self.setFixedWidth(self.__width)\n self.setFixedHeight(self.__height)\n self.setWindowIcon(QIcon(\"resources/hohai.png\"))\n\n def initImageGroup(self):\n self.ImageBackGround = QGraphicsView(self)\n self.ImageBackGround.setGeometry(QRect(10, 10, 1152, 854))\n\n self.ImageScreen = QLabel(self)\n rect = QRect(10, 10, 200, 200)\n self.ImageScreen.setGeometry(rect)\n self.ImageScreen.setText(\"\")\n\n self.ImageScreenTop = rect.top()\n self.ImageScreenLeft = rect.left()\n self.ImageScreenWidth = rect.width()\n self.ImageScreenHeight = rect.height()\n\n def imageBGMidPoint(self):\n bg = self.ImageBackGround.geometry()\n return bg.left() + int(bg.width() / 2), bg.top() + int(bg.height() / 2)\n\n def imageMidPoint(self):\n ig = self.ImageScreen.geometry()\n return ig.left() + int(ig.width() / 2), ig.top() + int(ig.height() / 2)\n\n def alignImage(self, img_mid: tuple, bg_mid: tuple):\n w_diff = bg_mid[0] - img_mid[0]\n h_diff = bg_mid[1] - img_mid[1]\n self.ImageScreenTop += h_diff\n self.ImageScreenLeft += w_diff\n self.ImageScreen.setGeometry(\n QRect(self.ImageScreenLeft, self.ImageScreenTop, self.ImageScreenWidth, self.ImageScreenHeight))\n\n # noinspection PyArgumentList,PyUnresolvedReferences\n def initButtons(self):\n self.VideoButton = QPushButton(self)\n self.VideoButton.setGeometry(QRect(1180, 640, 241, 101))\n self.VideoButton.setText(\"视频:off\")\n self.VideoButton.clicked.connect(self.clickVideoButton)\n\n font = QFont()\n font.setPointSize(24)\n font.setFamily(\"楷体\")\n self.VideoButton.setFont(font)\n\n self.CameraButton = QPushButton(self)\n self.CameraButton.setGeometry(QRect(1180, 770, 241, 101))\n self.CameraButton.setText(\"摄像头:off\")\n self.CameraButton.clicked.connect(self.clickCameraButton)\n self.CameraButton.setFont(font)\n\n self.cap = None\n\n def initReadMe(self):\n self.ReadMeLabel = QLabel(self)\n self.ReadMeLabel.setGeometry(QRect(1180, 490, 351, 131))\n font = QFont()\n font.setFamily(\"黑体\")\n font.setPointSize(14)\n self.ReadMeLabel.setFont(font)\n self.ReadMeLabel.setText(\"绿色:行人
\"\n \"红色:汽车(轿车、卡车、摩托车)
蓝色:禁止标志
\")\n\n @pyqtSlot()\n def clickVideoButton(self):\n if self.VideoButton.text().endswith(\"off\"):\n self.CameraButton.setEnabled(False)\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n # noinspection PyTypeChecker\n file_path, _ = QFileDialog.getOpenFileName(self, \"选择视频文件\", \"\",\n \"All Files (*);;Python Files (*.py)\", options=options)\n if len(file_path) > 0:\n self.VideoButton.setText(self.VideoButton.text().replace(\"off\", \"on\"))\n self.threadPool.start(lambda: self.videoRunModels(video_path=file_path))\n else:\n self.CameraButton.setEnabled(True)\n else:\n self.VideoButton.setText(self.VideoButton.text().replace(\"on\", \"off\"))\n\n @pyqtSlot()\n def videoRunModels(self, video_path):\n self.cap = VideoCapture(video_path)\n\n sign_in = Queue()\n sign_out = Queue()\n sign_process = Process(target=signPredict, args=(sign_in, sign_out))\n sign_process.start()\n\n last_time = time()\n\n while self.cap.isOpened():\n read_succ, cv2_img = self.cap.read()\n if not read_succ:\n break\n\n pil_img = cv2_to_pil(cv2_img)\n sign_in.put(pil_img, True)\n\n tensor_img = img_transform(img_resize(cv2_img, 480), GPU_DEVICE)\n yolo_pred = TRAFFIC_MODEL(tensor_img)[0]\n yolo_pred = non_max_suppression(yolo_pred, CONFI_THRES, IOU_THRES)\n yoloPaint(yolo_pred, tensorShape(tensor_img), cv2_img, TRAFFIC_NAMES, TRAFFIC_COLOR)\n\n sign_pred = sign_out.get(True)\n opencvPaint(sign_pred, cv2_img)\n\n synchronize()\n t = time()\n current_latency = (t - last_time) * 1000\n last_time = t\n putText(cv2_img, \"FPS:%.1f\" % (1000 / current_latency), (0, 15), FONT, 0.5, (255, 80, 80), 1, LINE_4)\n\n img = QImage(cv2_img.data, cv2_img.shape[1], cv2_img.shape[0], QImage.Format_RGB888).rgbSwapped()\n if not (img.width() == self.ImageScreenWidth and img.height() == self.ImageScreenHeight):\n self.ImageScreen.resize(img.width(), img.height())\n self.ImageScreenWidth = img.width()\n self.ImageScreenHeight = img.height()\n\n bg_mid = self.imageBGMidPoint()\n img_mid = self.imageMidPoint()\n if not (bg_mid == img_mid):\n self.alignImage(img_mid, bg_mid)\n\n if self.VideoButton.text().endswith(\"on\"):\n # noinspection PyArgumentList\n self.ImageScreen.setPixmap(QPixmap.fromImage(img))\n else:\n break\n self.ImageScreen.clear()\n self.cap.release()\n sign_process.terminate()\n self.CameraButton.setEnabled(True)\n\n @pyqtSlot()\n def clickCameraButton(self):\n if self.CameraButton.text().endswith(\"off\"):\n self.VideoButton.setEnabled(False)\n self.CameraButton.setText(self.CameraButton.text().replace(\"off\", \"on\"))\n self.threadPool.start(self.cameraRunModels)\n else:\n self.CameraButton.setText(self.CameraButton.text().replace(\"on\", \"off\"))\n\n # noinspection PyArgumentList,DuplicatedCode\n @pyqtSlot()\n def cameraRunModels(self):\n self.cap = VideoCapture(0 + CAP_DSHOW)\n\n sign_in = Queue()\n sign_out = Queue()\n sign_process = Process(target=signPredict, args=(sign_in, sign_out))\n sign_process.start()\n\n last_time = time()\n\n while self.cap.isOpened():\n read_success, cv2_img = self.cap.read()\n if not read_success:\n break\n\n pil_img = cv2_to_pil(cv2_img)\n sign_in.put(pil_img, True)\n\n tensor_img = img_transform(img_resize(cv2_img, 480), GPU_DEVICE)\n yolo_pred = TRAFFIC_MODEL(tensor_img)[0]\n yolo_pred = non_max_suppression(yolo_pred, CONFI_THRES, IOU_THRES)\n yoloPaint(yolo_pred, tensor_img.shape[2:], cv2_img, TRAFFIC_NAMES, TRAFFIC_COLOR)\n\n sign_pred = sign_out.get(True)\n opencvPaint(sign_pred, cv2_img)\n\n synchronize()\n t = time()\n current_latency = (t - 
last_time) * 1000\n last_time = t\n putText(cv2_img, \"FPS:%.1f\" % (1000 / current_latency), (0, 15), FONT, 0.5, (255, 80, 80), 1, LINE_4)\n\n img = QImage(cv2_img.data, cv2_img.shape[1], cv2_img.shape[0], QImage.Format_RGB888).rgbSwapped()\n\n if not (img.width() == self.ImageScreenWidth and img.height() == self.ImageScreenHeight):\n self.ImageScreen.resize(img.width(), img.height())\n self.ImageScreenWidth = img.width()\n self.ImageScreenHeight = img.height()\n\n bg_mid = self.imageBGMidPoint()\n img_mid = self.imageMidPoint()\n if not (bg_mid == img_mid):\n self.alignImage(img_mid, bg_mid)\n\n if self.CameraButton.text().endswith(\"on\"):\n self.ImageScreen.setPixmap(QPixmap.fromImage(img))\n else:\n break\n self.cap.release()\n self.ImageScreen.clear()\n sign_process.terminate()\n self.VideoButton.setEnabled(True)\n\n\nif __name__ == '__main__':\n app = QApplication(argv)\n ui = TrafficSystemGUI()\n ui.show()\n exit(app.exec_())\n", "repo_name": "qhc99/FinalThesisProject", "sub_path": "src/gui.py", "file_name": "gui.py", "file_ext": "py", "file_size_in_byte": 9237, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "torch.backends.cudnn.benchmark", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 16, "usage_type": "name"}, {"api_name": "cv2.VideoCapture", "line_number": 122, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 124, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 125, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 126, "usage_type": "call"}, {"api_name": "scripts.signPredict", "line_number": 126, "usage_type": "name"}, {"api_name": "time.time", "line_number": 129, "usage_type": "call"}, {"api_name": "scripts.cv2_to_pil", "line_number": 136, "usage_type": "call"}, {"api_name": "predict.img_transform", "line_number": 139, "usage_type": "call"}, {"api_name": "globals.GPU_DEVICE", "line_number": 139, "usage_type": "argument"}, {"api_name": "predict.img_resize", "line_number": 139, "usage_type": "call"}, {"api_name": "globals.TRAFFIC_MODEL", "line_number": 140, "usage_type": "call"}, {"api_name": "predict.non_max_suppression", "line_number": 141, "usage_type": "call"}, {"api_name": "scripts.CONFI_THRES", "line_number": 141, "usage_type": "argument"}, {"api_name": "scripts.IOU_THRES", "line_number": 141, "usage_type": "argument"}, {"api_name": "scripts.yoloPaint", "line_number": 142, "usage_type": "call"}, {"api_name": "globals.TRAFFIC_NAMES", "line_number": 142, "usage_type": "argument"}, {"api_name": "globals.TRAFFIC_COLOR", "line_number": 142, "usage_type": "argument"}, {"api_name": "scripts.tensorShape", "line_number": 142, "usage_type": "call"}, {"api_name": "scripts.opencvPaint", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.cuda.synchronize", "line_number": 147, "usage_type": "call"}, {"api_name": "time.time", "line_number": 148, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 151, "usage_type": "call"}, {"api_name": "scripts.FONT", "line_number": 151, "usage_type": "argument"}, {"api_name": "cv2.LINE_4", "line_number": 151, "usage_type": "argument"}, {"api_name": "cv2.VideoCapture", "line_number": 186, "usage_type": "call"}, {"api_name": "cv2.CAP_DSHOW", "line_number": 186, "usage_type": "name"}, {"api_name": "multiprocessing.Queue", "line_number": 188, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", 
"line_number": 189, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 190, "usage_type": "call"}, {"api_name": "scripts.signPredict", "line_number": 190, "usage_type": "name"}, {"api_name": "time.time", "line_number": 193, "usage_type": "call"}, {"api_name": "scripts.cv2_to_pil", "line_number": 200, "usage_type": "call"}, {"api_name": "predict.img_transform", "line_number": 203, "usage_type": "call"}, {"api_name": "globals.GPU_DEVICE", "line_number": 203, "usage_type": "argument"}, {"api_name": "predict.img_resize", "line_number": 203, "usage_type": "call"}, {"api_name": "globals.TRAFFIC_MODEL", "line_number": 204, "usage_type": "call"}, {"api_name": "predict.non_max_suppression", "line_number": 205, "usage_type": "call"}, {"api_name": "scripts.CONFI_THRES", "line_number": 205, "usage_type": "argument"}, {"api_name": "scripts.IOU_THRES", "line_number": 205, "usage_type": "argument"}, {"api_name": "scripts.yoloPaint", "line_number": 206, "usage_type": "call"}, {"api_name": "globals.TRAFFIC_NAMES", "line_number": 206, "usage_type": "argument"}, {"api_name": "globals.TRAFFIC_COLOR", "line_number": 206, "usage_type": "argument"}, {"api_name": "scripts.opencvPaint", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.cuda.synchronize", "line_number": 211, "usage_type": "call"}, {"api_name": "time.time", "line_number": 212, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 215, "usage_type": "call"}, {"api_name": "scripts.FONT", "line_number": 215, "usage_type": "argument"}, {"api_name": "cv2.LINE_4", "line_number": 215, "usage_type": "argument"}, {"api_name": "sys.argv", "line_number": 240, "usage_type": "argument"}, {"api_name": "sys.exit", "line_number": 243, "usage_type": "call"}]}
+{"seq_id": "8278692231", "text": "import numpy as np\r\nfrom lmfit import Parameters, Minimizer\r\nfrom scipy.signal import argrelmin\r\nfrom tsmoothie.smoother import LowessSmoother\r\nfrom scipy.signal import find_peaks\r\n\r\nfrom datetime import datetime\r\nfrom scipy.integrate import odeint\r\nimport logging\r\n\r\nlogger = logging.getLogger('my_logger')\r\nlogging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)\r\n\r\n\r\ndef deriv(y, t, r1, a1, K1, p1, q1):\r\n C = y\r\n r = r1\r\n K = K1\r\n a = a1\r\n p = p1\r\n q = q1\r\n dCdt = r * (C ** q) * (1 - (C / K) ** a) ** p\r\n return dCdt\r\n\r\n\r\ndef deriv1(y, t, r1, a1, K1, p1, q1, r2, a2, K2, p2, q2, rho, t_0):\r\n C = y\r\n r = r1 + 0.5 * (r2 - r1) * (1 + np.tanh(0.5 * rho * (t - t_0)))\r\n K = K1 + 0.5 * (K2 - K1) * (1 + np.tanh(0.5 * rho * (t - t_0)))\r\n a = a1 + 0.5 * (a2 - a1) * (1 + np.tanh(0.5 * rho * (t - t_0)))\r\n p = p1 + 0.5 * (p2 - p1) * (1 + np.tanh(0.5 * rho * (t - t_0)))\r\n q = q1 + 0.5 * (q2 - q1) * (1 + np.tanh(0.5 * rho * (t - t_0)))\r\n dCdt = r * (C ** q) * (1 - (C / K) ** a) ** p\r\n return dCdt\r\n\r\n\r\ndef deriv2(y, t, r1, a1, K1, p1, q1, r2, a2, K2, p2, q2, r3, a3, K3, p3, q3, rho, t_0, rho1, t_1):\r\n C = y\r\n r = r1 + 0.5 * (r2 - r1) * (1 + np.tanh(0.5 * rho * (t - t_0))) + 0.5 * (r3 - r2) * (\r\n 1 + np.tanh(0.5 * rho1 * (t - t_1)))\r\n K = K1 + 0.5 * (K2 - K1) * (1 + np.tanh(0.5 * rho * (t - t_0))) + 0.5 * (K3 - K2) * (\r\n 1 + np.tanh(0.5 * rho1 * (t - t_1)))\r\n a = a1 + 0.5 * (a2 - a1) * (1 + np.tanh(0.5 * rho * (t - t_0))) + 0.5 * (a3 - a2) * (\r\n 1 + np.tanh(0.5 * rho1 * (t - t_1)))\r\n p = p1 + 0.5 * (p2 - p1) * (1 + np.tanh(0.5 * rho * (t - t_0))) + 0.5 * (p3 - p2) * (\r\n 1 + np.tanh(0.5 * rho1 * (t - t_1)))\r\n q = q1 + 0.5 * (q2 - q1) * (1 + np.tanh(0.5 * rho * (t - t_0))) + 0.5 * (q3 - q2) * (\r\n 1 + np.tanh(0.5 * rho1 * (t - t_1)))\r\n dCdt = r * (C ** q) * (1 - (C / K) ** a) ** p\r\n return dCdt\r\n\r\n\r\ndef func(params, t, data): # Objective function to be minimized\r\n r1 = params['r1']\r\n a1 = params['a1']\r\n K1 = params['K1']\r\n p1 = params['p1']\r\n q1 = params['q1']\r\n ret = odeint(deriv, y0, t, args=(r1, a1, K1, p1, q1))\r\n C = ret.T\r\n resid = []\r\n pre_resid = []\r\n pre_resid.append(C)\r\n pre_resid = np.array(pre_resid)\r\n resid.append(pre_resid - data)\r\n resid = np.array(resid)\r\n return resid\r\n\r\n\r\ndef func1(params, t, data):\r\n r1 = params['r1']\r\n a1 = params['a1']\r\n K1 = params['K1']\r\n p1 = params['p1']\r\n q1 = params['q1']\r\n r2 = params['r2']\r\n a2 = params['a2']\r\n K2 = params['K2']\r\n p2 = params['p2']\r\n q2 = params['q2']\r\n rho = params['rho']\r\n t_0 = params['t_0']\r\n ret = odeint(deriv1, y0, t, args=(r1, a1, K1, p1, q1, r2, a2, K2, p2, q2, rho, t_0))\r\n C = ret.T\r\n resid = []\r\n pre_resid = []\r\n pre_resid.append(C)\r\n pre_resid = np.array(pre_resid)\r\n resid.append(pre_resid - data)\r\n resid = np.array(resid)\r\n return resid\r\n\r\n\r\ndef func2(params, t, data):\r\n r1 = params['r1']\r\n a1 = params['a1']\r\n K1 = params['K1']\r\n p1 = params['p1']\r\n q1 = params['q1']\r\n r2 = params['r2']\r\n a2 = params['a2']\r\n K2 = params['K2']\r\n p2 = params['p2']\r\n q2 = params['q2']\r\n r3 = params['r3']\r\n a3 = params['a3']\r\n K3 = params['K3']\r\n p3 = params['p3']\r\n q3 = params['q3']\r\n rho = params['rho']\r\n t_0 = params['t_0']\r\n rho1 = params['rho1']\r\n t_1 = params['t_1']\r\n ret = odeint(deriv2, y0, t, args=(r1, a1, K1, p1, q1, r2, a2, K2, p2, q2, r3, a3, K3, p3, q3, rho, t_0, rho1, 
t_1))\r\n C = ret.T\r\n resid = []\r\n pre_resid = []\r\n pre_resid.append(C)\r\n pre_resid = np.array(pre_resid)\r\n resid.append(pre_resid - data)\r\n resid = np.array(resid)\r\n return resid\r\n\r\n\r\ndef fit(deaths):\r\n global y0 # Gambiarra\r\n\r\n deaths = deaths.to_numpy()\r\n temp_inicio = datetime.now()\r\n logger.info('O fit começou...')\r\n\r\n daily1 = []\r\n daily1.append(deaths[0])\r\n for i in range(0, len(deaths) - 1):\r\n daily1.append(deaths[i + 1] - deaths[i])\r\n\r\n # operate smoothing\r\n smoother = LowessSmoother(smooth_fraction=0.1, iterations=1)\r\n smoother.smooth(daily1)\r\n\r\n # generate intervals\r\n low, up = smoother.get_intervals('prediction_interval')\r\n\r\n peaks, _ = find_peaks(-smoother.smooth_data[0])\r\n print(f\"picos: {peaks}\")\r\n peaks1, _ = find_peaks(smoother.smooth_data[0])\r\n print(f\"picos1: {peaks1}\")\r\n if len(deaths) - peaks1[-1] > 100:\r\n peaks1[-1] = len(deaths) - 1\r\n print()\r\n\r\n t = np.linspace(0, int(1 * len(deaths)) - 1, 2000)\r\n t_plot = np.linspace(0, int(1.2 * len(deaths)), 2000)\r\n t0 = np.array(range(len(deaths)))\r\n tw1 = int(.5 * (peaks[len(peaks) - 2] + peaks1[len(peaks1) - 2]))\r\n tw1 = 210\r\n t1 = np.array(range(tw1))\r\n deaths1 = deaths[0:tw1]\r\n tw2 = int(.5 * (peaks[-1] + peaks1[-1]))\r\n tw2 = 320\r\n t2 = np.array(range(tw2))\r\n deaths2 = deaths[0:tw2]\r\n deaths3 = deaths\r\n\r\n daily2 = smoother.smooth_data[0]\r\n\r\n relmin = argrelmin(daily2)\r\n print(relmin)\r\n print()\r\n\r\n ###########################################################\r\n\r\n # Initial conditions vector\r\n y0 = deaths[0]\r\n\r\n # Data to be fited\r\n data2 = []\r\n data2.append(deaths)\r\n data2 = np.array(data2)\r\n\r\n ###########################################################\r\n\r\n params = Parameters()\r\n params.add('r1', value=0.2, min=0, max=1)\r\n params.add('a1', value=0.2, min=0, max=1)\r\n params.add('K1', value=1.1 * deaths1[len(deaths1) - 1], min=deaths1[len(deaths1) - 1])\r\n params.add('p1', value=1, min=1, vary=True)\r\n # params.add('q1', value=q1.value, min=0, max=1, vary=False)\r\n params.add('q1', value=0.6, min=0, max=1)\r\n\r\n minner = Minimizer(func, params, fcn_args=(t1, deaths1))\r\n\r\n # Fit using Nelder-Mead\r\n logger.info('Começou o primeiro processo de minimização...')\r\n out1 = minner.minimize(method='nelder')\r\n # lmfit.report_fit(out1)\r\n logger.info('Terminou o primeiro processo de minimização...')\r\n\r\n print('R2=', 1 - out1.residual.var() / np.var(deaths1))\r\n print(out1.chisqr)\r\n\r\n # Fit using the Levenberg-Marquardt method with the result of the previous fit as initial guess\r\n # logger.info('Começou o segundo processo de minimização...')\r\n # out2 = minner.minimize(method='leastsq', params=out1.params)\r\n # lmfit.report_fit(out2)\r\n # logger.info('Terminou o segundo processo de minimização...')\r\n out2 = out1\r\n\r\n print('R2=', 1 - out2.residual.var() / np.var(deaths1))\r\n print()\r\n params = out2.params\r\n r1 = params['r1']\r\n a1 = params['a1']\r\n K1 = params['K1']\r\n p1 = params['p1']\r\n q1 = params['q1']\r\n\r\n ###########################################################\r\n\r\n params.add('r1', value=r1.value, min=0, max=1, vary=True)\r\n params.add('a1', value=a1.value, min=0, max=1, vary=True)\r\n params.add('K1', value=K1.value, min=0, vary=True)\r\n params.add('p1', value=p1.value, min=1, vary=True)\r\n params.add('q1', value=q1.value, min=0, max=1, vary=True)\r\n # params.add('r2', value=1, min=0, max=1, vary=False)\r\n params.add('r2', 
value=0.3, min=0, max=1)\r\n params.add('a2', value=1, min=0, max=1, vary=False)\r\n # params.add('a2', value=0.5, min=0, max=1)\r\n params.add('K2', value=1.1 * deaths2[-1], min=deaths2[-1], vary=True)\r\n # params.add('K2', value=1.1*K1.value, min=K1.value, vary=True)\r\n params.add('p2', value=1, min=1, vary=True)\r\n # params.add('q2', value=1, min=0, max=1, vary=False)\r\n params.add('q2', value=0.6, min=0, max=1)\r\n params.add('rho', value=0.01, min=0, max=0.2)\r\n params.add('t_0', value=tw1, min=0, max=len(t2))\r\n\r\n minner1 = Minimizer(func1, params, fcn_args=(t2, deaths2))\r\n\r\n # Fit using Nelder-Mead\r\n logger.info('Começou o terceiro processo de minimização...')\r\n out3 = minner1.minimize(method='least_squares')\r\n # lmfit.report_fit(out3)\r\n logger.info('Terminou o terceiro processo de minimização...')\r\n\r\n print('R2=', 1 - out3.residual.var() / np.var(deaths2))\r\n\r\n # Fit using the Levenberg-Marquardt method with the result of the previous fit as initial guess\r\n logger.info('Começou o quarto processo de minimização...')\r\n out4 = minner1.minimize(method='least_squares', params=out3.params)\r\n # lmfit.report_fit(out4)\r\n logger.info('Terminou o quarto processo de minimização...')\r\n\r\n print('R2=', 1 - out4.residual.var() / np.var(deaths2))\r\n print()\r\n params = out4.params\r\n r1 = params['r1']\r\n a1 = params['a1']\r\n K1 = params['K1']\r\n p1 = params['p1']\r\n q1 = params['q1']\r\n r2 = params['r2']\r\n a2 = params['a2']\r\n K2 = params['K2']\r\n p2 = params['p2']\r\n q2 = params['q2']\r\n rho = params['rho']\r\n t_0 = params['t_0']\r\n\r\n ###########################################################\r\n\r\n params.add('r1', value=r1.value, min=0, max=1, vary=True)\r\n params.add('a1', value=a1.value, min=0, max=1, vary=True)\r\n params.add('K1', value=K1.value, min=0, vary=True)\r\n params.add('p1', value=p1.value, min=1, vary=True)\r\n params.add('q1', value=q1.value, min=0, max=1, vary=True)\r\n params.add('r2', value=r2.value, min=0, max=1)\r\n params.add('a2', value=a2.value, min=0, max=1, vary=False)\r\n params.add('K2', value=K2.value, min=deaths2[-1], vary=True)\r\n params.add('p2', value=p2.value, min=1, vary=True)\r\n params.add('q2', value=q2.value, min=0, max=1)\r\n params.add('rho', value=rho.value, min=0, max=0.2)\r\n params.add('t_0', value=t_0.value, min=0)\r\n params.add('r3', value=0.3, min=0, max=1)\r\n params.add('a3', value=1, min=0, max=1, vary=False)\r\n # params.add('a2', value=0.5, min=0, max=1)\r\n params.add('K3', value=1.1 * deaths3[-1], min=deaths3[-1], vary=True)\r\n # params.add('K2', value=1.1*K1.value, min=K1.value, vary=True)\r\n params.add('p3', value=1, min=0, vary=False)\r\n # params.add('q2', value=1, min=0, max=1, vary=False)\r\n params.add('q3', value=0.6, min=0, max=1)\r\n params.add('rho1', value=0.05, min=0, max=0.2)\r\n params.add('t_1', value=tw2, min=0, max=len(t0))\r\n\r\n minner2 = Minimizer(func2, params, fcn_args=(t0, deaths3))\r\n\r\n # Fit using Nelder-Mead\r\n logger.info('Começou o quinto processo de minimização...')\r\n out5 = minner2.minimize(method='least_squares')\r\n # lmfit.report_fit(out5)\r\n logger.info('Terminou o quinto processo de minimização...')\r\n\r\n print('R2=', 1 - out5.residual.var() / np.var(deaths3))\r\n print()\r\n\r\n # Fit using the Levenberg-Marquardt method with the result of the previous fit as initial guess\r\n logger.info('Começou o sexto processo de minimização...')\r\n out6 = minner2.minimize(method='least_squares', params=out5.params)\r\n # 
lmfit.report_fit(out6)\r\n logger.info('Terminou o quinto processo de minimização...')\r\n\r\n print('R2=', 1 - out6.residual.var() / np.var(deaths3))\r\n print()\r\n\r\n param_err = []\r\n for name_par, param in out5.params.items():\r\n if (not (param.stderr is None)) & (param.value != 0):\r\n param_err.append(param.stderr / param.value)\r\n\r\n param_err1 = []\r\n for name_par, param in out6.params.items():\r\n if (not (param.stderr is None)) & (param.value != 0):\r\n param_err1.append(param.stderr / param.value)\r\n\r\n for i in param_err1:\r\n if i > 1:\r\n print('hello mf!')\r\n out6 = out5\r\n\r\n if any(x > 1 for x in param_err1) or np.sum(param_err1) > np.sum(param_err):\r\n print('hello mf!')\r\n out6 = out5\r\n\r\n params = out6.params\r\n r1 = params['r1']\r\n a1 = params['a1']\r\n K1 = params['K1']\r\n p1 = params['p1']\r\n q1 = params['q1']\r\n r2 = params['r2']\r\n a2 = params['a2']\r\n K2 = params['K2']\r\n p2 = params['p2']\r\n q2 = params['q2']\r\n rho = params['rho']\r\n t_0 = params['t_0']\r\n r3 = params['r3']\r\n a3 = params['a3']\r\n K3 = params['K3']\r\n p3 = params['p3']\r\n q3 = params['q3']\r\n rho1 = params['rho1']\r\n t_1 = params['t_1']\r\n\r\n logger.info('O fit terminou...')\r\n temp_final = datetime.now()\r\n logger.info(f\"Duração: {temp_final - temp_inicio}\")\r\n\r\n return (r1, a1, K1, p1, q1, r2, a2, K2, p2, q2, r3, a3, K3, p3, q3, rho, t_0, rho1, t_1)\r\n\r\n\r\ndef modelo_acumulado(params, deltaTempo, tmax, y0):\r\n t = np.linspace(0, int(1 * tmax) - 1 + deltaTempo, 2000)\r\n C1 = odeint(deriv2, y0, t, args=params).T[0]\r\n return [t, C1]\r\n\r\n\r\ndef modelo_diario(params, deltaTempo, tmax, y0):\r\n t = np.linspace(0, int(1 * tmax) - 1 + deltaTempo, 2000)\r\n C1 = odeint(deriv2, y0, t, args=params).T[0]\r\n\r\n params = list(params)\r\n params.insert(0, t)\r\n params.insert(0, C1)\r\n\r\n daily_theo = deriv2(*params)\r\n peaks, _ = find_peaks(daily_theo)\r\n peaks1, _ = find_peaks(-daily_theo)\r\n\r\n return [t, daily_theo, peaks, peaks1]", "repo_name": "luancordeiro/modintervPR", "sub_path": "app/modelo.py", "file_name": "modelo.py", "file_ext": "py", "file_size_in_byte": 12876, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.tanh", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 48, "usage_type": "call"}, {"api_name": 
"scipy.integrate.odeint", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "scipy.integrate.odeint", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 90, "usage_type": "call"}, {"api_name": "scipy.integrate.odeint", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 121, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 129, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 129, "usage_type": "name"}, {"api_name": "tsmoothie.smoother.LowessSmoother", "line_number": 138, "usage_type": "call"}, {"api_name": "scipy.signal.find_peaks", "line_number": 144, "usage_type": "call"}, {"api_name": "scipy.signal.find_peaks", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 161, "usage_type": "call"}, {"api_name": "scipy.signal.argrelmin", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 179, "usage_type": "call"}, {"api_name": "lmfit.Parameters", "line_number": 183, "usage_type": "call"}, {"api_name": "lmfit.Minimizer", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 209, "usage_type": "call"}, {"api_name": "lmfit.Minimizer", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 253, "usage_type": "call"}, {"api_name": "lmfit.Minimizer", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 302, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 329, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 355, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 355, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 362, "usage_type": "call"}, {"api_name": "scipy.integrate.odeint", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 368, "usage_type": "call"}, {"api_name": "scipy.integrate.odeint", "line_number": 369, "usage_type": "call"}, {"api_name": "scipy.signal.find_peaks", "line_number": 376, "usage_type": "call"}, {"api_name": "scipy.signal.find_peaks", "line_number": 377, "usage_type": "call"}]}
+{"seq_id": "17585532709", "text": "from html import entities\nfrom app01.models import *\nimport os\nfrom datetime import datetime\n\nfrom app01.utils.reasoning import mask_name\n\nkb_path = \"./kb\"\nquad_path = os.path.join(kb_path,\"quads.txt\")\nrelation_path = os.path.join(kb_path,\"relations.txt\")\nentity_dir = os.path.join(kb_path,\"entities\")\nif not os.path.exists(kb_path):\n os.mkdir(kb_path)\nif not os.path.exists(entity_dir):\n os.mkdir(entity_dir)\nquads = []\nrelation2id = {}\n\npid2peid = {}\npnames = {}\nfor p in App01Patient.objects.all():\n pid = p.pid\n peid = \"patient-{}\".format(pid)\n pid2peid[pid]=peid\n pnames[p.name]=peid\n with open(os.path.join(entity_dir,\"{}.txt\".format(peid)),\"w\",encoding=\"utf8\") as f:\n f.write(\"id: {}\\n\".format(peid))\n f.write(\"type: patient\\n\")\n f.write(\"pid: {}\\n\".format(pid))\n f.write(\"name: {}\\n\".format(mask_name(p.name)))\n f.write(\"age: {}\\n\".format(p.age))\n f.write(\"gender: {}\\n\".format(\"male\" if p.gender==1 else \"female\"))\n f.write(\"vocation: {}\\n\".format(p.vocation))\n f.write(\"height: {}\\n\".format(p.height))\n f.write(\"weight: {}\\n\".format(p.weight))\n f.write(\"smoking: {}\\n\".format(p.smoking==1))\n f.write(\"vaccine: {}\\n\".format(p.vaccine))\n f.write(\"diagnoseddate: {}\\n\".format(p.diagnoseddate))\n f.write(\"hospitaldate: {}\\n\".format(p.hospitaldate))\n f.write(\"note: {}\\n\".format(p.note))\n\nlname2leid = {}\nlid = 0\nfor l in App01Location.objects.all():\n lname = l.name5\n leid = \"location-{}\".format(lid)\n lid+=1\n lname2leid[lname]=leid\n with open(os.path.join(entity_dir,\"{}.txt\".format(leid)),\"w\",encoding=\"utf8\") as f:\n f.write(\"id: {}\\n\".format(leid))\n f.write(\"type: location\\n\")\n f.write(\"name1: {}\\n\".format(l.name1))\n f.write(\"name2: {}\\n\".format(l.name2))\n f.write(\"name3: {}\\n\".format(l.name3))\n f.write(\"name4: {}\\n\".format(l.name4))\n f.write(\"name5: {}\\n\".format(l.name5))\n f.write(\"gps: {}\\n\".format(l.gps))\n\ndlid2dleid = {}\nfor dl in App01Dynamiclocation.objects.all():\n dlid = dl.id\n dleid = \"Dlocation-{}\".format(dlid)\n dlid2dleid[dlid]=dleid\n with open(os.path.join(entity_dir,\"{}.txt\".format(dleid)),\"w\",encoding=\"utf8\") as f:\n f.write(\"id: {}\\n\".format(dleid))\n f.write(\"type: Dlocation\\n\")\n f.write(\"name: {}\\n\".format(dl.name))\n f.write(\"note: {}\\n\".format(dl.note))\n\niid2ieid = {}\nfor i in App01Item.objects.all():\n iid = i.id\n ieid = \"item-{}\".format(iid)\n iid2ieid[iid]=ieid\n with open(os.path.join(entity_dir,\"{}.txt\".format(ieid)),\"w\",encoding=\"utf8\") as f:\n f.write(\"id: {}\\n\".format(ieid))\n f.write(\"type: item\\n\")\n f.write(\"name: {}\\n\".format(i.name))\n f.write(\"note: {}\\n\".format(i.note))\n\ncontact_types = ['同居','同事','同学','同车','共餐','同行','短暂接触','开会']\n\nrelation2id[\"发生密接\"] = len(relation2id)\nfor t in contact_types:\n relation2id[t] = len(relation2id)\nfor c in App01Contact.objects.all():\n name = c.pid2\n if name in pnames:\n eid = pnames[name]\n else:\n eid = \"contact-{}\".format(c.id)\n with open(os.path.join(entity_dir,\"{}.txt\".format(eid)),\"w\",encoding=\"utf8\") as f:\n f.write(\"id: {}\\n\".format(eid))\n f.write(\"type: contact\\n\")\n f.write(\"name: {}\\n\".format(mask_name(name)))\n f.write(\"phone: {}\\n\".format(c.phone))\n if c.contacttravel_id is None:\n leid = lname2leid[c.contactaddressname_id]\n else:\n leid = dlid2dleid[c.contacttravel_id]\n quads.append((eid,relation2id[\"发生密接\"],leid,None,None))\n 
quads.append((pid2peid[c.pid1_id],relation2id[\"发生密接\"],leid,None,None))\n quads.append((eid,relation2id[contact_types[c.type]],pid2peid[c.pid1_id],None,None))\n\nfor s in App01Stay.objects.all():\n peid = pid2peid[s.pid_id]\n start = datetime.strptime(\"--\".join([str(s.startdate),str(s.starttime)]),\"%Y-%m-%d--%H:%M:%S\")\n if s.enddate is None or s.endtime is None:\n end = None\n else:\n end = datetime.strptime(\"--\".join([str(s.enddate),str(s.endtime)]),\"%Y-%m-%d--%H:%M:%S\")\n action = s.action\n if action not in relation2id:\n relation2id[action] = len(relation2id)\n leid = lname2leid[s.lname_id]\n quads.append((peid,relation2id[action],leid,start,end))\n\nrelation2id[\"乘坐\"] = len(relation2id)\nfor d in App01Ride.objects.all():\n peid = pid2peid[d.pid_id]\n start = datetime.strptime(\"--\".join([str(d.startdate),str(d.starttime)]),\"%Y-%m-%d--%H:%M:%S\")\n if d.enddate is None or d.endtime is None:\n end = None\n else:\n end = datetime.strptime(\"--\".join([str(d.enddate),str(d.endtime)]),\"%Y-%m-%d--%H:%M:%S\")\n dleid = dlid2dleid[d.did_id]\n quads.append((peid,relation2id[\"乘坐\"],dleid,start,end))\n\nrelation2id[\"接触\"] = len(relation2id)\nrelation2id[\"位于\"] = len(relation2id)\nfor t in App01Touch.objects.all():\n peid = pid2peid[t.pid1_id]\n start = datetime.strptime(\"--\".join([str(t.touchdate),str(t.touchtime)]),\"%Y-%m-%d--%H:%M:%S\")\n end = None\n ieid = iid2ieid[t.iid]\n quads.append((peid,relation2id[\"接触\"],ieid,start,end))\n leid = lname2leid[t.touchaddressname_id]\n quads.append((ieid,relation2id[\"位于\"],leid,None,None))\n\nfor p in App01Patient.objects.all():\n peid = pid2peid[p.pid]\n heid = lname2leid[p.homeaddressname_id]\n quads.append((peid,relation2id[\"居住\"],heid,None,None))\n if p.workingaddressname_id is not None:\n weid = lname2leid[p.workingaddressname_id]\n quads.append((peid,relation2id[\"工作\"],weid,None,None))\n\nwith open(relation_path,\"w\",encoding=\"utf8\") as f:\n for name,id in relation2id.items():\n f.write(\"{}\\t{}\\n\".format(name,id))\n\nquads = [\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(q[0],q[1],q[2],\"--\".join(str(q[3]).split()),\"--\".join(str(q[4]).split())) for q in quads]\nquads = set(quads)\n\nwith open(quad_path,\"w\",encoding=\"utf8\") as f:\n for q in quads:\n f.write(q)\n\n\n\n\n\n\n", "repo_name": "rubickkcibur/cdc", "sub_path": "lc/django_app/app01/utils/get_kb.py", "file_name": "get_kb.py", "file_ext": "py", "file_size_in_byte": 5998, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "76", "api": [{"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, 
{"api_name": "app01.utils.reasoning.mask_name", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "app01.utils.reasoning.mask_name", "line_number": 95, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 107, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 111, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 111, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 121, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 121, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 125, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 125, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 133, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 133, "usage_type": "name"}]}
+{"seq_id": "31274248698", "text": "import pygame\nfrom bang import Bang\nfrom constants import BULLET_SIZE\n\npygame.init()\n# Загрузка изображений (и звуков) для вражеской пули, используя конверт_альфа для снижения нагрузки\nb_Up = pygame.image.load('Images/EnBull_up.png').convert_alpha()\nUpEnBull = pygame.transform.scale(b_Up, (BULLET_SIZE, BULLET_SIZE))\nb_Right = pygame.image.load('Images/EnBull_right.png').convert_alpha()\nRightEnBull = pygame.transform.scale(b_Right, (BULLET_SIZE, BULLET_SIZE))\nb_Left = pygame.image.load('Images/EnBull_left.png').convert_alpha()\nLeftEnBull = pygame.transform.scale(b_Left, (BULLET_SIZE, BULLET_SIZE))\nb_Down = pygame.image.load('Images/EnBull_down.png').convert_alpha()\nDownEnBull = pygame.transform.scale(b_Down, (BULLET_SIZE, BULLET_SIZE))\nbullet_explosion = pygame.mixer.Sound('Sounds/bullet_exp.mp3')\n\n\nclass EnemyBullet(pygame.sprite.Sprite):\n def __init__(self, screen, all_objects, enemy):\n super(EnemyBullet, self).__init__()\n all_objects.add(self)\n self.screen = screen\n self.screen_rect = self.screen.get_rect()\n self.type = 'EnBull'\n self.image = UpEnBull\n self.rect = self.image.get_rect()\n self.rect.centerx = enemy.rect.centerx - (BULLET_SIZE / 2)\n self.rect.centery = enemy.rect.centery - (BULLET_SIZE / 2)\n self.y = float(self.rect.centery)\n self.x = float(self.rect.centerx)\n self.sound_exp = bullet_explosion\n self.btUp = False\n self.btRight = False\n self.btLeft = False\n self.btDown = False\n\n def update(self, delta_ms, blocks, bangs, screen):\n \"\"\"Перемещение пули врагов\"\"\"\n self.speed = 350 * delta_ms / 1000 # Скорость от фпс\n\n if self.btDown: # Пуля летит вниз\n self.image = DownEnBull\n self.y += self.speed\n # Пуля летит вверх\n elif self.btUp:\n self.image = UpEnBull\n self.y -= self.speed\n # Пуля летит вправо\n elif self.btRight:\n self.image = RightEnBull\n self.x += self.speed\n # Пуля летит влево\n elif self.btLeft:\n self.image = LeftEnBull\n self.x -= self.speed\n\n for block in blocks:\n if self.rect.colliderect(block.rect):\n self.kill()\n block.kill()\n new_bang = Bang(screen, self.rect.centerx, self.rect.centery)\n bangs.add(new_bang)\n break\n\n if self.rect.bottom > self.screen_rect.bottom or \\\n self.rect.top <= self.screen_rect.top or \\\n self.rect.right > self.screen_rect.right or \\\n self.rect.left < self.screen_rect.left:\n pygame.mixer.Sound.play(self.sound_exp)\n new_bang = Bang(screen, self.rect.centerx, self.rect.centery)\n bangs.add(new_bang)\n self.kill()\n\n self.rect.y = self.y\n self.rect.x = self.x\n\n def draw(self):\n self.screen.blit(self.image, self.rect)\n", "repo_name": "ZectOff/TanksProjectMUIV", "sub_path": "enemy_bullet.py", "file_name": "enemy_bullet.py", "file_ext": "py", "file_size_in_byte": 3154, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "77", "api": [{"api_name": "pygame.init", "line_number": 5, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 8, "usage_type": "attribute"}, {"api_name": "constants.BULLET_SIZE", "line_number": 8, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 
10, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 10, "usage_type": "attribute"}, {"api_name": "constants.BULLET_SIZE", "line_number": 10, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 12, "usage_type": "attribute"}, {"api_name": "constants.BULLET_SIZE", "line_number": 12, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 14, "usage_type": "attribute"}, {"api_name": "constants.BULLET_SIZE", "line_number": 14, "usage_type": "name"}, {"api_name": "pygame.mixer.Sound", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 18, "usage_type": "attribute"}, {"api_name": "constants.BULLET_SIZE", "line_number": 27, "usage_type": "name"}, {"api_name": "constants.BULLET_SIZE", "line_number": 28, "usage_type": "name"}, {"api_name": "bang.Bang", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.mixer.Sound.play", "line_number": 69, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 69, "usage_type": "attribute"}, {"api_name": "bang.Bang", "line_number": 70, "usage_type": "call"}]}
+{"seq_id": "41388167038", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nfs = 8000 # częstotliwość próbkowania\ntc = 6.4 # czas trwania sygnału\n#ts okres próbkowania\nn = tc * fs #sygnał\nt = np.arange(0, 6.4, 1/fs)\nu = []\nfor i in t:\n if 0.5 > i and i>=0:\n u.append(0.9 * np.sin(2 * np.pi * i * 8 - (np.pi/3)) + np.log2(np.abs(np.cos(7*(i**2)) + 2.2)))\n if 1.9 > i and i >= 0.5:\n u.append((np.sin(2*np.cos(4*np.pi*i)*np.pi*i))/(2*(i**2)+1))\n if 3.7 > i and i >= 1.9:\n u.append((i-1.9)**2 - np.cos(13*i))\n if 4.9 > i and i >= 3.7:\n u.append(0.5*(i**0.7)*np.sin(8*i))\n if 6.4 > i and i >= 4.9:\n u.append((2+np.sin(18*i)/(3+np.cos(28*i))))\nplt.plot(t, u)\nplt.show()", "repo_name": "Szek1/Transmisja-Danych", "sub_path": "lab-1/Zad3/zad3.py", "file_name": "zad3.py", "file_ext": "py", "file_size_in_byte": 696, "program_lang": "python", "lang": "pl", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "numpy.arange", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.log2", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}]}
+{"seq_id": "23271813418", "text": "import streamlit as st\nimport pandas as pd\nimport plotly.express as px\nfrom PIL import Image\n\nst.set_page_config(\n page_title=\"Visualize Your Connections\", \n page_icon=\"💽\", \n layout=\"wide\",\n initial_sidebar_state=\"collapsed\")\n\ninstructions = Image.open('images/inst.png')\n\n# \\\\\\ Sidebar /// #\n\n@st.cache_data\ndef load_data(csv, dataset):\n if csv is not None: # if file is uploaded\n df = pd.read_csv(csv, skiprows=3, parse_dates=['Connected On'])\n df['year'] = df['Connected On'].dt.year\n df['Company'] = df['Company'].fillna('No Company Data')\n df['Position'] = df['Position'].fillna('No Position Data')\n\n else: # if no file is uploaded or removed\n df = pd.read_csv(f'data/{dataset}.csv', skiprows=3, parse_dates=['Connected On'])\n df['year'] = df['Connected On'].dt.year\n df['Company'] = df['Company'].fillna('No Company Data')\n df['Position'] = df['Position'].fillna('No Position Data')\n\n return df\n\ndef bar_px(df):\n year = df['year'].value_counts().reset_index()\n\n bar = px.bar(\n year,\n y='year',\n x='count',\n orientation='h',\n text_auto=True,\n color='count',\n height=200,\n color_continuous_scale=px.colors.sequential.Aggrnyl,\n labels={'year':'','count':''}\n )\n bar.update_traces(textfont_size=14, textposition='outside', \n marker_line_width=0, hovertemplate=None, hoverinfo='skip')\n\n bar.update_layout(margin=dict(t=0, l=0, r=0, b=0),\n plot_bgcolor='rgba(0,0,0,0)',\n paper_bgcolor='rgba(0,0,0,0)')\n \n bar.update_coloraxes(showscale=False)\n\n bar.update_xaxes(color='#03b5aa',\n gridcolor='white',\n linecolor='rgba(0,0,0,0)')\n\n bar.update_yaxes(color='#03b5aa',\n linecolor='rgba(0,0,0,0)',\n dtick=1)\n\n return bar \n\ndef treemap_px(df, px_height):\n fig = px.treemap(\n df,\n height=px_height,\n path=['Company','Position'],\n color='Company',\n color_discrete_sequence=px.colors.sequential.Aggrnyl\n )\n fig.update_layout(margin=dict(t=0, l=0, r=0, b=0), \n font=dict(family='Arial', size=14),\n plot_bgcolor='rgba(0,0,0,0)')\n\n fig.update_traces(root_color='rgba(0,0,0,0)', # to match background color of app\n marker=dict(cornerradius=10),\n hovertemplate='%{value} Connection(s)
at %{label}')\n \n return fig\n\ndef polar_px(df):\n df['month'] = df['Connected On'].dt.month_name()\n month = df['month'].value_counts().reset_index()\n month_order = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']\n\n chart = px.bar_polar(\n month,\n theta='month',\n r='count',\n color='count',\n template='plotly_dark',\n color_discrete_map=px.colors.sequential.Redor,\n category_orders={'month': month_order})\n\n return chart\n\n# \\\\\\ Header /// #\n\nst.title(\"LinkedIn connections\")\n\nwith st.container():\n left, right = st.columns((3, 2))\n with left:\n st.subheader(\"the visual: \")\n st.write(\"\"\"\n \n\n my goal was to make this app interactive and allow users to create their own visualization by using their data\n \"\"\")\n st.subheader(\"couple notes:\")\n st.write(\"\"\"\n big thanks to my brother [alberto](https://www.linkedin.com/in/albertoreyes2021/) for letting me use his data \n \n and want to give credit to [isaac](https://www.linkedin.com/in/tuckerrasbury/) and his project that I took inspiration from\n \"\"\")\n with right:\n st.subheader(\"\")\n st.write(\"\")\n dataset = st.selectbox('choose a sample dataset ', ('diego','alberto'))\n csv_file = st.file_uploader('upload your file here 👇 ')\n df = load_data(csv_file, dataset)\n tree_height = st.slider(\"increase the size of the chart 🔍\", 500, 2000, 1000)\n\nwith st.container():\n\n left, right = st.columns((3, 2))\n with left:\n st.subheader(\"how to get your own data\")\n how_to = st.expander(\"steps: \")\n how_to.write(\"\"\"\n [click on this link](https://www.linkedin.com/mypreferences/d/download-my-data) and select \"request archive\" of your data\n\n then, you will receive an email in about 5 minutes with a link to download your data\n\n after that, just extract the file from the zipped folder and you are ready to visualize your connections! \n \"\"\")\n how_to.image(instructions, width=500, use_column_width='auto', output_format='PNG')\n \n\nst.write(\"##\") \n\n# \\\\\\ Treemap /// #\n\ntreemap = treemap_px(df, tree_height)\n\nwith st.container():\n st.plotly_chart(treemap, use_container_width=True)\n\n# \\\\\\ Bar Chart /// #\n\nst.write(\"##\")\n\nst.subheader(\"break it down! 
🤸\")\n\nbar = bar_px(df)\n\nwith st.container():\n st.write(\"by year:\")\n st.plotly_chart(bar, use_container_width=True)\n", "repo_name": "donutdiego/linkedin", "sub_path": "linkedin.py", "file_name": "linkedin.py", "file_ext": "py", "file_size_in_byte": 4990, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "streamlit.set_page_config", "line_number": 6, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 12, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 12, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 25, "usage_type": "call"}, {"api_name": "streamlit.cache_data", "line_number": 16, "usage_type": "attribute"}, {"api_name": "plotly.express.bar", "line_number": 35, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 35, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 43, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 43, "usage_type": "name"}, {"api_name": "plotly.express.treemap", "line_number": 66, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 66, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 71, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 71, "usage_type": "name"}, {"api_name": "plotly.express.bar_polar", "line_number": 88, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 88, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 94, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 94, "usage_type": "name"}, {"api_name": "streamlit.title", "line_number": 101, "usage_type": "call"}, {"api_name": "streamlit.container", "line_number": 103, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 104, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 106, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 107, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 112, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 113, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 119, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 120, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 121, "usage_type": "call"}, {"api_name": "streamlit.file_uploader", "line_number": 122, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 124, "usage_type": "call"}, {"api_name": "streamlit.container", "line_number": 126, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 128, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 130, "usage_type": "call"}, {"api_name": "streamlit.expander", "line_number": 131, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 142, "usage_type": "call"}, {"api_name": "streamlit.container", "line_number": 148, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 149, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 153, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 155, "usage_type": "call"}, {"api_name": "streamlit.container", "line_number": 159, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 160, "usage_type": 
"call"}, {"api_name": "streamlit.plotly_chart", "line_number": 161, "usage_type": "call"}]}
+{"seq_id": "13602740163", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nimport argparse\nimport datetime\nimport logging\nimport os\nimport pickle\nimport signal\nimport warnings\n\nimport logzero\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data.distributed\n\nfrom multiprocessing import cpu_count\nfrom PIL import Image, ImageFile\nfrom sklearn.metrics import classification_report\nfrom torchvision import transforms\nfrom tqdm import tqdm\nfrom logzero import logger\nfrom torch.nn import functional as F\n\nfrom util.dataloader import ImageFolderWithPaths, CSVDataset\nfrom util.functions import accuracy, load_checkpoint, load_model_from_checkpoint, Metric, CustomTenCrop, CustomTwentyCrop, CustomSixCrop, CustomSevenCrop\n\nwarnings.filterwarnings(\"ignore\", \"(Possibly )?corrupt EXIF data\", UserWarning)\nsignal.signal(signal.SIGINT, signal.default_int_handler)\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nparser = argparse.ArgumentParser(description='test')\nparser.add_argument('test', metavar='valid_csv', help='path to test dataset list')\nparser.add_argument('--prefix', default='auto',\n help=\"prefix of model and logs (default: auto)\")\nparser.add_argument('--log-dir', default='logs',\n help='log directory (default: logs)')\nparser.add_argument('--model', '-m', type=str,\n help='model file to test')\nparser.add_argument('-j', '--workers', type=int, default=None,\n help='number of data loading workers (default: 80%% of the number of cores)')\n\nparser.add_argument('-b', '--batch-size', type=int, default=128, help='the batch size')\nparser.add_argument('--topk', type=int, default=3,\n help='report the top-k accuracy (default: 3)')\nparser.add_argument('--print-cr', action='store_true', default=False,\n help='print classification report (default: False)')\nparser.add_argument('--onehot', action='store_true', default=False,\n help='use onehot label (default: False)')\n\n\n# Test Time Augmentation\nparser.add_argument('--tta', action='store_true', default=False,\n help='test time augmentation (use FiveCrop)')\nparser.add_argument('--tta-ten-crop', action='store_true', default=False,\n help='test time augmentation (use TenCrop)')\nparser.add_argument('--tta-custom-six-crop', action='store_true', default=False,\n help='test time augmentation (use CustomSixCrop)')\nparser.add_argument('--tta-custom-seven-crop', action='store_true', default=False,\n help='test time augmentation (use CustomSevenCrop)')\nparser.add_argument('--tta-custom-ten-crop', action='store_true', default=False,\n help='test time augmentation (use CustomTenCrop)')\nparser.add_argument('--tta-custom-twenty-crop', action='store_true', default=False,\n help='test time augmentation (use CustomTwentyCrop)')\n\n# data preprocess\nparser.add_argument('--scale-size', type=int, default=None,\n help='scale size (default: auto)')\nparser.add_argument('--input-size', type=int, default=None,\n help='input size (default: auto)')\nparser.add_argument('--rgb-mean', type=str, default=None,\n help='RGB mean (default: auto)')\nparser.add_argument('--rgb-std', type=str, default=None,\n help='RGB std (default: auto)')\nparser.add_argument('--interpolation', type=str, default=None,\n choices=[None, 'BILINEAR', 'BICUBIC', 'NEAREST'],\n help='interpolation. 
(default: auto)')\nparser.add_argument('--grayscale', action='store_true', default=False,\n help='change input channel from 3 to 1.')\n\n# misc\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. ')\n\n\ndef main():\n global args\n args = parser.parse_args()\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n\n if args.prefix == 'auto':\n args.prefix = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n\n formatter = logging.Formatter('%(message)s')\n logzero.formatter(formatter)\n\n if not os.path.exists(args.log_dir):\n os.makedirs(args.log_dir, exist_ok=True)\n\n log_filename = \"{}-test.log\".format(args.prefix)\n log_filepath = os.path.join(args.log_dir, log_filename)\n logzero.logfile(log_filepath)\n\n if args.workers is None:\n args.workers = max(1, int(0.8 * cpu_count()))\n elif args.workers == -1:\n args.workers = cpu_count()\n\n cudnn.benchmark = True\n\n logger.info('Running script with args: {}'.format(str(args)))\n\n checkpoint = load_checkpoint(args, args.model)\n logger.info(\"=> loaded the model (epoch {})\".format(checkpoint['epoch']))\n model_arch = checkpoint['arch']\n model_args = checkpoint['args']\n\n if model_arch.startswith('efficientnet-b4'):\n scale_size = 200\n input_size = 190\n else:\n scale_size = 120\n input_size = 112\n\n if args.scale_size:\n scale_size = args.scale_size\n else:\n args.scale_size = scale_size\n if args.input_size:\n input_size = args.input_size\n else:\n args.input_size = input_size\n\n if args.rgb_mean:\n rgb_mean = args.rgb_mean\n rgb_mean = [float(mean) for mean in rgb_mean.split(',')]\n else:\n rgb_mean = model_args.rgb_mean\n\n if args.rgb_std:\n rgb_std = args.rgb_std\n rgb_std = [float(std) for std in rgb_std.split(',')]\n else:\n rgb_std = model_args.rgb_std\n\n if args.interpolation:\n interpolation = args.interpolation\n else:\n try:\n interpolation = model_args.interpolation\n except AttributeError:\n interpolation = 'BICUBIC'\n\n logger.info(\"scale_size: {} input_size: {}\".format(scale_size, input_size))\n logger.info(\"rgb_mean: {}\".format(rgb_mean))\n logger.info(\"rgb_std: {}\".format(rgb_std))\n logger.info(\"interpolation: {}\".format(interpolation))\n\n interpolation = getattr(Image, interpolation, 3)\n\n try:\n args.grayscale = model_args.grayscale\n except:\n pass\n\n # Data augmentation and normalization for test\n if args.grayscale:\n if len(rgb_mean) == 1:\n gray_mean = rgb_mean\n gray_std = rgb_std\n else:\n # gray_mean = [0.5,]\n # gray_std = [0.5,]\n gray_mean = (rgb_mean[0], )\n gray_std = (rgb_std[0], )\n\n data_transforms = {\n 'test': transforms.Compose([\n transforms.Grayscale(),\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n transforms.CenterCrop(input_size),\n transforms.ToTensor(),\n transforms.Normalize(gray_mean, gray_std),\n ]),\n 'test_FiveCrop': transforms.Compose([\n transforms.Grayscale(),\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n transforms.FiveCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(gray_mean, gray_std)(crop) for crop in crops]))\n ]),\n 'test_TenCrop': transforms.Compose([\n transforms.Grayscale(),\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n transforms.TenCrop(input_size),\n 
transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(gray_mean, gray_std)(crop) for crop in crops]))\n ]),\n 'test_CustomSixCrop': transforms.Compose([\n transforms.Grayscale(),\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n CustomSixCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(gray_mean, gray_std)(crop) for crop in crops]))\n ]),\n 'test_CustomSevenCrop': transforms.Compose([\n transforms.Grayscale(),\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n CustomSevenCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(gray_mean, gray_std)(crop) for crop in crops]))\n ]),\n 'test_CustomTenCrop': transforms.Compose([\n transforms.Grayscale(),\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n CustomTenCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(gray_mean, gray_std)(crop) for crop in crops]))\n ]),\n 'test_CustomTwentyCrop': transforms.Compose([\n transforms.Grayscale(),\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n CustomTwentyCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(gray_mean, gray_std)(crop) for crop in crops]))\n ])\n }\n else:\n data_transforms = {\n 'test': transforms.Compose([\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n transforms.CenterCrop(input_size),\n transforms.ToTensor(),\n transforms.Normalize(rgb_mean, rgb_std)\n ]),\n 'test_FiveCrop': transforms.Compose([\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n transforms.FiveCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(rgb_mean, rgb_std)(crop) for crop in crops]))\n ]),\n 'test_TenCrop': transforms.Compose([\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n transforms.TenCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(rgb_mean, rgb_std)(crop) for crop in crops]))\n ]),\n 'test_CustomSixCrop': transforms.Compose([\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n CustomSixCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(rgb_mean, rgb_std)(crop) for crop in crops]))\n ]),\n 'test_CustomSevenCrop': transforms.Compose([\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n CustomSevenCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(rgb_mean, 
rgb_std)(crop) for crop in crops]))\n ]),\n 'test_CustomTenCrop': transforms.Compose([\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n CustomTenCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(rgb_mean, rgb_std)(crop) for crop in crops]))\n ]),\n 'test_CustomTwentyCrop': transforms.Compose([\n transforms.Resize((scale_size, scale_size), interpolation=interpolation),\n CustomTwentyCrop(input_size),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.ToTensor()(crop) for crop in crops])),\n transforms.Lambda(lambda crops: torch.stack(\n [transforms.Normalize(rgb_mean, rgb_std)(crop) for crop in crops]))\n ])\n }\n\n tfms = 'test'\n if args.tta:\n tfms = 'test_FiveCrop'\n batch_size = args.batch_size // 5\n elif args.tta_ten_crop:\n tfms = 'test_TenCrop'\n batch_size = args.batch_size // 10\n elif args.tta_custom_six_crop:\n tfms = 'test_CustomSixCrop'\n batch_size = args.batch_size // 6\n elif args.tta_custom_seven_crop:\n tfms = 'test_CustomSevenCrop'\n batch_size = args.batch_size // 7\n elif args.tta_custom_ten_crop:\n tfms = 'test_CustomTenCrop'\n batch_size = args.batch_size // 10\n elif args.tta_custom_twenty_crop:\n tfms = 'test_CustomTwentyCrop'\n batch_size = args.batch_size // 20\n else:\n batch_size = args.batch_size\n\n\n image_datasets = {\n 'test': CSVDataset(args.test, data_transforms[tfms], onehot=args.onehot)\n }\n\n test_num_classes = len(image_datasets['test'].classes)\n test_class_names = image_datasets['test'].classes\n\n kwargs = {'num_workers': args.workers, 'pin_memory': True} if args.cuda else {}\n test_loader = torch.utils.data.DataLoader(\n image_datasets['test'], batch_size=batch_size, shuffle=False, **kwargs)\n\n logger.info(\"number of test dataset: {}\".format(len(test_loader.dataset)))\n logger.info(\"number of classes: {}\".format(len(test_class_names)))\n\n model, metric_fc, criterion_state_dict, num_classes, class_names = load_model_from_checkpoint(args, checkpoint, test_num_classes, test_class_names, grayscale=args.grayscale)\n\n if args.topk > num_classes:\n logger.warn('--topk must be less than or equal to the class number of the model')\n args.topk = num_classes\n logger.warn('--topk set to {}'.format(num_classes))\n\n # check test and train class names\n do_report = True\n if test_num_classes != num_classes:\n logger.info(\"The number of classes for train and test is different.\")\n logger.info(\"Skip accuracy report.\")\n do_report = False\n\n test(args, model_arch, model, metric_fc, test_loader, class_names, do_report, logger)\n\n logger.info(\"=> Saved test log to \\\"{}\\\"\".format(log_filepath))\n\n\ndef test(args, model_arch, model, metric_fc, test_loader, class_names, do_report, logger):\n model.module.eval()\n if metric_fc:\n metric_fc.module.eval()\n test_accuracy = Metric('test_accuracy')\n test_loss = Metric('test_loss')\n\n\n pred = []\n Y = []\n correct_num = 0\n\n filepath = '{}-test-results.log'.format(args.prefix)\n savepath = os.path.join(args.log_dir, filepath)\n f = open(savepath, 'w')\n\n softmax = torch.nn.Softmax(dim=1)\n criterion = nn.CrossEntropyLoss()\n\n with tqdm(total=len(test_loader), desc='Test') as t:\n with torch.no_grad():\n for (data, target, paths) in test_loader:\n if args.cuda:\n data = data.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n\n if args.tta or args.tta_ten_crop or \\\n args.tta_custom_ten_crop or 
args.tta_custom_twenty_crop or \\\n args.tta_custom_six_crop or args.tta_custom_seven_crop:\n bs, ncrops, c, h, w = data.size()\n if metric_fc:\n feature = model(data.view(-1, c, h, w))\n output = metric_fc(feature.reshape(feature.shape[:-2]))\n else:\n output = model(data.view(-1, c, h, w))\n output = output.view(bs, ncrops, -1).mean(1)\n else:\n if metric_fc:\n feature = model(data)\n output = metric_fc(feature.reshape(feature.shape[:-2]))\n else:\n output = model(data)\n\n if do_report:\n pred += [int(l.argmax()) for l in output]\n Y += [int(l) for l in target]\n\n for path, y, preds in zip(paths, target, softmax(output)):\n probabilities, labels = preds.topk(args.topk)\n preds_text = ''\n for i in range(args.topk):\n preds_text += \" {} {}\".format(labels[i], probabilities[i])\n f.write(\"{} {}{}\\n\".format(path, int(y), preds_text))\n\n if str(y.item()) == str(labels[0].item()):\n correct_num += 1\n\n if do_report:\n test_accuracy.update(accuracy(output, target))\n test_loss.update(criterion(output, target))\n t.set_postfix({'loss': test_loss.avg.item(),\n 'accuracy': 100. * test_accuracy.avg.item()})\n t.update(1)\n\n f.close()\n logger.info(\"=> Saved test results to \\\"{}\\\"\".format(savepath))\n\n if do_report:\n\n cr_filepath = '{}-test-classification_report.log'.format(args.prefix)\n cr_savepath = os.path.join(args.log_dir, cr_filepath)\n\n cr = classification_report(Y, pred, target_names=class_names)\n if args.print_cr:\n print(cr)\n with open(cr_savepath, 'w') as crf:\n crf.write(cr)\n logger.info(\"=> Saved classification report to \\\"{}\\\"\".format(cr_savepath))\n\n logger.info(\"model: {}\".format(args.model))\n logger.info(\"Test-loss: {}\".format(test_loss.avg))\n logger.info(\"Test-accuracy: {} ({}/{})\".format((correct_num / len(test_loader.dataset)), correct_num, len(test_loader.dataset)))\n\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "knjcode/kaggle-kuzushiji-recognition-2019", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 18920, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 28, "dataset": "github-code", "pt": "76", "api": [{"api_name": "warnings.filterwarnings", "line_number": 30, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 31, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 31, "usage_type": "attribute"}, {"api_name": "signal.default_int_handler", "line_number": 31, "usage_type": "attribute"}, {"api_name": "PIL.ImageFile.LOAD_TRUNCATED_IMAGES", "line_number": 32, "usage_type": "attribute"}, {"api_name": "PIL.ImageFile", "line_number": 32, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 93, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 96, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 96, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 98, "usage_type": "call"}, {"api_name": "logzero.formatter", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": 
"attribute"}, {"api_name": "logzero.logfile", "line_number": 106, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 109, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 113, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 113, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 115, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 115, "usage_type": "name"}, {"api_name": "util.functions.load_checkpoint", "line_number": 117, "usage_type": "call"}, {"api_name": "logzero.logger.info", "line_number": 118, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 118, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 158, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 158, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 159, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 159, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 160, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 160, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 161, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 161, "usage_type": "name"}, {"api_name": "PIL.Image", "line_number": 163, "usage_type": "argument"}, {"api_name": "torchvision.transforms.Compose", "line_number": 182, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 182, "usage_type": "name"}, {"api_name": "torchvision.transforms.Grayscale", "line_number": 183, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 183, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 184, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 184, "usage_type": "name"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 185, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 185, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 186, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 186, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 187, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 187, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 189, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 189, "usage_type": "name"}, {"api_name": "torchvision.transforms.Grayscale", "line_number": 190, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 190, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 191, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 191, "usage_type": "name"}, {"api_name": "torchvision.transforms.FiveCrop", "line_number": 192, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 192, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 193, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 193, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 193, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 194, 
"usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 194, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 195, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 195, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 195, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 196, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 196, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 198, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 198, "usage_type": "name"}, {"api_name": "torchvision.transforms.Grayscale", "line_number": 199, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 199, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 200, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 200, "usage_type": "name"}, {"api_name": "torchvision.transforms.TenCrop", "line_number": 201, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 201, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 202, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 202, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 202, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 203, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 203, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 204, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 204, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 204, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 205, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 205, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 207, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 207, "usage_type": "name"}, {"api_name": "torchvision.transforms.Grayscale", "line_number": 208, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 208, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 209, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 209, "usage_type": "name"}, {"api_name": "util.functions.CustomSixCrop", "line_number": 210, "usage_type": "call"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 211, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 211, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 211, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 212, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 212, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 213, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 213, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 213, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 214, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 214, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 216, 
"usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 216, "usage_type": "name"}, {"api_name": "torchvision.transforms.Grayscale", "line_number": 217, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 217, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 218, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 218, "usage_type": "name"}, {"api_name": "util.functions.CustomSevenCrop", "line_number": 219, "usage_type": "call"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 220, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 220, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 220, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 221, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 221, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 222, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 222, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 222, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 223, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 223, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 225, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 225, "usage_type": "name"}, {"api_name": "torchvision.transforms.Grayscale", "line_number": 226, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 226, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 227, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 227, "usage_type": "name"}, {"api_name": "util.functions.CustomTenCrop", "line_number": 228, "usage_type": "call"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 229, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 229, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 229, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 230, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 230, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 231, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 231, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 231, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 232, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 232, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 234, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 234, "usage_type": "name"}, {"api_name": "torchvision.transforms.Grayscale", "line_number": 235, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 235, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 236, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 236, "usage_type": "name"}, {"api_name": "util.functions.CustomTwentyCrop", "line_number": 237, "usage_type": "call"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 238, "usage_type": "call"}, {"api_name": "torchvision.transforms", 
"line_number": 238, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 238, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 239, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 239, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 240, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 240, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 240, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 241, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 241, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 246, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 246, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 247, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 247, "usage_type": "name"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 248, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 248, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 249, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 249, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 250, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 250, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 252, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 252, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 253, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 253, "usage_type": "name"}, {"api_name": "torchvision.transforms.FiveCrop", "line_number": 254, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 254, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 255, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 255, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 255, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 256, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 256, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 257, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 257, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 257, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 258, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 258, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 260, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 260, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 261, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 261, "usage_type": "name"}, {"api_name": "torchvision.transforms.TenCrop", "line_number": 262, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 262, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 263, "usage_type": "call"}, {"api_name": 
"torchvision.transforms", "line_number": 263, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 263, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 264, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 264, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 265, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 265, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 265, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 266, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 266, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 268, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 268, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 269, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 269, "usage_type": "name"}, {"api_name": "util.functions.CustomSixCrop", "line_number": 270, "usage_type": "call"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 271, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 271, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 271, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 272, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 272, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 273, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 273, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 273, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 274, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 274, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 276, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 276, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 277, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 277, "usage_type": "name"}, {"api_name": "util.functions.CustomSevenCrop", "line_number": 278, "usage_type": "call"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 279, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 279, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 279, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 280, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 280, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 281, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 281, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 281, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 282, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 282, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 284, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 284, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 285, "usage_type": "call"}, {"api_name": "torchvision.transforms", 
"line_number": 285, "usage_type": "name"}, {"api_name": "util.functions.CustomTenCrop", "line_number": 286, "usage_type": "call"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 287, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 287, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 287, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 288, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 288, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 289, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 289, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 289, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 290, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 290, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 292, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 292, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 293, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 293, "usage_type": "name"}, {"api_name": "util.functions.CustomTwentyCrop", "line_number": 294, "usage_type": "call"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 295, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 295, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 295, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 296, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 296, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 297, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 297, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 297, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 298, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 298, "usage_type": "name"}, {"api_name": "util.dataloader.CSVDataset", "line_number": 326, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 333, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 333, "usage_type": "attribute"}, {"api_name": "logzero.logger.info", "line_number": 336, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 336, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 337, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 337, "usage_type": "name"}, {"api_name": "util.functions.load_model_from_checkpoint", "line_number": 339, "usage_type": "call"}, {"api_name": "logzero.logger.warn", "line_number": 342, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 342, "usage_type": "name"}, {"api_name": "logzero.logger.warn", "line_number": 344, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 344, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 349, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 349, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 350, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 350, "usage_type": "name"}, {"api_name": "logzero.logger", "line_number": 353, 
"usage_type": "argument"}, {"api_name": "logzero.logger.info", "line_number": 355, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 355, "usage_type": "name"}, {"api_name": "util.functions.Metric", "line_number": 362, "usage_type": "call"}, {"api_name": "util.functions.Metric", "line_number": 363, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 371, "usage_type": "call"}, {"api_name": "os.path", "line_number": 371, "usage_type": "attribute"}, {"api_name": "torch.nn.Softmax", "line_number": 374, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 374, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 375, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 375, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 377, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 378, "usage_type": "call"}, {"api_name": "util.functions.accuracy", "line_number": 416, "usage_type": "call"}, {"api_name": "logzero.logger.info", "line_number": 423, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 423, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 428, "usage_type": "call"}, {"api_name": "os.path", "line_number": 428, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 430, "usage_type": "call"}, {"api_name": "logzero.logger.info", "line_number": 435, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 435, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 437, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 437, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 438, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 438, "usage_type": "name"}, {"api_name": "logzero.logger.info", "line_number": 439, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 439, "usage_type": "name"}]}
+{"seq_id": "12970557805", "text": "from django.core.management.base import BaseCommand, CommandError\nfrom django.contrib.auth.models import User\nfrom Atreya.appointments.models import * \nfrom Atreya.appointments.serializers import *\nfrom rest_framework.authtoken.models import Token\nfrom .data.appointment_types import appointment_types\nfrom .data.appointments import appointments\nfrom .data.pre_appointment_questions import pre_appointment_questions\n# from .data.pre_appointment_responses import pre_appointment_responses\n\nclass Command(BaseCommand):\n help = 'create/delete sample data'\n def add_arguments(self, parser):\n parser.add_argument('command', type=str, help='create/delete sample data')\n\n def handle(self, *args, **options):\n if options['command']== 'create':\n try:\n print('Appointment Types')\n for appointment_type in appointment_types:\n serializer = AppointmentTypeSerializer(None, data=appointment_type)\n if serializer.is_valid():\n appointment_type = serializer.save()\n else:\n print(serializer.errors)\n raise CommandError('failure in creating sample data')\n\n print('Pre Appointment Questions')\n for pre_appointment_question in pre_appointment_questions:\n serializer = PreAppointmentQuestionSerializer(None, data=pre_appointment_question)\n if serializer.is_valid():\n pre_appointment_question = serializer.save()\n else:\n print(serializer.errors)\n raise CommandError('failure in creating sample data')\n\n print('Appointments')\n for appointment in appointments:\n for response in appointment.get('pre_appointment_responses',[]):\n # import pdb; pdb.set_trace()\n try:\n response['question'] = PreAppointmentQuestion.objects.get(appointment_type=appointment['appointment_type'],question=response['question']).id\n except PreAppointmentQuestion.DoesNotExist:\n print('\\n\\n\\nHERE\\n\\n\\n')\n # import pdb; pdb.set_trace()\n print(response)\n serializer = AppointmentSerializer(None, data=appointment)\n if serializer.is_valid():\n appointment = serializer.save()\n else:\n print(serializer.errors)\n raise CommandError('failure in creating sample data')\n\n # print('Pre appointment Responses') \n # for pre_appointment_response in pre_appointment_responses:\n # serializer = PreAppointmentResponseSerializer(None, data=pre_appointment_response)\n # if serializer.is_valid():\n # pre_appointment_response = serializer.save()\n # else:\n # print(serializer.errors)\n # raise CommandError('failure in creating sample data')\n\n except Exception as e:\n print(e)\n raise CommandError('failure in creating sample data')\n self.stdout.write(self.style.SUCCESS('Successfully created data'))\n elif options[\"command\"] == \"delete\":\n try:\n PreAppointmentResponse.objects.all().delete()\n Appointment.objects.all().delete()\n PreAppointmentQuestion.objects.all().delete()\n AppointmentType.objects.all().delete()\n\n except Exception as e:\n print(e)\n raise CommandError('failure in deleting sample data')\n\n self.stdout.write(self.style.SUCCESS('Successfully deleted data'))\n else:\n raise CommandError(\"not a valid command\") \n ", "repo_name": "Seva-Solutions/MyClinic_Backend", "sub_path": "Atreya/appointments/management/commands/appointments.py", "file_name": "appointments.py", "file_ext": "py", "file_size_in_byte": 4029, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 11, "usage_type": "name"}, {"api_name": "data.appointment_types.appointment_types", 
"line_number": 20, "usage_type": "name"}, {"api_name": "django.core.management.base.CommandError", "line_number": 26, "usage_type": "call"}, {"api_name": "data.pre_appointment_questions.pre_appointment_questions", "line_number": 29, "usage_type": "name"}, {"api_name": "django.core.management.base.CommandError", "line_number": 35, "usage_type": "call"}, {"api_name": "data.appointments.appointments", "line_number": 38, "usage_type": "name"}, {"api_name": "django.core.management.base.CommandError", "line_number": 52, "usage_type": "call"}, {"api_name": "django.core.management.base.CommandError", "line_number": 65, "usage_type": "call"}, {"api_name": "django.core.management.base.CommandError", "line_number": 76, "usage_type": "call"}, {"api_name": "django.core.management.base.CommandError", "line_number": 80, "usage_type": "call"}]}
+{"seq_id": "27638896538", "text": "#!/usr/bin/env python\n# @Date : 2020-07-04\n# @Author : Bright Li (brt2@qq.com)\n# @Link : https://gitee.com/brt2\n# @Version : 0.1.3\n\nimport os\nimport shutil\nfrom pathlib import Path\nimport json\nimport xmlrpc.client\nfrom time import sleep\n\nfrom fmt_md import MarkdownFormatter, format_one_doc\nfrom db_mgr import DocumentsMgr\n\ntry:\n from utils.log import getLogger\nexcept ImportError:\n from logging import getLogger\nlogger = getLogger()\n\n\nTIME_FOR_FREQUENCE_LIMIT = 5\nTESTING = False\nif TESTING:\n print(\"\\n\" + \"#\"*49)\n print(\"注意:当前为模拟上传环境\")\n print(\"#\"*49 + \"\\n\")\n\n\nclass PostidNotUnique(Exception):\n \"\"\" 获取到postid不唯一,可能是存在同名title的文档 \"\"\"\n\nclass CnblogManager:\n def __init__(self, path_cnblog_account):\n self.dict_conf = {\n # \"blog_url\": \"\",\n # \"blog_id\" : \"\",\n # \"app_key\" : \"\",\n # \"user_id\" : \"\",\n # \"username\": \"\",\n # \"password\": \"\",\n # \"repo_dir\": \"\"\n }\n self.load_cnblog_conf(path_cnblog_account)\n self.cnblog_server = xmlrpc.client.ServerProxy(self.dict_conf[\"blog_url\"])\n self.mime = None\n\n self.md_fmt = MarkdownFormatter()\n self.md_fmt.set_ignore_websites([\"cnblogs.com/blog/\" + self.dict_conf[\"user_id\"]])\n\n repo_dir = self.get_repodir()\n assert os.path.isabs(repo_dir), \"[repo_dir]必须为绝对路径\"\n assert repo_dir, \"请先为配置文件指定操作的repo目录...\"\n self.db_mgr = DocumentsMgr(repo_dir)\n\n def get_repodir(self):\n repo_dir = self.dict_conf[\"repo_dir\"]\n if isinstance(repo_dir, dict):\n from platform import system\n repo_dir = repo_dir[system()]\n return repo_dir\n\n def load_cnblog_conf(self, path_conf):\n with open(path_conf, \"r\") as fp:\n dict_conf = json.load(fp)\n for key, value in dict_conf.items():\n self.dict_conf[key] = value\n\n # def load_repo_conf(self, path_conf):\n\n def get_postid(self, path=None, title=None):\n # if path.isdecimal():\n # return path # just the postid\n if path:\n if os.path.abspath(path):\n path = os.path.relpath(path, self.get_repodir())\n return self.db_mgr.get_postid_by_path(path)\n elif title:\n return self.db_mgr.get_postid_by_title(title)\n\n def get_user_info(self):\n \"\"\" return a list of user-info \"\"\"\n user_info = self.cnblog_server.blogger.getUsersBlogs(\n self.dict_conf[\"blog_url\"],\n self.dict_conf[\"username\"],\n self.dict_conf[\"password\"])\n return user_info\n\n def pull_img(self, path_md):\n self.md_fmt.load_file(path_md)\n\n if self.md_fmt.get_images(\"http\"):\n self.md_fmt.download_img()\n\n def _upload_img(self, path_img):\n if TESTING:\n return \"https://img2020.cnblogs.com/blog/2039866/202005/2039866-20200525195318772-1131646535.jpg\"\n\n file_name = os.path.basename(path_img)\n # from download_img_link import format_ext\n # file_name = format_ext(file_name)\n _, suffix = os.path.splitext(file_name)\n\n try:\n type_ = self.mime[suffix]\n except KeyError:\n logger.error(f\"未定义的扩展名类型【{suffix}】,使用默认值'image/jpeg'\")\n type_ = \"image/jpeg\"\n\n with open(path_img, 'rb') as fp:\n file = {\n \"bits\": fp.read(),\n \"name\": file_name,\n \"type\": type_\n }\n url_new = self.cnblog_server.metaWeblog.newMediaObject(\n self.dict_conf[\"blog_id\"],\n self.dict_conf[\"username\"],\n self.dict_conf[\"password\"],\n file)\n return url_new[\"url\"]\n\n def _load_mime(self):\n with open(\"mime.json\", \"r\") as fp:\n self.mime = json.load(fp)\n\n def _new_blog(self, struct_post):\n if TESTING: # 模拟博客上传\n postid = \"12960953\"\n else:\n postid = self.cnblog_server.metaWeblog.newPost(\n self.dict_conf[\"blog_id\"],\n 
self.dict_conf[\"username\"],\n self.dict_conf[\"password\"],\n struct_post, True)\n print(f\">> 完成blog的上传:【{postid}】\")\n self.db_mgr.add_doc(self.md_fmt, str(postid))\n\n def _repost_blog(self, postid, struct_post):\n \"\"\" 重新发布 \"\"\"\n if TESTING: # 模拟博客上传\n status = True\n else:\n status = self.cnblog_server.metaWeblog.editPost(\n postid,\n self.dict_conf[\"username\"],\n self.dict_conf[\"password\"],\n struct_post, True)\n print(f\">> 完成blog的更新:【{status}】\")\n self.db_mgr.modify_doc(self.md_fmt)\n\n def _is_article(self, path_md):\n abspath_article = os.path.join(self.db_mgr.repo_dir, self.db_mgr.data[\"dir_article\"])\n return path_md.find(abspath_article) >= 0\n\n def _update_categories(self, path_md):\n assert os.path.isabs(path_md)\n assert path_md.find(os.path.abspath(self.db_mgr.repo_dir)) == 0\n\n # 通过相对路径\n def get_categories(key_dirname):\n # path_dir = Path(os.path.dirname(path_md)).as_posix()\n path_parts = Path(os.path.dirname(path_md)).parts # tuple\n assert key_dirname in path_parts, f\"Error: {key_dirname} not in {path_parts}\"\n index = path_parts.index(key_dirname)\n return list(path_parts[index +1:])\n\n # categories = get_categories(article_dirname if self._is_article(path_md) else essay_dirname)\n categories = get_categories(self.db_mgr.data[\"dir_essay\"])\n if self.md_fmt.metadata[\"categories\"] != categories:\n self.md_fmt.metadata[\"categories\"] = categories\n self.md_fmt.update_meta()\n return True\n else:\n return False # 无需更新\n\n def _rebuild_images(self, path_md):\n dir_img = path_md[:-3] # 同名文件夹\n has_dir = os.path.exists(dir_img)\n\n md_parser = self.md_fmt\n\n # 上传图片\n dict_images_relpath = md_parser.get_images(\"local\", force_abspath=False)\n if not has_dir:\n assert not dict_images_relpath, f\"Markdown文档引用的图像未存储在同名文件夹下: {dict_images_relpath}\"\n md_parser.unlock_text()\n return False\n\n # 删除未被引用的(多余)图像\n list_dir = os.listdir(dir_img)\n dict_images_backup = md_parser.get_images(\"backup\", force_abspath=False)\n dict_images_local = {**dict_images_relpath, **dict_images_backup}\n if not dict_images_local:\n md_parser.unlock_text()\n logger.warning(f\"Markdown文档并未引用本地图像,同名dir内容如下: {list_dir}\")\n if input(\"是否清除同名文件夹? 
[Y/n]: \").lower() != \"n\":\n shutil.rmtree(dir_img)\n logger.warning(f\"已清除未引用文件夹:【{dir_img}】\")\n return False\n\n set_redundant = set(list_dir) - {os.path.basename(i) for i in dict_images_local.values()}\n str_redundant = '\\n'.join(set_redundant)\n if set_redundant and input(f\"\"\"################ 是否删除多余图片文件:\n{str_redundant}\n################ [Y/n]:\"\"\").lower() != \"n\":\n for file in set_redundant:\n os.remove(os.path.join(dir_img, file))\n\n # 将图像链接地址改写为cnblog_link\n dict_images = {}\n dir_md = os.path.dirname(path_md)\n # if dict_images_relpath:\n for line_idx, rel_path in dict_images_relpath.items():\n dict_images[line_idx] = os.path.join(dir_md, rel_path)\n md_parser.process_images(dict_images, self._upload_img)\n\n # 备注原本地图像链接\n text_lines = md_parser.get_text()\n # if dict_images_relpath:\n for line, url_local in dict_images_relpath.items():\n # path_rel = os.path.relpath(url_local, md_parser.file_name)\n md_parser.modify_text(line, f\"{text_lines[line].rstrip()} \")\n return True\n\n def post_blog(self, path_md):\n md_parser = self.md_fmt\n\n if self.mime is None:\n self._load_mime()\n\n # md_parser读取文档,并初步格式化\n format_one_doc(md_parser, path_md)\n # 图片的处理\n self._rebuild_images(path_md)\n # 更新category\n self._update_categories(path_md)\n # 保存修改url的Markdown\n md_parser.overwrite()\n\n # if self._is_article(path_md):\n # # 貌似没有用 ??\n # md_parser.metadata[\"categories\"] = [\"[文章分类]\"] + md_parser.metadata[\"categories\"]\n\n blog_title = self.md_fmt.make_title()\n struct_post = {\n \"title\": blog_title,\n \"categories\": [\"[Markdown]\"] + md_parser.metadata[\"categories\"],\n \"description\": \"\".join(md_parser.get_text()),\n 'mt_keywords': \",\".join(md_parser.metadata[\"tags\"])\n }\n\n postid = self.get_postid(path=self.md_fmt.file_path)\n if postid:\n self._repost_blog(postid, struct_post)\n else:\n while True:\n try:\n self._new_blog(struct_post)\n except xmlrpc.client.Fault as e:\n err_type = str(e).split(':', 1)[0]\n if err_type == \"\n print(f\"cnblog限制了发送频率,请静候{TIME_FOR_FREQUENCE_LIMIT}s\\n程序正在后台运行,请勿退出...\")\n sleep(TIME_FOR_FREQUENCE_LIMIT)\n elif err_type == \"'等类似标签字符?\")\n else:\n raise Exception(f\"未知的上传问题: {e}\")\n else:\n break\n\n def download_blog(self, title_or_postid, ignore_img=True):\n if not ignore_img:\n raise Exception(\"尚未开发,敬请期待\")\n\n postid = title_or_postid if title_or_postid.isdecimal() else self.get_postid(title=title_or_postid)\n if not postid:\n logger.error(f\"本地数据库未存储blog: 【{title_or_postid}】,\\\n但不确定博客园服务器状态。如有必要,请指定postid值,重新查询。\")\n return\n\n dict_data = self.cnblog_server.metaWeblog.getPost(\n postid,\n self.dict_conf[\"username\"],\n self.dict_conf[\"password\"])\n\n dir_download = \"cnblog_bak\"\n if not os.path.exists(dir_download):\n os.makedirs(dir_download)\n path_save = f\"{dir_download}/{postid}.md\"\n with open(path_save, \"w\", encoding=\"utf8\") as fp:\n fp.write(dict_data['description'])\n print(f\">> 已下载blog:【{path_save}】\")\n\n def delete_blog(self, path_file):\n \"\"\" postid: str_id or path_file \"\"\"\n # if not postid.isdecimal():\n postid = self.get_postid(path=path_file)\n\n try:\n self.cnblog_server.blogger.deletePost(\n self.dict_conf[\"app_key\"],\n postid,\n self.dict_conf[\"username\"],\n self.dict_conf[\"password\"],\n True)\n except xmlrpc.client.Fault:\n # logger.error(e) # \n title = self.db_mgr.get_title_by_postid(postid)\n logger.error(f\"Web操作失败,请手动删除博客【{title}】\")\n else:\n print(f\">> 已删除blog:【{postid}】\")\n\n path_rel = self.db_mgr.data[\"postids\"][postid]\n dir_md = path_file[:-3]\n if 
os.path.exists(dir_md):\n os.rmdir(dir_md)\n self.db_mgr.remove_doc(path_rel)\n\n def move_blog(self, path_from, path_to):\n # 无需cnblog变更\n self.db_mgr.move_doc(path_from, path_to)\n\n def get_recent_post(self, num=9999):\n \"\"\"\n return: [{\n 'dateCreated': ,\n 'description': '...',\n 'title': 'Python数据结构',\n 'categories': ['[随笔分类]33-python', '[随笔分类]3-syntax'],\n 'enclosure': {'length': 0},\n 'link': 'https://www.cnblogs.com/brt2/p/12944353.html',\n 'permalink': 'https://www.cnblogs.com/brt2/p/12944353.html',\n 'postid': '12944353',\n 'source': {},\n 'userid': '-2'\n }, ...]\n \"\"\"\n recent_post = self.cnblog_server.metaWeblog.getRecentPosts(\n self.dict_conf[\"blog_id\"],\n self.dict_conf[\"username\"],\n self.dict_conf[\"password\"],\n num)\n return recent_post\n", "repo_name": "brt2cv/md2blog", "sub_path": "cnblog_mgr.py", "file_name": "cnblog_mgr.py", "file_ext": "py", "file_size_in_byte": 13260, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "logging.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "xmlrpc.client.client.ServerProxy", "line_number": 47, "usage_type": "call"}, {"api_name": "xmlrpc.client.client", "line_number": 47, "usage_type": "attribute"}, {"api_name": "xmlrpc.client", "line_number": 47, "usage_type": "name"}, {"api_name": "fmt_md.MarkdownFormatter", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.isabs", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "db_mgr.DocumentsMgr", "line_number": 56, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 62, "usage_type": "call"}, {"api_name": "json.load", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path", "line_number": 155, "usage_type": "attribute"}, {"api_name": "os.path.isabs", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path", "line_number": 165, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 193, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 200, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path", "line_number": 204, "usage_type": "attribute"}, 
{"api_name": "os.remove", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path", "line_number": 210, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 214, "usage_type": "call"}, {"api_name": "os.path", "line_number": 214, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 217, "usage_type": "call"}, {"api_name": "os.path", "line_number": 217, "usage_type": "attribute"}, {"api_name": "fmt_md.format_one_doc", "line_number": 235, "usage_type": "call"}, {"api_name": "xmlrpc.client.client", "line_number": 262, "usage_type": "attribute"}, {"api_name": "xmlrpc.client", "line_number": 262, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 291, "usage_type": "call"}, {"api_name": "os.path", "line_number": 291, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 292, "usage_type": "call"}, {"api_name": "xmlrpc.client.client", "line_number": 310, "usage_type": "attribute"}, {"api_name": "xmlrpc.client", "line_number": 310, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 319, "usage_type": "call"}, {"api_name": "os.path", "line_number": 319, "usage_type": "attribute"}, {"api_name": "os.rmdir", "line_number": 320, "usage_type": "call"}]}
+{"seq_id": "14779464591", "text": "from dash import Dash, html\nfrom dash.dependencies import Input, Output\n\nimport dash_blueprint as dbp\n\n\napp = Dash(__name__)\n\napp.scripts.config.serve_locally = True\napp.css.config.serve_locally = True\n\napp.layout = html.Div(\n [\n dbp.Menu(\n children=[\n dbp.MenuItem(text=\"Top level\", children=[\n dbp.MenuItem(text=\"Sub Menu 1\", href=\"/sub1\"),\n dbp.MenuItem(text=\"Sub Menu 2\", href=\"/sub2\"),\n ])\n ]\n )\n ]\n)\n\nif __name__ == \"__main__\":\n app.run_server(debug=False)\n", "repo_name": "bsgip/dash-blueprint", "sub_path": "tests/app/menu.py", "file_name": "menu.py", "file_ext": "py", "file_size_in_byte": 576, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "dash.Dash", "line_number": 7, "usage_type": "call"}, {"api_name": "dash.html.Div", "line_number": 12, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 12, "usage_type": "name"}, {"api_name": "dash_blueprint.Menu", "line_number": 14, "usage_type": "call"}, {"api_name": "dash_blueprint.MenuItem", "line_number": 16, "usage_type": "call"}, {"api_name": "dash_blueprint.MenuItem", "line_number": 17, "usage_type": "call"}, {"api_name": "dash_blueprint.MenuItem", "line_number": 18, "usage_type": "call"}]}
+{"seq_id": "73336527285", "text": "from django.db.models import Q\nfrom django.contrib import admin\nfrom django.contrib.admin.views.main import ChangeList\n\nclass InputFilter(admin.SimpleListFilter):\n template = 'admin/input_filter.html'\n\n def lookups(self, request, model_admin):\n # Dummy, required to show the filter.\n return ((),)\n\n def choices(self, changelist):\n # Grab only the \"all\" option.\n all_choice = next(super().choices(changelist))\n all_choice['query_parts'] = (\n (k, v)\n for k, v in changelist.get_filters_params().items()\n if k != self.parameter_name\n )\n yield all_choice\n\n'''\nclass YearFilter(InputFilter):\n parameter_name = 'year'\n title = 'year' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(vehicle_year__year__icontains=bit)\n ) \n return queryset.filter(any_name)\n'''\nclass YearFilter(InputFilter):\n parameter_name = 'year'\n title = 'year' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(year__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\nclass MakeModelFilter(InputFilter):\n parameter_name = 'makemodel'\n title = 'make and model' \n def queryset(self, request, queryset):\n term = self.value()\n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n #Q(vehicle_makemodel__make__icontains=bit) |\n #Q(vehicle_makemodel__vehiclemodel__icontains=bit) |\n Q(vehicle_makemodel__makemodel__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\n'''\nclass MakeFilter(InputFilter):\n parameter_name = 'make'\n title = 'make' \n def queryset(self, request, queryset):\n term = self.value()\n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(vehicle_make__make__icontains=bit)\n ) \n return queryset.filter(any_name)\n\nclass VehicleModelFilter(InputFilter):\n parameter_name = 'vehiclemodel'\n title = 'model' \n def queryset(self, request, queryset):\n term = self.value()\n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(vehicle_vehiclemodel__vehiclemodel__icontains=bit)\n ) \n return queryset.filter(any_name)\n'''\n\nclass TrimFilter(InputFilter):\n parameter_name = 'trim'\n title = 'trim' \n def queryset(self, request, queryset):\n term = self.value()\n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(vehicle_trim__trim__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\nclass EngineSizeFilter(InputFilter):\n parameter_name = 'enginesize'\n title = 'engine size' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(vehicle_enginesize__enginesize__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\nclass EngineCodeFilter(InputFilter):\n parameter_name = 'enginecode'\n title = 'engine code' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(vehicle_enginecode__enginecode__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()", "repo_name": "reinali07/autoshop-manager", "sub_path": "vehicles_db/filters.py", "file_name": "filters.py", "file_ext": "py", "file_size_in_byte": 4216, "program_lang": "python", 
"lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.contrib.admin.SimpleListFilter", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 5, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 63, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 104, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 107, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 118, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 121, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 132, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 135, "usage_type": "call"}]}
+{"seq_id": "32279961678", "text": "from flask import (Blueprint, redirect, render_template, request, url_for)\nfrom matchFinder.models import praeferenz_model\nfrom matchFinder.models import teilnehmer_model\nfrom . import database_helper\nfrom . import limiter\nfrom . import helper\nimport hashlib\nimport json\n\nbp = Blueprint('preference', __name__, url_prefix='/preference')\n\n@bp.route('')\ndef set_preference(verteilung_id):\n\t\"\"\"\n\tloads the verteilung to and id, presents the user with a form\n\tasking him to enter some credentials\n\t\"\"\"\n\n\tverteilung = database_helper.get_verteilung_by_hashed_id(verteilung_id)\n\tif verteilung != None:\n\t\treturn render_template('validate.html', id=verteilung_id,\n\t\t\tprotected=True if verteilung.protected else False)\n\telse:\n\t\treturn render_template('validate.html', id=verteilung_id,\n\t\t\terror=\"Keine gültige Verteilung!\")\n\n\n@bp.route('/validate/', methods=['POST'])\n@limiter.limit(\"5 per minute\", error_message=\"Too many requests! Try again later.\")\ndef validate():\n\t\"\"\"\n\tValidates a user by its matrikelnummer.\n\tIf the entered number is valid, the user is redirected to the next page.\n\tIf not, an error is displayed and the form is presented again.\n\t\"\"\"\n\n\tdata = request.form.get('data', None)\n\tobj = json.loads(data)\n\thashed_verteilung_id = obj['id']\n\tprotected = obj[\"protected\"]\n\tmatr_nr = request.form.get('matr_nr', None)\n\terror, verteilung, teilnehmer = helper.check_user_credentials(matr_nr,\n\t\t\t\t\t\t\t\t\t\thashed_verteilung_id)\n\tif error:\n\t\treturn render_template('validate.html', id=hashed_verteilung_id,\n\t\t\t\tprotected=protected, error=error)\n\telse:\n\t\tthemen = database_helper.get_thema_list_by_id(verteilung.thema_list_id).themen\n\t\treturn render_template(\"preference.html\", teilnehmer=teilnehmer,\n\t\t\t\tthemen=themen, verteilung_id=verteilung.id,\n\t\t\t\tveto_allowed=verteilung.veto_allowed, min_votes = verteilung.min_votes)\n\n@bp.route('/register/', methods=['POST'])\n@limiter.limit(\"5 per minute\", error_message=\"Too many requests! 
Try again later.\")\ndef register():\n\t\"\"\"\n\tregisters a new user, redirects him to the next page\n\t\"\"\"\n\n\tdata = request.form.get('data', None)\n\tobj = json.loads(data)\n\thashed_verteilung_id = obj['id']\n\tfirst_name = request.form.get('first_name', None)\n\tlast_name = request.form.get('last_name', None)\n\tverteilung = database_helper.get_verteilung_by_hashed_id(hashed_verteilung_id)\n\tif verteilung != None:\n\t\tteilnehmer = teilnehmer_model.Teilnehmer(first_name=first_name, matr_nr=0,\n\t\t\tlast_name=last_name, list_id=verteilung.teilnehmer_list_id)\n\t\tdatabase_helper.insert_teilnehmer(teilnehmer)\n\t\tthemen = database_helper.get_thema_list_by_id(verteilung.thema_list_id).themen\n\t\treturn render_template(\"preference.html\", teilnehmer=teilnehmer,\n\t\t\t\tthemen=themen, verteilung_id=verteilung.id,\n\t\t\t\tveto_allowed=verteilung.veto_allowed, min_votes = verteilung.min_votes)\n\treturn render_template('validate.html', id = hashed_verteilung_id,\n\t\tprotected=False, error=\"Es ist ein Fehler aufgetreten!\")\n\n@bp.route('save', methods=['POST'])\ndef save():\n\t\"\"\"\n\tsave the Präferenzen of a user.\n\tIf this user updated already existing präferenzen instead\n\tof entering new ones, the old präferenzen get overwritten.\n\t\"\"\"\n\n\tinformation_object = request.form.get('information', None)\n\n\tobj = json.loads(information_object)\n\tverteilung_id = obj[\"verteilung_id\"]\n\tteilnehmer_id = obj[\"teilnehmer_id\"]\n\tverteilung = database_helper.get_verteilung_by_id(verteilung_id)\n\tnumber_of_themen_in_verteilung = len(verteilung.thema_list.themen)\n\tpreferences = []\n\tfor index in range(number_of_themen_in_verteilung):\n\t\tpreference = request.form.get(str(index + 1), None)\n\t\tpreferences.append(preference)\n\tpreference_string = helper.convert_preferences(preferences)\n\texisting_praef = database_helper.get_praeferenz(teilnehmer_id, verteilung_id)\n\tif existing_praef != None:\n\t\tif not verteilung.editable:\n\t\t\thashed_verteilung_id = hashlib.sha256(str(verteilung.id).encode()).hexdigest()\n\t\t\treturn render_template('validate.html', id = hashed_verteilung_id,\n\t\t\t\tprotected=verteilung.protected,\n\t\t\t\terror=\"Das Bearbeiten der Präferenzen bei dieser Verteilung ist nicht erlaubt!\")\n\t\tdatabase_helper.update_praef(existing_praef, preference_string)\n\t\treturn redirect(url_for('home.index_with_message',\n\t\t\tmessage=\"Deine Präferenzen wurden aktualisiert!\"))\n\telse:\n\t\tpraeferenz = praeferenz_model.Praeferenz(\n\t\t\tteilnehmer_id=teilnehmer_id,\n\t\t\tverteilung_id=verteilung_id,\n\t\t\tpraeferenzen=preference_string)\n\t\tdatabase_helper.insert_praeferenz(praeferenz)\n\t\treturn redirect(url_for('home.index_with_message',\n\t\t\tmessage=\"Deine Präferenzen wurden gespeichert!\"))", "repo_name": "felix-wolf/MatchFinder", "sub_path": "matchFinder/preference.py", "file_name": "preference.py", "file_ext": "py", "file_size_in_byte": 4564, "program_lang": "python", "lang": "de", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "flask.Blueprint", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": 
"json.loads", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "matchFinder.models.teilnehmer_model.Teilnehmer", "line_number": 67, "usage_type": "call"}, {"api_name": "matchFinder.models.teilnehmer_model", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 85, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 85, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 94, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 94, "usage_type": "name"}, {"api_name": "hashlib.sha256", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 105, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 105, "usage_type": "call"}, {"api_name": "matchFinder.models.praeferenz_model.Praeferenz", "line_number": 108, "usage_type": "call"}, {"api_name": "matchFinder.models.praeferenz_model", "line_number": 108, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 113, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 113, "usage_type": "call"}]}
+{"seq_id": "36803156263", "text": "import pygame\nimport time\nimport random\nfrom global_vars import *\nimport global_vars\nfrom player_ball1 import Ball\nfrom map13 import map1\nfrom put_coin import put_coin\nfrom enemy3 import Enemy\nfrom score_it import score_display\nfrom exit_menu import exit_menu\n#from wall import Wall\n#from move_it import move_it\n\n\npygame.init()\ngameDisplay = pygame.display.set_mode((d_width,d_height))\npygame.display.set_caption(\"Pac-Man\")\nclock = pygame.time.Clock()\n\n\ndef check_enemy_collision(ball):\n\tif ball.immune_t > 0:\n\t\tball.immune_t += -1\n\t\t\n\tif ball.immune_t == 0:\n\t\tenemies = pygame.sprite.spritecollide(ball,enemy_sprites,True)\n\t\tfor enemmy in enemies:\n\t\t\tball.x_inc = 0\n\t\t\tball.y_inc = 0\n\t\t\tcrash_sound.play()\n\t\t\ttime.sleep(2)\n\t\t\tj = random.randrange(0,9)\n\t\t\ti = random.randrange(0,10) \n\t\t\t#print \"death = i :\",(enemmy.rect.y-y00-e-2)/box_w,\"j :\",(enemmy.rect.x-x00-e-2)/box_w\n\t\t\t#print \"birth = i :\",i,\"j :\",j\n\t\t\n\t\t\t\n\t\t\tif (i == 4 and ( j == 0 or j == 1 or j == 8 or j == 9)) or (i == 6 and ( j == 0 or j == 1 or j == 8 or j == 9)) or (i == 5 and ( j == 4 or j == 5)) or (i == 1 and (j == 1 or j == 3 or j == 6 or j == 8)):\n\t\t\t\ti += 1\n\t\t\t#ghost = Enemy(x00+e+2+j*box_w,y00+e+2+i*box_w)\t\n\t\t\tghost = Enemy(x00+e+2+j*box_w,y00+e+2+i*box_w,enemmy.immage)\n\t\t\t#for enemyy in enemies:\n\t\t\t#\tghost = Enemy(x00+e+2+j*box_w,y00+e+2+i*box_w,enemyy.color)\n\t\t\n\t\tif enemies :\n\t\t\tball.lifes += -1\n\t\t\tball.immune_t = 40\n\t\t\tif(ball.lifes == 0):\n\t\t\t\ttime.sleep(2)\n\t\t\t\tpygame.quit()\n\t\t\t\tquit()\n\t\t\ndef gameLoop():\n\t#player_x\n\t#player_y\n\t\n\tmap1(gameDisplay)\n\tput_coin()\n\tball = Ball(x00+e+2,y00+e+2)\t\n\t\n\t#if enemy is imported from enemy1\n\tghost1 = Enemy(x00+e+2+9*box_w,y00+e+2,'images3.jpeg')\n\tghost2 = Enemy(x00+e+2+9*box_w,y00+e+2+10*box_w,'images4.png')\n\tghost3 = Enemy(x00+e+2,y00+e+2+10*box_w,'images6.png')\n\t\n\t#if enemy is imported from enemy \n\t#ghost1 = Enemy(x00+e+2+9*box_w,y00+e+2,l_green)\n\t#ghost2 = Enemy(x00+e+2+9*box_w,y00+e+2+10*box_w,l_red)\n\t#ghost3 = Enemy(x00+e+2,y00+e+2+10*box_w,white)\t\n\t\t\n\twhile True:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT :\n\t\t\t\tpygame.quit()\n\t\t\t\tquit()\n\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_ESCAPE:\n\t\t\t\t\n\t\t\t\t\t#pygame.quit()\n\t\t\t\t\t#quit()\n\t\t\t\t\texit_menu(gameDisplay)\n\t\t\t\t\ttime.sleep(1)\n\t\t\t\t\n\t\t\t\t#else :\n\t\t\t\t#\tball.move(event.key)\t\n\t\t\n\t\t\t#elif event.type == pygame.KEYUP:\n\t\t\t#\tball.move(event.key)\n\t\t\n\t\tball.move(event)\n\t\t\n\t\tglobal_vars.player_x = (ball.rect.x )\n\t\tglobal_vars.player_y = (ball.rect.y )\n\t\t\n\t\tall_sprites.update()\n\t\t\t\n\t\tgameDisplay.fill(bg_color)\n\t\t\n\t\tscore_display(gameDisplay,ball.score,ball.lifes)\n\t\tall_sprites.draw(gameDisplay)\n\t\tpygame.display.update()\n\t\n\t\tcheck_enemy_collision(ball)\n\t\t\t\t\t\t\t\t\t\n\t\tclock.tick(30)\n\t\ngameLoop()\npygame.quit()\nquit()\n\t\n", "repo_name": "shivam-dev-singh/PacMan-Copy", "sub_path": "PacMan/start_game.py", "file_name": "start_game.py", "file_ext": "py", "file_size_in_byte": 2742, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "pygame.init", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 17, "usage_type": "call"}, 
{"api_name": "pygame.display", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 27, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 33, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 34, "usage_type": "call"}, {"api_name": "enemy3.Enemy", "line_number": 42, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 51, "usage_type": "call"}, {"api_name": "map13.map1", "line_number": 58, "usage_type": "call"}, {"api_name": "put_coin.put_coin", "line_number": 59, "usage_type": "call"}, {"api_name": "player_ball1.Ball", "line_number": 60, "usage_type": "call"}, {"api_name": "enemy3.Enemy", "line_number": 63, "usage_type": "call"}, {"api_name": "enemy3.Enemy", "line_number": 64, "usage_type": "call"}, {"api_name": "enemy3.Enemy", "line_number": 65, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 78, "usage_type": "attribute"}, {"api_name": "exit_menu.exit_menu", "line_number": 82, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 83, "usage_type": "call"}, {"api_name": "global_vars.player_x", "line_number": 93, "usage_type": "attribute"}, {"api_name": "global_vars.player_y", "line_number": 94, "usage_type": "attribute"}, {"api_name": "score_it.score_display", "line_number": 100, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 102, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 102, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 109, "usage_type": "call"}]}
+{"seq_id": "3480636072", "text": "from __future__ import absolute_import, division, print_function, with_statement\n\nimport os\nimport sys\ntry:\n import builtins\nexcept ImportError:\n path = os.path.realpath(os.path.join(os.path.dirname(__file__),\n \"..\", \"..\", \"..\", \"deps\", \"future\", \"src\"))\n sys.path.append(path)\n\nfrom builtins import input\nfrom builtins import str\nfrom builtins import range\n\n# Standard library imports\nimport argparse\nimport cmd\nimport fnmatch\nimport glob\nimport io\nimport math\nimport readline\nimport subprocess\nimport time\n\n# iRODS imports\nfrom irods.exception import (CATALOG_ALREADY_HAS_ITEM_BY_THAT_NAME,\n CAT_NAME_EXISTS_AS_COLLECTION,\n CollectionDoesNotExist, DataObjectDoesNotExist,\n USER_FILE_DOES_NOT_EXIST)\nfrom irods.session import iRODSSession\nfrom irods.data_object import chunks, irods_basename\nfrom irods.keywords import FORCE_FLAG_KW\ntry:\n from irods.manager.data_object_manager import (READ_BUFFER_SIZE,\n WRITE_BUFFER_SIZE)\nexcept ImportError:\n READ_BUFFER_SIZE = 1024 * io.DEFAULT_BUFFER_SIZE\n WRITE_BUFFER_SIZE = 1024 * io.DEFAULT_BUFFER_SIZE\n\n__all__ = [\"IShell\"]\n\n\n# Redefine the delimiters according to file name syntax. This is required\n# for autocompletion of file names.\nreadline.set_completer_delims(\" \\t\\n\")\n\n\nclass IShell(cmd.Cmd, object):\n \"\"\"Shell like client for managing iRODS data\n \"\"\"\n\n cursor = None\n\n interactive = False\n\n parser = {}\n\n session = None\n\n class _IShellError(Exception):\n pass\n\n def default(self, line):\n \"\"\"Handle unknown commands\n \"\"\"\n args = line.split(None, 1)\n self.errorln(\"error: unknown command `{:}'\", args[0])\n\n def completedefault(self, text, line, begidx, endidx):\n dirname, _, content = self.get_content(text + \"*\")\n completion = list(content.keys())\n if dirname:\n if dirname == \"/\":\n dirname = \"\"\n else: \n dirname = dirname.replace(\"/\", r\"/\")\n completion = [r\"/\".join((dirname, c)) for c in completion]\n return completion\n\n def get_content(self, pattern, data=True, collections=True, base=None):\n \"\"\"Get items within the collection that match the pattern\n \"\"\"\n if base is None:\n base = self.cursor\n if pattern.endswith(\"/\"):\n pattern += \"*\"\n elif pattern.endswith(\"..\"):\n pattern += \"/*\"\n elif pattern.endswith(\"/.\"):\n pattern = pattern[:-1] + \"*\"\n elif pattern == \".\":\n pattern = \"*\"\n\n if pattern.startswith(\"~\"):\n pattern = self.home + pattern[1:]\n\n try:\n dirname, basename = pattern.rsplit(\"/\", 1)\n except ValueError:\n dirname = None\n else:\n if dirname == \"\":\n dirname = \"/\"\n path = self.get_path(dirname, base)\n try:\n base = self.session.collections.get(path)\n except CollectionDoesNotExist:\n return None, base, None\n pattern = basename\n\n content, n = {}, 0\n if collections:\n for c in base.subcollections:\n n += 1\n if c.name == \"\":\n continue\n if fnmatch.fnmatch(c.name, pattern):\n content[c.name] = (True, c)\n if data:\n for d in base.data_objects:\n n += 1\n if fnmatch.fnmatch(d.name, pattern):\n content[d.name] = (False, d)\n\n if (n > 0) and not content:\n content = None\n return dirname, base, content\n\n def get_path(self, path, base=None):\n if path.startswith(\"/\"):\n return os.path.normpath(path)\n else:\n if base is None:\n base = self.cursor\n path = os.path.join(base.path, path)\n return os.path.normpath(path)\n\n def parse_command(self, command, options, noargs=False):\n \"\"\"Parse a command line for arguments and options\n \"\"\"\n args = 
self._command[1:]\n try:\n opts = vars(self.parser[command].parse_args(args))\n try:\n args = opts.pop(\"args\")\n except KeyError:\n arg = opts.pop(\"arg\")\n if arg is None:\n args = []\n else:\n args = [arg]\n except SystemExit:\n raise self._IShellError()\n\n if (not noargs) and (not args):\n self.errorln(\"{:}: missing operand\", command)\n raise self._IShellError()\n return opts, args\n\n def parse_line(self, line):\n \"\"\"Parse a line and strip commands\n \"\"\"\n cmds, cmd, arg = [], [], []\n quote, commented = None, False\n for c in line:\n if commented:\n if c in \"\\r\\n\":\n commented = False\n elif quote is None:\n if c in \"#;\\r\\n\":\n if arg:\n cmd.append(\"\".join(arg))\n arg = []\n if cmd:\n cmds.append(cmd)\n cmd = []\n if c == \"#\":\n commented = True\n elif c in \" \\t\":\n if arg:\n cmd.append(\"\".join(arg))\n arg = []\n elif c in \"'\\\"\":\n quote = c\n else:\n arg.append(c)\n else:\n if c == quote:\n quote = None\n else:\n arg.append(c)\n if arg:\n cmd.append(\"\".join(arg))\n if cmd:\n cmds.append(cmd)\n return cmds\n\n def println(self, text, *opts, **kwopts):\n self.printfmt(text, *opts, **kwopts)\n print()\n\n def printfmt(self, text, *opts, **kwopts):\n if opts or kwopts:\n text = text.format(*opts, **kwopts)\n else:\n text = str(text)\n print(text, end=\"\")\n\n def errorln(self, text, *opts, **kwopts):\n if opts or kwopts:\n text = text.format(*opts, **kwopts)\n else:\n text = str(text)\n if self.interactive:\n text = \"\\033[93m{:}\\033[0m\".format(text)\n print(text, file=sys.stderr)\n\n def ask_for_confirmation(self, text, *args):\n self.printfmt(text, *args)\n try:\n answer = input()\n except EOFError:\n return False\n if answer in (\"y\", \"Y\", \"yes\", \"Yes\"):\n return True\n return False\n\n def _register_cd(self):\n if \"cd\" not in self.parser:\n p = argparse.ArgumentParser(\n prog=\"cd\",\n description=\"Change the iRODS collection to the given path. \"\n \"If no path is provided then get back to HOME ({:}).\".format(\n self.home))\n p.add_argument(\"arg\", metavar=\"path\", type=str, nargs=\"?\",\n help=\"the path to change the current collection to\")\n self.parser[\"cd\"] = p\n\n def help_cd(self):\n \"\"\"Print a help message for the `cd' command\n \"\"\"\n self._register_cd()\n self.println(self.parser[\"cd\"].format_help())\n\n def do_cd(self, line):\n \"\"\"Change the current iRODS collection\n \"\"\"\n self._register_cd()\n try:\n opts, args = self.parse_command(\"cd\", \"\", noargs=True)\n except self._IShellError:\n return\n if not args:\n path = self.home\n else:\n path = args[0]\n if path.startswith(\"~\"):\n path = path.replace(\"~\", self.home)\n path = self.get_path(path)\n\n # Fetch the corresponding iRODS collection\n try:\n self.cursor = self.session.collections.get(path)\n except CollectionDoesNotExist:\n self.errorln(\"cd: path `{:}' does not exist\", args[0])\n else:\n # Update the prompt\n current = irods_basename(self.cursor.path)\n self.prompt = \"[{:} \\033[94m{:}\\033[0m]$ \".format(\n self.prompt_prefix, current)\n\n def _register_ls(self):\n if \"ls\" not in self.parser:\n p = argparse.ArgumentParser(\n prog=\"ls\",\n description=\"List the objects inside the given iRODS \"\n \"collection. 
If no path is provided then list the current \"\n \"collection.\")\n p.add_argument(\"args\", metavar=\"path\", type=str, nargs=\"*\",\n help=\"the path(s) to list\")\n self.parser[\"ls\"] = p\n\n def help_ls(self):\n \"\"\"Print a help message for the `ls' command\n \"\"\"\n self._register_ls()\n self.println(self.parser[\"ls\"].format_help())\n\n def do_ls(self, line):\n \"\"\"List the objects inside the given iRODS collection\n \"\"\"\n self._register_ls()\n try:\n opts, args = self.parse_command(\"ls\", \"\", noargs=True)\n except self._IShellError:\n return\n list_subcol = True\n if not args:\n args = (\"*\",)\n\n for iteration, pattern in enumerate(args):\n # Find items that match the pattern\n if ((pattern == \".\") or (pattern == \"*\") or (pattern == \"/\") or\n pattern.endswith(\"/.\") or pattern.endswith(\"/*\")):\n list_subcol = False\n\n dirname, base, content = self.get_content(pattern)\n if content is None:\n \tself.errorln(\"ls: cannot access `{:}':\"\n \" No such data object or collection\",\n pattern)\n \tbreak\n elif len(content) == 0:\n break\n\n # Print the result\n if iteration > 0:\n self.println(\"\")\n if len(args) > 1:\n self.println(\"{:}:\", pattern)\n if (len(content) == 1) and list_subcol:\n pattern = list(content.keys())[0]\n if pattern[-1] == \"/\":\n pattern += \"*\"\n else:\n pattern += \"/*\"\n if dirname:\n pattern = os.path.join(dirname, pattern)\n _, _, content = self.get_content(pattern)\n keys = sorted([str(s) for s in content.keys()], key=str.lower)\n\n if len(keys) == 0:\n continue\n\n if self.interactive:\n # Get the current terminal's width\n p = subprocess.Popen(\"stty size\", shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = p.communicate()\n if err:\n screen_width = 80\n else:\n screen_width = int(out.split()[-1])\n\n # Compute the layout\n tokens = []\n for item in keys:\n n = len(item) + 2\n if content[item][0]:\n item = \"\\033[94m{:}\\033[0m\".format(item)\n extra = len(item) - n + 3\n else:\n extra = 1\n tokens.append((n, item, extra))\n max_width = max(tokens)[0]\n n_columns = max(screen_width // max_width, 1)\n n_tokens = len(tokens)\n if n_columns >= n_tokens:\n n_columns = n_tokens\n n_rows = 1\n else:\n n_rows = int(math.ceil(n_tokens / float(n_columns)))\n\n column_width = n_columns * [0]\n for i in range(n_columns):\n w = 0\n for j in range(n_rows):\n index = i * n_rows + j\n if index >= n_tokens:\n continue\n wj = tokens[index][0]\n if wj > w:\n w = wj\n column_width[i] = w - 1\n\n # Print the result\n for i in range(n_rows):\n for j in range(n_columns):\n index = j * n_rows + i\n if index >= n_tokens:\n continue\n self.printfmt(\"{:<{width}}\", tokens[index][1],\n width=column_width[j] + tokens[index][2])\n self.println(\"\")\n else:\n self.println(os.linesep.join(keys))\n\n def _register_mkdir(self):\n if \"mkdir\" not in self.parser:\n p = argparse.ArgumentParser(\n prog=\"mkdir\", description=\"Create new iRODS collection(s)\")\n p.add_argument(\"args\", metavar=\"path\", type=str, nargs=\"+\",\n help=\"the path(s) of the new collection(s)\")\n self.parser[\"mkdir\"] = p\n\n def help_mkdir(self):\n \"\"\"Print a help message for the `mkdir' command\n \"\"\"\n self._register_mkdir()\n self.println(self.parser[\"mkdir\"].format_help())\n\n def do_mkdir(self, line):\n \"\"\"Create new iRODS collection(s)\n \"\"\"\n self._register_mkdir()\n try:\n opts, args = self.parse_command(\"mkdir\", \"\")\n except self._IShellError:\n return\n\n for arg in args:\n path = self.get_path(arg)\n try:\n 
self.session.collections.create(path)\n except CATALOG_ALREADY_HAS_ITEM_BY_THAT_NAME:\n self.errorln(\"mkdir: cannot create collection `{:}':\"\n \" Object exists\", irods_basename(path))\n break\n\n def _register_pwd(self):\n if \"pwd\" not in self.parser:\n p = argparse.ArgumentParser(\n prog=\"mkdir\", description=\"Show the current iRODS collection.\")\n self.parser[\"pwd\"] = p\n\n def help_pwd(self):\n \"\"\"Print a help message for the `pwd' command\n \"\"\"\n self._register_pwd()\n self.println(self.parser[\"pwd\"].format_help())\n\n def do_pwd(self, line):\n \"\"\"Show the current iRODS collection\n \"\"\"\n self._register_pwd()\n self.println(self.cursor.path)\n\n def _register_rm(self):\n if \"rm\" not in self.parser:\n p = argparse.ArgumentParser(\n prog=\"rm\", description=\"Remove collection(s) or data object(s) \"\n \"from iRODS.\")\n p.add_argument(\"args\", metavar=\"path\", type=str, nargs=\"+\",\n help=\"the path(s) of the object(s) to remove\")\n p.add_argument(\"-f\", \"--force\", action=\"store_true\",\n help=\"do not prompt before removal\")\n p.add_argument(\"-r\", \"--recursive\", action=\"store_true\",\n help=\"remove collections and their content \"\n \"recursively\")\n p.add_argument(\"-T\", \"--no-trash\", action=\"store_true\",\n help=\"do not put the erased object in the trash.\"\n \" Remove them definitively\")\n self.parser[\"rm\"] = p\n\n def help_rm(self):\n \"\"\"Print a help message for the `rm' command\n \"\"\"\n self._register_rm()\n self.println(self.parser[\"rm\"].format_help())\n\n def do_rm(self, line):\n \"\"\"Remove collection(s) or data object(s) from iRODS\n \"\"\"\n self._register_rm()\n try:\n opts, args = self.parse_command(\"rm\", \"rfT\")\n except self._IShellError:\n return\n protect_collections = not opts[\"recursive\"]\n request_confirmation = not opts[\"force\"]\n skip_trash = opts[\"no_trash\"]\n\n for arg in args:\n # Check that the object exist and what is its type\n path = self.get_path(arg)\n basename = irods_basename(path)\n try:\n target = self.session.data_objects.get(path)\n except DataObjectDoesNotExist:\n try:\n target = self.session.collections.get(path)\n except CollectionDoesNotExist:\n self.errorln(\"rm: cannot remove object `{:}':\"\n \"No such data or collection\", basename)\n return\n else:\n itype = \"collection\"\n else:\n itype = \"data object\"\n\n # Check for the recursive mode\n if protect_collections and (itype == \"collection\"):\n self.errorln(\"rm: cannot remove `{:}': Is a collection\",\n basename)\n return\n\n # Check for a confirmation\n if request_confirmation:\n if not self.ask_for_confirmation(\n \"rm: remove {:} `{:}'?\", itype, basename):\n continue\n\n # Now we can remove the data\n try:\n if itype == \"collection\":\n self.session.collections.remove(path)\n else:\n self.session.data_objects.unlink(path, force=skip_trash)\n except USER_FILE_DOES_NOT_EXIST:\n self.errorln(\"rm: cannot remove object `{:}':\"\n \"No such data or collection\", basename)\n return\n\n def _register_put(self):\n if \"put\" not in self.parser:\n p = argparse.ArgumentParser(\n prog=\"put\", description=\"Upload collection(s) or data \"\n \"object(s) to iRODS.\")\n p.add_argument(\"args\", metavar=\"path\", type=str, nargs=\"+\",\n help=\"the local path(s) of the object(s) to \"\n \"download. 
If more than one argument is given, the \"\n \"last one specifies the iRODS destination path\")\n p.add_argument(\"-f\", \"--force\", action=\"store_true\",\n help=\"do not prompt before overwriting\")\n p.add_argument(\"-r\", \"--recursive\", action=\"store_true\",\n help=\"upload directories and their content \"\n \"recursively\")\n self.parser[\"put\"] = p\n\n def help_put(self):\n \"\"\"Print a help message for the `put' command\n \"\"\"\n self._register_put()\n self.println(self.parser[\"put\"].format_help())\n\n @staticmethod\n def _hrdb(x):\n \"\"\"Format a number as human readable text\n \"\"\"\n if x > 1125899906842624:\n return \"{:.1f}P\".format(x / 1125899906842624.)\n elif x == 0:\n return \"0.0B\"\n i = int(math.floor(math.log(x) / math.log(1024)))\n unit = (\"B\", \"kB\", \"MB\", \"GB\", \"TB\")\n return \"{:.1f} {:}\".format(x / 1024**i, unit[i])\n\n def do_put(self, line):\n \"\"\"Upload collection(s) or data object(s) to iRODS\n \"\"\"\n self._register_put()\n try:\n opts, args = self.parse_command(\"put\", \"rf\")\n except self._IShellError:\n return\n recursive = opts[\"recursive\"]\n request_confirmation = not opts[\"force\"]\n\n # Parse the src(s) and the destination\n if len(args) == 1:\n srcs = args\n dst = self.cursor.path\n else:\n if len(args) == 2:\n srcs = (args[0],)\n else:\n srcs = args[:-1]\n dst = self.get_path(args[-1])\n\n # Expand the source(s)\n expanded = []\n for src in srcs:\n s = glob.glob(src)\n if not s:\n self.errorln(\"cannot access {:}: No such file or directory\",\n os.path.basename(src))\n return\n expanded += s\n srcs = expanded\n\n # Check if the destination is an existing collection\n if self.session.collections.exists(dst):\n if not dst.endswith(\"/\"):\n dst += \"/\"\n elif len(srcs) > 1:\n self.errorln(\"put: target `{:}' is not a directory\", basename)\n return\n\n # Upload the data\n def upload(srcs, dst):\n for src in srcs:\n basename = os.path.basename(src)\n if not os.path.exists(src):\n self.errorln(\"cannot access {:}: No such file or directory\",\n basename)\n raise self._IShellError()\n if dst.endswith(\"/\"):\n target = dst + basename\n else:\n target = dst\n\n if os.path.isdir(src):\n if not recursive:\n self.errorln(\"put: omitting collection `{:}'\",\n basename)\n raise self._IShellError()\n if not self.session.collections.exists(target):\n self.session.collections.create(target)\n children = [os.path.join(src, f) for f in os.listdir(src)]\n upload(children, target + \"/\")\n else:\n if self.session.data_objects.exists(target):\n if request_confirmation:\n if not self.ask_for_confirmation(\n \"put: overwrite data object `{:}'?\", basename):\n continue\n\n size = os.stat(src).st_size\n done = 0\n t0 = t1 = time.time()\n if self.interactive:\n red, blue, reset = \"\\033[91m\", \"\\033[94m\", \"\\033[0m\"\n else:\n red, blue, reset = \"\", \"\", \"\"\n self.printfmt(\"Uploading {:}{:}{:} ...\",\n red, basename, reset),\n sys.stdout.flush()\n dmgr = self.session.data_objects\n try:\n with open(src, \"rb\") as f, dmgr.open(\n target, \"w\", oprType=1) as o:\n for chunk in chunks(f, WRITE_BUFFER_SIZE):\n o.write(chunk)\n\n n_chunk = len(chunk)\n done += n_chunk\n if done < size:\n status = int(100 * done / float(size))\n t2 = time.time()\n dt, t1 = t2 - t1, t2\n self.printfmt(\n \"\\rUploading {:}{:}{:} ({:2d}%), \"\n \"size={:}{:}{:}, speed={:}{:}/s{:}\",\n red, basename, reset, status,\n blue, self._hrdb(done), reset,\n blue, self._hrdb(n_chunk / dt),\n reset),\n sys.stdout.flush()\n dt = time.time() - t0\n self.println(\n 
\"\\rUploaded {:}{:}{:} as {:} ({:}{:}{:} at \"\n \"{:}{:}/s{:})\", red, basename, reset,\n irods_basename(target), blue, self._hrdb(done),\n reset, blue, self._hrdb(done / dt), reset)\n except CAT_NAME_EXISTS_AS_COLLECTION:\n self.errorln(\"put: `{:}' is an existing collection\",\n basename)\n raise self._IShellError()\n except KeyboardInterrupt:\n print(\"^C\")\n raise self._IShellError\n except EOFError:\n print(\"^D\")\n raise self._IShellError\n\n try:\n upload(srcs, dst)\n except self._IShellError:\n return\n\n def complete_put(self, text, line, begidx, endidx):\n self._register_put()\n self._command = self.parse_line(line)[0]\n try:\n opts, args = self.parse_command(\"put\", \"rf\", noargs=True)\n except self._IShellError:\n return []\n nargs = len(args)\n if (nargs < 1) or ((nargs == 1) and (line[-1] != \" \")):\n dirname = os.path.dirname(text)\n if not dirname:\n pattern = text + \"*\"\n return [s for s in os.listdir(\".\")\n if fnmatch.fnmatch(s, pattern)]\n else:\n pattern = os.path.basename(text) + \"*\"\n completion = [os.path.join(dirname, s)\n for s in os.listdir(dirname)\n if fnmatch.fnmatch(s, pattern)]\n return completion\n else:\n return self.completedefault(text, line, begidx, endidx)\n\n def _register_get(self):\n if \"get\" not in self.parser:\n p = argparse.ArgumentParser(\n prog=\"get\", description=\"Download collection(s) or data \"\n \"object(s) from iRODS.\")\n p.add_argument(\"args\", metavar=\"path\", type=str, nargs=\"+\",\n help=\"the iRODS path(s) of the object(s) to \"\n \"download. If more than one argument is given, the \"\n \"last one specifies the local destination path\")\n p.add_argument(\"-f\", \"--force\", action=\"store_true\",\n help=\"do not prompt before overwriting\")\n p.add_argument(\"-r\", \"--recursive\", action=\"store_true\",\n help=\"Download collections and their content \"\n \"recursively\")\n self.parser[\"get\"] = p\n\n def help_get(self):\n \"\"\"Print a help message for the `get' command\n \"\"\"\n self._register_get()\n self.println(self.parser[\"get\"].format_help())\n\n def do_get(self, line):\n \"\"\"Download collection(s) or data object(s) from iRODS\n \"\"\"\n self._register_get()\n try:\n opts, args = self.parse_command(\"get\", \"rf\")\n except self._IShellError:\n return\n recursive = opts[\"recursive\"]\n request_confirmation = not opts[\"force\"]\n\n # Parse the src(s) and the destination\n if len(args) == 1:\n srcs = args\n dst = \".\"\n else:\n if len(args) == 2:\n srcs = (args[0],)\n else:\n srcs = args[:-1]\n dst = args[-1]\n\n # Check the consistency of the inputs\n if os.path.isdir(dst):\n isdir = True\n else:\n isdir = False\n if len(srcs) > 1:\n self.errorln(\"get: target `{:}' is not a directory\",\n os.path.basename(dst))\n return\n\n # Download the data\n def download(srcs, dst, isdir):\n for src in srcs:\n basename = os.path.basename(src)\n if isdir:\n target = os.path.join(dst, basename)\n else:\n target = dst\n\n if self.session.collections.exists(src):\n if not recursive:\n self.errorln(\"get: omitting collection `{:}'\",\n irods_basename(src))\n raise self._IShellError()\n\n if os.path.exists(target):\n if not os.path.isdir(target):\n self.println(\"get: cannot overwrite non-directory \"\n \"`{:}'\", target)\n raise self._IShellError()\n else:\n os.makedirs(target)\n\n base = self.session.collections.get(src)\n _, _, content = self.get_content(\"*\", base=base)\n newsrcs = [self.get_path(src, base=base)\n for src in content.keys()]\n download(newsrcs, target, True)\n else:\n if not 
self.session.data_objects.exists(src):\n self.errorln(\"get: cannot stat `{:}': No such data \"\n \"object or collection\",\n irods_basename(src))\n raise self._IShellError()\n\n if os.path.exists(target) and request_confirmation:\n if not self.ask_for_confirmation(\n \"get: overwrite file `{:}'?\", basename):\n continue\n\n dmgr = self.session.data_objects\n obj = dmgr.get(src)\n size = obj.size\n done = 0\n t0 = t1 = time.time()\n if self.interactive:\n red, blue, reset = \"\\033[91m\", \"\\033[94m\", \"\\033[0m\"\n else:\n red, blue, reset = \"\", \"\", \"\"\n self.printfmt(\"Downloading {:}{:}{:} ...\",\n red, irods_basename(src), reset),\n sys.stdout.flush()\n try:\n with open(target, \"wb\") as f, dmgr.open(\n src, \"r\", forceFlag=True) as o:\n for chunk in chunks(o, READ_BUFFER_SIZE):\n f.write(chunk)\n\n n_chunk = len(chunk)\n done += n_chunk\n if done < size:\n status = int(100 * done / float(size))\n t2 = time.time()\n dt, t1 = t2 - t1, t2\n self.printfmt(\n \"\\rDownloading {:}{:}{:} ({:2d}%), \"\n \"size={:}{:}{:}, speed={:}{:}/s{:}\",\n red, irods_basename(src), reset, status,\n blue, self._hrdb(done), reset,\n blue, self._hrdb(n_chunk / dt),\n reset),\n sys.stdout.flush()\n dt = time.time() - t0\n self.println(\n \"\\rDownloaded {:}{:}{:} as {:} ({:}{:}{:} at \"\n \"{:}{:}/s{:})\", red, irods_basename(src), reset,\n target, blue, self._hrdb(done), reset,\n blue, self._hrdb(done / dt), reset)\n except KeyboardInterrupt:\n print(\"^C\")\n raise self._IShellError\n except EOFError:\n print(\"^D\")\n raise self._IShellError\n\n srcs = [self.get_path(src) for src in srcs]\n try:\n download(srcs, dst, isdir)\n except self._IShellError:\n return\n\n def _register_shell(self):\n if \"shell\" not in self.parser:\n p = argparse.ArgumentParser(\n prog=\"shell\", description=\"escape with a local shell command.\")\n p.add_argument(\"args\", metavar=\"command\", type=str, nargs=1,\n help=\"the local command\")\n p.add_argument(\"args\", metavar=\"argument\", type=str, nargs=\"*\",\n help=\"the argument(s) of the local command\",\n action=\"append\")\n self.parser[\"shell\"] = p\n\n def help_shell(self):\n \"\"\"Print a help message for the `get' command\n \"\"\"\n self._register_shell()\n self.println(self.parser[\"shell\"].format_help())\n\n def do_shell(self, line):\n \"\"\"Escape with a local shell command\n \"\"\"\n self._register_shell()\n args = line.split(None, 1)\n if args and (args[0] == \"cd\"):\n os.chdir(args[1])\n else:\n p = subprocess.Popen(line, shell=True)\n p.communicate()\n\n def do_EOF(self, line):\n \"\"\"Exit to the OS\n \"\"\"\n return True\n\n def do_exit(self, line):\n \"\"\"Exit to the OS\n \"\"\"\n return True\n\n def onecmd(self, line):\n \"\"\"Override the default command processing in order to strip commands\n \"\"\"\n for self._command in self.parse_line(line):\n if super(IShell, self).onecmd(\" \".join(self._command)):\n return True\n\n def cmdloop(self, intro=None):\n \"\"\"Override the default command loop in order to catch Ctrl+C\n \"\"\"\n # Initialise the session\n self.initialise()\n\n # Run the command loop\n self.interactive = True\n\n while True:\n try:\n super(IShell, self).cmdloop(intro=\"\")\n break\n except KeyboardInterrupt:\n print(\"^C\")\n print()\n\n # Finalise the session\n self.finalise()\n\n def initialise(self):\n \"\"\"Start an iRODS session and initialise the environment\n \"\"\"\n # Start the iRODS session\n environment = os.path.expanduser(\"~/.irods/irods_environment.json\")\n self.session = 
iRODSSession(irods_env_file=environment)\n\n # Fetch the environment\n env = self.session.get_irods_env(environment)\n self.home = env[\"irods_home\"]\n self.user = env[\"irods_user_name\"]\n self.host = env[\"irods_host\"]\n self.prompt_prefix = \"\\033[91m{:}@{:}\\033[0m\".format(\n self.host.split(\".\", 1)[0], self.user)\n\n # Go to the home directory\n self._command = [\"cd\"]\n self.do_cd(\"\")\n\n def finalise(self):\n \"\"\"Close the current iRODS session\n \"\"\"\n self.session.cleanup()\n self.session = None\n", "repo_name": "niess/ishell", "sub_path": "ishell/core.py", "file_name": "core.py", "file_ext": "py", "file_size_in_byte": 34304, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "os.path.realpath", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "irods.manager.data_object_manager.READ_BUFFER_SIZE", "line_number": 39, "usage_type": "name"}, {"api_name": "io.DEFAULT_BUFFER_SIZE", "line_number": 39, "usage_type": "attribute"}, {"api_name": "irods.manager.data_object_manager.WRITE_BUFFER_SIZE", "line_number": 40, "usage_type": "name"}, {"api_name": "io.DEFAULT_BUFFER_SIZE", "line_number": 40, "usage_type": "attribute"}, {"api_name": "readline.set_completer_delims", "line_number": 47, "usage_type": "call"}, {"api_name": "cmd.Cmd", "line_number": 50, "usage_type": "attribute"}, {"api_name": "irods.exception.CollectionDoesNotExist", "line_number": 109, "usage_type": "name"}, {"api_name": "fnmatch.fnmatch", "line_number": 119, "usage_type": "call"}, {"api_name": "fnmatch.fnmatch", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "os.path.normpath", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "cmd.append", "line_number": 174, "usage_type": "call"}, {"api_name": "cmd.append", "line_number": 183, "usage_type": "call"}, {"api_name": "cmd.append", "line_number": 195, "usage_type": "call"}, {"api_name": "builtins.str", "line_number": 208, "usage_type": "call"}, {"api_name": "builtins.str", "line_number": 215, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 218, "usage_type": "attribute"}, {"api_name": "builtins.input", "line_number": 223, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 232, "usage_type": "call"}, {"api_name": "builtins.str", "line_number": 237, "usage_type": "name"}, {"api_name": "irods.exception.CollectionDoesNotExist", "line_number": 266, "usage_type": "name"}, {"api_name": "irods.data_object.irods_basename", "line_number": 270, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 276, "usage_type": "call"}, {"api_name": "builtins.str", "line_number": 281, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 330, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 330, "usage_type": "attribute"}, {"api_name": "builtins.str", "line_number": 332, "usage_type": "call"}, {"api_name": "builtins.str.lower", "line_number": 332, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 339, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 340, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 341, "usage_type": "attribute"}, {"api_name": "math.ceil", "line_number": 365, "usage_type": "call"}, {"api_name": "builtins.range", "line_number": 368, "usage_type": "call"}, {"api_name": "builtins.range", "line_number": 370, "usage_type": "call"}, {"api_name": "builtins.range", "line_number": 380, "usage_type": "call"}, {"api_name": "builtins.range", "line_number": 381, "usage_type": "call"}, {"api_name": "os.linesep.join", "line_number": 389, "usage_type": "call"}, {"api_name": "os.linesep", "line_number": 389, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 393, "usage_type": "call"}, {"api_name": "builtins.str", "line_number": 395, "usage_type": "name"}, {"api_name": "irods.exception.CATALOG_ALREADY_HAS_ITEM_BY_THAT_NAME", "line_number": 418, "usage_type": "name"}, {"api_name": "irods.data_object.irods_basename", "line_number": 420, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 425, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 443, "usage_type": "call"}, {"api_name": "builtins.str", "line_number": 446, "usage_type": "name"}, {"api_name": "irods.data_object.irods_basename", "line_number": 479, "usage_type": "call"}, {"api_name": "irods.exception.DataObjectDoesNotExist", "line_number": 482, "usage_type": "name"}, {"api_name": "irods.exception.CollectionDoesNotExist", "line_number": 485, "usage_type": "name"}, {"api_name": "irods.exception.USER_FILE_DOES_NOT_EXIST", "line_number": 512, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 519, "usage_type": "call"}, {"api_name": "builtins.str", "line_number": 522, "usage_type": "name"}, {"api_name": "math.floor", "line_number": 547, "usage_type": "call"}, {"api_name": "math.log", "line_number": 547, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 576, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 579, "usage_type": "call"}, {"api_name": "os.path", "line_number": 579, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 595, "usage_type": "call"}, {"api_name": "os.path", "line_number": 595, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 596, "usage_type": "call"}, {"api_name": "os.path", "line_number": 596, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 605, "usage_type": "call"}, {"api_name": "os.path", "line_number": 605, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 612, "usage_type": "call"}, {"api_name": "os.path", "line_number": 612, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 612, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 621, "usage_type": "call"}, {"api_name": "time.time", "line_number": 623, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 630, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 630, "usage_type": "attribute"}, {"api_name": "irods.data_object.chunks", "line_number": 635, "usage_type": "call"}, {"api_name": "irods.manager.data_object_manager.WRITE_BUFFER_SIZE", 
"line_number": 635, "usage_type": "argument"}, {"api_name": "time.time", "line_number": 642, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 651, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 651, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 652, "usage_type": "call"}, {"api_name": "irods.data_object.irods_basename", "line_number": 656, "usage_type": "call"}, {"api_name": "irods.exception.CAT_NAME_EXISTS_AS_COLLECTION", "line_number": 658, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 683, "usage_type": "call"}, {"api_name": "os.path", "line_number": 683, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 686, "usage_type": "call"}, {"api_name": "fnmatch.fnmatch", "line_number": 687, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 689, "usage_type": "call"}, {"api_name": "os.path", "line_number": 689, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 690, "usage_type": "call"}, {"api_name": "os.path", "line_number": 690, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 691, "usage_type": "call"}, {"api_name": "fnmatch.fnmatch", "line_number": 692, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 699, "usage_type": "call"}, {"api_name": "builtins.str", "line_number": 702, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 742, "usage_type": "call"}, {"api_name": "os.path", "line_number": 742, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 748, "usage_type": "call"}, {"api_name": "os.path", "line_number": 748, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 754, "usage_type": "call"}, {"api_name": "os.path", "line_number": 754, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 756, "usage_type": "call"}, {"api_name": "os.path", "line_number": 756, "usage_type": "attribute"}, {"api_name": "irods.data_object.irods_basename", "line_number": 763, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 766, "usage_type": "call"}, {"api_name": "os.path", "line_number": 766, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 767, "usage_type": "call"}, {"api_name": "os.path", "line_number": 767, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 772, "usage_type": "call"}, {"api_name": "irods.data_object.irods_basename", "line_number": 783, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 786, "usage_type": "call"}, {"api_name": "os.path", "line_number": 786, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 795, "usage_type": "call"}, {"api_name": "irods.data_object.irods_basename", "line_number": 801, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 802, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 802, "usage_type": "attribute"}, {"api_name": "irods.data_object.chunks", "line_number": 806, "usage_type": "call"}, {"api_name": "irods.manager.data_object_manager.READ_BUFFER_SIZE", "line_number": 806, "usage_type": "argument"}, {"api_name": "time.time", "line_number": 813, "usage_type": "call"}, {"api_name": "irods.data_object.irods_basename", "line_number": 818, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 822, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 822, "usage_type": "attribute"}, {"api_name": "time.time", 
"line_number": 823, "usage_type": "call"}, {"api_name": "irods.data_object.irods_basename", "line_number": 826, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 844, "usage_type": "call"}, {"api_name": "builtins.str", "line_number": 846, "usage_type": "name"}, {"api_name": "builtins.str", "line_number": 848, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 865, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 867, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 911, "usage_type": "call"}, {"api_name": "os.path", "line_number": 911, "usage_type": "attribute"}, {"api_name": "irods.session.iRODSSession", "line_number": 912, "usage_type": "call"}]}
+{"seq_id": "43457936572", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\nfrom tkinter import *\r\nimport tkinter as tk\r\nfrom tkinter import colorchooser, ttk,filedialog,Entry\r\nfrom PIL import ImageTk,Image,ImageDraw,ImageOps\r\nimport cv2\r\nimport io\r\nimport subprocess\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\n\r\nclass main(object):\r\n\r\n def __init__(self, master):\r\n self.master = master\r\n self.master.title=\"Button Example\"\r\n self.color_fg = \"black\"\r\n self.color_bg = \"white\"\r\n self.old_x = None\r\n self.old_y = None\r\n self.pen_width = 20\r\n self.darwWidget()\r\n self.c.bind()\r\n self.c.bind('', self.paint)\r\n self.c.bind(\"\", self.reset)\r\n # self.button1=tk.Button(master,text=\"clear\",command=self.clearcanvas)\r\n # self.button1.pack()\r\n # self.button2=tk.Button(master,text=\"exit\",command=self.master.destroy)\r\n # self.button2.pack()\r\n self.button3=tk.Button(master,text=\"learn\",command=self.savefile)\r\n self.label = tk.Label(master, text=\"Enter answer:\")\r\n self.label.pack()\r\n self.lrn_input=Entry(master)\r\n self.lrn_input.pack()\r\n self.button3.pack()\r\n self.button5=tk.Button(master,text=\"pridict\",command=self.pridict)\r\n self.button5.pack()\r\n self.button4=tk.Button(master,text=\"result\",command=self.show_digits)\r\n self.button4.pack()\r\n self.digits=0\r\n self.tcells=[]\r\n self.tcells_array=0\r\n self.targets=[]\r\n self.df=\"\"\r\n self.result=0\r\n self.targets_array=0\r\n self.X_array=0\r\n self.y_array=0\r\n self.data=pd.read_csv(\"mydf.csv\")\r\n # self.data=self.data.drop(\"Unnamed: 0\",axis=1)\r\n \r\n def paint(self, e):\r\n if self.old_x and self.old_y:\r\n # print(\"draw\")\r\n self.c.create_line(self.old_x, self.old_y, e.x, e.y, width=self.pen_width,fill=self.color_fg, capstyle=\"round\", smoot=True)\r\n self.old_x = e.x\r\n self.old_y = e.y\r\n\r\n def reset(self, e):\r\n self.old_x = None\r\n self.old_y = None\r\n\r\n def changedW(self, width): \r\n self.pen_width=width\r\n \r\n def clearcanvas(self):\r\n self.c.delete(ALL)\r\n\r\n def change_fg(self):\r\n self.color_fg = colorchooser.askcolor(color=self.color_fg)[1]\r\n\r\n def change_bg(self):\r\n self.color_bg = colorchooser.askcolor(color=self.color_bg)[1]\r\n self.c['bg']=self.color_bg\r\n \r\n\r\n def darwWidget(self):\r\n self.controls=Frame(self.master,padx=5,pady=5)\r\n # textpw=Label(self.controls,text=\"pen Width\",font=\"Georgia 16\")\r\n # textpw.grid(row=0,column=0)\r\n # self.slider=ttk.Scale(self.controls,from_=5 , to=100, command=self.changedW,orient=\"vertical\" )\r\n # self.slider.set(self.pen_width)\r\n # self.slider.grid(row=0,column=1)\r\n # self.controls.pack(side=\"left\")\r\n self.c=Canvas(self.master,width=500,height=400,bg=self.color_bg)\r\n self.c.pack(fill=BOTH,expand=True)\r\n \r\n menu=Menu(self.master)\r\n self.master.config(menu=menu)\r\n optionmenu=Menu(menu)\r\n menu.add_cascade(label=\"Menu\",menu=optionmenu)\r\n optionmenu.add_command(label='brush color',command=self.change_fg)\r\n optionmenu.add_command(label='backgrond color',command=self.change_bg)\r\n optionmenu.add_command(label='clear convas',command=self.clearcanvas) \r\n optionmenu.add_command(label='Exit',command=self.master.destroy)\r\n \r\n \r\n def savefile(self):\r\n target=self.lrn_input.get()\r\n \r\n \r\n if target==\"\":\r\n print(\"do nothing\")\r\n else:\r\n ps = self.c.postscript(colormode='gray')\r\n img = 
Image.open(io.BytesIO(ps.encode('utf-8')))\r\n img=ImageOps.invert(img)\r\n img=img.resize((40, 40), Image.ANTIALIAS)\r\n img.save('test.png')\r\n self.digits=cv2.imread(\"test.png\",cv2.IMREAD_GRAYSCALE)\r\n self.tcells.append(self.digits.flatten())\r\n print(target)\r\n self.targets.append(target)\r\n self.clearcanvas()\r\n self. lrn_input.delete(0,\"end\")\r\n self.tcells_array=np.array(self.tcells,dtype=np.float32)\r\n self.targets_array=np.array(self.targets,dtype=np.float32)\r\n self.df=pd.DataFrame(self.tcells_array)\r\n self.df['target']=self.targets_array\r\n # self.df.to_csv(\"mydf.csv\",index=False)\r\n print(self.df)\r\n result=pd.concat([self.data,self.df],axis=0)\r\n # print(result)\r\n result.to_csv(\"mydf.csv\",index=False)\r\n \r\n def show_digits(self):\r\n print(self.digits)\r\n digits=self.digits\r\n \r\n self.tcells_array=np.array(self.tcells,dtype=np.float32)\r\n self.targets_array=np.array(self.targets,dtype=np.float32)\r\n self.master.destroy()\r\n # test_digits=cv2.imread(\"test_digits2.png\",cv2.IMREAD_GRAYSCALE)\r\n \r\n \r\n def pridict(self):\r\n\r\n X=self.df.loc[:,self.df.columns!=\"target\"]\r\n y=self.df.loc[:,self.df.columns==\"target\"]\r\n self.X_array=np.array(X,dtype=np.float32)\r\n self.y_array=np.array(y,dtype=np.float32)\r\n \r\n knn=cv2.ml.KNearest_create()\r\n knn.train(self.X_array,cv2.ml.ROW_SAMPLE,self.y_array)\r\n ret,self.result,neighbours,dist=knn.findNearest(self.X_array,k=3)\r\n message =neighbours \r\n text = self.c.create_text(10, 100, text=message, font=(\"Helvetica\", 10))\r\n print(self.result)\r\n \r\n \r\n \r\nwin = Tk()\r\n\r\nwin.title(\"my app\")\r\npaint=main(win)\r\nwin.mainloop()\r\n# mydf=paint.df\r\n# data=paint.data\r\n\r\n", "repo_name": "armandabir/handwrithing-AI", "sub_path": "final3.py", "file_name": "final3.py", "file_ext": "py", "file_size_in_byte": 5852, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "tkinter.Button", "line_number": 38, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 39, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 41, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 44, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 57, "usage_type": "call"}, {"api_name": "tkinter.colorchooser.askcolor", "line_number": 78, "usage_type": "call"}, {"api_name": "tkinter.colorchooser", "line_number": 78, "usage_type": "name"}, {"api_name": "tkinter.colorchooser.askcolor", "line_number": 81, "usage_type": "call"}, {"api_name": "tkinter.colorchooser", "line_number": 81, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 114, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 114, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 114, "usage_type": "call"}, {"api_name": "PIL.ImageOps.invert", "line_number": 115, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 115, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 116, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 116, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 118, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 118, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 124, 
"usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 125, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 126, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 138, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 148, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 149, "usage_type": "attribute"}, {"api_name": "cv2.ml.KNearest_create", "line_number": 151, "usage_type": "call"}, {"api_name": "cv2.ml", "line_number": 151, "usage_type": "attribute"}, {"api_name": "cv2.ml", "line_number": 152, "usage_type": "attribute"}]}
+{"seq_id": "24900274857", "text": "from model.storage import Storage\nfrom schemas.storage import StorageCreate, StorageUpdate\nfrom typing import Union\nfrom fastapi.responses import JSONResponse\nfrom sqlalchemy import Column\nfrom sqlalchemy.orm import Session\n\n\ndef check_storage_exists(project_id: str, db: Session) -> bool:\n '''\n Returns if a storage account already exists for the given project.\n \n :param project_id: id of the corresponding project\n :param db: active database session\n '''\n\n return(db.query(Storage).filter(Storage.project==project_id, Storage.is_deleted==False).count() > 0)\n\n\ndef create_storage(project_id: str, data: StorageCreate, db:Session) -> JSONResponse:\n '''\n Stores the storage account credentials for the project.\n \n :param project_id: id of the corresponding project\n :param data: storage account details\n :param db: active database session\n '''\n \n storage = Storage()\n storage_data = data.dict(exclude_none=True, by_alias=False)\n\n for key, value in storage_data.items():\n setattr(storage, key, value)\n\n storage.project = project_id\n\n db.add(storage)\n db.commit()\n db.refresh(storage)\n\n return JSONResponse({\"status\": 201, \"message\": \"storage created\", \"data\": [{}]}, status_code=201)\n\n\ndef get_storage(project_id: str, db: Session) -> Union[Storage, None]:\n '''\n Returns the storage account associated with the specified project.\n \n :param project_id: id of the corresponding project\n :param db: active database session\n '''\n return(db.query(Storage).filter(Storage.project==project_id, Storage.is_deleted==False).first())\n\n\ndef get_storageid(project_id: str, db: Session) -> Column[str]:\n '''\n Returns the id for the storage account data of the given project.\n\n :param project_id: unique id of the project\n :param db: active database session\n '''\n\n return(db.query(Storage).filter(Storage.project==project_id, Storage.is_deleted==False).first().id)\n\n\ndef get_storage_by_id(storage_id: str, db: Session) -> Storage:\n '''\n Returns the storage account associated with the active project.\n \n :param storage_id: id of the corresponding storage account data\n :param db: active database session\n '''\n \n return(db.query(Storage).filter(Storage.id==storage_id).first())\n\n\ndef update_storage(data: StorageUpdate, db: Session) -> JSONResponse:\n '''\n Update the storage account details.\n \n :param data: storage account details for update\n :param db: active database session\n '''\n\n db_storage = get_storage_by_id(data.id, db)\n storage_data = data.dict(exclude_none=True, by_alias=False)\n\n for key, value in storage_data.items():\n setattr(db_storage, key, value)\n\n db.add(db_storage)\n db.commit()\n db.refresh(db_storage)\n\n return JSONResponse({\"status\": 204, \"message\": \"storage updated\", \"data\": [{}]}, status_code=204)", "repo_name": "xmigrate/xmigrate", "sub_path": "services/storage.py", "file_name": "storage.py", "file_ext": "py", "file_size_in_byte": 2891, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 39, "dataset": "github-code", "pt": "76", "api": [{"api_name": "sqlalchemy.orm.Session", "line_number": 9, "usage_type": "name"}, {"api_name": "model.storage.Storage", "line_number": 17, "usage_type": "argument"}, {"api_name": "model.storage.Storage.project", "line_number": 17, "usage_type": "attribute"}, {"api_name": "model.storage.Storage.is_deleted", "line_number": 17, "usage_type": "attribute"}, {"api_name": "schemas.storage.StorageCreate", "line_number": 20, "usage_type": "name"}, 
{"api_name": "sqlalchemy.orm.Session", "line_number": 20, "usage_type": "name"}, {"api_name": "model.storage.Storage", "line_number": 29, "usage_type": "call"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 41, "usage_type": "call"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 20, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 44, "usage_type": "name"}, {"api_name": "model.storage.Storage", "line_number": 51, "usage_type": "argument"}, {"api_name": "model.storage.Storage.project", "line_number": 51, "usage_type": "attribute"}, {"api_name": "model.storage.Storage.is_deleted", "line_number": 51, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 44, "usage_type": "name"}, {"api_name": "model.storage.Storage", "line_number": 44, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 54, "usage_type": "name"}, {"api_name": "model.storage.Storage", "line_number": 62, "usage_type": "argument"}, {"api_name": "model.storage.Storage.project", "line_number": 62, "usage_type": "attribute"}, {"api_name": "model.storage.Storage.is_deleted", "line_number": 62, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 54, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 65, "usage_type": "name"}, {"api_name": "model.storage.Storage", "line_number": 73, "usage_type": "argument"}, {"api_name": "model.storage.Storage.id", "line_number": 73, "usage_type": "attribute"}, {"api_name": "model.storage.Storage", "line_number": 65, "usage_type": "name"}, {"api_name": "schemas.storage.StorageUpdate", "line_number": 76, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 76, "usage_type": "name"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 94, "usage_type": "call"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 76, "usage_type": "name"}]}
+{"seq_id": "11847754637", "text": "import logging\nfrom argparse import ArgumentParser\nfrom os import mkdir\nfrom os.path import exists\nfrom tempfile import TemporaryDirectory\nfrom pathlib import Path\n\nimport numpy as np\nfrom scipy.signal import savgol_filter\n\nimport sys\nsys.path.append('./DataProcessing')\nfrom DataProcessing.reconstruct_data import load_mean, denormalize\nfrom DataProcessing.process_motions import create_bvh\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef smoothing(motion):\n\n smoothed = [savgol_filter(motion[:,i], 9, 3) for i in range(motion.shape[1])]\n new_motion = np.array(smoothed).transpose()\n return new_motion\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\n \"--pred\",\n \"--prediction\",\n type=str,\n required=True,\n help=\"directory with .npy files with predictions of \" \"shape (N x 45)\",\n )\n parser.add_argument(\n \"--dest\",\n type=str,\n required=True,\n help=\"directory to save results\",\n )\n parser.add_argument(\n \"--mean\",\n type=str,\n default=\"DataProcessing/mean_pose.npz\",\n help=\"File with normalization values.\",\n )\n parser.add_argument(\n \"--pipe\",\n type=str,\n default=\"pipe\",\n help=\"pipe folder with pre/post processing.\"\n )\n parser.add_argument(\n \"--smooth\",\n action=\"store_true\",\n default=False,\n help=\"Flag to apply smoothing.\"\n )\n args = parser.parse_args()\n if not exists(args.dest):\n mkdir(args.dest)\n for pred_file in Path(args.pred).glob('*.npy'):\n logging.info(str(pred_file))\n prediction = np.load(str(pred_file))\n if args.smooth:\n logger.info(\"Smoothing prediction\")\n prediction = smoothing(prediction)\n\n logging.info(\"Reconstructing data by denormalizing it.\")\n max_val, mean_pose = load_mean(args.mean)\n prediction = denormalize(prediction, max_val, mean_pose)\n\n logging.info(\"Creating .bvh. 
This requires pipe\")\n\n with TemporaryDirectory() as tmpdir:\n tmpdir = Path(tmpdir)\n np.save(tmpdir / pred_file.name, prediction)\n create_bvh(tmpdir / pred_file.name, args.dest, args.pipe)\n", "repo_name": "FineMotion/GENEA_2020", "sub_path": "create_bvh.py", "file_name": "create_bvh.py", "file_ext": "py", "file_size_in_byte": 2303, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "76", "api": [{"api_name": "sys.path.append", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 18, "usage_type": "attribute"}, {"api_name": "scipy.signal.savgol_filter", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 62, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 63, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 64, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 66, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 71, "usage_type": "call"}, {"api_name": "DataProcessing.reconstruct_data.load_mean", "line_number": 72, "usage_type": "call"}, {"api_name": "DataProcessing.reconstruct_data.denormalize", "line_number": 73, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 75, "usage_type": "call"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 77, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 79, "usage_type": "call"}, {"api_name": "DataProcessing.process_motions.create_bvh", "line_number": 80, "usage_type": "call"}]}
+{"seq_id": "9315383120", "text": "# -*- coding: utf-8 -*-\nimport pandas as pd\nfrom sklearn import linear_model\n\n# CSVァイルを読み込んでデータフレームに格納\ndf = pd.read_csv(\n \"C:/github/sample/python/scikit/quantification/class1/input.csv\")\n\n# 説明変数をダミー変数に変換\nx = pd.get_dummies(df[['性別', '文理']])\n\n# 目的変数:満足度\ny = df['満足度'].values\n\n# 予測モデルを作成(重回帰)\nclf = linear_model.LinearRegression()\nclf.fit(x, y)\n\n# 回帰係数と切片の抽出\na = clf.coef_\nb = clf.intercept_ \n\n# 回帰係数\nprint(\"説明変数:\", x) # \nprint(\"回帰係数:\", a) # 回帰係数: [-1. 1. -1. 1.]\nprint(\"切片:\", b) # 切片: 3.0\nprint(\"決定係数:\", clf.score(x, y)) # 決定係数: 0.8\n\n\"\"\"\n説明変数:\n 性別_女 性別_男 文理_文系 文理_理系\n0 0 1 0 1\n1 1 0 0 1\n2 0 1 1 0\n3 1 0 1 0\n4 0 1 1 0\n\n回帰係数: [-1. 1. -1. 1.]\n\n切片: 2.9999999999999996\n\n決定係数: 0.8\n\"\"\"\n", "repo_name": "JunkeyMonkeyBaby/sample", "sub_path": "python/scikit/quantification/class1/ex1.py", "file_name": "ex1.py", "file_ext": "py", "file_size_in_byte": 1036, "program_lang": "python", "lang": "ja", "doc_type": "code", "dataset": "github-code", "pt": "76", "api": [{"api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 10, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 16, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 16, "usage_type": "name"}]}
+{"seq_id": "30906592871", "text": "from unittest import TestCase\n\nfrom testfixtures import compare\n\nfrom archivist.sources.packages import Plugin\nfrom tests.helpers import ShouldFailSchemaWith, SingleCommandMixin\n\n\nclass TestPackages(SingleCommandMixin, TestCase):\n\n def test_rpm(self):\n self.Popen.set_command('rpm -qa', stdout=b'some packages')\n plugin = Plugin(**Plugin.schema(dict(type='packages', name='rpm')))\n plugin.process(self.dir.path)\n self.dir.compare(expected=['rpm'])\n compare(b'some packages', self.dir.read('rpm'))\n\n def test_dpkg(self):\n self.Popen.set_command('dpkg -l', stdout=b'some packages')\n plugin = Plugin(**Plugin.schema(dict(type='packages', name='dpkg')))\n plugin.process(self.dir.path)\n self.dir.compare(expected=['dpkg'])\n compare(b'some packages', self.dir.read('dpkg'))\n\n def test_wrong(self):\n text = \"not a valid value for dictionary value @ data['name']\"\n with ShouldFailSchemaWith(text):\n Plugin.schema(dict(type='packages', name='foo'))\n", "repo_name": "simplistix/archivist", "sub_path": "tests/test_source_packages.py", "file_name": "test_source_packages.py", "file_ext": "py", "file_size_in_byte": 1044, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "tests.helpers.SingleCommandMixin", "line_number": 9, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 9, "usage_type": "name"}, {"api_name": "archivist.sources.packages.Plugin", "line_number": 13, "usage_type": "call"}, {"api_name": "archivist.sources.packages.Plugin.schema", "line_number": 13, "usage_type": "call"}, {"api_name": "testfixtures.compare", "line_number": 16, "usage_type": "call"}, {"api_name": "archivist.sources.packages.Plugin", "line_number": 20, "usage_type": "call"}, {"api_name": "archivist.sources.packages.Plugin.schema", "line_number": 20, "usage_type": "call"}, {"api_name": "testfixtures.compare", "line_number": 23, "usage_type": "call"}, {"api_name": "tests.helpers.ShouldFailSchemaWith", "line_number": 27, "usage_type": "call"}, {"api_name": "archivist.sources.packages.Plugin.schema", "line_number": 28, "usage_type": "call"}, {"api_name": "archivist.sources.packages.Plugin", "line_number": 28, "usage_type": "name"}]}
+{"seq_id": "29696552866", "text": "\n# coding: utf-8\n\n# # Define function to return an array of file names\n\n# In[240]:\n\n\nimport os\n\ndef return_list_of_files(rootdir, printname=False):\n all_files = []\n\n for subdir, dirs, files in os.walk(rootdir):\n for file in files:\n all_files.append(os.path.join(subdir, file))\n if printname: \n print(os.path.join(subdir, file))\n return np.asarray(all_files) \n\n\n# # Function to load data from file names into features + labels\n\n# In[241]:\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\n\n\ndef load_data(dataset_path):\n images_list = return_list_of_files(dataset_path)\n \n #print(images_list)\n print(len(images_list))\n \n features = np.ndarray(shape=(len(images_list), 28, 28),\n dtype=np.uint8)\n labels = []\n for i in range(len(images_list)):\n try:\n im = mpimg.imread(images_list[i])\n\n features[i] = im\n #features[i] = im.flatten()\n labels.append(images_list[i].split(\"/\")[2])\n except:\n print(images_list[i])\n \n return features, np.asarray(labels)\n\n\n# In[242]:\n\n\nfeatures, labels = load_data(\"../TrainingDataAll\")\ntest_features, test_labels = load_data(\"../TestData\")\n\n\n# In[243]:\n\n\nprint(\"\\n\", features.shape, \"\\n\", labels.shape)\nprint(\"\\n\", test_features.shape, \"\\n\", test_labels.shape)\n\n\n# In[244]:\n\n\nplt.imshow(test_features[3].reshape(28, 28))\n\n\n# # Label encoder to convert string labels into integers\n\n# In[245]:\n\n\nfrom sklearn import preprocessing\n\nle = preprocessing.LabelEncoder()\nle.fit(labels)\nlabels_encoded = le.transform(labels) \n\ntest_le = preprocessing.LabelEncoder()\ntest_le.fit(test_labels)\ntest_labels_encoded = test_le.transform(test_labels) \n\n#list(le.inverse_transform([2, 2, 1]))\n\n\n# In[246]:\n\n\nle.classes_\ntest_le.classes_\n\n\n# In[247]:\n\n\nfrom sklearn.model_selection import train_test_split\n\nX_train=features\nX_test=test_features\nY_train=labels_encoded\nY_test=test_labels_encoded\n\nprint(\"[STATUS] splitted train and test data...\")\nprint(\"Train data : {}\".format(X_train.shape))\nprint(\"Test data : {}\".format(X_test.shape))\nprint(\"Train labels: {}\".format(Y_train.shape))\nprint(\"Test labels : {}\".format(Y_test.shape))\n\n\n# In[248]:\n\n\n#28×28 の二次元で表現されている入力��なる画像の情報が、784個になるようにしたいので、1次元になるように変形させる\nX_train = X_train.reshape(len(X_train), 784)\nX_test = X_test.reshape(len(X_test), 784)\n\n\n# In[249]:\n\n\nprint(X_train.shape)\nprint(Y_train.shape)\n\n\n# In[253]:\n\n\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\n# Now we prepare train_data and test_data.\ntrain = X_train.astype(np.float32)\ntest = X_test.astype(np.float32)\n\n\n# Create labels for train and test data\ntrain_labels = Y_train[:,np.newaxis]\ntest_labels = Y_test[:,np.newaxis]\n\n# Initiate kNN, train the data, then test it with test data for k=1\nknn = cv2.ml.KNearest_create() \nknn.train(train, cv2.ml.ROW_SAMPLE, train_labels) \nret,result,neighbours,dist = knn.findNearest(test,k=3)\n#print(result)\n#print(test_labels)\n#print(result.size)\n# Now we check the accuracy of classification\n# For that, compare the result with test_labels and check which are wrong\nmatches = result==test_labels\ncorrect = np.count_nonzero(matches)\nprint(correct)\naccuracy = correct/result.size*100\nprint(accuracy)\n\n", "repo_name": "natsuwata/Hackfest-Reading-Hand-Written-Digits", "sub_path": "KNN_Eno.py", "file_name": "KNN_Eno.py", "file_ext": "py", "file_size_in_byte": 3406, "program_lang": "python", "lang": "en", 
"doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "os.walk", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 39, "usage_type": "attribute"}, {"api_name": "matplotlib.image.imread", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.image", "line_number": 43, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 81, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 81, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 85, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 85, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 140, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 144, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 145, "usage_type": "attribute"}, {"api_name": "cv2.ml.KNearest_create", "line_number": 148, "usage_type": "call"}, {"api_name": "cv2.ml", "line_number": 148, "usage_type": "attribute"}, {"api_name": "cv2.ml", "line_number": 149, "usage_type": "attribute"}, {"api_name": "numpy.count_nonzero", "line_number": 157, "usage_type": "call"}]}
+{"seq_id": "17044611304", "text": "import requests\nimport threading\nfrom lxml import etree\n# 解析网页,并得到网页中的IP代理\ndef get_proxy(html):\n selector = etree.HTML(html)\n proxies = []\n for each in selector.xpath(\"//tr[@class='odd']\"):\n ip = each.xpath(\"./td[2]/text()\")[0]\n port = each.xpath(\"./td[3]/text()\")[0]\n # 拼接IP地址,端口号6\n proxy = ip + \":\" + port\n proxies.append(proxy)\n test_proxies(proxies)\n\ndef thread_write_proxy(proxy):\n with open(\"./ip_proxy.txt\", 'a+') as f:\n f.write(proxy + '\\n')\n# 验证已得到IP的可用性\ndef thread_test_proxy(proxy):\n url = \"http://www.baidu.com/\"\n header = {\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3724.8 Safari/537.36\",\n }\n try:\n response = requests.get(url, headers=header, proxies={\"http\": proxy}, timeout=1)\n if response.status_code == 200:\n thread_write_proxy(proxy)\n except Exception:\n pass\n# 添加线程模式\ndef test_proxies(proxies):\n proxies = proxies\n for proxy in proxies:\n test = threading.Thread(target=thread_test_proxy, args=(proxy,))\n test.start()\ndef get_html(url):\n header = {\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36\",\n }\n\n response = requests.get(url,headers=header)\n get_proxy(response.text)\nif __name__ == \"__main__\":\n url = \"http://www.xicidaili.com/nn/\"\n for i in range(1,30):\n get_html(url+str(i))\n\n", "repo_name": "zyq914014125/spider", "sub_path": "the end test/ip_set.py", "file_name": "ip_set.py", "file_ext": "py", "file_size_in_byte": 1581, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "lxml.etree.HTML", "line_number": 6, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 6, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 26, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 35, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 42, "usage_type": "call"}]}
+{"seq_id": "9259649153", "text": "import argparse\nfrom console_progressbar import ProgressBar\nimport io\nimport time\n\nfrom ngram import NGramModel\nfrom ngram_zhuyin import NGramPYModel\nfrom fenci_ngram import XNGramModel\n\nparser = argparse.ArgumentParser(description='Pinyin input with N-gram.')\nparser.add_argument('-f', '--fenci', dest='fenci', action='store_true',\n help='N-gram on single character or phrase')\nparser.add_argument('-z', '--no-zhuyin', dest='zhuyin', action='store_false',\n help='To disable N-gram with zhuyin')\nparser.add_argument('-i', '--input', dest='input', type=str,\n metavar='FILE', help='Path to input pinyin file')\nparser.add_argument('-o', '--output', dest='output', type=str,\n metavar='FILE', help='Path to output file')\nparser.add_argument('-s', '--source', dest='source', type=str, default='train',\n metavar='FILEPATH', help='Path to training source file')\nparser.add_argument('-m', '--model', dest='model', type=str, default='models/n-gram',\n metavar='FILEPATH', help='Path to model files')\nparser.add_argument('-n', dest='n', default=3, type=int,\n metavar='NGRAM', help='Default as 3')\nparser.add_argument('task', type=str, default='translate',\n choices=['train', 'retrain', 'translate', 'test', 'console'],\n help='Train, translate only, test accuracy, or use console mode')\n\ndef check_result(output: list, truth: list) -> float:\n correct_sentence_cnt = 0\n word_cnt = 0\n correct_word_cnt = 0\n for o, t in zip(output, truth):\n if o.strip() == t.strip():\n correct_sentence_cnt += 1\n word_cnt += len(o)\n for i in range(len(o.strip())):\n if o[i] == t[i]:\n correct_word_cnt += 1\n return (correct_sentence_cnt / len(output), correct_word_cnt / word_cnt)\n\nif __name__ == '__main__':\n args = parser.parse_args()\n\n if args.fenci:\n model = XNGramModel(\n n=args.n,\n table_path='pinyin_table',\n file_path=args.source,\n model_path=args.model,\n zhuyin=args.zhuyin)\n else:\n if args.zhuyin:\n model = NGramPYModel(\n n=args.n,\n table_path='pinyin_table',\n file_path=args.source,\n model_path=args.model)\n else:\n model = NGramModel(\n n=args.n,\n table_path='pinyin_table',\n file_path=args.source,\n model_path=args.model)\n\n if args.task == 'train':\n model.train([args.n-1])\n elif args.task == 'retrain':\n model.train(range(args.n))\n elif args.task == 'translate':\n if args.input is None:\n print('[Error] Missing input file.')\n exit(-1)\n model.load_model()\n lines = io.open(args.input, mode='r', encoding='utf-8').readlines()\n pb = ProgressBar(len(lines), length=50, prefix='Translating')\n result = []\n for i, l in enumerate(lines):\n result.append(model.translate(l))\n pb.print_progress_bar(i+1)\n print()\n print(\"[Info] Translated %d lines.\" % len(result))\n if args.output is None:\n for l in result:\n print(l)\n else:\n output = io.open(args.output, mode='w', encoding='utf-8')\n for l in result:\n output.write(l + '\\n')\n print(\"[Info] Results saved to \", args.output)\n elif args.task == 'test':\n if args.input is None:\n print('[Error] Missing input file.')\n exit(-1)\n model.load_model()\n lines = io.open(args.input, mode='r', encoding='utf-8').readlines()\n pb = ProgressBar(len(lines) / 2, length=50, prefix='Translating')\n result = []\n for i, l in enumerate(lines[0::2]):\n result.append(model.translate(l))\n pb.print_progress_bar(i+1)\n print()\n if args.output is not None:\n output = io.open(args.output, mode='w', encoding='utf-8')\n for l in result:\n output.write(l + '\\n')\n print(\"[Info] Results saved to \", args.output)\n 
accuracy = check_result(result, lines[1::2])\n print('[Info] Generated %d lines, with accuracy =' % len(result), accuracy)\n elif args.task == 'console':\n model.load_model()\n print(\"[Info] Entering console mode. Use Ctrl-C/D to exit.\")\n while True:\n in_s = input(\">> Input: \")\n time_d = time.time()\n result = model.translate(in_s)\n time_d = round(time.time()-time_d, 5)\n print(result)\n print(\"Used %fs\" % time_d)\n", "repo_name": "baocvcv/intro-to-ai", "sub_path": "a1-pinyin/src/pinyin.py", "file_name": "pinyin.py", "file_ext": "py", "file_size_in_byte": 4746, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "fenci_ngram.XNGramModel", "line_number": 46, "usage_type": "call"}, {"api_name": "ngram_zhuyin.NGramPYModel", "line_number": 54, "usage_type": "call"}, {"api_name": "ngram.NGramModel", "line_number": 60, "usage_type": "call"}, {"api_name": "io.open", "line_number": 75, "usage_type": "call"}, {"api_name": "console_progressbar.ProgressBar", "line_number": 76, "usage_type": "call"}, {"api_name": "io.open", "line_number": 87, "usage_type": "call"}, {"api_name": "io.open", "line_number": 96, "usage_type": "call"}, {"api_name": "console_progressbar.ProgressBar", "line_number": 97, "usage_type": "call"}, {"api_name": "io.open", "line_number": 104, "usage_type": "call"}, {"api_name": "time.time", "line_number": 115, "usage_type": "call"}, {"api_name": "time.time", "line_number": 117, "usage_type": "call"}]}
+{"seq_id": "15449005526", "text": "import datetime\nimport re\n\nimport numpy as np\nfrom aocd.models import Puzzle\n\nYEAR = datetime.datetime.today().year\nDAY = datetime.datetime.today().day\n\npuzzle = Puzzle(year=YEAR, day=DAY)\n\n\n# Part a\ndef a(data):\n print(data)\n breakpoint()\n\n\nexample_answer = a(puzzle.example_data)\nprint(example_answer)\nassert example_answer == ...\nanswer = a(puzzle.input_data)\nprint(\"a:\", answer)\npuzzle.answer_a = answer\n\n\n# Part b\ndef b(data):\n exit()\n\n\nexample_answer = b(puzzle.example_data)\nprint(example_answer)\nassert example_answer == ...\nanswer = b(puzzle.input_data)\nprint(\"b:\", answer)\npuzzle.answer_b = answer\n", "repo_name": "SimonSegerblomRex/aoc", "sub_path": "template.py", "file_name": "template.py", "file_ext": "py", "file_size_in_byte": 617, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "datetime.datetime.today", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 7, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 8, "usage_type": "attribute"}, {"api_name": "aocd.models.Puzzle", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "71443662326", "text": "# -*- coding: utf-8 -*-\n\nfrom sklearn import neighbors\nfrom numpy import genfromtxt, savetxt\n\n# DIR = '/home/elder/projetos/kaggle/digit/'\nDIR = '/home/projects/github/kaggle/'\ndados = genfromtxt(open(DIR+'train.csv', 'r'), delimiter=',')[1:]\n\nlabels = [i[0] for i in dados]\ntreino = [i[1:] for i in dados]\n\nteste = genfromtxt(open(DIR+'test.csv', 'r'), delimiter=',')[1:]\n\nknn = neighbors.KNeighborsClassifier(n_jobs=-1)\nknn.fit(treino,labels)\n\n# rforest = RandomForestClassifier(n_estimators=300, n_jobs=-1)\n# rforest.fit(treino, labels)\n\nsavetxt(DIR+'outputknn1.csv', knn.predict(teste), delimiter=',', fmt='%d')\n", "repo_name": "elderbeserra/digits", "sub_path": "knn_teste.py", "file_name": "knn_teste.py", "file_ext": "py", "file_size_in_byte": 616, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "numpy.genfromtxt", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.neighbors", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.savetxt", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "33523769191", "text": "import os\nimport sys\nimport mmap\nimport imp\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nimport xlrd\nfrom xml.sax.saxutils import quoteattr as xml_quoteattr\n\ndef stage():\n #Initilize window\n gui = Tk()\n gui.geometry('350x200+500+300')\n gui.title('Import Library')\n\n def load_dir(): #Directory Selection Location\n global sel_dir\n sel_dir = filedialog.askdirectory()\n dir_title = Label(gui,text = \"Selected Directory : \" ).grid(row = 1, column = 0, sticky = W)\n chosen_dir = Label(gui,text = sel_dir ).grid(row = 1, column = 1, columnspan = 3, sticky = W)\n return\n\n def load_excel(): #Excel Selection Location\n global sel_excel\n sel_excel = filedialog.askopenfile()\n excel_title = Label(gui,text = \"Selected Excel File : \" ).grid(row = 3, column = 0, sticky = W)\n chosen_excel = Label(gui,text = sel_excel.name ).grid(row = 3, column = 1, columnspan = 3, sticky = W)\n return\n\n def opt_set():\n g_img = c_img.get().lower()\n g_title = c_title.get().lower()\n g_desc = c_desc.get().lower()\n g_collection = c_collection.get().lower()\n g_subcollection = c_subcollection.get().lower()\n\n global ex_opt\n ex_opt = [g_img, g_title, g_desc, g_collection, g_subcollection]\n\n if ex_opt[0]==\"\":\n messagebox.showwarning(title = \"Error\", message = \"You must select a column for at least the file names\")\n\n try:\n sel_dir\n sel_excel\n\n except NameError:\n messagebox.showwarning(title = \"Error\", message = \"You must choose a directory , excel sheet, and at least the File Names!\")\n else:\n for i in range (0,5):\n if ex_opt[i]==\"\":\n ex_opt[i] = -1\n else:\n ex_opt[i] = ord(ex_opt[i])-97\n parse()\n gui.quit()\n return\n\n #Choose image library directory\n labelDir = Label(gui,text =\"Find Image Directory\").grid(row = 0, column = 0, sticky = W)\n buttonDir = Button(gui, text =\"Browse\", command = load_dir).grid(row = 0, column = 1, sticky = W)\n\n #Choose Excel sheet\n labelExcel = Label(gui,text =\"Find Excel File\").grid(row = 2, column = 0, sticky = W)\n buttonExcel = Button(gui, text =\"Browse\", command = load_excel).grid(row = 2, column = 1, sticky = W)\n\n #Excel Column Options\n labelMatch = Label(gui,text = \"Match excel column letters to desired image attributes.\").grid(row = 4, column = 0, columnspan = 4, sticky = W)\n\n d_img = Label(gui,text = \"File names\").grid(row = 5, column = 0, sticky = W)\n d_title = Label(gui,text = \"Titles\").grid(row = 5, column = 1, sticky = W)\n d_desc = Label(gui,text = \"Descriptions\").grid(row = 7, column = 0, sticky = W)\n d_collection = Label(gui,text = \"Collections\").grid(row = 7, column = 1, sticky = W)\n d_subcollection = Label(gui,text = \"Subcollection\").grid(row = 9, column = 0, sticky = W)\n\n c_img = StringVar()\n c_title = StringVar()\n c_desc = StringVar()\n c_collection = StringVar()\n c_subcollection = StringVar()\n\n e_img = Entry(gui, textvariable = c_img).grid(row = 6, column = 0, sticky = W)\n e_title = Entry(gui, textvariable = c_title).grid(row = 6, column = 1, sticky = W)\n e_desc = Entry(gui, textvariable = c_desc).grid(row = 8, column = 0, sticky = W)\n e_collection = Entry(gui, textvariable = c_collection).grid(row = 8, column = 1, sticky = W)\n e_subcollection = Entry(gui, textvariable = c_subcollection).grid(row = 10, column = 0, sticky = W)\n\n buttonDone = Button(gui, text = \"Generate CML File\", command = opt_set).grid(row =14, column = 0)\n mainloop()\n return 1\n\ndef parse():\n #Open excel file and sets variable sh 
to the first worksheet\n    wb=xlrd.open_workbook(sel_excel.name)\n    sh = wb.sheet_by_index(0)\n\n\n    #Stores the data from columns of the selected row\n    def find_info(row):\n        img=[]\n\n        for i in range (1,5):\n            if ex_opt[i] < 0:\n                img.append(\"\")\n            else:\n                try:\n                    img.append(sh.cell(rowx=row, colx=ex_opt[i]).value)\n                except UnicodeEncodeError:\n                    # encode non-ASCII text as XML character references\n                    img.append(sh.cell(rowx=row, colx=ex_opt[i]).value.encode('ascii','xmlcharrefreplace').decode('ascii'))\n\n        info_list = [img[0], img[1], img[2], img[3]]\n        return info_list\n\n    #Loops through excel sheet rows and generates data content for each row.\n    def gencon(path):\n\t\t#Determines how many rows there are\n        column = len(sh.col_values(ex_opt[0]))\n        sourceId = 0\n        result=\"\"\n\n        for row in range(column):\n            #Increments SourceID\n            sourceId = sourceId + 1\n            result += '
 \\n%d\\n' % sourceId\n\n            #Appends Picture name to Directory Path\n            cell_value = sh.cell(rowx=row,colx=ex_opt[0]).value\n            localPath = path+'/'+cell_value\n\n            #Ensure path separators are forward slashes\n            parenSwitch = localPath.replace('\\\\','/')\n            info_list = find_info(row)\n\n            result += '\t%s\n' % (parenSwitch[cleanPath:])\n            result += '''\\\n\t\t%s\n\t\t%s\n\t\t%s\n\t\t%s\n\t\\n''' % (info_list[0],info_list[1],info_list[2],info_list[3])\n            result += '\n'\n        return result\n\n\n    #Number of characters to delete for path to start at local dir (location of the script).\n    def refinePath(path):\n        fullPath = path\n        startPath = os.path.basename(path)\n        changeNum = len(fullPath) - len(startPath)\n        #localPath = fullPath[changeNum:]\n        return changeNum\n\n\n    #Default settings for the collection Viewer\n    end = '''\\\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!>\n\n    '''\n    outfile = open('../ImageViewer_Config_template.cml','w')\n    global cleanPath\n    cleanPath = refinePath(os.getcwd())\n    print ('Creating XML Template...')\n    print ('\\n\\n \\n' + gencon(sel_dir) + end, file = outfile)\n    print ('\\nDone!')\n    return\n\nif __name__ == '__main__':\n    stage()\n\n\n\n", "repo_name": "pdbeard/cv_scripts", "sub_path": "ImageViewer3.0.py", "file_name": "ImageViewer3.0.py", "file_ext": "py", "file_size_in_byte": 6576, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "tkinter.filedialog.askdirectory", "line_number": 19, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 19, "usage_type": "name"}, {"api_name": "tkinter.filedialog.askopenfile", "line_number": 26, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 26, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showwarning", "line_number": 42, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 42, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showwarning", "line_number": 49, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 49, "usage_type": "name"}, {"api_name": "xlrd.open_workbook", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 164, "usage_type": "call"}]}
+{"seq_id": "7801534653", "text": "import os, sys, errno, time, tempfile\n\nMAX_RETRIES = 5\nRETRY_DELAY = 0.5 # seconds\n\n##### ProcessLock (context manager) class to implement a file based process lock with retries and logging.\nclass ProcessLock:\n\n def __init__(self, lockfile_name, logger):\n self.lockfile_path = os.path.join(tempfile.gettempdir(), lockfile_name)\n self.logger = logger\n self.lock_file = None\n\n def __enter__(self):\n self.logger.debug(f'ProcessLock attempting to acquire lock file: {self.lockfile_path}')\n for i in range(MAX_RETRIES):\n attempt = i + 1\n try:\n self.lock_file = open(self.lockfile_path, 'x')\n self.logger.debug(f'ProcessLock lock file acquired: {self.lockfile_path}')\n break\n except OSError as e:\n if e.errno != errno.EEXIST:\n self.logger.debug(f'Unexpected error in attempt {attempt} of {MAX_RETRIES} to acquire lock file: {e}')\n raise\n self.logger.debug(f'ProcessLock failed to acquire lock file on attempt: {attempt} of {MAX_RETRIES}')\n if attempt == MAX_RETRIES:\n print('Unable to acquire lock, process is already running.')\n print(f'Lock file, {self.lockfile_path}, already exists, exiting.')\n self.logger.debug(f'Exiting script! Lock file already exists: {self.lockfile_path}')\n self.logger.close('Closing logger instance from ProcessLock before sys.exit().')\n sys.exit(1)\n time.sleep(RETRY_DELAY) \n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.release()\n if exc_type is not None:\n if issubclass(exc_type, SystemExit) and exc_value.code == 0:\n self.logger.debug('ProcessLock exited context successfully.')\n else:\n self.logger.debug(f'In ProcessLock __exit__ method an exception of type {exc_type} occurred with value {exc_value}')\n return False # If True, suppresses any exception that occurred\n\n def release(self):\n if self.lock_file:\n try:\n self.lock_file.close()\n os.unlink(self.lockfile_path)\n self.lock_file = None\n self.logger.debug(f'ProcessLock lock file released: {self.lockfile_path}')\n except OSError as e:\n print(f'Unexpected error releasing lock file, {self.lockfile_path}: {e}')\n", "repo_name": "7alpha3/SQLite_WSS_Data", "sub_path": "process_lock.py", "file_name": "process_lock.py", "file_ext": "py", "file_size_in_byte": 2530, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tempfile.gettempdir", "line_number": 10, "usage_type": "call"}, {"api_name": "errno.EEXIST", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 32, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 33, "usage_type": "call"}, {"api_name": "os.unlink", "line_number": 49, "usage_type": "call"}]}
+{"seq_id": "3888518083", "text": "import re\nfrom nltk.corpus import stopwords\n\n# identify which sentences may have a company in it\ndef identify_potential_sentence(sentence):\n matches = re.findall(r\"(?:(?:[A-Z]+[a-z]*) ?)+\", sentence)\n if matches:\n return matches\n else:\n return False\n\n# removes the stop words from a found match\ndef remove_stop_words(word):\n stop_words = set(stopwords.words('english'))\n cleaned_word = ' '.join([x for x in word.split(' ') if x.lower() not in stop_words]).rstrip()\n return cleaned_word\n\ndef find_potential_companies(list_of_sentences):\n potential_matches = []\n for sentence_index, sentence in enumerate(list_of_sentences):\n matches = identify_potential_sentence(sentence) \n if matches:\n # filter out the matches with annoying APBloomberg or AP stuff in it\n cleaned_matches = [remove_stop_words(x) for x in matches]\n remove_empty_words = [x for x in cleaned_matches if x != '']\n for match in remove_empty_words:\n potential_matches.append(match)\n return potential_matches", "repo_name": "mattwparas/iems308qasystem", "sub_path": "find_companies.py", "file_name": "find_companies.py", "file_ext": "py", "file_size_in_byte": 1083, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "re.findall", "line_number": 6, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 14, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 14, "usage_type": "name"}]}
+{"seq_id": "11212911881", "text": "import argparse\nfrom libs.phonebooter import PhoneBooter\n\n\ndef main(targetNum, threads, bootLength, wav):\n\n booter = PhoneBooter()\n booter.launch (targetNum, threads, bootLength, wav)\n\n\nif __name__ == '__main__':\n banner = \"\"\"\n\n _____ _ ____ _ \n | __ \\| | | _ \\ | | \n | |__) | |__ ___ _ __ ___| |_) | ___ ___ | |_ ___ _ __ \n | ___/| '_ \\ / _ \\| '_ \\ / _ \\ _ < / _ \\ / _ \\| __/ _ \\ '__|\n | | | | | | (_) | | | | __/ |_) | (_) | (_) | || __/ | \n |_| |_| |_|\\___/|_| |_|\\___|____/ \\___/ \\___/ \\__\\___|_| \n\n\n FUCKING MICROSOFT SUPPORT SCAMMING PIECES OF SHIT!\n ex. usage: phonebooter.py -p -l 600 -s ducktales\n \"\"\"\n print(banner)\n parser = argparse.ArgumentParser(description='PhoneBooter CLI')\n parser.add_argument('-p', '--phonenumber', action='store', dest='targetNum', required=True,\n help='Specify the target phone number to attack. Example: 18001234567')\n parser.add_argument('-l', '--length', action='store', dest='bootLength', required=True,\n help='Length of time in seconds to run the phone')\n parser.add_argument('-s', '--sound', action='store', dest='wav',\n help='Specify the *.ulaw file to play. Store it under /usr/share/asterisk/sounds. '\n 'Do not include the extension', default='hello-world')\n parser.add_argument('-t', '--threads', action='store', dest='threads',\n help='Number of async processes to kick off. Default is 2. '\n '2 is sufficient for cellular devices in most cases.', default=2)\n args = parser.parse_args()\n\n if args.wav is not None:\n main(args.targetNum, int(args.threads), int(args.bootLength), args.wav)\n\n else:\n main(args.targetNum, int(args.threads), int(args.bootLength), 'hello-world')\n", "repo_name": "BraveLittleRoaster/PhoneBooter", "sub_path": "booter-cli.py", "file_name": "booter-cli.py", "file_ext": "py", "file_size_in_byte": 1947, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "libs.phonebooter.PhoneBooter", "line_number": 7, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 26, "usage_type": "call"}]}
+{"seq_id": "34963871383", "text": "from email import message\nfrom turtle import title\n\nfirst_name = \"anger\"\nlast_name = \"liu\"\n# 在字符串中插入变量\n# f 表示 format ,用 f 格式化的字符串叫做 f-字符串\n# python 格式化变量值为 String 的格式 f\"{varialble}\"\nfull_name = f\"{first_name} {last_name}\"\nprint(full_name)\n# 可以在 f 字符串使用任意字符串拼接变量\n# full_name.title() 将变量调用title() 方法的值转换成字符串进行拼接\nprint(f\"Hello,{full_name.title()} !\")\n# 将 f 字符串赋值给变量\nmessage = f\"this is a message\".title(), {full_name}\nprint(message)\n", "repo_name": "elevenanger/python-course", "sub_path": "variables_and_simple_data_types/strings/full_name.py", "file_name": "full_name.py", "file_ext": "py", "file_size_in_byte": 589, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "email.message", "line_number": 15, "usage_type": "name"}, {"api_name": "email.message", "line_number": 16, "usage_type": "argument"}]}
+{"seq_id": "19271742927", "text": "import datetime\nfrom pyexpat import model\nfrom fastapi import FastAPI, HTTPException, Depends\nfrom pydantic import BaseModel, Field\nimport models\nfrom database import engine, SessionLocal\nfrom sqlalchemy.orm import Session\nfrom typing import Union\nfrom functools import wraps\nimport requests\n\n\napp = FastAPI()\nemployee_api_endpoint = \"http://127.0.0.1:8090/employees/\"\n\nmodels.Base.metadata.create_all(bind=engine)\n\n\ndef get_db():\n try:\n db = SessionLocal()\n yield db\n finally:\n db.close()\n\n\ndef check_employee(func):\n @wraps(func)\n async def wrapper(*args, **kwargs):\n employee_id = kwargs[\"employee_id\"]\n r = requests.get(employee_api_endpoint + str(employee_id), timeout=5)\n if r.status_code == 404:\n raise HTTPException(\n status_code=404, detail=f\"Employee ID {employee_id} : Does not exist\"\n )\n return await func(*args, **kwargs)\n\n return wrapper\n\n\nclass TimeSheet(BaseModel):\n employe_id: int = Field()\n date: Union[datetime.date] = Field()\n hours_worked: int = Field()\n\n\n@app.get(\"/timesheet/{employee_id}\")\n@check_employee\nasync def list_timesheet_records(employee_id: int, db: Session = Depends(get_db)):\n return (\n db.query(models.TimeSheet)\n .filter(models.TimeSheet.employee_id == employee_id)\n .values(\n models.TimeSheet.employee_id,\n models.TimeSheet.date,\n models.TimeSheet.hours_worked,\n )\n )\n\n\n@app.post(\"/timesheet\")\ndef create_timesheet(timesheet: TimeSheet, db: Session = Depends(get_db)):\n\n tm_model = models.TimeSheet()\n tm_model.employee_id = timesheet.employe_id\n tm_model.date = timesheet.date\n tm_model.hours_worked = timesheet.hours_worked\n\n db.add(tm_model)\n db.commit()\n\n return timesheet\n\n\n@app.get(\"/timesheet/{employee_id}/{date}\")\n@check_employee\nasync def get_timesheet_record(\n employee_id: int, date: datetime.date, db: Session = Depends(get_db)\n):\n print(date)\n return (\n db.query(models.TimeSheet)\n .filter(\n models.TimeSheet.employee_id == employee_id, models.TimeSheet.date == date\n )\n .values(\n models.TimeSheet.employee_id,\n models.TimeSheet.date,\n models.TimeSheet.hours_worked,\n )\n )\n", "repo_name": "Ansh111222/timesheet_api", "sub_path": "timesheet.py", "file_name": "timesheet.py", "file_ext": "py", "file_size_in_byte": 2317, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "fastapi.FastAPI", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Base.metadata.create_all", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Base", "line_number": 16, "usage_type": "attribute"}, {"api_name": "database.engine", "line_number": 16, "usage_type": "name"}, {"api_name": "database.SessionLocal", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 31, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 33, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 28, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 41, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 42, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 43, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pydantic.Field", "line_number": 43, "usage_type": "call"}, {"api_name": "pydantic.Field", "line_number": 44, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 49, 
"usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 49, "usage_type": "call"}, {"api_name": "models.TimeSheet", "line_number": 51, "usage_type": "attribute"}, {"api_name": "models.TimeSheet", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.TimeSheet", "line_number": 54, "usage_type": "attribute"}, {"api_name": "models.TimeSheet", "line_number": 55, "usage_type": "attribute"}, {"api_name": "models.TimeSheet", "line_number": 56, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 62, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 62, "usage_type": "call"}, {"api_name": "models.TimeSheet", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 78, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 78, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 78, "usage_type": "call"}, {"api_name": "models.TimeSheet", "line_number": 82, "usage_type": "attribute"}, {"api_name": "models.TimeSheet", "line_number": 84, "usage_type": "attribute"}, {"api_name": "models.TimeSheet", "line_number": 87, "usage_type": "attribute"}, {"api_name": "models.TimeSheet", "line_number": 88, "usage_type": "attribute"}, {"api_name": "models.TimeSheet", "line_number": 89, "usage_type": "attribute"}]}
+{"seq_id": "18472204991", "text": "import numpy as n\r\nfrom scipy.integrate import simps\r\ndef FDHFQ(Q0prime,Actual_Multiplication_Factor,Burnup,Q0primedangerous,Le):\r\n Qprime = n.zeros(401)\r\n Qprimed = n.zeros(401)\r\n avgQprime = 0\r\n Len = n.linspace(0,Le,401)\r\n for v in range(401):\r\n Qprime[v] = Q0prime*Actual_Multiplication_Factor[v]\r\n avgQprime += Qprime[v]\r\n Qprimed[v] = Q0primedangerous*Actual_Multiplication_Factor[v]\r\n G = n.polyfit(Len[:],Qprime[:],2)\r\n H = n.polyfit(Len[:],Qprimed[:],2)\r\n AverageQprime = avgQprime / (v+1)\r\n FQ = Q0prime/AverageQprime\r\n FQdan = Q0primedangerous/AverageQprime\r\n UFr = 2.6/FQ #Uncertanty factor to make FQ the same as a nominal AP1000 reactor\r\n UFdr = 2.6/FQdan\r\n A = simps(G)\r\n B = simps(H)\r\n FDH = B/A\r\n return FQ,FQdan,FDH,G,UFr,UFdr,FQ,FQdan", "repo_name": "notxesnayr/sub-channel", "sub_path": "FDHFQ.py", "file_name": "FDHFQ.py", "file_ext": "py", "file_size_in_byte": 822, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "numpy.zeros", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 13, "usage_type": "call"}, {"api_name": "scipy.integrate.simps", "line_number": 19, "usage_type": "call"}, {"api_name": "scipy.integrate.simps", "line_number": 20, "usage_type": "call"}]}
+{"seq_id": "21383971355", "text": "from scrapy.utils.project import get_project_settings\nfrom scrapy.crawler import CrawlerProcess\n\n\n# 执行多个爬虫\nclass Runner(object):\n @classmethod\n def run(self):\n setting = get_project_settings()\n process = CrawlerProcess(setting)\n didntWorkSpider = []\n\n for spider_name in process.spiders.list():\n if spider_name in didntWorkSpider:\n continue\n print(\"Running spider %s\" % (spider_name))\n process.crawl(spider_name)\n process.start()\n\n\nif __name__ == '__main__':\n Runner().run()\n", "repo_name": "zonectmac/amazonspider", "sub_path": "AmazonScrapy/AmazonScrapy/run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 579, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "scrapy.utils.project.get_project_settings", "line_number": 9, "usage_type": "call"}, {"api_name": "scrapy.crawler.CrawlerProcess", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "24649281718", "text": "import argparse\nimport os\nimport pprint\nimport shutil\nimport sys\nimport random\nimport logging\nimport time\nimport timeit\nfrom pathlib import Path\nimport time\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom pp_liteseg import PPLiteSeg\nimport cv2\nimport torch.nn.functional as F\nimport datasets\n\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train segmentation network')\n\n parser.add_argument('--image',\n help='test image path',\n default=\"mainz_000001_009328_leftImg8bit.png\",\n type=str)\n parser.add_argument('--weights',\n help='cityscape pretrained weights',\n default=\"ppliteset_pp2torch_cityscape_pretrained.pth\",\n type=str)\n parser.add_argument('opts',\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER)\n\n args = parser.parse_args()\n\n return args\n\n\ndef colorEncode(labelmap, colors, mode='RGB'):\n labelmap = labelmap.astype('int')\n labelmap_rgb = np.zeros((labelmap.shape[0], labelmap.shape[1], 3),\n dtype=np.uint8)\n for label in np.unique(labelmap):\n if label < 0:\n continue\n labelmap_rgb = labelmap_rgb + (labelmap == label)[:, :, np.newaxis] * \\\n np.tile(colors[label],\n (labelmap.shape[0], labelmap.shape[1], 1))\n\n if mode == 'BGR':\n return labelmap_rgb[:, :, ::-1]\n else:\n return labelmap_rgb\n\n\ndef main():\n base_size = 512\n wh = 2\n mean = [0.5, 0.5, 0.5],\n std = [0.5, 0.5, 0.5]\n args = parse_args()\n\n model = PPLiteSeg()\n\n model.eval()\n\n print(\"ppliteseg:\", model)\n ckpt = torch.load(args.weights)\n model = model.cuda()\n if 'state_dict' in ckpt:\n model.load_state_dict(ckpt['state_dict'])\n else:\n model.load_state_dict(ckpt)\n\n img = cv2.imread(args.image)\n imgor = img.copy()\n img = cv2.resize(img, (wh * base_size, base_size))\n image = img.astype(np.float32)[:, :, ::-1]\n image = image / 255.0\n image -= mean\n image /= std\n\n image = image.transpose((2, 0, 1))\n image = torch.from_numpy(image)\n\n # image = image.permute((2, 0, 1))\n\n image = image.unsqueeze(0)\n image = image.cuda()\n start = time.time()\n out = model(image)\n end = time.time()\n print(\"infer time:\", end - start, \" s\")\n out = out[0].squeeze(dim=0)\n outadd = F.softmax(out, dim=0)\n outadd = torch.argmax(outadd, dim=0)\n predadd = outadd.detach().cpu().numpy()\n pred = np.int32(predadd)\n colors = np.random.randint(0, 255, 19 * 3)\n colors = np.reshape(colors, (19, 3))\n # colorize prediction\n pred_color = colorEncode(pred, colors).astype(np.uint8)\n pred_color = cv2.resize(pred_color,(imgor.shape[1],imgor.shape[0]))\n\n im_vis = cv2.addWeighted(imgor, 0.7, pred_color, 0.3, 0)\n cv2.imwrite(\"results.jpg\", im_vis)\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "midasklr/PPLiteSeg.pytorch", "sub_path": "demo.py", "file_name": "demo.py", "file_ext": "py", "file_size_in_byte": 3093, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "76", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "argparse.REMAINDER", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.tile", "line_number": 52, "usage_type": "call"}, 
{"api_name": "pp_liteseg.PPLiteSeg", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 83, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 89, "usage_type": "call"}, {"api_name": "time.time", "line_number": 95, "usage_type": "call"}, {"api_name": "time.time", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 100, "usage_type": "name"}, {"api_name": "torch.argmax", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 107, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 108, "usage_type": "call"}, {"api_name": "cv2.addWeighted", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 111, "usage_type": "call"}]}
+{"seq_id": "72021419124", "text": "# This Python 3 environment comes with many helpful analytics libraries installed\n\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n\n# For example, here's several helpful packages to load in \n\n\n\nimport numpy as np # linear algebra\n\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n\n\n# Input data files are available in the \"../input/\" directory.\n\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\n\n\nfrom subprocess import check_output\n\nprint(check_output([\"ls\", \"../input\"]).decode(\"utf8\"))\n\n\n\n# Any results you write to the current directory are saved as output.\nimport kagglegym\n\nimport numpy as np\n\nimport pandas as pd\n\nimport tensorflow as tf\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn import preprocessing as pp\n\n\n\nenv = kagglegym.make()\n\no = env.reset()\ncol = ['technical_20']\n\ntrain = o.train[col + ['id', 'timestamp', 'y']].copy(deep=True)\n\n\n\nim = pp.Imputer(strategy='median')\n\ntrain[col] = im.fit_transform(train[col])\n\nsX = pp.StandardScaler()\n\ntrain[col] = sX.fit_transform(train[col])\n\ntrain['b'] = 1\n\n\n\ny_min = train.y.min()\n\ny_max = train.y.max()\n\n\n\ndf_id = train[['id', 'timestamp']].groupby('id').agg([np.min])\n\ndf_id.reset_index(level=0, inplace=True)\n\ntrain = pd.merge(train, df_id, on='id', how='inner')\n\ntrain = train.rename(columns={train.columns[len(train.columns)-1]: 'min_ts'})\n\ntrain = train.loc[(train.min_ts > 1) & (train.yy_min)].copy(deep=True)\n\n\n\n\n\nfeatures = ['b']+col\n\nn = len(features)\n\n\n\nlearning_rate = 0.01\n\ntraining_epochs = 1000\n\ncost_history = np.empty(shape=[1],dtype=float)\n\n\n\nX = tf.placeholder(tf.float32,[None,n])\n\nY = tf.placeholder(tf.float32,[None,1])\n\nW = tf.Variable(tf.zeros([n,1]))\n\n\n\ninit = tf.global_variables_initializer()\n\n\n\ny_ = tf.matmul(X, W)\n\n\n\ncost = tf.add(tf.reduce_mean(tf.square(y_ - Y)), tf.reduce_mean(tf.square(W)))\n\ntraining_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\n\n\nsess = tf.Session()\n\nsess.run(init)\n\n\n\nfor epoch in range(training_epochs):\n\n sess.run(training_step,feed_dict={X: train[features], Y: train[['y']].values})\nwhile True:\n\n o.features[col] = im.transform(o.features[col])\n\n o.features[col] = sX.transform(o.features[col])\n\n o.features['b'] = 1\n\n \n\n o.target.y = sess.run(y_, feed_dict={X:o.features[features]})\n\n o.target.y = np.clip(o.target.y, y_min, y_max)\n\n \n\n o, reward, done, info = env.step(o.target)\n\n if done:\n\n print(info)\n\n break\n\n if o.features.timestamp[0] % 100 == 0:\n\n print(reward)", "repo_name": "aorursy/new-nb-7.2", "sub_path": "tarobxl_the-power-of-less.py", "file_name": "tarobxl_the-power-of-less.py", "file_ext": "py", "file_size_in_byte": 2618, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "subprocess.check_output", "line_number": 23, "usage_type": "call"}, {"api_name": "kagglegym.make", "line_number": 42, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.Imputer", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 51, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 55, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 55, "usage_type": "name"}, {"api_name": 
"numpy.min", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pandas.merge", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 97, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 99, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 105, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.add", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.train.GradientDescentOptimizer", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 115, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 140, "usage_type": "call"}]}
+{"seq_id": "74017002486", "text": "import shutil\nimport tempfile\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import cache\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.test import Client, TestCase\nfrom django.urls import reverse\n\nfrom posts.models import Comment, Follow, Group, Post\n\n\nclass PostsViewTests(TestCase):\n\n AUTH_USER_NAME = 'TestUser'\n PAGE_TEXT = 'Тестовое сообщение1'\n PAGE_GROUP = 'Тестовая группа'\n GROUP_SLUG = 'test-group'\n GROUP_DESCR = 'Описание группы'\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n\n cls.user = get_user_model().objects.create(\n username=cls.AUTH_USER_NAME\n )\n Group.objects.bulk_create([\n Group(title=f'{cls.PAGE_GROUP}{i}',\n slug=f'{cls.GROUP_SLUG}{i}',\n description=f'{cls.GROUP_DESCR}{i}')\n for i in range(1, 3)]\n )\n\n cls.post = Post.objects.create(\n text=cls.PAGE_TEXT,\n author=cls.user,\n group=Group.objects.get(title=cls.PAGE_GROUP+'1')\n )\n\n cls.unfollower = get_user_model().objects.create(\n username='Unfoollowuser',\n email='testunfoll@gmail.com',\n password='unfolow',\n )\n\n cls.follower = get_user_model().objects.create(\n username='folow',\n email='testsfoll@gmail.com',\n password='follow',\n )\n\n def setUp(self):\n self.guest_user = Client()\n self.authorized_client = Client()\n self.authorized_client.force_login(self.user)\n self.authorized_follower = Client()\n self.authorized_follower.force_login(self.follower)\n self.authorized_unfollower = Client()\n self.authorized_unfollower.force_login(self.unfollower)\n\n def test_auth_user_can_unfollow(self):\n \"\"\"Авторизированный пользователь может отписаться от автора поста\"\"\"\n Follow.objects.create(user=self.follower,\n author=self.user)\n self.authorized_follower.get(\n reverse(\n 'profile_unfollow',\n kwargs={'username': self.user}\n )\n )\n self.assertFalse(\n Follow.objects.filter(\n user=self.follower,\n author=self.user\n ),\n )\n\n def test_unfollower_follow_index(self):\n \"\"\"Посты не появляются у неподписчика\"\"\"\n self.authorized_follower.get(reverse(\n 'profile_follow',\n kwargs={\n 'username': self.user\n }))\n\n posts = Post.objects.filter(\n author__following__user=self.follower)\n\n response_follower = self.authorized_follower.get(\n reverse('follow_index'))\n response_author = self.authorized_client.get(\n reverse('follow_index'))\n\n self.assertIn(\n posts.get(),\n response_follower.context['paginator'].object_list,\n )\n self.assertNotIn(\n posts.get(),\n response_author.context['paginator'].object_list,\n )\n\n def test_auth_user_can_comment(self):\n \"\"\"Только авторизированный пользователь может комментировать посты\"\"\"\n form_data = {\n 'post': self.post,\n 'author': self.user,\n 'text': 'TESTTESXT'\n }\n self.authorized_client.post(\n reverse('add_comment', args=(self.user, self.post.id)),\n data=form_data, follow=True\n )\n comment = Comment.objects.first()\n self.assertEqual(comment.text, form_data['text'])\n self.assertEqual(comment.author, self.user)\n self.assertEqual(self.post.comments.count(), 1)\n self.assertEqual(comment.post, self.post)\n\n def test_urls_uses_correct_template(self):\n \"\"\"URL-адрес использует соответствующий шаблон.\"\"\"\n templates_url_names = {\n 'posts/index.html': reverse('index'),\n 'posts/new_post.html': reverse('post_new'),\n 'group.html': reverse('group_posts', kwargs={\n 'slug': f'{self.GROUP_SLUG}1'}),\n }\n for template, url in templates_url_names.items():\n 
with self.subTest(url=url):\n response = self.authorized_client.get(url)\n self.assertTemplateUsed(response, template)\n\n def test_context_in_post_new_page(self):\n \"\"\"Тестирование содержания context в post_new\"\"\"\n response = self.authorized_client.get(reverse('post_new'))\n\n form_fields = {\n 'text': forms.fields.CharField,\n 'group': forms.fields.ChoiceField,\n }\n for value, expected in form_fields.items():\n form_field = response.context.get('form').fields.get(value)\n self.assertIsInstance(form_field, expected)\n\n def test_context_in_index_page(self):\n \"\"\"Тестирование содержания context в index\"\"\"\n response = self.authorized_client.get(reverse('index'))\n all_post_count = Post.objects.count()\n resp_page = response.context['page'][0]\n\n context_post = {\n all_post_count: response.context['paginator'].count,\n self.PAGE_TEXT: resp_page.text,\n self.AUTH_USER_NAME: resp_page.author.username,\n f'{self.PAGE_GROUP}1': resp_page.group.title\n }\n\n for expected, value in context_post.items():\n with self.subTest(value=value):\n self.assertEqual(value, expected)\n\n def test_context_in_group_page(self):\n \"\"\"Тестирование содержания context в group\"\"\"\n response = self.authorized_client.get(\n reverse('group_posts', kwargs={'slug': f'{self.GROUP_SLUG}1'})\n )\n\n resp_page = response.context['page'][0]\n resp_group = response.context['group']\n\n context_group = {\n self.PAGE_TEXT: resp_page.text,\n self.AUTH_USER_NAME: resp_page.author.username,\n f'{self.PAGE_GROUP}1': resp_group.title,\n f'{self.GROUP_SLUG}1': resp_group.slug,\n f'{self.GROUP_DESCR}1': resp_group.description\n }\n\n for expected, value in context_group.items():\n with self.subTest(value=value):\n self.assertEqual(value, expected)\n\n def test_context_in_edit_post_page(self):\n \"\"\"Тестирование содержания context при редактировании поста\"\"\"\n response = self.authorized_client.get(\n reverse(\n 'post_edit',\n kwargs={\n 'username': self.AUTH_USER_NAME,\n 'post_id': self.post.id\n }\n )\n )\n\n context_edit_page = {\n self.PAGE_TEXT: response.context.get('post').text,\n f'{self.PAGE_GROUP}1': response.context.get('post').group.title,\n }\n\n for expected, value in context_edit_page.items():\n with self.subTest():\n self.assertEqual(value, expected)\n\n def test_context_in_profile_page(self):\n \"\"\"Тестирование содержания context для profile\"\"\"\n response = self.guest_user.get(\n reverse(\n 'profile',\n kwargs={'username': self.AUTH_USER_NAME}\n )\n )\n resp_page = response.context['page'][0]\n\n context_edit_page = {\n self.PAGE_TEXT: resp_page.text,\n f'{self.PAGE_GROUP}1': resp_page.group.title,\n self.AUTH_USER_NAME: resp_page.author.username,\n }\n\n for expected, value in context_edit_page.items():\n with self.subTest():\n self.assertEqual(value, expected)\n\n def test_context_in_post_id_page(self):\n \"\"\"Тестирование context для страницы индивидуального поста\"\"\"\n response = self.guest_user.get(\n reverse(\n 'post',\n kwargs={\n 'username': self.AUTH_USER_NAME,\n 'post_id': self.post.id\n }\n )\n )\n\n context_edit_page = {\n self.PAGE_TEXT: response.context.get('post').text,\n f'{self.PAGE_GROUP}1': response.context.get('post').group.title,\n self.AUTH_USER_NAME: response.context.get('post').author.username,\n }\n\n for expected, value in context_edit_page.items():\n with self.subTest():\n self.assertEqual(value, expected)\n\n def test_post_added_in_index_page(self):\n \"\"\"Тестирование наличия поста на главной странице сайта\"\"\"\n response = self.authorized_client.get(\n 
reverse('index'))\n post_id = response.context.get('page')[0].pk\n self.assertEqual(post_id, self.post.pk)\n\n def test_post_added_in_group_page(self):\n \"\"\"Тестирование наличия поста присвоенного группе на странице группы\"\"\"\n post = Post.objects.first()\n response = self.authorized_client.get(\n reverse('group_posts', kwargs={'slug': f'{self.GROUP_SLUG}1'}))\n self.assertEqual(post.text, response.context.get('page')[0].text)\n\n def test_post_added_in_correct_group(self):\n \"\"\"Тестирование на правильность назначения групп для постов\"\"\"\n group = Group.objects.first()\n posts_out_of_group = Post.objects.exclude(group=group)\n response = self.authorized_client.get(\n reverse('group_posts', kwargs={'slug': f'{self.GROUP_SLUG}1'}))\n group_list_posts_set = set(posts_out_of_group)\n all_posts_of_group_page = response.context.get(\n 'paginator').object_list\n self.assertTrue(\n group_list_posts_set.isdisjoint(all_posts_of_group_page)\n )\n\n\nclass StaticViewsTests(TestCase):\n\n def test_templates_static_pages(self):\n \"\"\"Тестирование шаблонов для статических страниц\"\"\"\n templates_url_names = {\n 'about/author.html': reverse('about:author'),\n 'about/tech.html': reverse('about:tech'),\n }\n\n for template, reverse_name in templates_url_names.items():\n with self.subTest(reverse_name=reverse_name):\n response = self.client.get(reverse_name)\n self.assertTemplateUsed(response, template)\n\n\nclass PostImageViewTest(TestCase):\n AUTH_USER_NAME = 'TestUser'\n GROUP_SLUG = 'test-group'\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n settings.MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)\n\n cls.follower = get_user_model().objects.create(\n username='SecondFollow',\n email='teswes@gmail.com',\n password='Second',\n )\n\n cls.small_gif = (b'\\x47\\x49\\x46\\x38\\x39\\x61\\x02\\x00'\n b'\\x01\\x00\\x80\\x00\\x00\\x00\\x00\\x00'\n b'\\xFF\\xFF\\xFF\\x21\\xF9\\x04\\x00\\x00'\n b'\\x00\\x00\\x00\\x2C\\x00\\x00\\x00\\x00'\n b'\\x02\\x00\\x01\\x00\\x00\\x02\\x02\\x0C'\n b'\\x0A\\x00\\x3B'\n )\n\n cls.uploaded = SimpleUploadedFile(\n name='small.gif',\n content=cls.small_gif,\n content_type='image/gif'\n )\n cls.user = get_user_model().objects.create(\n username=cls.AUTH_USER_NAME\n )\n cls.group = Group.objects.create(\n title='Тестовая группа',\n slug='test-group'\n )\n cls.post = Post.objects.create(\n text='Тестовая запись',\n group=cls.group,\n author=cls.user,\n image=cls.uploaded\n )\n\n @classmethod\n def tearDownClass(cls):\n shutil.rmtree(settings.MEDIA_ROOT, ignore_errors=True)\n super().tearDownClass()\n\n def setUp(self):\n self.guest_client = Client()\n self.authorized_follower = Client()\n self.authorized_follower.force_login(self.follower)\n\n def test_follower_follow_user(self):\n \"\"\"Посты появляются у подписчика\"\"\"\n self.authorized_follower.get(\n reverse('profile_follow',\n kwargs={'username': self.user})\n )\n response = self.authorized_follower.get(\n reverse('follow_index')\n )\n self.assertContains(response, '
95000:\n errors.append(error)\n plt.plot(errors)\n plt.show()\nif __name__ == '__main__':\n exp_mackyglass_rnn()", "repo_name": "Amir-19/prediction-pillar", "sub_path": "experiment/exp_mackeyglass_onestep.py", "file_name": "exp_mackeyglass_onestep.py", "file_ext": "py", "file_size_in_byte": 997, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "76", "api": [{"api_name": "numpy.random.seed", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 15, "usage_type": "call"}, {"api_name": "agent.online_rnn_backprop.RecurrentNet", "line_number": 17, "usage_type": "call"}, {"api_name": "environment.synthetic_online.MackeyGlass", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}]}
+{"seq_id": "74735413045", "text": "import labrad\nimport numpy\nimport matplotlib\n\nfrom matplotlib import pyplot\n#get access to servers\ncxn = labrad.connect()\ndv = cxn.data_vault\nfigure = pyplot.figure()\npyplot.title(\"Sideband Rabi Flopping Drift, all same parameters\")\n\nfor dataset in ['2217_20','2211_05', '2219_42','2218_33']:\n dv.cd(['', 'Experiments', '729Experiments', 'RabiFlopping', '2013Jan25', dataset])\n dv.open(1)\n data = dv.get().asarray\n x = data[:,0] * 10**6 #now in microseconds\n pyplot.plot(x, data[:,1],'o-', label = '2013Jan25_{0}'.format(dataset))\n\npyplot.ylabel('Excitation Probability')\npyplot.xlabel(r'Excitation Time $\\mu s$')\npyplot.ylim([0,1])\npyplot.legend()\npyplot.show()\n", "repo_name": "HaeffnerLab/cct", "sub_path": "old_scripts/dataAnalysis/729Experiments/2013Jan25/sideband_flop_drift.py", "file_name": "sideband_flop_drift.py", "file_ext": "py", "file_size_in_byte": 678, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "76", "api": [{"api_name": "labrad.connect", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}]}
+{"seq_id": "19560818471", "text": "import abc\nimport sortedcontainers\nfrom collections import Counter, namedtuple\nfrom operator import methodcaller\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom loguru import logger\n\nfrom src.request import LinkDownloader\n\n\nclass BaseLinkParser(metaclass=abc.ABCMeta):\n \"\"\"\n Parses all pages and gathers all links.\n \"\"\"\n\n def __init__(self, start_url: str, tools: list):\n self.start_url = start_url\n self.tools = tools\n\n @abc.abstractmethod\n def get_all_links(self):\n \"\"\"\n Returns list of all links for a single job offer.\n \"\"\"\n ...\n\n @staticmethod\n def get_page(page_url: str):\n response = requests.get(page_url)\n return response.text\n\n @abc.abstractmethod\n def get_last_page_index(self):\n \"\"\"\n Return amount of pages. If possible.\n \"\"\"\n ...\n\n\nclass DjinniLinkParser(BaseLinkParser):\n\n def __init__(self, start_url, tools):\n self.all_data = \"\"\n self.start_page_data = requests.get(start_url).text\n self.offers_links = None\n self.BASE_URL = start_url\n super().__init__(start_url, tools)\n self.get_all_links()\n\n def get_base_url(self):\n return self.BASE_URL + \"&page=\"\n\n def get_all_links(self):\n links = []\n for page_index in range(1, self.get_last_page_index() + 1):\n page_url = self.get_base_url() + str(page_index)\n logger.info(\"Getting '%s'\" % page_url)\n page = self.get_page(page_url)\n parser = BeautifulSoup(page, \"html.parser\")\n links.extend(parser.findAll(\"a\", class_=\"profile\"))\n self.offers_links = links\n\n def get_last_page_index(self):\n html_parser = BeautifulSoup(self.start_page_data, \"html.parser\")\n try:\n # Black magic. Specific for djinni.\n last_tag = html_parser.findAll(\"a\", class_=\"page-link\")[-2]\n except IndexError:\n return 1\n return int(last_tag.text) + 1\n\n def get_data(self):\n all_data = []\n links_downloader = LinkDownloader(\n list(map(lambda x: \"https://djinni.co\" + x[\"href\"], self.offers_links))\n )\n links_downloader.download_all()\n for item in links_downloader.results:\n try:\n parser = BeautifulSoup(item, \"html.parser\")\n profile = parser.find(\"p\", class_=\"profile\")\n data = parser.findAll(\"div\", class_=\"profile-page-section\")\n res = list(map(methodcaller(\"getText\"), data))\n try:\n profile_text = profile.getText()\n all_data.append(profile_text)\n except AttributeError:\n logger.error(\"Job has no profile section\")\n t = ' '.join(res)\n all_data.append(t)\n except Exception as e:\n logger.critical(f\"Failed to get page {e}\")\n self.all_data = \" \".join(all_data).replace(\"\\n\", ' ').lower()\n\n def handle_data(self):\n counter = Counter(self.all_data.lower().split())\n\n results = sortedcontainers.SortedList(key=lambda x: -x.results)\n SkillResult = namedtuple(\"Skill\", \"skill results\")\n for tool in self.tools:\n amount = counter.get(tool) or 0\n results.add(\n SkillResult(tool, amount)\n )\n return results\n", "repo_name": "dborodin836/job-statistics", "sub_path": "src/parsers.py", "file_name": "parsers.py", "file_ext": "py", "file_size_in_byte": 3398, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "abc.ABCMeta", "line_number": 13, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 22, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 31, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 34, "usage_type": "attribute"}, {"api_name": "requests.get", 
"line_number": 46, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 59, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 59, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 61, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 66, "usage_type": "call"}, {"api_name": "src.request.LinkDownloader", "line_number": 76, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 82, "usage_type": "call"}, {"api_name": "operator.methodcaller", "line_number": 85, "usage_type": "call"}, {"api_name": "loguru.logger.error", "line_number": 90, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 90, "usage_type": "name"}, {"api_name": "loguru.logger.critical", "line_number": 94, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 94, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 98, "usage_type": "call"}, {"api_name": "sortedcontainers.SortedList", "line_number": 100, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 101, "usage_type": "call"}]}
+{"seq_id": "74053649206", "text": "from ray.rllib.algorithms.ppo import PPOConfig\nfrom ray.rllib.algorithms.sac import SACConfig\nfrom ray.rllib.algorithms.mbmpo import MBMPOConfig\nfrom ray.tune.logger import pretty_print\nfrom env.SimpleEnvironment import SimpleRobotEnviroment\nfrom env.SimpleEnvironment_condensed_obs import SimpleRobotEnviromentCO\nfrom env.SimpleEnvironment_waypoints import SimpleRobotEnvironmentWP\nimport numpy as np\nimport torch\nimport random\nfrom ray.rllib.algorithms.algorithm import Algorithm\nimport cv2\n\n# for the custom callback\nfrom typing import Dict\nfrom ray.rllib.env import BaseEnv\nfrom ray.rllib.policy import Policy\nfrom ray.rllib.evaluation import MultiAgentEpisode, RolloutWorker\nfrom ray.rllib.algorithms.callbacks import DefaultCallbacks\n\nSEED = 4096\n\nclass GoalCallbacks(DefaultCallbacks):\n\n def on_episode_end(self, worker: RolloutWorker, base_env: BaseEnv,\n policies: Dict[str, Policy], episode: MultiAgentEpisode,\n **kwargs):\n final_x = episode.last_observation_for()[0]\n final_y = episode.last_observation_for()[1]\n final_yaw = episode.last_observation_for()[2]\n goal_x = episode.last_observation_for()[3]\n goal_y = episode.last_observation_for()[4]\n goal_yaw = episode.last_observation_for()[5]\n success = episode.last_info_for()[\"Success\"]\n crash = episode.last_info_for()[\"Crash\"]\n\n episode.custom_metrics[\"final_distance\"] = np.linalg.norm(np.array([goal_x, goal_y]) - np.array([final_x,final_y]))\n episode.custom_metrics[\"final_angle_difference\"] = min(np.abs(goal_yaw - final_yaw), 2*np.pi - np.abs(goal_yaw - final_yaw))\n episode.custom_metrics[\"reached_goal\"] = success\n episode.custom_metrics[\"crash\"] = crash\n\n \nclass GoalCallbacksCO(DefaultCallbacks):\n\n def on_episode_end(self, worker: RolloutWorker, base_env: BaseEnv,\n policies: Dict[str, Policy], episode: MultiAgentEpisode,\n **kwargs):\n final_distance = episode.last_observation_for()[0]\n final_angle_diff = abs(episode.last_observation_for()[2])\n success = episode.last_info_for()[\"Success\"]\n crash = episode.last_info_for()[\"Crash\"]\n # goal_yaw = episode.last_observation_for()[3]\n # final_yaw = episode.last_observation_for()[2]\n # final_angle_diff = min(np.abs(goal_yaw - final_yaw), 2*np.pi - np.abs(goal_yaw - final_yaw))\n\n episode.custom_metrics[\"final_distance\"] = final_distance\n episode.custom_metrics[\"final_angle_difference\"] = final_angle_diff\n episode.custom_metrics[\"reached_goal\"] = success\n episode.custom_metrics[\"crash\"] = crash\n\ndef set_seeds(seed):\n torch.manual_seed(seed) # Sets seed for PyTorch RNG\n torch.cuda.manual_seed_all(seed) # Sets seeds of GPU RNG\n np.random.seed(seed=seed) # Set seed for NumPy RNG\n random.seed(seed)\n\ndef convert_images_to_video(rgb_images):\n\n x = len(rgb_images[0])\n y = len(rgb_images[0][0])\n size = (x,y)\n\n out = cv2.VideoWriter('working_solution.mp4',cv2.VideoWriter_fourcc(*'mp4v'),15, size)\n\n for i in range(len(rgb_images)):\n rgb_img = cv2.cvtColor(rgb_images[i], cv2.COLOR_RGB2BGR)\n out.write(rgb_img)\n out.release()\n\n\nif __name__ == '__main__':\n\n # algo = (\n # PPOConfig()\n # # .training(lr=1e-4)\n # # .training(model={'use_lstm':True})\n # # .training(train_batch_size=60000, sgd_minibatch_size=4096)\n # # Increase horizon from 200 to 400 as robot was ending before reaching goal\n # .rollouts(num_rollout_workers=1,horizon=600)\n # .resources(num_gpus=0)\n # .environment(SimpleRobotEnviroment, env_config={\"render_mode\":\"rgb_array\"})\n # .callbacks(GoalCallbacks)\n # 
Seed for reproducibility and statistical significance\n # .debugging(seed=SEED)\n # .build()\n # )\n\n # algo = (\n # PPOConfig()\n # # .training(lr=1e-4)\n # # .training(model={'use_lstm':True})\n # # .training(train_batch_size=60000, sgd_minibatch_size=4096)\n # # Increase horizon from 200 to 400 as robot was ending before reaching goal\n # .rollouts(num_rollout_workers=1,horizon=600)\n # .resources(num_gpus=0)\n # .environment(SimpleRobotEnviromentCO, env_config={\"render_mode\":\"rgb_array\"})\n # .callbacks(GoalCallbacksCO)\n # Seed for reproducibility and statistical significance\n # .debugging(seed=SEED)\n # .build()\n # )\n # print(algo.config.horizon)\n # print(\"m\")\n \n\n horizon_val = 600 \n # algo = (\n # SACConfig()\n # .rollouts(num_rollout_workers=8,horizon=horizon_val)\n # .resources(num_gpus=0)\n # .environment(SimpleRobotEnviromentCO, env_config={\"horizon\":horizon_val,\"render_mode\":\"rgb_array\"})\n # .callbacks(GoalCallbacksCO)\n # .framework(framework=\"torch\")\n # # Seed for reproducibility and statistical significance\n # .debugging(seed=SEED)\n # .build()\n # )\n\n \n # algo = (\n # SACConfig()\n # .rollouts(num_rollout_workers=8,horizon=horizon_val)\n # .resources(num_gpus=0)\n # .environment(SimpleRobotEnviroment, env_config={\"horizon\":horizon_val, \"render_mode\":\"rgb_array\"})\n # .callbacks(GoalCallbacks)\n # .framework(framework=\"torch\")\n # # Seed for reproducibility and statistical significance\n # # .debugging(seed=SEED)\n # .build()\n # )\n\n algo = (\n SACConfig()\n .rollouts(num_rollout_workers=8,horizon=horizon_val)\n .resources(num_gpus=0)\n .environment(SimpleRobotEnviromentCO, env_config={\"horizon\":horizon_val, \"render_mode\":\"rgb_array\"})\n .callbacks(GoalCallbacksCO)\n .framework(framework=\"torch\")\n # Seed for reproducibility and statistical significance\n # .debugging(seed=SEED)\n .build()\n )\n \n # For testing\n algo.restore(\"/Users/emilymorris/ray_results/SAC_SimpleRobotEnviromentCO_2023-01-11_11-48-18_3vmn6e3/checkpoint_003501\")\n\n # num_episodes = 6000\n # for i in range(num_episodes):\n # print(i)\n # result = algo.train()\n # # print(result[\"custom_metrics\"])\n # # print(pretty_print(result))\n\n # if i % 10 == 0 or i==num_episodes-1:\n # checkpoint_dir = algo.save()\n # print(f\"Checkpoint saved in directory {checkpoint_dir}\")\n\n # Set all our seeds for the environment\n # set_seeds(seed=SEED)\n\n # i = 0\n # while True:\n # print(i)\n # result = algo.train()\n # # print(result[\"custom_metrics\"])\n # # print(pretty_print(result))\n\n # if i % 10 == 0:\n # checkpoint_dir = algo.save()\n # print(f\"Checkpoint saved in directory {checkpoint_dir}\")\n \n # i+=1\n\n \n env = SimpleRobotEnviromentCO(num_obstacles=6, init_distance=0.3)\n # env = SimpleRobotEnviroment(num_obstacles=1, init_distance=0.9)\n obs = env.reset()\n done = False\n print(obs)\n\n import matplotlib.pyplot as plt\n def displayImage(image):\n plt.imshow(image)\n plt.axis('off')\n plt.show()\n\n x = env.render()\n # displayImage(x)\n \n images = [x]\n for i in range(300):\n print(i)\n if not done:\n action = algo.compute_single_action(obs)\n print(\"Action: \", action)\n obs, reward, done, _ = env.step(action)\n print(\"ROBOT: \", env.robot.pose)\n print(\"Observation:\",obs)\n print(\"Reward: \", reward)\n print(\"Done\",done)\n images.append(env.render())\n else:\n print(\"Done\")\n\n\n # x = env.render()\n # displayImage(x)\n convert_images_to_video(images)\n\n ", "repo_name": "emorris7/robot_navigation_rl", "sub_path": "PathFinder.py", 
"file_name": "PathFinder.py", "file_ext": "py", "file_size_in_byte": 7756, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "76", "api": [{"api_name": "ray.rllib.algorithms.callbacks.DefaultCallbacks", "line_number": 23, "usage_type": "name"}, {"api_name": "ray.rllib.evaluation.RolloutWorker", "line_number": 25, "usage_type": "name"}, {"api_name": "ray.rllib.env.BaseEnv", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 26, "usage_type": "name"}, {"api_name": "ray.rllib.policy.Policy", "line_number": 26, "usage_type": "name"}, {"api_name": "ray.rllib.evaluation.MultiAgentEpisode", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 38, "usage_type": "attribute"}, {"api_name": "ray.rllib.algorithms.callbacks.DefaultCallbacks", "line_number": 43, "usage_type": "name"}, {"api_name": "ray.rllib.evaluation.RolloutWorker", "line_number": 45, "usage_type": "name"}, {"api_name": "ray.rllib.env.BaseEnv", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 46, "usage_type": "name"}, {"api_name": "ray.rllib.policy.Policy", "line_number": 46, "usage_type": "name"}, {"api_name": "ray.rllib.evaluation.MultiAgentEpisode", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.manual_seed", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.cuda.manual_seed_all", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 64, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 76, "usage_type": "attribute"}, {"api_name": "env.SimpleEnvironment_condensed_obs.SimpleRobotEnviromentCO", "line_number": 146, "usage_type": "argument"}, {"api_name": "ray.rllib.algorithms.sac.SACConfig", "line_number": 143, "usage_type": "call"}, {"api_name": "env.SimpleEnvironment", "line_number": 185, "usage_type": "name"}, {"api_name": "env.SimpleEnvironment_condensed_obs.SimpleRobotEnviromentCO", "line_number": 185, "usage_type": "call"}, {"api_name": "env.SimpleEnvironment.reset", "line_number": 187, "usage_type": "call"}, {"api_name": "env.SimpleEnvironment", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "env.SimpleEnvironment.render", "line_number": 197, "usage_type": "call"}, {"api_name": 
"env.SimpleEnvironment", "line_number": 197, "usage_type": "name"}, {"api_name": "env.SimpleEnvironment.step", "line_number": 206, "usage_type": "call"}, {"api_name": "env.SimpleEnvironment", "line_number": 206, "usage_type": "name"}, {"api_name": "env.SimpleEnvironment.robot", "line_number": 207, "usage_type": "attribute"}, {"api_name": "env.SimpleEnvironment", "line_number": 207, "usage_type": "name"}, {"api_name": "env.SimpleEnvironment.render", "line_number": 211, "usage_type": "call"}, {"api_name": "env.SimpleEnvironment", "line_number": 211, "usage_type": "name"}]}
+{"seq_id": "8554640355", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport random\nfrom tensorflow.keras.preprocessing.image import img_to_array, load_img\nfrom tensorflow.keras.models import Model\n\n\ndef plot_feature_maps(model, img):\n \"\"\"\n Given a convolutional neural network model and an appropriately shaped\n image, it plots the feature maps of the convolutional and pooling layers.\n\n Args:\n model: keras `Model` object, containing the model to plot feature\n maps of\n img: numpy array. Image represented in the form of a numpy array of\n shape (1, size, size, num_channels)\n \"\"\"\n # lets first create a list of outputs from successive layers of the model\n successive_outputs = [layer.output for layer in model.layers[1:]]\n\n # now lets create another model with the input of the model passed in,\n # and outputs of each layer of the passed in model\n visualization_model = Model(inputs=model.input, outputs=successive_outputs)\n\n # now run the image through the network, obtaining intermediate feature maps\n successive_feature_maps = visualization_model.predict(img)\n\n # layer names to have them as part of our plot for readability purposes\n layer_names = [layer.name for layer in model.layers]\n\n # loop through the layers\n for layer_name, feature_map in zip(layer_names, successive_feature_maps):\n # plot feature maps only for conv/pool layers\n if len(feature_map.shape) == 4:\n n_channels = feature_map.shape[-1]\n size = feature_map.shape[1]\n\n # We will tile our images in this matrix\n display_grid = np.zeros((size, size * n_channels))\n\n # looping through each filter of a layer\n for i in range(n_channels):\n # postprocessing the image to be visually palatable\n img = feature_map[0, :, :, i]\n img -= img.mean()\n img /= img.std()\n img *= 64\n img += 128\n img = np.clip(img, 0, 255).astype('uint8')\n # tile each filter into a horizontal grid\n display_grid[:, i * size: (i + 1) * size] = img\n\n # displaying the feature map grid\n scale = 20. / n_channels\n plt.figure(figsize=(scale * n_channels, scale))\n plt.title(layer_name)\n plt.grid(False)\n plt.imshow(display_grid, aspect='auto', cmap='viridis')\n\n\ndef find_image(path, model):\n \"\"\"\n Given a path to a folder containing images, it returns a numpy array\n representation of the image suitable for passing through the keras\n model (also passed in) to plot feature maps.\n\n Args:\n path: str. Path to the folder containing images\n model: keras `Model` object.\n\n Returns:\n img: numpy array. Representation of the image suitable for\n visualizing feature maps of the model\n \"\"\"\n # string formatting for reliability\n if not path.endswith('/'):\n path += '/'\n\n # creating a list of all images in directory passed in\n all_images = [f for f in os.listdir(path) if f.endswith('.jpg')]\n\n # finding a random image from the directory passed in\n img_path = path + random.choice(all_images)\n img = load_img(img_path, target_size=(model.input_shape[1],\n model.input_shape[2]))\n\n # convert the image into its numpy representation\n img = img_to_array(img) # shape: (img_size, img_size, 3)\n img = img.reshape((1,) + img.shape) # shape: (1, size, size, 3)\n return img / 255.0\n\n\ndef plot_feature_maps_from_random_img(model, folder_path):\n \"\"\"\n randomly chooses an image from a directory containing images and plots\n the feature maps of that image as gone through the model.\n\n Args:\n model: keras `Model` object, containing the model to print feature\n maps of\n folder_path: str. 
Path to the folder containing images\n \"\"\"\n img = find_image(folder_path, model)\n plot_feature_maps(model, img)\n\n\ndef plot_loss(history):\n \"\"\"\n Plots the loss of the model as a function of training epochs\n\n Args:\n history: `History` object, containing the training history of the model\n \"\"\"\n loss = history.history['loss']\n\n # plotting the validation loss if a validation set exists\n if 'val_loss' in history.history:\n val_loss = history.history['val_loss']\n plt.plot(val_loss, label='validation loss')\n\n plt.plot(loss, label='training loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n plt.title('Model loss vs. training epochs')\n plt.show()\n\n\ndef plot_accuracy(history):\n \"\"\"\n Plots the accuracy history of the model as a function of training epochs\n\n Args:\n history: `History` object, containing the training history of the model\n \"\"\"\n acc = history.history['acc']\n\n # plotting the validation accuracy if there exists a validation set\n if 'val_acc' in history.history:\n val_acc = history.history['val_acc']\n plt.plot(val_acc, label='validation accuracy')\n\n plt.plot(acc, label='training accuracy')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend()\n plt.title('Model accuracy vs. training epochs')\n plt.show()\n\n\nif __name__ == '__main__':\n from C2.W1.cats_dogs_reduced import train_model\n\n path = '../../Data/cats-and-dogs_reduced/'\n model, history = train_model()\n\n plot_feature_maps_from_random_img(model, path+'train/cats')\n plot_loss(history)\n plot_accuracy(history)\n", "repo_name": "connected-ftarlan/tf-specialization", "sub_path": "C2/W1/visualize_feature_maps.py", "file_name": "visualize_feature_maps.py", "file_ext": "py", "file_size_in_byte": 5574, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "tensorflow.keras.models.Model", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 82, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.keras.preprocessing.image.load_img", "line_number": 86, "usage_type": "call"}, {"api_name": "tensorflow.keras.preprocessing.image.img_to_array", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "C2.W1.cats_dogs_reduced.train_model", "line_number": 157, "usage_type": "call"}]}
+{"seq_id": "43048741840", "text": "\"\"\"jobproject URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.urls import path\nfrom jobapp import views\nurlpatterns = [\n\n path('',views.home),\n\n path('login/',views.joblogin),\n path('jobpost/',views.post_job),\n path('register/',views.regis),\n path('success/',views.emailsuccess),\n path('verify/',views.emailverify),\n path('verify/',views.verify),\n path('error/',views.error),\n path('jobpro/',views.jobpro),\n path('edit_comp//',views.edit_comp),\n path('regcomp/',views.regcomp),\n path('userregister/',views.userregister),\n path('userlogin/',views.userlogin),\n path('jobshow/',views.jobshow),\n path('jobshow1/',views.jobshow1),\n path('applyjob/',views.apply_job),\n path('user_profile/',views.user_profile),\n path('view_profile/',views.view_profile),\n path('useredit/',views.user_edit)\n\n]\n", "repo_name": "filmiya/jobproject", "sub_path": "jobapp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1477, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "jobapp.views.home", "line_number": 21, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 21, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "jobapp.views.joblogin", "line_number": 23, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "jobapp.views.post_job", "line_number": 24, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 24, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "jobapp.views.regis", "line_number": 25, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 25, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "jobapp.views.emailsuccess", "line_number": 26, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 26, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "jobapp.views.emailverify", "line_number": 27, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 27, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "jobapp.views.verify", "line_number": 28, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "jobapp.views.error", "line_number": 29, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 29, "usage_type": "name"}, {"api_name": 
"django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "jobapp.views.jobpro", "line_number": 30, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 30, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "jobapp.views.edit_comp", "line_number": 31, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 31, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "jobapp.views.regcomp", "line_number": 32, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 32, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "jobapp.views.userregister", "line_number": 33, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 33, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "jobapp.views.userlogin", "line_number": 34, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 34, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "jobapp.views.jobshow", "line_number": 35, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 35, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 36, "usage_type": "call"}, {"api_name": "jobapp.views.jobshow1", "line_number": 36, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 36, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 37, "usage_type": "call"}, {"api_name": "jobapp.views.apply_job", "line_number": 37, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 37, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 38, "usage_type": "call"}, {"api_name": "jobapp.views.user_profile", "line_number": 38, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 38, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 39, "usage_type": "call"}, {"api_name": "jobapp.views.view_profile", "line_number": 39, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 39, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 40, "usage_type": "call"}, {"api_name": "jobapp.views.user_edit", "line_number": 40, "usage_type": "attribute"}, {"api_name": "jobapp.views", "line_number": 40, "usage_type": "name"}]}
+{"seq_id": "73594945524", "text": "import random\n\nimport flask\n\napp = flask.Flask(__name__)\n\n\n# @app.route('/')\n# def index():\n# night = random.random() # Генератор случайных чисел от 0 до 1\n# return flask.render_template('index.html', night=night)\n\n\n@app.route('/')\ndef index():\n return flask.render_template('main.html')\n\n\n@app.route('/about')\ndef about_index():\n return flask.render_template('about.html')\n\n\n@app.route('/students')\ndef students_view():\n students = [\n \"Смирнов Хольгер Филиппович\",\n \"Демидович Налина Кирилловна\",\n \"Рыбакова Хитер Валерьевна\",\n \"Жуков Орион Святославович\"\n ]\n return flask.render_template('students.html', students=students)\n\n\n# Передача словаря в шалон\n@app.route('/roses')\ndef roses_view():\n # Ключ - Red\n # Списоок - [\"Freedom\", \"Forever young\", \"Explorer\"]\n roses = {\n \"Red\": [\"Freedom\", \"Forever young\", \"Explorer\"],\n \"White\": [\"Polar star\", \"Mondial\", \"Vendella\"],\n \"other\": [\"Engagement\", \"Topaz\", \"Miss Piggy\"]\n }\n return flask.render_template('roses.html', roses=roses)\n\n# Фильтры\n@app.route('/galaxies')\ndef galaxies_view():\n nearby_galaxies = {\n 1: {\"galaxy\": \"Карликовая галактика в Большом Псе\",\n \"distance_trillionkm\": 241248.627051,\n \"distance_ly\": 25500,\n \"description\": \"Галактика Местной группы, находящаяся в созвездии Большого Пса...\"},\n 2: {\"galaxy\": \"Большое Магелланово Облако\",\n \"distance_trillionkm\": 1542099.06703,\n \"distance_ly\": 163000,\n \"description\": \"Спутник Млечного Пути, расположенная на расстоянии около 163 тыс. св. лет...\"},\n 3: {\"galaxy\": \"Карликовая эллиптическая галактика в Стрельце\",\n \"distance_trillionkm\": 662251.133081,\n \"distance_ly\": 70000,\n \"description\": \"Эллиптическая галактика-спутник Млечного Пути. Проме обычного...\"}\n }\n return flask.render_template('galaxies.html', nearby_galaxies=nearby_galaxies)\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "repo_name": "kuznetsi/flask_test", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2446, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 64, "usage_type": "call"}]}
+{"seq_id": "25474223956", "text": "import io,os\nimport avro.schema\nimport avro.io\n\nschema = avro.schema.Parse(open(\"SimpleClass.avsc\", \"rb\").read())\n\nfd = os.open('SimpleClass.avro', os.O_RDONLY)\nBUFSIZE = 2**32-1\nbyte_data = os.read(fd, BUFSIZE)\nos.close(fd)\n\nbytes_reader = io.BytesIO(byte_data)\ndecoder = avro.io.BinaryDecoder(bytes_reader)\nreader = avro.io.DatumReader(schema)\n\n# How do I know how many records are encoded?\ndata = reader.read(decoder)\nprint(data)\ndata = reader.read(decoder)\nprint(data)\n\n", "repo_name": "IRISMeister/IRIS-PEX-MQTT-dotnet", "sub_path": "datavol/share/SimpleClass-decoder.py", "file_name": "SimpleClass-decoder.py", "file_ext": "py", "file_size_in_byte": 474, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "avro.schema.schema.Parse", "line_number": 5, "usage_type": "call"}, {"api_name": "avro.schema.schema", "line_number": 5, "usage_type": "attribute"}, {"api_name": "avro.schema", "line_number": 5, "usage_type": "name"}, {"api_name": "os.open", "line_number": 7, "usage_type": "call"}, {"api_name": "os.O_RDONLY", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.read", "line_number": 9, "usage_type": "call"}, {"api_name": "os.close", "line_number": 10, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 12, "usage_type": "call"}, {"api_name": "avro.schema.io.BinaryDecoder", "line_number": 13, "usage_type": "call"}, {"api_name": "avro.schema.io", "line_number": 13, "usage_type": "attribute"}, {"api_name": "avro.schema", "line_number": 13, "usage_type": "name"}, {"api_name": "avro.schema.io.DatumReader", "line_number": 14, "usage_type": "call"}, {"api_name": "avro.schema.io", "line_number": 14, "usage_type": "attribute"}, {"api_name": "avro.schema", "line_number": 14, "usage_type": "name"}]}
+{"seq_id": "32994452577", "text": "\"\"\"Class that represents the interaction network of Stack overflow.\"\"\"\n\n# Imports\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\n\nimport user as agent\nimport utils\n\n\nclass network:\n \"\"\"\n Framework of the model that represents the interaction network of Stack overflow.\n\n Attributes\n ----------\n new_users : int\n number of users added every timestep\n upvote_treshold : int\n minimum reputation to gain upvoting privilige\n upvote_bias : int\n number of upvotes a user is satisfied with\n distr : list\n contains the type and parameters of the distributions from which the probabilities are sampled\n tag_cdf : numpy.ndarray\n cummulative distribution function of the tags (communities)\n tags : list\n contains the ids of the users with a certain tag for all tags\n users : list\n contains all the users in the system\n questions : list\n all the questions ever asked during the simulation\n\n Methods\n -------\n determine_tag()\n Determine the tag of a user.\n create_user(i)\n Create a new user.\n step()\n Single timestep of the model.\n run(t)\n Execute the model for a certain number of timesteps.\n \"\"\"\n\n def __init__(self, n, tags, treshold=15, bias=12, distr=[[0.5, 0.25], [0.5, 0.25], [0.5, 0.25], [0.5, 0.25]]):\n \"\"\"\n Initialize an interaction network.\n\n Parameters\n ----------\n n : int\n number of users added every timestep\n tags : str\n .txt file containing probabilities of the different communities\n treshold : int\n minimum reputation to gain upvoting privilige, default is 15\n bias : int\n number of upvotes a user is satisfied with, default is 12\n distr : list (4 x 2)\n contains the mean and std (as a list) of the distributions from which the probabilities are sampled\n the first list is for p_ask followed by p_answer, p_interact and p_active\n default values are mean 0.5 and std 0.25 (normal distribution)\n for uniform distribution, set the mean to None\n for exponential distribution set the mean equal to the rate and the std to None\n \"\"\"\n self.new_users = n\n self.upvote_treshold = treshold\n self.upvote_bias = bias\n\n # Distributions for the interaction parameters of the users\n self.distr = distr\n\n # Calculate the cummulative distribution of the tags\n tag_pdf = np.loadtxt(tags, usecols=1)\n tag_pdf = tag_pdf / np.sum(tag_pdf)\n self.tag_cdf = utils.calc_cdf(tag_pdf)\n\n self.tags = [[] for _ in range(len(tag_pdf))]\n self.users = []\n self.questions = []\n\n def determine_tag(self):\n \"\"\"\n Determine the tag of a user.\n\n Returns\n -------\n tag : int\n tag of the user\n \"\"\"\n tag = 0\n u = np.random.uniform()\n while u > self.tag_cdf[tag]:\n tag += 1\n\n return tag\n\n def create_user(self, i):\n \"\"\"\n Create a new user.\n\n Parameters\n ----------\n i : int\n id of the user\n\n Returns\n -------\n new_user : .user\n new user\n \"\"\"\n # Tag\n tag = self.determine_tag()\n self.tags[tag].append(i)\n\n # User\n new_user = agent.user(self, i, tag)\n\n # Probabilities\n attributes = ['p_ask', 'p_answer', 'p_interact', 'p_active']\n for i, param in enumerate(self.distr):\n if param[0] is None:\n # Uniform distribution\n p = utils.draw_uniform()\n elif param[1] is None:\n # Exponential distribution\n p = utils.draw_exponential(param[0])\n else:\n # Normal distribution\n p = utils.draw_normal(param[0], param[1])\n\n setattr(new_user, attributes[i], p)\n setattr(new_user, attributes[i] + '_begin', p)\n\n return new_user\n\n def step(self):\n \"\"\"Single timestep of the 
model.\"\"\"\n # Add new users to the system\n for _ in range(self.new_users):\n user = self.create_user(len(self.users))\n self.users.append(user)\n\n # Iterate over users based on activity, most active users go first\n order = list(np.copy(self.users))\n order.sort(key=lambda x: x.p_active, reverse=True)\n for user in order:\n user.step()\n\n def run(self, t):\n \"\"\"\n Execute the model for a certain number of timesteps.\n\n Parameters\n ----------\n t : int\n number of timesteps\n \"\"\"\n for _ in range(t):\n self.step()\n\n def reset(self):\n \"\"\"Reset the system (does not change the parameter settings).\"\"\"\n self.tags = [[] for _ in range(len(self.tag_cdf))]\n self.users = []\n self.questions = []\n\n def get_upvote_distr(self, binsize):\n \"\"\"\n Get the distribution of upvotes given per user.\n\n Parameters\n ----------\n binsize : float\n length of one interval\n\n Returns\n -------\n pdf : numpy.ndarray\n probability density function of the number of upvotes\n bins : numpy.ndarray\n edges of the bins\n \"\"\"\n # Get the data on the upvotes\n upvotes = []\n for user in self.users:\n upvotes.append(user.n_questions_upvoted + user.n_answers_upvoted)\n\n bins = np.arange(binsize, max(upvotes) + binsize + 1, binsize)\n pdf = np.zeros(len(bins))\n for value in upvotes:\n pdf[(value // binsize)] += 1\n\n pdf /= np.sum(pdf)\n\n return pdf, bins\n\n def get_reputation_distr(self, binsize):\n \"\"\"\n Get the distribution of reputation.\n\n Parameters\n ----------\n binsize : float\n length of one interval\n\n Returns\n -------\n pdf : numpy.ndarray\n probability density function of the reputation\n bins : numpy.ndarray\n edges of the bins\n \"\"\"\n # Get the data on reputation\n reputation = []\n for user in self.users:\n reputation.append(user.reputation)\n\n bins = np.arange(binsize, max(reputation) + binsize + 1, binsize)\n pdf = np.zeros(len(bins))\n for value in reputation:\n pdf[(value // binsize)] += 1\n\n pdf /= np.sum(pdf)\n\n return pdf, bins\n\n def get_regression_coeff(self, data='upvotes', binsize=20):\n \"\"\"\n Calculate the linear regression coefficient of the distribution of upvotes or reputation.\n\n Parameters\n ----------\n data : str ('upvotes' or 'reputation')\n specifies for which distribution the coefficient should be calculated, default is upvotes\n binsize : float\n length of the interval used in calculating the pdf\n\n Returns\n -------\n coeff : float\n coefficient of the linear regression line\n \"\"\"\n if data == 'upvotes':\n pdf, bins = self.get_upvote_distr(binsize)\n else:\n pdf, bins = self.get_reputation_distr(binsize)\n\n # Calculate the log of the data\n pdf_log = []\n bins_log = []\n\n for ind, value in enumerate(pdf):\n if value != 0:\n bins_log.append(np.log10(bins[ind]))\n pdf_log.append(np.log10(value))\n\n lin_model = LinearRegression().fit(np.array(bins_log).reshape((-1, 1)), pdf_log)\n\n return (lin_model.coef_)[0]\n", "repo_name": "AaronDC60/ABM_stackoverflow_network", "sub_path": "code/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 7678, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "numpy.loadtxt", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 76, "usage_type": "call"}, {"api_name": "utils.calc_cdf", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 93, "usage_type": 
"attribute"}, {"api_name": "user.user", "line_number": 118, "usage_type": "call"}, {"api_name": "utils.draw_uniform", "line_number": 125, "usage_type": "call"}, {"api_name": "utils.draw_exponential", "line_number": 128, "usage_type": "call"}, {"api_name": "utils.draw_normal", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 146, "usage_type": "call"}, {"api_name": "user.step", "line_number": 149, "usage_type": "call"}, {"api_name": "user.n_questions_upvoted", "line_number": 188, "usage_type": "attribute"}, {"api_name": "user.n_answers_upvoted", "line_number": 188, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 195, "usage_type": "call"}, {"api_name": "user.reputation", "line_number": 218, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 257, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 259, "usage_type": "call"}]}
+{"seq_id": "74208335605", "text": "import os\r\nfrom mutagen.mp3 import MP3\r\nfrom functions import ceil\r\n# Format information: \r\n# 0: nothing\r\n# 1: single hit\r\n# 2: hold\r\n\r\nresolution = 50 # ms\r\nsongLength = MP3('audio.mp3').info.length*1000 # ms\r\n\r\noriginal_file = open(\"original.osu\", \"r\")\r\noriginal_lines = original_file.readlines()\r\n\r\n# Remove line returns from original file\r\nfor i in range(0, len(original_lines)):\r\n original_lines[i] = original_lines[i].rstrip('\\r\\n')\r\n\r\noriginal_hit_objects = original_lines[original_lines.index(\"[HitObjects]\")+1:len(original_lines)]\r\n\r\nposes = []\r\nfor i in range(0, len(original_hit_objects)):\r\n try:\r\n output = str(original_hit_objects[i][:-1]).split(',')\r\n \r\n # Numberize normal inputs\r\n for j in range(0, len(output)-1):\r\n output[j] = int(output[j])\r\n\r\n # Numberize extra inputs\r\n last_index = len(output)-1\r\n output[last_index] = output[last_index].split(':')\r\n for j in range(0, len(output[last_index])):\r\n try:\r\n output[last_index][j] = int(output[last_index][j])\r\n except Exception:\r\n output[last_index][j] = output[last_index][j]\r\n\r\n # Add to list of poses\r\n pose = output[0]\r\n if not pose in poses:\r\n poses.append(pose)\r\n\r\n # Update original array with new data\r\n original_hit_objects[i] = output\r\n except Exception:\r\n print(\"Failed to analyze line \" + (i+1) + \" (\\\"\" + original_hit_objects[i] + \"\\\") because of: \" + Exception)\r\n\r\n# Make it so you can map pos directly to a column number\r\nposes.sort()\r\nposToColumn = {}\r\nfor i in range(0, len(poses)):\r\n posToColumn[poses[i]] = i\r\n\r\n# Create empy hit objects array\r\noutput_hit_objects = []\r\nfor i in range(0, ceil(float(songLength)/resolution)):\r\n val = []\r\n for j in range(0, len(poses)):\r\n val.append(0)\r\n output_hit_objects.append(val)\r\n\r\n# Translate original hit objects to new hit objects\r\nfor hit_object in original_hit_objects:\r\n index = int(round(float(hit_object[2])/resolution))\r\n column = posToColumn[hit_object[0]]\r\n if hit_object[3] == 1 or 5:\r\n output_hit_objects[index][column] = 1\r\n if hit_object[3] == 128:\r\n output_hit_objects[index][column] = 2\r\n for i in range(index+1, ceil(float(hit_object[5][0])/resolution)-1):\r\n #print(i)\r\n output_hit_objects[i][column] = 2\r\n\r\n# Output to file\r\nif os.path.exists(\"encoded.asu\"):\r\n os.remove(\"encoded.asu\")\r\noutput_file = open(\"encoded.asu\", \"a\")\r\noutput_file.write(str(resolution) + \"\\n\")\r\nfor hit_object in output_hit_objects:\r\n #print(hit_object)\r\n for i in range(0, len(hit_object)):\r\n output_file.write(str(hit_object[i]))\r\n if not i == len(hit_object)-1:\r\n output_file.write(\",\")\r\n output_file.write(\"\\n\")", "repo_name": "clay53/Osu-Mania-Beatmap-AI", "sub_path": "Osu-Mania-Beatmap-AI/Format/encode.py", "file_name": "encode.py", "file_ext": "py", "file_size_in_byte": 2827, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "mutagen.mp3.MP3", "line_number": 10, "usage_type": "call"}, {"api_name": "functions.ceil", "line_number": 57, "usage_type": "call"}, {"api_name": "functions.ceil", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 77, "usage_type": "call"}]}
+{"seq_id": "9108801951", "text": "import pysam\nimport argparse\n\nif __name__ == '__main__':\n\n\tparser = argparse.ArgumentParser('Bed to vcf')\n\tparser.add_argument('-b','--bed', help=\"in.bed\")\n\tparser.add_argument('-r','--ref', help=\"reference.fasta\")\n\tparser.add_argument('-o','--out', help=\"out.vcf\")\n\n\tglobal opts\n\topts = parser.parse_args()\n\tfa = pysam.Fastafile(opts.ref)\n\tvcf=open(opts.out,'w')\n\tvcf.write('##fileformat=VCFv4.1'+'\\n')\n\tvcf.write('##INFO='+'\\n')\n\tvcf.write('#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\tSAMPLE\"'+'\\n')\n\twith open(opts.bed,'r') as bed:\n\t\tfor line in bed:\n\t\t\tif line.startswith('#') or line.startswith('@'):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tchrom,start,end,ref,alt=line.rstrip().split('\\t')[:5]\n\t\t\t\tid=line.rstrip().split('\\t')[-1]\n\n\t\t\t\ttry:\n\t\t\t\t\tfa.fetch(chrom,start)\n\t\t\t\texcept:\n\t\t\t\t\tchrom='chr'+chrom\n\n\t\t\t\tif ref == '-':\n\t\t\t\t\tref = fa.fetch(chrom, int(start)-1, int(start)).upper()\n\n\t\t\t\t\talt = ref.upper() + alt.upper()\n\t\t\t\telif alt== '-':\n\t\t\t\t\tref = fa.fetch(chrom, int(start)-1, int(end)).upper()\n\t\t\t\t\talt = ref[0].upper()\n\n\t\t\t\tvcf.write('\\t'.join([chrom, start, '.', ref, alt,'.','.','ID='+id,'.','.'])+'\\n')", "repo_name": "urtism/CMG", "sub_path": "SCRIPT_CMG/SCRIPT_PYTHON/FILE_MANIPULATION/bed_to_vcf.py", "file_name": "bed_to_vcf.py", "file_ext": "py", "file_size_in_byte": 1169, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 6, "usage_type": "call"}, {"api_name": "pysam.Fastafile", "line_number": 13, "usage_type": "call"}]}
+{"seq_id": "18685850245", "text": "import numpy as np\nimport scipy.spatial.distance as scidist\nfrom keras.models import Model\nfrom common.Enums import DistanceMetrics\n\n\ndef computeFeatureWiseMetric(consumer_batch, shop_features, metric):\n assert consumer_batch.shape[1] == shop_features.shape[1], \"Consumer batch and shop features must have same feature dimensin\"\n consumer_count = consumer_batch.shape[0]\n shop_count = shop_features.shape[0]\n consumer_batch = np.expand_dims(consumer_batch, axis=1)\n consumer_batch = np.tile(consumer_batch, (1, shop_count, 1))\n shop_features = np.expand_dims(shop_features, axis = 0)\n shop_features = np.tile(shop_features, (consumer_count, 1, 1))\n\n diff = consumer_batch - shop_features\n\n if metric == DistanceMetrics.L1:\n return np.abs(diff)\n elif metric == DistanceMetrics.L2:\n return np.square(diff)\n else:\n raise Exception(\"Invalid metric\")\n\ndef computeDistances(consumer_features, shop_features, metric=DistanceMetrics.L1, model = None, batchSize = 100):\n assert isinstance(consumer_features, np.ndarray), 'Consumer features must be an numpy array of size n * d'\n assert isinstance(shop_features, np.ndarray), 'Shop features must be a numpy array of size m * d'\n assert consumer_features.shape[1] == shop_features.shape[1], 'Consumer and shop features must have same dimension'\n\n if model is not None:\n print(\"Computing Trained Model based distance metric\")\n assert isinstance(model, Model), \"model must be a keras model\"\n result = np.array([]).reshape((-1, shop_features.shape[0]))\n num_batches = consumer_features.shape[0] // batchSize + 1\n batch_iter = 1\n for start in range(0, consumer_features.shape[0], batchSize):\n last_index = min(consumer_features.shape[0], start + batchSize)\n\n consumer_batch = consumer_features[start: last_index]\n feature_wise_metric = computeFeatureWiseMetric(consumer_batch, shop_features, metric)\n feature_wise_metric = feature_wise_metric.reshape((-1, feature_wise_metric.shape[2]))\n\n similarity = model.predict(feature_wise_metric)\n # We multiply by negative 1 since higher scores means they are more similar, aka negative of distance.\n similarity = -1 * similarity.reshape((consumer_batch.shape[0], -1))\n\n print(\"Finished batch {} of {}\".format(batch_iter, num_batches))\n batch_iter +=1\n result = np.concatenate((result, similarity))\n\n return result\n\n else:\n print(\"Computing Vanilla Distance Metric\")\n if(metric == DistanceMetrics.L1):\n metric_string = 'cityblock'\n elif(metric == DistanceMetrics.L2):\n metric_string = 'euclidean'\n elif (metric == DistanceMetrics.Cosine):\n metric_string = 'cosine'\n else:\n raise Exception(\"Invalid Distance Metric\")\n return scidist.cdist(consumer_features, shop_features, metric=metric_string)", "repo_name": "wutenghu/CS231nFinalProject", "sub_path": "common/helpers/computeDistances.py", "file_name": "computeDistances.py", "file_ext": "py", "file_size_in_byte": 2929, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "76", "api": [{"api_name": "numpy.expand_dims", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 14, "usage_type": "call"}, {"api_name": "common.Enums.DistanceMetrics.L1", "line_number": 18, "usage_type": "attribute"}, {"api_name": "common.Enums.DistanceMetrics", "line_number": 18, "usage_type": "name"}, {"api_name": 
"numpy.abs", "line_number": 19, "usage_type": "call"}, {"api_name": "common.Enums.DistanceMetrics.L2", "line_number": 20, "usage_type": "attribute"}, {"api_name": "common.Enums.DistanceMetrics", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.square", "line_number": 21, "usage_type": "call"}, {"api_name": "common.Enums.DistanceMetrics.L1", "line_number": 25, "usage_type": "attribute"}, {"api_name": "common.Enums.DistanceMetrics", "line_number": 25, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 27, "usage_type": "attribute"}, {"api_name": "keras.models.Model", "line_number": 32, "usage_type": "argument"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 49, "usage_type": "call"}, {"api_name": "common.Enums.DistanceMetrics.L1", "line_number": 55, "usage_type": "attribute"}, {"api_name": "common.Enums.DistanceMetrics", "line_number": 55, "usage_type": "name"}, {"api_name": "common.Enums.DistanceMetrics.L2", "line_number": 57, "usage_type": "attribute"}, {"api_name": "common.Enums.DistanceMetrics", "line_number": 57, "usage_type": "name"}, {"api_name": "common.Enums.DistanceMetrics.Cosine", "line_number": 59, "usage_type": "attribute"}, {"api_name": "common.Enums.DistanceMetrics", "line_number": 59, "usage_type": "name"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 63, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 63, "usage_type": "name"}]}
+{"seq_id": "3628553790", "text": "\"\"\"\nIrondomo Protocol client example. Uses the IDPClient API to hide all IDP aspects\nAuthor: Matteo Ferrabone \n\"\"\"\nimport os\nimport sys\nimport zmq.auth\nfrom IronDomo import IDPClient\n\ndef main():\n verbose = '-v' in sys.argv\n base_dir = os.path.dirname(__file__)\n keys_dir = os.path.join(base_dir, 'certificates')\n public_keys_dir = os.path.join(base_dir, 'public_keys')\n secret_keys_dir = os.path.join(base_dir, 'private_keys')\n client_secret_file = os.path.join(secret_keys_dir, \"client.key_secret\")\n client_public, client_secret = zmq.auth.load_certificate(client_secret_file)\n server_public_file = os.path.join(public_keys_dir, \"server.key\")\n print('Server Secret File: {0}'.format(server_public_file))\n server_public, dummy = zmq.auth.load_certificate(server_public_file)\n\n print('Server Key: {0}'.format(server_public))\n\n client = IDPClient.IronDomoClient(\"tcp://127.0.0.1:6556\", verbose, ('P+S690P{iVPfx {0}\".format(count)\n try:\n reply = client.send(b\"echo\", request)#.encode())\n print('Message: {0}'.format(count))\n except KeyboardInterrupt:\n break\n else:\n # also break on failure to reply:\n if reply is None:\n break\n count += 1\n client.close()\n print(\"%i requests/replies processed\" % count)\n\nif __name__ == '__main__':\n main()\n\n", "repo_name": "waterviewsrl/IronDomo", "sub_path": "examples/python/client/client_curve.py", "file_name": "client_curve.py", "file_ext": "py", "file_size_in_byte": 1591, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "76", "api": [{"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "zmq.auth.auth.load_certificate", "line_number": 17, "usage_type": "call"}, {"api_name": "zmq.auth.auth", "line_number": 17, "usage_type": "attribute"}, {"api_name": "zmq.auth", "line_number": 17, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "zmq.auth.auth.load_certificate", "line_number": 20, "usage_type": "call"}, {"api_name": "zmq.auth.auth", "line_number": 20, "usage_type": "attribute"}, {"api_name": "zmq.auth", "line_number": 20, "usage_type": "name"}, {"api_name": "IronDomo.IDPClient.IronDomoClient", "line_number": 24, "usage_type": "call"}, {"api_name": "IronDomo.IDPClient", "line_number": 24, "usage_type": "name"}, {"api_name": "os.urandom", "line_number": 26, "usage_type": "call"}]}
+{"seq_id": "32463430374", "text": "import cx_Oracle as cx\ncx.init_oracle_client(lib_dir=r\"C:\\DEV_WorkSpace\\Rep_Python\\env_python\\cli_oracle\\instantclient_19_8\")\n\ndef connectBd():\n host_name = \"192.168.56.3\"\n port_number = \"1521\"\n service_name = \"orcl\"\n pass_1 = \"useraction\"\n user = \"useraction\"\n dbschema = \"BD_ACOES\"\n host = host_name + \":\" + port_number + \"/\" + service_name\n\n conn = cx.connect(user, pass_1, host , encoding=\"UTF-8\")\n #print(conn)\n #cursor = conn.cursor()\n return conn\n\ndef select(psql):\n #psql = \"select * from ticket\"\n conn = connectBd()\n cursor = conn.cursor()\n print(cursor)\n\n for row in cursor.execute(psql):\n print(row)\n\n conn.close()\n\n\ndef insertToBd(sigla, tipo, bolsa):\n conn = connectBd()\n cursor = conn.cursor()\n #sql = (\"insert into ticket(id_ticket, nome, tipo, pais) VALUES(SEQ_IDTICKET.nextval,'EGIE3.SA','ON','BRASIL')\")\n p1 = \"','\"\n psql = \"insert into acoes(id_acao, sigla, tipo, bolsa) VALUES(SEQ_IDACAO.nextval,'\"+sigla+p1+tipo+p1+bolsa+\"')\"\n print(psql)\n cursor.execute(psql)\n cursor.execute(\"commit\")\n\ndef getCursor(psql):\n conn = connectBd()\n cursor = conn.cursor()\n return cursor\n\n#insertToBd(\"PG\",\"ON\",\"NYSE\")\n#select()\n\ndef geraSqlInsert(nome_tabela, dados):\n sql_insert = 'insert into ' + nome_tabela + '('\n sql_str_campo = ''\n sql_str_valor = ''\n\n for p_campo, p_valor in dados.items():\n sql_str_campo = sql_str_campo + p_campo + ','\n sql_str_valor = sql_str_valor + \"'\" + str(p_valor) + \"',\"\n sql = sql_insert + sql_str_campo[:-1] + ') values(' + sql_str_valor[:-1] + ')'\n return sql\n\ndef insertCotacoes(id_sigla, pcotacao):\n conn = connectBd()\n cursor = conn.cursor()\n dados = {}\n print('print pcotacao = »»»» ',pcotacao)\n for t in pcotacao:\n dic1 = dict(list(pcotacao)[list(pcotacao).index(t)])\n print(dic1)\n for p_data, p_open, p_high, p_low, p_close, p_adj_close, p_volume, p_dividend_amount, p_split in dic1:\n #for x in lst1:\n print('print teste »»' ,p_data,p_close)\n #print(list(dic1)[list(dic1).index(x)], ' = ',dic1[x])\n \"\"\"dados = {'ID_TICKET': id_ticket,\n 'DATA_COTACAO': p_data,\n 'HIGH': p_high,\n 'LOW': p_low,\n 'OPEN': p_open,\n 'CLOSE': p_close,\n 'VOLUME': p_volume,\n 'ADJ_CLOSE': p_adj_close\n }\"\"\"\n\n print('print dados = »»»» ',dados)\n sql = geraSqlInsert('cotacao_diaria', dados)\n try:\n print(sql)\n #cursor.execute(sql)\n except Exception as err:\n print(err)\n #conn.commit()\n\n \"\"\"\n {\n 'data': '2020-10-23',\n 'open': '82.2500',\n 'high': '83.2600', \n 'low': '80.6400',\n 'close': '81.6700',\n 'adjusted close': '81.6700',\n 'volume': '8068300',\n 'dividend amount': '0.0000',\n 'split coefficient': '1.0'\n }\n \"\"\"", "repo_name": "alexsfraga/Curso-Python-com-financas", "sub_path": "conectaBD.py", "file_name": "conectaBD.py", "file_ext": "py", "file_size_in_byte": 2984, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "cx_Oracle.init_oracle_client", "line_number": 2, "usage_type": "call"}, {"api_name": "cx_Oracle.connect", "line_number": 13, "usage_type": "call"}]}
+{"seq_id": "23859777336", "text": "from django.conf import settings\n\nfrom celery import shared_task\nfrom os.path import join\nfrom subprocess import Popen, STDOUT, CalledProcessError, PIPE\n\nfrom .models import Submission\n\nimport traceback\n\n\n@shared_task\ndef run_code(sub_pk):\n submission = Submission.objects.get(pk=sub_pk)\n try:\n with Popen(\"python3 -u {} {}\".format(\n join(settings.GRADER_DIRECTORY,\n submission.lab.grader_filename),\n submission.code).split(), stdout=PIPE, stderr=STDOUT,\n cwd=settings.GRADER_DIRECTORY) as proc:\n for line in proc.stdout:\n if line.decode() == \"\\n\":\n continue\n submission.output += line.decode()\n submission.save()\n if proc.returncode != 0:\n raise CalledProcessError(\n proc.returncode, proc.args,\n output=(b\"\" if proc.stdout.closed else proc.stdout.read()),\n stderr=None)\n except CalledProcessError as e:\n submission.output = e.output.decode()\n except Exception:\n submission.output = traceback.format_exc()\n submission.complete = True\n submission.save()\n return True\n", "repo_name": "ovkulkarni/ai-grader", "sub_path": "grader/tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 1211, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "models.Submission.objects.get", "line_number": 14, "usage_type": "call"}, {"api_name": "models.Submission.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "models.Submission", "line_number": 14, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.settings.GRADER_DIRECTORY", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "subprocess.PIPE", "line_number": 19, "usage_type": "name"}, {"api_name": "subprocess.STDOUT", "line_number": 19, "usage_type": "name"}, {"api_name": "django.conf.settings.GRADER_DIRECTORY", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 20, "usage_type": "name"}, {"api_name": "subprocess.CalledProcessError", "line_number": 27, "usage_type": "call"}, {"api_name": "subprocess.CalledProcessError", "line_number": 31, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 34, "usage_type": "call"}, {"api_name": "celery.shared_task", "line_number": 12, "usage_type": "name"}]}
+{"seq_id": "5008701188", "text": "\"\"\"poll models #2\n\nRevision ID: c4a90fed59fb\nRevises: 7505195b2951\nCreate Date: 2023-05-28 17:11:55.304035\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlmodel\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c4a90fed59fb'\ndown_revision = '7505195b2951'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('question', sa.Column('text', sqlmodel.sql.sqltypes.AutoString(), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('question', 'text')\n # ### end Alembic commands ###\n", "repo_name": "fearsd/leaders_hack_2023", "sub_path": "backend/migrations/versions/c4a90fed59fb_poll_models_2.py", "file_name": "c4a90fed59fb_poll_models_2.py", "file_ext": "py", "file_size_in_byte": 708, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "alembic.op.add_column", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlmodel.sql.sqltypes.AutoString", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlmodel.sql", "line_number": 22, "usage_type": "attribute"}, {"api_name": "alembic.op.drop_column", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 28, "usage_type": "name"}]}
+{"seq_id": "28621580368", "text": "# # Globals that tell the script the layout of the csv files\n# from matplotlib import pyplot as plt\nimport internal.csv_utils as utils\nimport statistics\n\n\ndef export_number_revs(data, csv_name, no_compile_fail=False):\n # Export the number of revisions to a file\n\n # Clean the data\n cleaned_data = utils.clean_data(data, omit=['EmptyCommit', 'NoCoverage'])\n\n # Get the dates\n exit_statuses, dates = utils.get_columns(cleaned_data, ['exit', 'time'])\n\n # Count the number of revisions for each of the exit statuses\n num_revs = {'OK': 0, 'SomeTestFailed': 0, 'TimedOut': 0, 'compileError': 0, }\n for exit_status in exit_statuses:\n if exit_status not in num_revs:\n num_revs[exit_status] = 0\n num_revs[exit_status] += 1\n\n # Get the first and last dates\n first_date = dates[0]\n last_date = dates[-1]\n\n # Get the number of months between the first and last dates\n num_months = (last_date.year - first_date.year) * 12 + (last_date.month - first_date.month)\n\n # Append num_months with mo\n num_months = f'{num_months}mo'\n\n # Print the number of revisions and the number of days\n print(f'Number of revisions for {csv_name}: {num_revs}')\n print(f'Number of months for {csv_name}: {num_months}')\n\n set_to_analyze = ['OK', 'SomeTestFailed', 'TimedOut', 'compileError']\n if no_compile_fail:\n set_to_analyze.remove('compileError')\n\n # Transform the num_revs dict into a list of numbers\n status_numbers = ','.join(\n [str(num_revs[status]) for status in set_to_analyze])\n\n # Append the sum of the numbers to the end of the list apart from compileError\n status_numbers += f',{sum([num_revs[status] for status in [\"OK\", \"SomeTestFailed\", \"TimedOut\"]])}'\n\n # Construct a CSV row with format csv_name, status numbers, num_months\n csv_row = f'{csv_name},{status_numbers},{num_months}'\n\n return csv_row\n\ndef export_date_range(data, csv_name):\n # Clean the data\n cleaned_data = utils.clean_data(data, omit=['EmptyCommit', 'NoCoverage', 'compileError'])\n\n # Get the dates\n covlines, notcovlines, changed_test_files, dates = utils.get_columns(cleaned_data, ['covlines', 'notcovlines', 'changed_test_files', 'time'])\n\n # Get the indices of lines where covlines + notcovlines > 0 or changed_test_files > 0\n indices = [i for i in range(len(covlines)) if covlines[i] + notcovlines[i] > 0 or changed_test_files[i] > 0]\n\n # Get the first and last dates given the indices\n first_date = dates[indices[0]]\n last_date = dates[indices[-1]]\n\n # Write the dates as month(space)year with month as a string (like Apr)\n first_date = f'{first_date.strftime(\"%b\")} {first_date.year}'\n last_date = f'{last_date.strftime(\"%b\")} {last_date.year}'\n\n # Construct a CSV row with format csv_name, first_date, last_date\n csv_row = f'{csv_name},{first_date},{last_date}'\n\n return csv_row\n\n\ndef export_eloc_tloc(data, csv_name):\n # Export stats like ELOC and TLOC and language\n\n cleaned_data = utils.clean_data(data)\n\n # Get eloc, tloc, and language\n eloc_data, tloc_data = utils.get_columns(cleaned_data, ['eloc', 'testsize'])\n\n lang_map = {\n 'Binutils': ('C', 'DejaGNU'),\n 'Git': ('C', 'C/Perl'),\n 'Lighttpd2': ('C', 'Python'),\n 'Memcached': ('C', 'C/Perl'),\n 'Redis': ('C', 'Tcl'),\n 'ZeroMQ': ('C++', 'C++'),\n 'Apr': ('C', 'C'),\n 'Curl': ('C', 'Perl/Python'),\n 'Vim': ('C', 'Vim Script'),\n }\n\n # Convert all the map keys to lowercase\n lang_map = {k.lower(): v for k, v in lang_map.items()}\n\n # Get the language (first is code language, second is test language) 
finding if csv_name contains part of the key in lang_map\n try:\n lang = [lang_map[key][0] for key in lang_map if key in csv_name.lower()][0]\n test_lang = [lang_map[key][1] for key in lang_map if key in csv_name.lower()][0]\n except IndexError:\n print(f'Could not find language for {csv_name}')\n return None\n\n # Get the eloc and tloc - from the last row\n eloc = eloc_data[-1]\n tloc = tloc_data[-1]\n\n # Construct a CSV row with format csv_name, lang, eloc, test_lang, tloc\n csv_row = f'{csv_name},{lang},\"{eloc:,}\",{test_lang},\"{tloc:,}\"'\n\n return csv_row\n\n\ndef export_delta_eloc_tloc(data, csv_name):\n cleaned_data = utils.clean_data(data)\n\n # Get eloc, tloc, and language\n revs, eloc_data, tloc_data = utils.get_columns(cleaned_data, ['rev', 'eloc', 'testsize'])\n\n if csv_name == 'Lighttpd2':\n # Get index of revision 21d9d5e\n index = revs.index('21d9d5e')\n # Now filter revisions, eloc_data, and coverage to only include revisions after (and including) 21d9d5e\n revisions = revs[index:]\n eloc_data = eloc_data[index:]\n tloc_data = tloc_data[index:]\n\n # Get the last eloc and tloc\n eloc = eloc_data[-1]\n tloc = tloc_data[-1]\n\n # Get the first eloc and tloc\n first_eloc = eloc_data[0]\n first_tloc = tloc_data[0]\n\n # Calculate the delta eloc and tloc\n delta_eloc = eloc - first_eloc\n delta_tloc = tloc - first_tloc\n\n # Calculate the delta eloc and tloc as a percentage of the first eloc and tloc\n delta_eloc_percent = delta_eloc / first_eloc * 100\n delta_tloc_percent = delta_tloc / first_tloc * 100\n\n # Construct a CSV row with format csv_name, final eloc, delta eloc, final tloc, delta tloc\n # csv_row = f'{csv_name},\"{eloc:,}\",\"{delta_eloc:,}\",\"{tloc:,}\",\"{delta_tloc:,}\"'\n\n # Construct a CSV row with format csv_name, delta eloc, delta eloc percent, delta tloc, delta tloc percent\n csv_row = f'{csv_name},\"{delta_eloc:,}\",+{delta_eloc_percent:.1f}%,\"{delta_tloc:,}\",+{delta_tloc_percent:.1f}%'\n\n return csv_row\n\n\ndef export_code_coverage(data, csv_name):\n # Export the final percentage code coverage\n\n cleaned_data = utils.clean_data(data)\n\n # Get the eloc and coverage\n eloc_data, coverage_data, covlines, notcovlines, patch_coverage = utils.get_columns(cleaned_data, ['eloc', 'coverage', 'covlines', 'notcovlines', 'patchcoverage'])\n\n # Calculate the percentage code coverage which is the last coverage divided by the last eloc multiplied by 100\n percent_coverage = coverage_data[-1] / eloc_data[-1] * 100\n\n # Get all indices where covlines + notcovlines is not 0\n nonzero_indices = [i for i in range(len(covlines)) if covlines[i] + notcovlines[i] != 0]\n\n lines_modified = [covlines[i] + notcovlines[i] for i in nonzero_indices]\n\n patch_coverage = [patch_coverage[i] for i in nonzero_indices]\n\n # Normalize the lines modified into line weights (so that the sum of all line weights is 1)\n line_weights = [lines_modified[i] / sum(lines_modified) for i in range(len(lines_modified))]\n\n weighted_patch_coverage = [patch_coverage[i] * line_weights[i] for i in range(len(patch_coverage))]\n\n avg_weighted_patch_coverage = sum(weighted_patch_coverage)\n\n # Calculate the average patch coverage\n avg_patch_coverage = sum(patch_coverage) / len(patch_coverage)\n\n # Construct a CSV row with format csv_name, percent coverage\n csv_row = f'{csv_name},{percent_coverage:.1f}%,{avg_patch_coverage:.1f}%,{avg_weighted_patch_coverage:.1f}%'\n\n return csv_row\n\n\ndef export_lines_hunks_files(data, csv_name):\n # Export stats like lines, hunks, and files\n\n 
cleaned_data = utils.clean_data(data)\n\n # Get the lines, hunks, and files\n cov_lines, not_cov_lines, hunks, files = utils.get_columns(cleaned_data,\n ['covlines', 'notcovlines', 'ehunks3', 'echanged_files'])\n\n # # Get the differences between consecutive elocs\n # eloc_diffs = [abs(eloc[i] - eloc[i - 1]) for i in range(1, len(eloc))]\n #\n # # Set the first eloc_diff to 0\n # eloc_diffs = [0] + eloc_diffs\n\n # sum the covlines and notcovlines to get the total lines of code\n lines = [cov_lines[i] + not_cov_lines[i] for i in range(len(cov_lines))]\n\n # # Limit to the first 250 revisions\n # lines = lines[:250]\n # hunks = hunks[:250]\n # files = files[:250]\n\n # Find indices of rows where either of cov_lines and not_cov_lines are nonzero\n # (can do covlines + notcovlines > 0 since they are always positive)\n nonzero_indices = [i for i in range(len(lines)) if cov_lines[i] + not_cov_lines[i] > 0]\n\n # Filter lines and hunks to only include nonzero indices\n lines = [lines[i] for i in nonzero_indices]\n hunks = [hunks[i] for i in nonzero_indices]\n files = [files[i] for i in nonzero_indices]\n\n # Get the median of the lines, hunks, and files\n lines = int(statistics.median(lines))\n hunks = int(statistics.median(hunks))\n files = int(statistics.median(files))\n\n # # Get the mean of the lines, hunks, and files\n # lines = int(statistics.mean(lines))\n # hunks = int(statistics.mean(hunks))\n # files = int(statistics.mean(files))\n\n # Construct a CSV row with format csv_name, lines, hunks, files\n csv_row = f'{csv_name},{lines},{hunks},{files}'\n\n return csv_row\n\n\ndef export_bucketed_patch_coverage(data, csv_name):\n # Clean the data\n cleaned_data = utils.clean_data(data)\n\n # Get the coverage data, eloc and echanged_files\n eloc_data, coveredlines, notcoveredlines = utils.get_columns(cleaned_data, ['eloc', 'covlines', 'notcovlines'])\n\n eloc_diffs = [coveredlines[i] + notcoveredlines[i] for i in range(len(coveredlines))]\n\n nonzero_indices = []\n for i in range(len(eloc_data)):\n if eloc_data[i] > 0:\n if coveredlines[i] + notcoveredlines[i] > 0:\n nonzero_indices.append(i)\n\n eloc_diffs = [eloc_diffs[i] for i in nonzero_indices]\n coveredlines = [coveredlines[i] for i in nonzero_indices]\n\n bins = [10, 100, 1000, float('inf')]\n\n bucketed_cov_perc_data = [0] * len(bins)\n total_covered = [0] * len(bins)\n total_total = [0] * len(bins)\n\n for i in range(len(eloc_diffs)):\n for j in range(len(bins)):\n if eloc_diffs[i] <= bins[j]:\n bucketed_cov_perc_data[j] += 1\n total_covered[j] += coveredlines[i]\n total_total[j] += eloc_diffs[i]\n break\n\n # Get the average coverage percentages\n bucketed_cov_perc_data_av = [total_covered[i] * 100 / total_total[i] if total_total[i] != 0 else 0 for i in range(len(total_covered))]\n\n # Also replace any 0s in bucketed_cov_perc_data with -\n csv_data = [csv_name] + [f'{data},{av:.1f}%' if av != 0 else f'{data},-' for data, av in\n zip(bucketed_cov_perc_data, bucketed_cov_perc_data_av)]\n csv_row = ','.join(csv_data)\n\n return csv_row\n\n\ndef export_non_det_revisions(data, csv_name):\n # Clean the data (not removing all OK rows since some repos return OK but have different return values under the\n # hood which make for some interesting results)\n cleaned_data = utils.clean_data(data, omit=['EmptyCommit', 'NoCoverage', 'compileError', 'TimedOut'])\n\n # Get the commit hash, repeats, and non_det columns\n commit_hash, repeats, non_det = utils.get_columns(cleaned_data, ['rev', 'repeats', 'non_det'])\n\n # Get the indices of the non_det 
revisions\n non_det_indices = [i for i in range(len(non_det)) if non_det[i] == 'True']\n\n # Get the commit hashes of the non_det revisions\n non_det_commit_hashes = [commit_hash[i] for i in non_det_indices]\n\n # Get the number of repeats of the non_det revisions\n non_det_repeats = [repeats[i] for i in non_det_indices][0]\n\n # Get the number of non_det revisions\n num_non_det_revs = len(non_det_commit_hashes)\n\n # Get the number of non_det revisions as a percentage of the total number of revisions\n num_non_det_revs_perc = num_non_det_revs / len(commit_hash) * 100\n\n # Construct a csv row with the format csv_name, num_non_det_revs, num_not_det_revs_perc, non_det_repeats, non_det_commit_hashes*\n csv_row = f'{csv_name},{num_non_det_revs},{num_non_det_revs_perc:.1f},{non_det_repeats},\\\"[{\",\".join(non_det_commit_hashes)}]\\\"'\n\n return csv_row\n\n\ndef export_coverage_delta(data, csv_name):\n # Assumes the last test failure registered doesn't massively affect the coverage\n cleaned_data = utils.clean_data(data,\n omit=['EmptyCommit', 'NoCoverage', 'compileError', 'TimedOut'])\n\n # Get the coverage data and eloc\n revisions, eloc_data, coverage = utils.get_columns(cleaned_data, ['rev','eloc', 'coverage'])\n\n # Filtering out initial bug that cause coverage to be 2% from a bug whereas it should be at least 34% for Lighttpd2.\n if csv_name == 'Lighttpd2':\n # Get index of revision 21d9d5e\n index = revisions.index('21d9d5e')\n # Now filter revisions, eloc_data, and coverage to only include revisions after (and including) 21d9d5e\n revisions = revisions[index:]\n eloc_data = eloc_data[index:]\n coverage = coverage[index:]\n\n\n # Calculate the coverage at the start and end of the project\n start_coverage = coverage[0] / eloc_data[0] * 100\n end_coverage = coverage[-1] / eloc_data[-1] * 100\n\n # Calculate the delta in coverage\n delta_coverage = end_coverage - start_coverage\n\n # Calculate the percentage delta in coverage\n delta_coverage_perc = delta_coverage / start_coverage * 100\n\n # If delta_coverage and delta_coverage_perc are positive, add a + to the start of the string\n\n # Construct a csv row with the format csv_name, start_coverage, end_coverage, delta_coverage, delta_coverage_perc\n csv_row = f'{csv_name},{start_coverage:.1f},{end_coverage:.1f},{delta_coverage_perc:+.1f}%'\n\n return csv_row\n\n\ndef write_stats(paths, csv_names, limit=None):\n write_multiple_csv(export_number_revs, paths, csv_names, ['App', 'OK', 'TF', 'TO', 'CF', 'Total Working', 'Time'],\n 'num_revs_all', limit=limit, no_filter=True)\n write_multiple_csv(export_number_revs, paths, csv_names, ['App', 'OK', 'TF', 'TO', 'MCT', 'Time'],\n 'num_revs_mct', limit=limit, no_compile_fail=True)\n write_multiple_csv(export_date_range, paths, csv_names, ['App', 'Start Date', 'End Date'], 'date_range',\n limit=limit)\n write_multiple_csv(export_eloc_tloc, paths, csv_names, ['App', 'Lang.', 'ELOC', 'Lang.', 'TLOC'], 'eloc_tloc',\n limit=limit)\n write_multiple_csv(export_delta_eloc_tloc, paths, csv_names, ['App', 'ΔELOC', 'ΔELOC%', 'ΔTLOC', 'ΔTLOC%'],\n 'delta_eloc_tloc', limit=limit)\n write_multiple_csv(export_lines_hunks_files, paths, csv_names, ['App', 'Lines', 'Hunks', 'Files'],\n 'lines_hunks_files', limit=limit)\n write_multiple_csv(export_bucketed_patch_coverage, paths, csv_names,\n ['App', '<= 10 NP', '<= 10 C', '11-100 NP', '11-100 C', '101-1000 NP', '101-1000 C', '> 1000 NP', '> 1000 C'],\n 'bucketed_patch_coverage', limit=limit)\n write_multiple_csv(export_code_coverage, paths, csv_names, ['App', 'Final 
Cov. %', 'Avg. Patch Cov. %', 'Line-Weighted A.P.C. %'], 'code_coverage', limit=limit)\n write_multiple_csv(export_coverage_delta, paths, csv_names, ['App', 'Start Cov. %', 'End Cov. %', 'Cov. % Δ'],\n 'coverage_delta', limit=limit)\n\n paths, csv_names = utils.filter_to_non_det_supported(paths, csv_names)\n\n write_multiple_csv(export_non_det_revisions, paths, csv_names,\n ['App', 'Nondet. Result', '% Total Working Flaky', 'Repeats', 'Nondet. Commits'], 'non_det_revs')\n\n\ndef write_multiple_csv(func, paths, csv_names, header, name, limit=None, no_filter=False, **kwargs):\n # Run a function on multiple CSV files\n rows = []\n for i in range(len(csv_names)):\n csv_data = utils.extract_data(f'{paths[i]}', csv_names[i])\n if csv_data is None:\n continue\n if not no_filter:\n # Filter the data to only include revisions that modify executable code or test files\n csv_data, _ = utils.filter_data_by_exec_test(csv_data)\n if limit is not None:\n csv_data = csv_data[-limit:]\n res = func(csv_data, csv_names[i], **kwargs)\n if res is not None:\n rows.append(res)\n\n # Convert header to a CSV row\n header = ','.join(header)\n\n write_csv(f'stats/{name}.csv', header, rows)\n\n # Print wrote to file\n print(f'Wrote to stats/{name}.csv')\n\n\ndef write_csv(csv_name, header, rows):\n # Write the rows to the csv\n with open(csv_name, 'w') as f:\n # Write the header\n f.write(header + '\\n')\n\n # Write the rows\n for row in rows:\n f.write(row + '\\n')\n\n return\n\n\nif __name__ == '__main__':\n import os\n import argparse\n import glob\n\n # argparse the location of the input file (e.g. remotedata/apr/Apr.csv)\n parser = argparse.ArgumentParser()\n # argparse for either an input file or a directory\n parser.add_argument('input', help='The input file or directory to process')\n # add a directory option so if --dir is present, the input is a directory, otherwise it is a file\n parser.add_argument('--dir', action='store_true',\n help='The input is a directory (dir/repo1/*.csv, dir/repo2/*.csv)')\n # add a limit option to limit the number of revisions to process\n parser.add_argument('--limit', type=int, help='The number of revisions to process')\n\n args = parser.parse_args()\n\n print(args.limit)\n\n # Make a stats directory if it doesn't exist\n if not os.path.isdir('stats'):\n os.mkdir('stats')\n\n if args.dir:\n # Make sure the input is a directory\n if not os.path.isdir(args.input):\n raise NotADirectoryError(f'{args.input} is not a directory')\n\n # Get the names of the CSV files (basenames)\n paths = glob.glob(f'{args.input}/*/*.csv')\n\n # Add to paths a level up\n if len(paths) == 0:\n paths += glob.glob(f'{args.input}/*.csv')\n\n included_paths = ['remotedata/apr/Apr_repeats.csv',\n 'remotedata/binutils-gdb/BinutilsGdb_repeats.csv',\n 'remotedata/curl/Curl_repeats.csv',\n 'remotedata/git/Git_repeats.csv',\n 'remotedata/lighttpd2/Lighttpd2_repeats.csv',\n 'remotedata/memcached/Memcached_repeats.csv',\n 'remotedata/redis/Redis_repeats.csv',\n 'remotedata/vim/Vim_repeats.csv',\n 'remotedata/zeromq/Zeromq_repeats.csv']\n\n # Get indices of all paths that contain the word 'diffcov'\n diffcov_indices = [i for i in range(len(paths)) if 'diffcov' in paths[i]]\n # Remove all paths that contain the word 'diffcov'\n paths = [paths[i] for i in range(len(paths)) if i not in diffcov_indices]\n\n # Make sure we have at least one CSV file\n if len(paths) == 0:\n raise FileNotFoundError(f'No CSV files found in {args.input}')\n\n paths = [x for x in paths if x in included_paths]\n\n # Make sure we have at least 
one valid CSV file\n if len(paths) == 0:\n raise FileNotFoundError(f'No non-excepted CSV files found in {args.input}')\n\n csv_names = [os.path.basename(x) for x in paths]\n\n # Remove the .csv extension\n csv_names = [x[:-4] for x in csv_names]\n\n # Trim CSV names\n csv_names = utils.reformat_csv_names(csv_names)\n\n csv_paths = sorted(zip(csv_names, paths))\n csv_names, paths = zip(*csv_paths)\n csv_names = list(csv_names)\n paths = list(paths)\n\n print(f'Paths: {paths}')\n # Print the names of the CSV files\n print(f'CSV names: {csv_names}')\n print(\"=====================================================\")\n\n # Stats for number of revs\n write_stats(paths, csv_names, limit=args.limit)\n\n else:\n # Make sure we have a file not a directory and that it is a CSV, throw a nice error otherwise\n if os.path.isdir(args.input):\n raise IsADirectoryError(\n f'Input {args.input} is a directory (single input should be a file, try using --dir)')\n if not os.path.isfile(args.input):\n raise FileNotFoundError(f'Input {args.input} is not a file')\n if not args.input.endswith('.csv'):\n raise TypeError(f'File {args.input} is not a CSV file')\n\n # Get the name of the CSV file (basename)\n csv_name = os.path.basename(args.input)\n\n # Remove the .csv extension\n csv_name = csv_name[:-4]\n\n # Stats for number of revs\n write_stats([args.input], [csv_name], limit=args.limit)\n\n print(\"All done!\")\n", "repo_name": "srg-imperial/covrig", "sub_path": "postprocessing/get_stats.py", "file_name": "get_stats.py", "file_ext": "py", "file_size_in_byte": 20690, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "internal.csv_utils.clean_data", "line_number": 11, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 11, "usage_type": "name"}, {"api_name": "internal.csv_utils.get_columns", "line_number": 14, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 14, "usage_type": "name"}, {"api_name": "internal.csv_utils.clean_data", "line_number": 55, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 55, "usage_type": "name"}, {"api_name": "internal.csv_utils.get_columns", "line_number": 58, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 58, "usage_type": "name"}, {"api_name": "internal.csv_utils.clean_data", "line_number": 80, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 80, "usage_type": "name"}, {"api_name": "internal.csv_utils.get_columns", "line_number": 83, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 83, "usage_type": "name"}, {"api_name": "internal.csv_utils.clean_data", "line_number": 119, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 119, "usage_type": "name"}, {"api_name": "internal.csv_utils.get_columns", "line_number": 122, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 122, "usage_type": "name"}, {"api_name": "internal.csv_utils.clean_data", "line_number": 160, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 160, "usage_type": "name"}, {"api_name": "internal.csv_utils.get_columns", "line_number": 163, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 163, "usage_type": "name"}, {"api_name": "internal.csv_utils.clean_data", "line_number": 194, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 194, "usage_type": "name"}, {"api_name": 
"internal.csv_utils.get_columns", "line_number": 197, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 197, "usage_type": "name"}, {"api_name": "statistics.median", "line_number": 224, "usage_type": "call"}, {"api_name": "statistics.median", "line_number": 225, "usage_type": "call"}, {"api_name": "statistics.median", "line_number": 226, "usage_type": "call"}, {"api_name": "internal.csv_utils.clean_data", "line_number": 241, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 241, "usage_type": "name"}, {"api_name": "internal.csv_utils.get_columns", "line_number": 244, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 244, "usage_type": "name"}, {"api_name": "internal.csv_utils.clean_data", "line_number": 285, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 285, "usage_type": "name"}, {"api_name": "internal.csv_utils.get_columns", "line_number": 288, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 288, "usage_type": "name"}, {"api_name": "internal.csv_utils.clean_data", "line_number": 313, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 313, "usage_type": "name"}, {"api_name": "internal.csv_utils.get_columns", "line_number": 317, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 317, "usage_type": "name"}, {"api_name": "internal.csv_utils.filter_to_non_det_supported", "line_number": 367, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 367, "usage_type": "name"}, {"api_name": "internal.csv_utils.extract_data", "line_number": 377, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 377, "usage_type": "name"}, {"api_name": "internal.csv_utils.filter_data_by_exec_test", "line_number": 382, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 382, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 417, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 431, "usage_type": "call"}, {"api_name": "os.path", "line_number": 431, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 432, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 436, "usage_type": "call"}, {"api_name": "os.path", "line_number": 436, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 440, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 444, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 471, "usage_type": "call"}, {"api_name": "os.path", "line_number": 471, "usage_type": "attribute"}, {"api_name": "internal.csv_utils.reformat_csv_names", "line_number": 477, "usage_type": "call"}, {"api_name": "internal.csv_utils", "line_number": 477, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 494, "usage_type": "call"}, {"api_name": "os.path", "line_number": 494, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 497, "usage_type": "call"}, {"api_name": "os.path", "line_number": 497, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 503, "usage_type": "call"}, {"api_name": "os.path", "line_number": 503, "usage_type": "attribute"}]}
+{"seq_id": "71106903605", "text": "#!/usr/bin/env python3\n#coding:utf-8\n\nimport time\nimport utils\nimport config\nimport logging\nimport numpy as np\nfrom pynvml import *\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.nn.utils.rnn import pad_sequence\n\nfrom train import translate\nfrom utils import english_tokenizer_load\nfrom model import make_model, LabelSmoothing\nimport torch.multiprocessing as mp\n\ndef one_sentence_translate(sent, beam_search=True, online=0):\n model = make_model(config.src_vocab_size, config.tgt_vocab_size, config.n_layers,\n config.d_model, config.d_ff, config.n_heads, config.dropout)\n model.load_state_dict(torch.load(config.model_path))\n model.eval()\n \n BOS = english_tokenizer_load().bos_id() # 2\n EOS = english_tokenizer_load().eos_id() # 3\n if online:\n while 1:\n try:\n print('-'*40)\n sent = input('input sentences:').strip()\n except :\n sent = ''\n if sent in [''] : continue\n if sent in ['Q', 'q'] : break\n\n src_tokens = [[BOS] + english_tokenizer_load().EncodeAsIds(sent) + [EOS]]\n batch_input = torch.LongTensor(np.array(src_tokens)).to(config.device)\n ret = translate(batch_input, model, use_beam=beam_search)\n print(ret)\n else:\n src_tokens = [[BOS] + english_tokenizer_load().EncodeAsIds(sent) + [EOS]]\n batch_input = torch.LongTensor(np.array(src_tokens)).to(config.device)\n ret = translate(batch_input, model, use_beam=beam_search)\n print(ret)\n\ndef translate_example():\n sent = \"The near-term policy remedies are clear: raise the minimum wage to a level that will keep a \" \\\n \"fully employed worker and his or her family out of poverty, and extend the earned-income tax credit \" \\\n \"to childless workers.\"\n one_sentence_translate(sent, beam_search=True)\n\ndef online_translate():\n config.beam_size = 4\n one_sentence_translate('', beam_search=True, online=1)\n#-----------------------------------------\ndef get_one_sample(sent):\n BOS = english_tokenizer_load().bos_id()\n EOS = english_tokenizer_load().eos_id()\n src_tokens = [[BOS] + english_tokenizer_load().EncodeAsIds(sent) + [EOS]]\n return np.array(src_tokens)\n\ndef get_sample(sents):\n BOS = english_tokenizer_load().bos_id()\n EOS = english_tokenizer_load().eos_id()\n PAD = english_tokenizer_load().pad_id()\n src_tokens = [[BOS] + english_tokenizer_load().EncodeAsIds(sent) + [EOS] for sent in sents]\n ret = pad_sequence([torch.from_numpy(np.array(x)) for x in src_tokens], \n batch_first=True, padding_value=PAD)\n return ret\n\ndef translate_one_sample(txt, model, beam_search=True):\n BOS = english_tokenizer_load().bos_id()\n EOS = english_tokenizer_load().eos_id()\n sample = [[BOS] + english_tokenizer_load().EncodeAsIds(txt.strip()) + [EOS]]\n batch_input = torch.LongTensor(np.array(sample)).to(config.device)\n ret = translate(batch_input, model, use_beam=beam_search)\n #print('translate:', ret)\n return ret\n\ndef translate_texts(sentences, model, beam_search=True):\n result = []\n for txt in sentences:\n if txt:\n ret = translate_one_sample(txt, model, beam_search=beam_search)\n result.append(ret)\n return result\n\ndef translate_batch(sentences, model, beam_search=True, batch_size=64):\n total = len(sentences)\n #if total > batch_size:\n sentlist = [sentences[i*batch_size:(i+1)*batch_size] for i in range(np.ceil(total/batch_size).astype(int))]\n result = []\n torch.cuda.empty_cache()\n \n for txts in sentlist:\n batch_dat = get_sample(txts)\n batch_input = torch.LongTensor(batch_dat).to(config.device)\n ret = translate(batch_input, model, 
use_beam=beam_search)\n #print('translate:\\n', '\\n'.join(ret))\n result.extend(ret)\n torch.cuda.empty_cache()\n return result\n\ndef readtxt(fname, encoding='utf-8'):\n try:\n with open(fname, 'r', encoding=encoding) as f: \n data = f.read()\n return data\n except Exception as e:\n return ''\n\ndef savetofile(txt, filename, encoding='utf-8', method='a+'):\n try:\n with open(filename, method, encoding=encoding) as f: \n f.write(str(txt)+ '\\n')\n return 1\n except :\n return 0\n\n\ndef GPU_memory(gpuid=0):\n NUM_EXPAND = 1024 * 1024\n handle = nvmlDeviceGetHandleByIndex(gpuid)\n info = nvmlDeviceGetMemoryInfo(handle)\n\n gpu_memory_used = info.used / NUM_EXPAND\n #print('Total Memory:%d MB, Used Memory:%d MB'% (gpu_memory_total, gpu_memory_used))\n return gpu_memory_used\n\nclass GPU_MEM():\n def __init__(self, gupid=0, interval=1):\n self.gupid = gupid\n self.interval = interval\n self.status = 0\n self.data = []\n self.queue = mp.Queue()\n self.process = None\n\n def get_gpu_memory(self):\n while True:\n mem = GPU_memory(self.gupid)\n #print('memory:', mem)\n self.queue.put(mem)\n if self.interval>0:\n time.sleep(self.interval)\n else:\n break;\n \n def build(self):\n pass\n self.data = []\n self.process = mp.Process(target=self.get_gpu_memory)\n\n def start(self, interval=1):\n self.interval = interval\n if not self.process is None:\n self.process.start()\n #self.process.join()\n \n def stop(self):\n self.interval = 0\n #self.process.stop()\n self.process.terminate()\n self.data = self.get_queue()\n\n def mem_ave(self):\n if self.data == []:\n ret = 0\n else:\n ret = np.average(self.data)\n \n return ret\n \n def mem_max(self):\n if self.data == []:\n ret = 0\n else:\n ret = np.max(self.data)\n \n return ret\n\n def get_queue(self):\n ret = []\n while not self.queue.empty():\n dat = self.queue.get()\n ret.append(dat)\n return ret\n\nif __name__ == \"__main__\":\n import os\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n import warnings\n warnings.filterwarnings('ignore')\n #translate_example()\n online_translate()\n\n\n", "repo_name": "cfl2005/ECTransformer", "sub_path": "translate.py", "file_name": "translate.py", "file_ext": "py", "file_size_in_byte": 6296, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "model.make_model", "line_number": 21, "usage_type": "call"}, {"api_name": "config.src_vocab_size", "line_number": 21, "usage_type": "attribute"}, {"api_name": "config.tgt_vocab_size", "line_number": 21, "usage_type": "attribute"}, {"api_name": "config.n_layers", "line_number": 21, "usage_type": "attribute"}, {"api_name": "config.d_model", "line_number": 22, "usage_type": "attribute"}, {"api_name": "config.d_ff", "line_number": 22, "usage_type": "attribute"}, {"api_name": "config.n_heads", "line_number": 22, "usage_type": "attribute"}, {"api_name": "config.dropout", "line_number": 22, "usage_type": "attribute"}, {"api_name": "model.load_state_dict", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 23, "usage_type": "call"}, {"api_name": "config.model_path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "model.eval", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.english_tokenizer_load", "line_number": 26, "usage_type": "call"}, {"api_name": "utils.english_tokenizer_load", "line_number": 27, "usage_type": "call"}, {"api_name": "utils.english_tokenizer_load", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.LongTensor", 
"line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "config.device", "line_number": 39, "usage_type": "attribute"}, {"api_name": "train.translate", "line_number": 40, "usage_type": "call"}, {"api_name": "utils.english_tokenizer_load", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 44, "usage_type": "call"}, {"api_name": "config.device", "line_number": 44, "usage_type": "attribute"}, {"api_name": "train.translate", "line_number": 45, "usage_type": "call"}, {"api_name": "config.beam_size", "line_number": 55, "usage_type": "attribute"}, {"api_name": "utils.english_tokenizer_load", "line_number": 59, "usage_type": "call"}, {"api_name": "utils.english_tokenizer_load", "line_number": 60, "usage_type": "call"}, {"api_name": "utils.english_tokenizer_load", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 62, "usage_type": "call"}, {"api_name": "utils.english_tokenizer_load", "line_number": 65, "usage_type": "call"}, {"api_name": "utils.english_tokenizer_load", "line_number": 66, "usage_type": "call"}, {"api_name": "utils.english_tokenizer_load", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.english_tokenizer_load", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "utils.english_tokenizer_load", "line_number": 74, "usage_type": "call"}, {"api_name": "utils.english_tokenizer_load", "line_number": 75, "usage_type": "call"}, {"api_name": "utils.english_tokenizer_load", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "config.device", "line_number": 77, "usage_type": "attribute"}, {"api_name": "train.translate", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.cuda.empty_cache", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 95, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 99, "usage_type": "call"}, {"api_name": "config.device", "line_number": 99, "usage_type": "attribute"}, {"api_name": "train.translate", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.cuda.empty_cache", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 103, "usage_type": "attribute"}, {"api_name": "torch.multiprocessing.Queue", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.multiprocessing", "line_number": 138, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.multiprocessing.Process", "line_number": 154, "usage_type": "call"}, {"api_name": "torch.multiprocessing", "line_number": 154, "usage_type": "name"}, {"api_name": "numpy.average", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 180, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 193, "usage_type": "attribute"}, {"api_name": "warnings.filterwarnings", "line_number": 195, "usage_type": "call"}]}
+{"seq_id": "19899139839", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\n\ndef first(request):\n html = \"\"\"\n First page
\n Home
\n Second page\n \"\"\"\n return HttpResponse(html)\n\ndef second(request):\n html = \"\"\"\n Second page
\n First page\n \"\"\"\n return HttpResponse(html)\n", "repo_name": "azizdevfull/django-lessons", "sub_path": "lesson1/home/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 395, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.http.HttpResponse", "line_number": 10, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "13332761005", "text": "# Read agent\nimport pickle\n\nimport gym\n\nwith open(\"models/unit2/taxi_model.pkl\", \"rb\") as f:\n model = pickle.load(f)\n\n# Evaluate our Agent\nfrom unit2.general.q_learn import evaluate_agent\n\n# Get env from gym\nenv = gym.make(model[\"env_id\"])\n\n# Get parameters from model\nmax_steps = model[\"max_steps\"]\nn_eval_episodes = model[\"n_eval_episodes\"]\nQtable_taxi = model[\"qtable\"]\neval_seed = model[\"eval_seed\"]\n\nmean_reward, std_reward = evaluate_agent(env, max_steps, n_eval_episodes, Qtable_taxi, eval_seed)\nprint(f\"Mean_reward={mean_reward:.2f} +/- {std_reward:.2f}\")", "repo_name": "Akrielz/Reinforcement-Learning-HF", "sub_path": "unit2/taxi_v3/eval_taxi.py", "file_name": "eval_taxi.py", "file_ext": "py", "file_size_in_byte": 566, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "pickle.load", "line_number": 7, "usage_type": "call"}, {"api_name": "gym.make", "line_number": 13, "usage_type": "call"}, {"api_name": "unit2.general.q_learn.evaluate_agent", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "74779728244", "text": "\nimport sys\nimport os\nimport platform\nimport json\nimport re\n\n# Globally load in bundled images\nglobals()[\"qbit.resources\"] = __import__(\"qbit.resources\")\n\nimport qbit.colour\n\nAPPLICATION_NAME = \"Qbit\"\nAPPLICATION_VERSION = \"0.0024\"\nAPPLICATION_DESCRIPTION = \"IRC Micro-Client\"\n\nHOST_OS = platform.system()\nHOST_OS_VERSION = platform.release()\nHOST_PLATFORM = platform.platform(aliased=1)\n\nDEFAULT_IRC_NICKNAME = \"qbit\"\nDEFAULT_IRC_USERNAME = \"qbit\"\nDEFAULT_IRC_IRCNAME = f\"Qbit {APPLICATION_VERSION}\"\n\nINSTALL_DIRECTORY = sys.path[0]\nQBIT_DIRECTORY = os.path.join(INSTALL_DIRECTORY, \"qbit\")\nIMAGE_SPINNER = os.path.join(QBIT_DIRECTORY, \"spinner.gif\")\n\nINITIAL_WINDOW_WIDTH = 500\nINITIAL_WINDOW_HEIGHT = 300\n\nINSTALL_DIRECTORY = sys.path[0]\nQBIT_DIRECTORY = os.path.join(INSTALL_DIRECTORY, \"qbit\")\nCONFIG_DIRECTORY = os.path.join(INSTALL_DIRECTORY, \"config\")\n\nDISPLAY_CONFIGURATION = os.path.join(CONFIG_DIRECTORY, \"display.json\")\nLAST_SERVER_INFORMATION_FILE = os.path.join(CONFIG_DIRECTORY, \"lastserver.json\")\nAUTOJOIN_FILE = os.path.join(CONFIG_DIRECTORY, \"autojoin.json\")\nUSER_FILE = os.path.join(CONFIG_DIRECTORY, \"user.json\")\n\nIMAGE_QBIT_ICON = \":/qbit.png\"\nIMAGE_PAGE_ICON = \":/page.png\"\nIMAGE_EXIT_ICON = \":/exit.png\"\nIMAGE_RESTART_ICON = \":/restart.png\"\nIMAGE_USER_ICON = \":/user.png\"\nIMAGE_CLEAR_ICON = \":/clear.png\"\nIMAGE_SERVER_ICON = \":/server.png\"\nIMAGE_ABOUT_ICON = \":/about.png\"\nIMAGE_PLUS_ICON = \":/plus.png\"\nIMAGE_MINUS_ICON = \":/minus.png\"\nIMAGE_NO_ICON = \":/no.png\"\nIMAGE_UNIGNORE_ICON = \":/unignore.png\"\nIMAGE_X_ICON = \":/x.png\"\nIMAGE_SAVE_ICON = \":/save.png\"\n\nIMAGE_LOGO = \":/logo.png\"\nIMAGE_PYTHON = \":/python.png\"\nIMAGE_QT = \":/qt.png\"\nIMAGE_GPL = \":/gpl.png\"\n\n# Set display defaults\nQBIT_FONT = \"Consolas\"\n\nNORMAL_FONT_SIZE = 10\nBIG_FONT_SIZE = 12\nSMALL_FONT_SIZE = 8\n\nLINK_URLS = True\n\nTEXT_BACKGROUND_COLOR = \"#ffffff\"\nTEXT_COLOR = \"#000000\"\nERROR_COLOR = \"#FF0000\"\nSYSTEM_COLOR = \"#FF9C00\"\nSELF_COLOR = \"#FF0000\"\nUSERNAME_COLOR = \"#00007F\"\nACTION_COLOR = \"#009300\"\nLINK_COLOR = \"#00007F\"\nNOTICE_COLOR = \"#9C009C\"\nMOTD_COLOR = \"#00007F\"\nERROR_COLOR = \"#FF0000\"\n\nGRADIENT_LIGHTEN = 0.95\n\nMAX_USERNAME_SIZE = 16\n\nCHANNEL_INFO_NAME = 0\nCHANNEL_INFO_KEY = 1\nCHANNEL_INFO_LIMIT = 2\nCHANNEL_INFO_INVITEONLY = 3\nCHANNEL_INFO_ALLOWEXTERNAL = 4\nCHANNEL_INFO_TOPICLOCKED = 5\nCHANNEL_INFO_PROTECTED = 6\nCHANNEL_INFO_SECRET = 7\nCHANNEL_INFO_MODERATED = 8\nCHANNEL_INFO_NOCOLORS = 9\n\ndef is_integer(n):\n\ttry:\n\t\tint(n)\n\texcept ValueError:\n\t\treturn False\n\treturn True\n\ndef save_display_config(config):\n\twith open(DISPLAY_CONFIGURATION, \"w\") as write_data:\n\t\tjson.dump(config, write_data)\n\ndef loadDisplayConfig():\n\tif os.path.isfile(DISPLAY_CONFIGURATION):\n\t\twith open(DISPLAY_CONFIGURATION, \"r\") as read_data:\n\t\t\tdata = json.load(read_data)\n\t\t\treturn data\n\telse:\n\t\tdc = {\n\t\t\t\"font\": QBIT_FONT,\n\t\t\t\"fontsize\": NORMAL_FONT_SIZE,\n\t\t\t\"fontbig\": BIG_FONT_SIZE,\n\t\t\t\"fontsmall\": SMALL_FONT_SIZE,\n\t\t\t\"background\": TEXT_BACKGROUND_COLOR,\n\t\t\t\"text\": TEXT_COLOR,\n\t\t\t\"error\": ERROR_COLOR,\n\t\t\t\"system\": SYSTEM_COLOR,\n\t\t\t\"self\": SELF_COLOR,\n\t\t\t\"user\": USERNAME_COLOR,\n\t\t\t\"action\": ACTION_COLOR,\n\t\t\t\"link\": LINK_COLOR,\n\t\t\t\"notice\": NOTICE_COLOR,\n\t\t\t\"motd\": MOTD_COLOR,\n\t\t\t\"links\": 
LINK_URLS,\n\t\t\t\"width\": INITIAL_WINDOW_WIDTH,\n\t\t\t\"height\": INITIAL_WINDOW_HEIGHT\n\t\t}\n\t\twith open(DISPLAY_CONFIGURATION, \"w\") as write_data:\n\t\t\tjson.dump(dc, write_data)\n\t\treturn dc\n\n# Load in display settings from file\nDC = loadDisplayConfig()\nQBIT_FONT = DC[\"font\"]\nNORMAL_FONT_SIZE = DC[\"fontsize\"]\nBIG_FONT_SIZE = DC[\"fontbig\"]\nSMALL_FONT_SIZE = DC[\"fontsmall\"]\nTEXT_BACKGROUND_COLOR = DC[\"background\"]\nTEXT_COLOR = DC[\"text\"]\nERROR_COLOR = DC[\"error\"]\nSYSTEM_COLOR = DC[\"system\"]\nSELF_COLOR = DC[\"self\"]\nUSERNAME_COLOR = DC[\"user\"]\nACTION_COLOR = DC[\"action\"]\nLINK_COLOR = DC[\"link\"]\nNOTICE_COLOR = DC[\"notice\"]\nMOTD_COLOR = DC[\"motd\"]\nLINK_URLS = DC[\"links\"]\nINITIAL_WINDOW_WIDTH = DC[\"width\"]\nINITIAL_WINDOW_HEIGHT = DC[\"height\"]\n\nCHAT_TEMPLATE = f\"\"\"\n\n \n\t\n\t | !USER! | \n\t | \n\t !MESSAGE! | \n\t\n \n\n\"\"\"\n\nACTION_TEMPLATE = \"\"\"\n\n \n\t\n\t | !USER! !MESSAGE! | \n\t\n \n\n\"\"\"\n\nSYSTEM_TEMPLATE = \"\"\"\n\n \n\t\n\t | !MESSAGE! | \n\t\n \n
\n\"\"\"\n\ndef inject_www_links(txt):\n\tif not LINK_URLS: return txt\n\turls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', txt)\n\tfor u in urls:\n\t\tu = re.sub('<[^<]+?>', '', u)\n\t\tlink = f\"{u}\"\n\t\ttxt = txt.replace(u,link)\n\treturn txt\n\ndef pad_nick(nick,size):\n\tx = size - len(nick)\n\tif x<0 : x = 0\n\ty = ' '*x\n\treturn f\"{y}{nick}\"\n\ndef system_display(text):\n\tmsg = SYSTEM_TEMPLATE.replace('!COLOR!',SYSTEM_COLOR)\n\tmsg = msg.replace('!BACKGROUND!',TEXT_BACKGROUND_COLOR)\n\tmsg = msg.replace('!MESSAGE!',text)\n\n\treturn msg\n\ndef error_display(text):\n\tmsg = SYSTEM_TEMPLATE.replace('!COLOR!',ERROR_COLOR)\n\tmsg = msg.replace('!BACKGROUND!',TEXT_BACKGROUND_COLOR)\n\tmsg = msg.replace('!MESSAGE!',text)\n\n\treturn msg\n\ndef chat_display(user,text,max):\n\ttext = remove_html_markup(text)\n\tuser = pad_nick(user,max)\n\ttext = inject_www_links(text)\n\tmsg = CHAT_TEMPLATE.replace('!USER!',user)\n\tmsg = msg.replace('!COLOR!',USERNAME_COLOR)\n\tmsg = msg.replace('!BACKGROUND!',TEXT_BACKGROUND_COLOR)\n\tmsg = msg.replace('!CCHAT!',TEXT_COLOR)\n\tmsg = msg.replace('!MESSAGE!',text)\n\n\t# Gradient\n\tBG = qbit.colour.Color(TEXT_BACKGROUND_COLOR)\n\tLIGHT_COLOR = qbit.colour.Color(USERNAME_COLOR,luminance=GRADIENT_LIGHTEN)\n\tUSER_BACKGROUND_GRADIENT = f\"background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 {BG}, stop:1 {LIGHT_COLOR});\"\n\tmsg = msg.replace(\"!GRADIENT!\",USER_BACKGROUND_GRADIENT)\n\n\treturn msg\n\ndef mychat_display(user,text,max):\n\ttext = remove_html_markup(text)\n\tuser = pad_nick(user,max)\n\ttext = inject_www_links(text)\n\tmsg = CHAT_TEMPLATE.replace('!USER!',user)\n\tmsg = msg.replace('!COLOR!',SELF_COLOR)\n\tmsg = msg.replace('!BACKGROUND!',TEXT_BACKGROUND_COLOR)\n\tmsg = msg.replace('!CCHAT!',TEXT_COLOR)\n\tmsg = msg.replace('!MESSAGE!',text)\n\n\t# Gradient\n\tBG = qbit.colour.Color(TEXT_BACKGROUND_COLOR)\n\tLIGHT_COLOR = qbit.colour.Color(SELF_COLOR,luminance=GRADIENT_LIGHTEN)\n\tUSER_BACKGROUND_GRADIENT = f\"background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 {BG}, stop:1 {LIGHT_COLOR});\"\n\tmsg = msg.replace(\"!GRADIENT!\",USER_BACKGROUND_GRADIENT)\n\n\treturn msg\n\ndef action_display(user,text):\n\ttext = remove_html_markup(text)\n\ttext = inject_www_links(text)\n\tmsg = ACTION_TEMPLATE.replace('!USER!',user)\n\tmsg = msg.replace('!COLOR!',ACTION_COLOR)\n\tmsg = msg.replace('!MESSAGE!',text)\n\n\t# Gradient\n\tBG = qbit.colour.Color(TEXT_BACKGROUND_COLOR)\n\tLIGHT_COLOR = qbit.colour.Color(ACTION_COLOR,luminance=GRADIENT_LIGHTEN)\n\tUSER_BACKGROUND_GRADIENT = f\"background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 {BG}, stop:1 {LIGHT_COLOR});\"\n\tmsg = msg.replace(\"!GRADIENT!\",USER_BACKGROUND_GRADIENT)\n\n\treturn msg\n\ndef notice_display(user,text,max):\n\ttext = remove_html_markup(text)\n\tuser = pad_nick(user,max)\n\ttext = inject_www_links(text)\n\tmsg = CHAT_TEMPLATE.replace('!USER!',user)\n\tmsg = msg.replace('!COLOR!',NOTICE_COLOR)\n\tmsg = msg.replace('!BACKGROUND!',TEXT_BACKGROUND_COLOR)\n\tmsg = msg.replace('!CCHAT!',TEXT_COLOR)\n\tmsg = msg.replace('!MESSAGE!',text)\n\n\t# Gradient\n\tBG = qbit.colour.Color(TEXT_BACKGROUND_COLOR)\n\tLIGHT_COLOR = qbit.colour.Color(NOTICE_COLOR,luminance=GRADIENT_LIGHTEN)\n\tUSER_BACKGROUND_GRADIENT = f\"background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 {BG}, stop:1 {LIGHT_COLOR});\"\n\tmsg = msg.replace(\"!GRADIENT!\",USER_BACKGROUND_GRADIENT)\n\n\treturn msg\n\ndef motd_display(text,max):\n\tuser = 
pad_nick(\"MOTD\",max)\n\ttext = inject_www_links(text)\n\tmsg = CHAT_TEMPLATE.replace('!USER!',user)\n\tmsg = msg.replace('!COLOR!',MOTD_COLOR)\n\tmsg = msg.replace('!BACKGROUND!',TEXT_BACKGROUND_COLOR)\n\tmsg = msg.replace('!CCHAT!',TEXT_COLOR)\n\tmsg = msg.replace('!MESSAGE!',text)\n\n\t# Gradient\n\tBG = qbit.colour.Color(TEXT_BACKGROUND_COLOR)\n\tLIGHT_COLOR = qbit.colour.Color(MOTD_COLOR,luminance=GRADIENT_LIGHTEN)\n\tUSER_BACKGROUND_GRADIENT = f\"background: qlineargradient( x1:0 y1:0, x2:1 y2:0, stop:0 {BG}, stop:1 {LIGHT_COLOR});\"\n\tmsg = msg.replace(\"!GRADIENT!\",USER_BACKGROUND_GRADIENT)\n\n\treturn msg\n\ndef remove_html_markup(s):\n\ttag = False\n\tquote = False\n\tout = \"\"\n\n\tfor c in s:\n\t\t\tif c == '<' and not quote:\n\t\t\t\ttag = True\n\t\t\telif c == '>' and not quote:\n\t\t\t\ttag = False\n\t\t\telif (c == '\"' or c == \"'\") and tag:\n\t\t\t\tquote = not quote\n\t\t\telif not tag:\n\t\t\t\tout = out + c\n\n\treturn out\n\ndef save_last_server(host,port,password,ssl):\n\tsinfo = {\n\t\t\t\"host\": host,\n\t\t\t\"port\": port,\n\t\t\t\"password\": password,\n\t\t\t\"ssl\": ssl\n\t\t}\n\twith open(LAST_SERVER_INFORMATION_FILE, \"w\") as write_data:\n\t\tjson.dump(sinfo, write_data)\n\ndef get_last_server():\n\tif os.path.isfile(LAST_SERVER_INFORMATION_FILE):\n\t\twith open(LAST_SERVER_INFORMATION_FILE, \"r\") as read_server:\n\t\t\tdata = json.load(read_server)\n\t\t\treturn data\n\telse:\n\t\tsi = {\n\t\t\t\"host\": '',\n\t\t\t\"port\": '',\n\t\t\t\"password\": '',\n\t\t\t\"ssl\": False\n\t\t}\n\t\treturn si\n\ndef save_autojoin_channels(chans):\n\twith open(AUTOJOIN_FILE, \"w\") as write_data:\n\t\tjson.dump(chans, write_data)\n\ndef get_autojoins():\n\tif os.path.isfile(AUTOJOIN_FILE):\n\t\twith open(AUTOJOIN_FILE, \"r\") as read_server:\n\t\t\tdata = json.load(read_server)\n\t\t\treturn data\n\telse:\n\t\treturn []\n\ndef get_user():\n\tif os.path.isfile(USER_FILE):\n\t\twith open(USER_FILE, \"r\") as read_user:\n\t\t\tdata = json.load(read_user)\n\t\t\treturn data\n\telse:\n\t\tsi = {\n\t\t\t\"nick\": DEFAULT_IRC_NICKNAME,\n\t\t\t\"username\": DEFAULT_IRC_USERNAME,\n\t\t\t\"realname\": DEFAULT_IRC_IRCNAME\n\t\t}\n\t\treturn si\n\ndef save_user(user):\n\twith open(USER_FILE, \"w\") as write_data:\n\t\tjson.dump(user, write_data)\n", "repo_name": "danhetrick/qbit", "sub_path": "qbit/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 10311, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "platform.system", "line_number": 17, "usage_type": "call"}, {"api_name": "platform.release", "line_number": 18, "usage_type": "call"}, {"api_name": "platform.platform", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 111, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 134, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 191, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 193, "usage_type": "call"}, {"api_name": "qbit.colour.colour.Color", "line_number": 229, "usage_type": "call"}, {"api_name": "qbit.colour.colour", "line_number": 229, "usage_type": "attribute"}, {"api_name": "qbit.colour", "line_number": 229, "usage_type": "name"}, {"api_name": "qbit.colour.colour.Color", "line_number": 230, "usage_type": "call"}, {"api_name": "qbit.colour.colour", "line_number": 230, "usage_type": "attribute"}, {"api_name": "qbit.colour", "line_number": 230, "usage_type": "name"}, {"api_name": "qbit.colour.colour.Color", "line_number": 247, "usage_type": "call"}, {"api_name": "qbit.colour.colour", "line_number": 247, "usage_type": "attribute"}, {"api_name": "qbit.colour", "line_number": 247, "usage_type": "name"}, {"api_name": "qbit.colour.colour.Color", "line_number": 248, "usage_type": "call"}, {"api_name": "qbit.colour.colour", "line_number": 248, "usage_type": "attribute"}, {"api_name": "qbit.colour", "line_number": 248, "usage_type": "name"}, {"api_name": "qbit.colour.colour.Color", "line_number": 262, "usage_type": "call"}, {"api_name": "qbit.colour.colour", "line_number": 262, "usage_type": "attribute"}, {"api_name": "qbit.colour", "line_number": 262, "usage_type": "name"}, {"api_name": "qbit.colour.colour.Color", "line_number": 263, "usage_type": "call"}, {"api_name": "qbit.colour.colour", "line_number": 263, "usage_type": "attribute"}, {"api_name": "qbit.colour", "line_number": 263, "usage_type": "name"}, {"api_name": "qbit.colour.colour.Color", "line_number": 280, "usage_type": "call"}, {"api_name": "qbit.colour.colour", "line_number": 280, "usage_type": "attribute"}, {"api_name": "qbit.colour", "line_number": 280, "usage_type": "name"}, {"api_name": "qbit.colour.colour.Color", "line_number": 281, "usage_type": "call"}, {"api_name": "qbit.colour.colour", "line_number": 281, "usage_type": "attribute"}, {"api_name": "qbit.colour", "line_number": 281, "usage_type": "name"}, {"api_name": "qbit.colour.colour.Color", "line_number": 297, "usage_type": "call"}, {"api_name": "qbit.colour.colour", "line_number": 297, "usage_type": "attribute"}, {"api_name": "qbit.colour", "line_number": 297, "usage_type": "name"}, {"api_name": "qbit.colour.colour.Color", "line_number": 298, "usage_type": "call"}, {"api_name": "qbit.colour.colour", "line_number": 298, "usage_type": "attribute"}, {"api_name": "qbit.colour", "line_number": 298, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 329, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 332, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 332, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 334, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 347, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 350, "usage_type": "call"}, {"api_name": "os.path", "line_number": 350, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 352, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 358, "usage_type": "call"}, {"api_name": "os.path", "line_number": 358, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 360, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 372, "usage_type": "call"}]}
+{"seq_id": "11884099093", "text": "from os.path import dirname, join\nimport os\nimport pandas as pd\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import row, column\nfrom bokeh.models import Button, Slider, Toggle, FactorRange, Div, ColumnDataSource, LabelSet, Select,Legend, LegendItem, DataTable, TableColumn, HoverTool, Slope\nfrom bokeh.plotting import figure\nfrom bokeh.events import ButtonClick\nfrom classes import parameters, moments, var\nfrom data_funcs import compute_rough_jacobian,rough_dyn_fixed_point_solver\nimport numpy as np\nimport itertools\nfrom bokeh.palettes import Category10, Dark2\nCategory18 = Category10[10]+Dark2[8]\nimport time\nimport warnings\nwarnings.simplefilter('ignore', np.RankWarning)\n# warnings.simplefilter('ignore', np.RuntimeWarning)\nwarnings.filterwarnings('ignore')\n\nstart = time.perf_counter()\n\ndef load(path, data_path=None, \n dir_path = None, context = 'calibration'):\n # p = parameters(data_path=data_path)\n p = parameters()\n # p.load_data(path)\n p.load_run(path,dir_path=dir_path)\n sol = var.var_from_vector(p.guess, p, compute=True, context = context)\n sol.scale_P(p)\n sol.compute_price_indices(p)\n sol.compute_non_solver_quantities(p)\n m = moments()\n # m.load_data(data_path)\n m.load_run(path,dir_path=dir_path)\n m.compute_moments(sol, p)\n m.compute_moments_deviations()\n return p,m,sol\n\ndef init_dic_of_dataframes_with_baseline(p_baseline,m_baseline,sol_baseline,list_of_moments):\n dic_df_param = {}\n dic_df_mom = {}\n dic_df_sol = {}\n params = p_baseline.calib_parameters\n params.append('kappa')\n params.append('r_hjort')\n if 'theta' not in params:\n params.append('theta')\n params.append('theta')\n # params.append('d*fe')\n # params.append('nu/deltaUS')\n df_scalar_params = pd.DataFrame(columns = ['baseline'])\n df_scalar_params.index.name='x'\n \n for param in params:\n if hasattr(p_baseline,param):\n if len(getattr(p_baseline,param)[p_baseline.mask[param]]) == 1:\n if param == 'k':\n df_scalar_params.loc[param,'baseline'] = float(getattr(p_baseline,param)[p_baseline.mask[param]])-1\n else:\n df_scalar_params.loc[param,'baseline'] = float(getattr(p_baseline,param)[p_baseline.mask[param]])\n if param in ['eta','delta']:\n df = pd.DataFrame(index = p_baseline.countries, columns = ['baseline'], data = getattr(p_baseline,param)[...,1])\n df.index.name='x'\n dic_df_param[param] = df\n if param in ['r_hjort']:\n df = pd.DataFrame(index = p_baseline.countries, columns = ['baseline'], data = getattr(p_baseline,param))\n df.index.name='x'\n dic_df_param[param] = df\n if param in ['T']:\n df = pd.DataFrame(index = p_baseline.countries, columns = ['baseline'], data = getattr(p_baseline,param)[...,0])\n df.index.name='x'\n dic_df_param[param+' non patent sector'] = df\n df = pd.DataFrame(index = p_baseline.countries, columns = ['baseline'], data = getattr(p_baseline,param)[...,1])\n df.index.name='x'\n dic_df_param[param+' patent sector'] = df\n elif param == 'd*fe':\n df_scalar_params.loc[param,'baseline'] = float(getattr(p_baseline,'d')[p_baseline.mask['d']])*float(getattr(p_baseline,'fe')[p_baseline.mask['fe']])\n elif param == 'nu/deltaUS':\n df_scalar_params.loc[param,'baseline'] = float(getattr(p_baseline,'nu')[1])/float(getattr(p_baseline,'delta')[0,1])\n dic_df_param['scalars'] = df_scalar_params\n \n df_scalar_moments = pd.DataFrame(columns = ['target','baseline'])\n df_scalar_moments.index.name='x'\n for mom in list_of_moments:\n if mom != 'objective':\n if len(m_baseline.idx[mom]) == 1:\n if mom != 'OUT':\n try:\n 
df_scalar_moments.loc[mom,'target'] = float(getattr(m_baseline,mom+'_target'))\n df_scalar_moments.loc[mom,'baseline'] = float(getattr(m_baseline,mom))\n except:\n pass\n else:\n try:\n df = pd.DataFrame(index = m_baseline.idx[mom], \n columns = ['target','baseline'], \n # data = np.array([getattr(m_baseline,mom+'_target').ravel(), getattr(m_baseline,mom).ravel()])\n )\n df.index.name='x'\n df['target'] = getattr(m_baseline,mom+'_target').ravel()\n df['baseline'] = getattr(m_baseline,mom).ravel()\n dic_df_mom[mom] = df\n except:\n pass\n \n for sol_qty in ['semi_elast_patenting_delta','DT','psi_o_star']:\n df = pd.DataFrame(index = p_baseline.countries, \n columns = ['baseline'], \n )\n df.index.name='x'\n df['baseline'] = getattr(sol_baseline,sol_qty)[...,1]\n dic_df_sol[sol_qty] = df\n \n for sol_qty in ['l_R']:\n df = pd.DataFrame(index = p_baseline.countries, \n columns = ['baseline'], \n )\n df.index.name='x'\n df['baseline'] = getattr(sol_baseline,sol_qty)[...,1]/p_baseline.labor\n dic_df_sol[sol_qty] = df\n \n for sol_qty in ['min_psi_m_star_inward']:\n df = pd.DataFrame(index = p_baseline.countries, \n columns = ['baseline'], \n )\n df.index.name='x'\n df['baseline'] = getattr(sol_baseline,'psi_m_star')[:,:,1].min(axis=1)\n dic_df_sol[sol_qty] = df\n \n for sol_qty in ['min_psi_m_star_outward']:\n df = pd.DataFrame(index = p_baseline.countries, \n columns = ['baseline'], \n )\n df.index.name='x'\n df['baseline'] = getattr(sol_baseline,'psi_m_star')[:,:,1].min(axis=0)\n dic_df_sol[sol_qty] = df\n \n df_scalar_moments.loc['objective','target'] = 0.01\n df_scalar_moments.loc['objective','baseline'] = m_baseline.objective_function()*28\n dic_df_mom['scalars'] = df_scalar_moments\n return dic_df_param, dic_df_mom, dic_df_sol\n\ndef append_dic_of_dataframes_with_variation(dic_df_param, dic_df_mom, dic_df_sol, p, m, sol, run_name):\n for k in dic_df_param.keys():\n if k == 'scalars':\n for i in dic_df_param[k].index:\n if i == 'k':\n dic_df_param[k].loc[i,run_name] = float(getattr(p,i)[p.mask[i]])-1\n elif i == 'd*fe':\n dic_df_param[k].loc[i,run_name] = float(getattr(p,'d')[p.mask['d']])*float(getattr(p,'fe')[p.mask['fe']])\n elif i == 'nu/deltaUS':\n dic_df_param[k].loc[i,run_name] = float(getattr(p,'nu')[1])/float(getattr(p,'delta')[0,1])\n else:\n dic_df_param[k].loc[i,run_name] = float(getattr(p,i)[p.mask[i]])\n \n if k in ['eta','delta']:\n dic_df_param[k][run_name] = getattr(p,k)[...,1]\n if k in ['r_hjort']:\n dic_df_param[k][run_name] = getattr(p,k)\n if k == 'T non patent sector':\n dic_df_param[k][run_name] = getattr(p,'T')[...,0]\n if k == 'T patent sector':\n dic_df_param[k][run_name] = getattr(p,'T')[...,1]\n \n for k in dic_df_mom.keys():\n if k == 'scalars':\n for i in dic_df_mom[k].index:\n if i == 'objective':\n dic_df_mom[k].loc[i,run_name] = m.objective_function()*28\n else:\n dic_df_mom[k].loc[i,run_name] = float(getattr(m,i))\n if k == 'scalar deviations':\n for i in dic_df_mom[k].index:\n dic_df_mom[k].loc[i,run_name] = float(getattr(m,i+'_deviation'))/m.weights_dict[i]\n if k not in ['scalars','scalar deviations']:\n dic_df_mom[k][run_name] = getattr(m,k).ravel()\n \n for k in dic_df_sol.keys():\n if k in ['semi_elast_patenting_delta','DT','psi_o_star']:\n dic_df_sol[k][run_name] = getattr(sol,k)[...,1]\n if k in ['l_R']:\n dic_df_sol[k][run_name] = getattr(sol,k)[...,1]/p.labor\n if k in ['min_psi_m_star_outward']:\n dic_df_sol[k][run_name] = getattr(sol,'psi_m_star')[:,:,1].min(axis=0)\n if k in ['min_psi_m_star_inward']:\n dic_df_sol[k][run_name] = 
getattr(sol,'psi_m_star')[:,:,1].min(axis=1)\n \n return dic_df_param, dic_df_mom, dic_df_sol\n\n#%% path\ndir_path = dirname(__file__)+'/'\ndata_path = join(dirname(__file__), 'data/')\n# dir_path = './'\n# data_path = 'data/'\n# results_path = 'calibration_results_matched_economy/'\nresults_path = join(dirname(__file__), 'calibration_results_matched_economy/')\ncf_path = join(dirname(__file__), 'counterfactual_recaps/unilateral_patent_protection/')\naround_dyn_eq_path = join(dirname(__file__), 'counterfactual_recaps/')\nnash_eq_path = join(dirname(__file__), 'nash_eq_recaps/')\ncoop_eq_path = join(dirname(__file__), 'coop_eq_recaps/')\n\n\n#%% moments / parameters for variations\n\nlist_of_moments = ['GPDIFF','GROWTH','KM', 'OUT',\n 'RD', 'RP', 'SPFLOWDOM', 'SPFLOW','STFLOW','STFLOWSDOM',\n 'SRGDP','UUPCOST','SINNOVPATUS',\n 'TO','TE','DOMPATINUS','DOMPATUS',\n 'TWSPFLOW','TWSPFLOWDOM','SDOMTFLOW','objective']\n# list_of_moments = ['GPDIFF','GROWTH','KM', 'OUT',\n# 'RD', 'RP', 'SPFLOWDOM', 'SPFLOW','STFLOW','STFLOWSDOM',\n# 'SRDUS', 'SRGDP','UUPCOST', 'PCOST','PCOSTINTER','PCOSTNOAGG','PCOSTINTERNOAGG','SINNOVPATUS',\n# 'SINNOVPATEU', 'TO','TP',\n# 'DOMPATUS','DOMPATEU','DOMPATINUS','DOMPATINEU','TWSPFLOW','TWSPFLOWDOM','SDOMTFLOW','objective']\n\ncomments_dic = {}\n\ncomments_dic['403'] = {'baseline':'bsln:TO:0.0183',\n '1.0':'1.0: TO: 0.01',\n'1.1':'1.1: TO: 0.0105',\n'1.2':'1.2: TO: 0.011',\n'1.3':'1.3: TO: 0.0115',\n'1.4':'1.4: TO: 0.012',\n'1.5':'1.5: TO: 0.0125',\n'1.6':'1.6: TO: 0.013',\n'1.7':'1.7: TO: 0.0135',\n'1.8':'1.8: TO: 0.014',\n'1.9':'1.9: TO: 0.0145',\n'1.10':'1.10: TO: 0.015',\n'1.11':'1.11: TO: 0.0155',\n'1.12':'1.12: TO: 0.016',\n'1.13':'1.13: TO: 0.0165',\n'1.14':'1.14: TO: 0.017',\n'1.15':'1.15: TO: 0.0175',\n'1.16':'1.16: TO: 0.018',\n'1.17':'1.17: TO: 0.0185',\n'1.18':'1.18: TO: 0.019',\n'1.19':'1.19: TO: 0.0195',\n'1.20':'1.20: TO: 0.02',\n'1.21':'1.21: TO: 0.0205',\n'1.22':'1.22: TO: 0.021',\n'1.23':'1.23: TO: 0.0215',\n'1.24':'1.24: TO: 0.022',\n'1.25':'1.25: TO: 0.0225',\n'1.26':'1.26: TO: 0.023',\n'1.27':'1.27: TO: 0.0235',\n'1.28':'1.28: TO: 0.024',\n'1.29':'1.29: TO: 0.0245',\n'1.30':'1.30: TO: 0.025',\n'1.31':'1.31: TO: 0.0255',\n'1.32':'1.32: TO: 0.026',\n'1.33':'1.33: TO: 0.0265',\n'1.34':'1.34: TO: 0.027',\n'1.35':'1.35: TO: 0.0275',\n'1.36':'1.36: TO: 0.028',\n'1.37':'1.37: TO: 0.0285',\n'1.38':'1.38: TO: 0.029',\n'1.39':'1.39: TO: 0.0295',\n'1.40':'1.40: TO: 0.03'\n }\ncomments_dic['405'] = {'baseline':'bsln:TO:0.0183',\n '1.0':'1.0: TO: 0.01',\n'1.1':'1.1: TO: 0.0105',\n'1.2':'1.2: TO: 0.011',\n'1.3':'1.3: TO: 0.0115',\n'1.4':'1.4: TO: 0.012',\n'1.5':'1.5: TO: 0.0125',\n'1.6':'1.6: TO: 0.013',\n'1.7':'1.7: TO: 0.0135',\n'1.8':'1.8: TO: 0.014',\n'1.9':'1.9: TO: 0.0145',\n'1.10':'1.10: TO: 0.015',\n'1.11':'1.11: TO: 0.0155',\n'1.12':'1.12: TO: 0.016',\n'1.13':'1.13: TO: 0.0165',\n'1.14':'1.14: TO: 0.017',\n'1.15':'1.15: TO: 0.0175',\n'1.16':'1.16: TO: 0.018',\n'1.17':'1.17: TO: 0.0185',\n'1.18':'1.18: TO: 0.019',\n'1.19':'1.19: TO: 0.0195',\n'1.20':'1.20: TO: 0.02',\n'1.21':'1.21: TO: 0.0205',\n'1.22':'1.22: TO: 0.021',\n'1.23':'1.23: TO: 0.0215',\n'1.24':'1.24: TO: 0.022',\n'1.25':'1.25: TO: 0.0225',\n'1.26':'1.26: TO: 0.023',\n'1.27':'1.27: TO: 0.0235',\n'1.28':'1.28: TO: 0.024',\n'1.29':'1.29: TO: 0.0245',\n'1.30':'1.30: TO: 0.025',\n'1.31':'1.31: TO: 0.0255',\n'1.32':'1.32: TO: 0.026',\n'1.33':'1.33: TO: 0.0265',\n'1.34':'1.34: TO: 0.027',\n'1.35':'1.35: TO: 0.0275',\n'1.36':'1.36: TO: 0.028',\n'1.37':'1.37: TO: 0.0285',\n'1.38':'1.38: TO: 
0.029',\n'1.39':'1.39: TO: 0.0295',\n'1.40':'1.40: TO: 0.03'\n }\n\ncomments_dic['404'] = {\n 'baseline':'baseline',\n '1.0':'1.0: SRDUS, UUPCOST, log loss',\n '1.1':'1.1: SRDUS, UUPCOST, ratio loss',\n '1.2':'1.2: SRDUS, PCOSTNOAGG, log loss',\n '1.3':'1.3: SRDUS, PCOSTNOAGG, ratio loss',\n '1.4':'1.4: no SRDUS, UUPCOST, log loss',\n '1.5':'1.5: no SRDUS, UUPCOST, ratio loss',\n '1.6':'1.6: no SRDUS, PCOSTNOAGG, log loss',\n '1.7':'1.7: no SRDUS, PCOSTNOAGG, ratio loss',\n '1.8':'1.8: no RD, UUPCOST, log loss',\n '1.9':'1.9: no RD, UUPCOST, ratio loss',\n '1.10':'1.10: no RD, PCOSTNOAGG, log loss',\n '1.11':'1.11: no RD, PCOSTNOAGG, ratio loss',\n '2.0':'2.0: sigma=2.7, SRDUS, UUPCOST',\n '2.1':'2.1: sigma=2.7, no SRDUS, UUPCOST',\n '2.2':'2.2: sigma=2.7, SRDUS, PCOSTNOAGG',\n '2.3':'2.3: sigma=2.7, no SRDUS, PCOSTNOAGG',\n }\n\ncomments_dic['501'] = {\n \"baseline\":\"baseline\",\n '1.0':'1.0: Higher growth weight',\n '2.0':'2.0: Hjort correc real GDP',\n '3.0':'3.0: No drop RD South',\n '4.0':'4.0: New data',\n '5.0':'5.0: New data v2',\n }\n\ncomments_dic['601'] = {\n \"baseline\":\"baseline : 2005\",\n \"1.0\" : \"1.0 : 1990\",\n \"1.1\" : \"1.1 : 1991\",\n \"1.2\" : \"1.2 : 1992\",\n \"1.3\" : \"1.3 : 1993\",\n \"1.4\" : \"1.4 : 1994\",\n \"1.5\" : \"1.5 : 1995\",\n \"1.6\" : \"1.6 : 1996\",\n \"1.7\" : \"1.7 : 1997\",\n \"1.8\" : \"1.8 : 1998\",\n \"1.9\" : \"1.9 : 1999\",\n \"1.10\" : \"1.10 : 2000\",\n \"1.11\" : \"1.11 : 2001\",\n \"1.12\" : \"1.12 : 2002\",\n \"1.13\" : \"1.13 : 2003\",\n \"1.14\" : \"1.14 : 2004\",\n \"1.15\" : \"1.15 : 2005\",\n \"1.16\" : \"1.16 : 2006\",\n \"1.17\" : \"1.17 : 2007\",\n \"1.18\" : \"1.18 : 2008\",\n \"1.19\" : \"1.19 : 2009\",\n \"1.20\" : \"1.20 : 2010\",\n \"1.21\" : \"1.21 : 2011\",\n \"1.22\" : \"1.22 : 2012\",\n \"1.23\" : \"1.23 : 2013\",\n \"1.24\" : \"1.24 : 2014\",\n \"1.25\" : \"1.25 : 2015\",\n \"1.26\" : \"1.26 : 2016\",\n \"1.27\" : \"1.27 : 2017\",\n \"1.28\" : \"1.28 : 2018\",\n}\n\ncomments_dic['602'] = comments_dic['601']\ncomments_dic['603'] = comments_dic['601']\n\ncomments_dic['606'] = {\n \"baseline\":\"baseline:SRGDP weight < RP weight\",\n \"2.0\" : \"2.0:SRGDP weight = RP weight\",\n \"3.0\" : \"3.0:SRGDP weight > RP weight\",\n}\ncomments_dic['607'] = comments_dic['601']\ncomments_dic['608'] = comments_dic['601']\ncomments_dic['609'] = comments_dic['601']\ncomments_dic['610'] = comments_dic['601']\ncomments_dic['614'] = comments_dic['601']\ncomments_dic['615'] = comments_dic['601']\ncomments_dic['616'] = comments_dic['601']\ncomments_dic['617'] = comments_dic['601']\ncomments_dic['620'] = comments_dic['601']\ncomments_dic['619'] = comments_dic['601']\n\n\ncomments_dic['611'] = {'baseline':'bsln:TO:0.0183',\n '1.0':'1.0: TO: 0.01',\n'1.1':'1.1: TO: 0.0105',\n'1.2':'1.2: TO: 0.011',\n'1.3':'1.3: TO: 0.0115',\n'1.4':'1.4: TO: 0.012',\n'1.5':'1.5: TO: 0.0125',\n'1.6':'1.6: TO: 0.013',\n'1.7':'1.7: TO: 0.0135',\n'1.8':'1.8: TO: 0.014',\n'1.9':'1.9: TO: 0.0145',\n'1.10':'1.10: TO: 0.015',\n'1.11':'1.11: TO: 0.0155',\n'1.12':'1.12: TO: 0.016',\n'1.13':'1.13: TO: 0.0165',\n'1.14':'1.14: TO: 0.017',\n'1.15':'1.15: TO: 0.0175',\n'1.16':'1.16: TO: 0.018',\n'1.17':'1.17: TO: 0.0185',\n'1.18':'1.18: TO: 0.019',\n'1.19':'1.19: TO: 0.0195',\n'1.20':'1.20: TO: 0.02',\n'1.21':'1.21: TO: 0.0205',\n'1.22':'1.22: TO: 0.021',\n'1.23':'1.23: TO: 0.0215',\n'1.24':'1.24: TO: 0.022',\n'1.25':'1.25: TO: 0.0225',\n'1.26':'1.26: TO: 0.023',\n'1.27':'1.27: TO: 0.0235',\n'1.28':'1.28: TO: 0.024',\n'1.29':'1.29: TO: 0.0245',\n'1.30':'1.30: TO: 
0.025',\n'1.31':'1.31: TO: 0.0255',\n'1.32':'1.32: TO: 0.026',\n'1.33':'1.33: TO: 0.0265',\n'1.34':'1.34: TO: 0.027',\n'1.35':'1.35: TO: 0.0275',\n'1.36':'1.36: TO: 0.028',\n'1.37':'1.37: TO: 0.0285',\n'1.38':'1.38: TO: 0.029',\n'1.39':'1.39: TO: 0.0295',\n'1.40':'1.40: TO: 0.03'\n }\n\ncomments_dic['618'] = {\n 'baseline':'baseline',\n '1.0':'1.0:full calibration 2005',\n '1.1':'1.1:full calibration 1992',\n '2.0':'2.0:free f, target UUPCOST, 2005',\n '2.1':'2.1:free f, target UUPCOST, 1992',\n '3.0':'3.0:free f, target UUPCOST and TP, 2005',\n '3.1':'3.1:free f, target UUPCOST and TP, 1992',\n '4.0':'4.0:free f, target UUPCOST and inter_TP, 2005',\n '4.1':'4.1:free f, target UUPCOST and inter_TP, 1992',\n '5.0':'5.0:fixed f, target UUPCOST, 2005',\n '5.1':'5.1:fixed f, target UUPCOST, 1992',\n '6.0':'6.0:fixed f, target UUPCOST and TP, 2005',\n '6.1':'6.1:fixed f, target UUPCOST and TP, 1992',\n '7.0':'7.0:fixed f, target UUPCOST and inter_TP, 2005',\n '7.1':'7.1:fixed f, target UUPCOST and inter_TP, 1992',\n '8.0':'8.0:fixed f, drop UUPCOST, 2005',\n '8.1':'8.1:fixed f, drop UUPCOST, 1992',\n '9.0':'9.0:fixed f, drop UUPCOST, target TP, 2005',\n '9.1':'9.1:fixed f, drop UUPCOST, target TP, 1992',\n '10.0':'10.0:fixed f, drop UUPCOST,target inter_TP, 2005',\n '10.1':'10.1:fixed f, drop UUPCOST,target inter_TP, 1992',\n '11.0':'11.0:fixed f, target UUPCOST, drop SINNOV, 2005',\n '11.1':'11.1:fixed f, target UUPCOST, drop SINNOV, 1992',\n '12.0':'12.0:fixed f, target UUPCOST, inter_TP, drop SINNOV, 2005',\n '12.1':'12.1:fixed f, target UUPCOST, inter_TP, drop SINNOV, 1992',\n '13.0':'13.0:full calibration without SINNOV, 2005',\n '15.0':'15.0:nu=0.1, drop TO',\n '16.0':'16.0:fixed f, target UUPCOST and KM, drop SINNOV, 2005',\n '16.1':'16.1:fixed f, target UUPCOST and KM, drop SINNOV, 1992',\n '17.0':'17.0:fixed f, target UUPCOST and KM and inter_TP, drop SINNOV, 2005',\n '17.1':'17.1:fixed f, target UUPCOST and KM and inter_TP, drop SINNOV, 1992',\n '18.0':'18.0:fixed f, target UUPCOST and KM, 2005',\n '18.1':'18.1:fixed f, target UUPCOST and KM, 1992',\n '19.0':'19.0:fixed f, target UUPCOST and KM and inter_TP, 2005',\n '19.1':'19.1:fixed f, target UUPCOST and KM and inter_TP, 1992',\n '20.0':'20.0:fixed f, target UUPCOST and KM, drop SINNOVUS, 2005',\n '20.1':'20.1:fixed f, target UUPCOST and KM, drop SINNOVUS, 1992',\n '21.0':'21.0:fixed f, drop SINNOV, KM, UUPCOST, keep delta_north fixed 2005',\n '21.1':'21.1:fixed f, drop SINNOV, KM, UUPCOST, keep delta_north fixed 1992',\n '22.1':'22.1:full calibration 1992, scale up nbr of patents',\n }\n\ncomments_dic['701'] = {\n 'baseline':'baseline: same as 607/618 without SINNOVPATEU',\n '1.0':'1.0:full calibration 2005',\n '1.1':'1.1:full calibration 1992',\n '2.0':'2.0:[delta], [SPFLOW], deltaUS fixed',\n '2.1':'2.1:[delta], [SPFLOW], deltaUS fixed',\n '3.0':'3.0:[delta], [SPFLOW,DOMPATIN], deltaUS fixed',\n '3.1':'3.1:[delta], [SPFLOW,DOMPATIN], deltaUS fixed',\n '4.0':'4.0:[delta], [SPFLOW,DOMPATIN], deltaUS fixed',\n '4.1':'4.1:[delta], [SPFLOW,DOMPATIN], deltaUS_1995 = 1.17647 deltaUS_2005',\n '5.0':'5.0:[delta,T], [SPFLOW,DOMPATIN,OUT], deltaUS fixed',\n '5.1':'5.1:[delta,T], [SPFLOW,DOMPATIN,OUT], deltaUS fixed',\n '6.0':'6.0:[delta,T,eta], [SPFLOW,DOMPATIN,OUT,RD,RP,SRGDP], deltaUS fixed',\n '6.1':'6.1:[delta,T,eta], [SPFLOW,DOMPATIN,OUT,RD,RP,SRGDP], deltaUS fixed',\n '7.0':'7.0:[delta,T,eta], [SPFLOW,DOMPATIN,OUT,RD,RP,SRGDP], delta,etaUS fixed',\n '7.1':'7.1:[delta,T,eta], [SPFLOW,DOMPATIN,OUT,RD,RP,SRGDP], delta,etaUS fixed',\n 
'8.0':'8.0:[delta,T,eta], [SPFLOW,DOMPATIN,OUT,RD,RP,SRGDP,KM], deltaUS fixed',\n '8.1':'8.1:[delta,T,eta], [SPFLOW,DOMPATIN,OUT,RD,RP,SRGDP,KM], deltaUS fixed',\n '9.0':'9.0:[delta,T,eta], [SPFLOW,DOMPATIN,OUT,RD,RP,SRGDP,KM], deltaUS fixed, KM weight=10',\n '9.1':'9.1:[delta,T,eta], [SPFLOW,DOMPATIN,OUT,RD,RP,SRGDP,KM], deltaUS fixed, KM weight=10',\n }\n\ncomments_dic['702'] = {\n 'baseline':'baseline: same as 607/618 without SINNOVPATEU, DOMPATINEU',\n '1.0':'1.0:full calibration 2005',\n '1.1':'1.1:full calibration 1992',\n '2.0':'2.0:[delta], [SPFLOW], deltaUS fixed',\n '2.1':'2.1:[delta], [SPFLOW], deltaUS fixed',\n '3.0':'3.0:[delta], [SPFLOW,DOMPATINUS], deltaUS fixed',\n '3.1':'3.1:[delta], [SPFLOW,DOMPATINUS], deltaUS fixed',\n '4.0':'4.0:[delta], [SPFLOW,DOMPATINUS], deltaUS fixed',\n '4.1':'4.1:[delta], [SPFLOW,DOMPATINUS], deltaUS_1995 = 1.17647 deltaUS_2005',\n '5.0':'5.0:[delta,T], [SPFLOW,DOMPATINUS,OUT], deltaUS fixed',\n '5.1':'5.1:[delta,T], [SPFLOW,DOMPATINUS,OUT], deltaUS fixed',\n '6.0':'6.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP], deltaUS fixed',\n '6.1':'6.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP], deltaUS fixed',\n '7.0':'7.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP], delta,etaUS fixed',\n '7.1':'7.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP], delta,etaUS fixed',\n '8.0':'8.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,KM], deltaUS fixed',\n '8.1':'8.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,KM], deltaUS fixed',\n '9.0':'9.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,KM], deltaUS fixed, KM weight=10',\n '9.1':'9.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,KM], deltaUS fixed, KM weight=10',\n '10.0':'10.0:[delta,eta], [SPFLOW,DOMPATINUS], deltaUS fixed',\n '10.1':'10.1:[delta,eta], [SPFLOW,DOMPATINUS], deltaUS fixed',\n }\n\ncomments_dic['801'] = {\n 'baseline':'baseline',\n '0.0':'0.0',\n '0.1':'0.1',\n '0.2':'0.2',\n '0.3':'0.3',\n '0.4':'0.4',\n '0.5':'0.5',\n }\ncomments_dic['802'] = {\n 'baseline':'baseline,target RD US/EUR/JAP,theta=7',\n '1.0':'1.0: 1992',\n '2.0':'2.0: 2005 no Hjort correc',\n '3.0':'3.0: target RD US/EU/JP/CA/KR/RU/AU/MX',\n '4.0':'4.0: from now: target RD US/EU/JP/CA/KR/AU',\n '4.1':'4.1: drop SRDUS',\n '4.2':'4.2: drop SRDUS, higher weight on RD',\n '4.3':'4.3: drop SRDUS, higher weight on SPFLOW',\n '5.0':'5.0: lin loss',\n '5.1':'5.1: weight on large pflows lin loss',\n '5.2':'5.2: higher weight on large pflows lin loss',\n '6.0':'6.0: weight on large pflows log loss',\n '7.0':'7.0: higher weight on large pflows log loss',\n }\n\ncomments_dic['803'] = {\n 'baseline':'baseline: 802_7.0 with improved weights',\n '1.0':'1.0: calibrated theta',\n '1.1':'1.1: drop SRDUS',\n '1.2':'1.2: drop SINNOVPATEU',\n '1.3':'1.3: drop DOMPATINEU',\n '1.4':'1.4: drop SINNOVPATEU and DOMPATINEU',\n '1.5':'1.5: drop SRDUS, SINNOVPATEU and DOMPATINEU',\n '1.5.0':'1.5.0: sigma = 2',\n '1.5.1':'1.5.1: sigma = 2.25',\n '1.5.2':'1.5.2: sigma = 2.5',\n '1.5.3':'1.5.3: sigma = 2.75',\n '1.5.4':'1.5.4: sigma = 3',\n '1.5.5':'1.5.5: sigma = 3.5',\n '1.5.6':'1.5.6: sigma = 4.5',\n '1.6':'1.6: drop SINNOVPATUS',\n '1.7':'1.7: drop DOMPATINUS',\n '1.8':'1.8: drop SINNOVPATUS and DOMPATINUS',\n '1.9':'1.9: drop SRDUS, SINNOVPATUS and DOMPATINUS',\n }\n\ncomments_dic['804'] = {'baseline':'bsln:2005',\n '1.0':'1.0: TO: 0.01',\n'1.1':'1.1: TO: 0.0105',\n'1.2':'1.2: TO: 0.011',\n'1.3':'1.3: TO: 0.0115',\n'1.4':'1.4: TO: 0.012',\n'1.5':'1.5: TO: 0.0125',\n'1.6':'1.6: TO: 0.013',\n'1.7':'1.7: TO: 
0.0135',\n'1.8':'1.8: TO: 0.014',\n'1.9':'1.9: TO: 0.0145',\n'1.10':'1.10: TO: 0.015',\n'1.11':'1.11: TO: 0.0155',\n'1.12':'1.12: TO: 0.016',\n'1.13':'1.13: TO: 0.0165',\n'1.14':'1.14: TO: 0.017',\n'1.15':'1.15: TO: 0.0175',\n'1.16':'1.16: TO: 0.018',\n'1.17':'1.17: TO: 0.0185',\n'1.18':'1.18: TO: 0.019',\n'1.19':'1.19: TO: 0.0195',\n'1.20':'1.20: TO: 0.02',\n'1.23':'1.23: TO: TO = 0.022',\n'1.24':'1.24: TO: TO = 0.024',\n'1.25':'1.25: TO: TO = 0.026',\n'1.26':'1.26: TO: TO = 0.028',\n'1.27':'1.27: TO: TO = 0.03',\n'1.40':'1.40: TO: 0.014603',\n'1.41':'1.41: TO: TO = 0.019661',\n }\n\ncomments_dic['805'] = {'baseline':'bsln:2015',\n '1.0':'1.0: TO: 0.01',\n'1.1':'1.1: TO: 0.0105',\n'1.2':'1.2: TO: 0.011',\n'1.3':'1.3: TO: 0.0115',\n'1.4':'1.4: TO: 0.012',\n'1.5':'1.5: TO: 0.0125',\n'1.6':'1.6: TO: 0.013',\n'1.7':'1.7: TO: 0.0135',\n'1.8':'1.8: TO: 0.014',\n'1.9':'1.9: TO: 0.0145',\n'1.10':'1.10: TO: 0.015',\n'1.11':'1.11: TO: 0.0155',\n'1.12':'1.12: TO: 0.016',\n'1.13':'1.13: TO: 0.0165',\n'1.14':'1.14: TO: 0.017',\n'1.15':'1.15: TO: 0.0175',\n'1.16':'1.16: TO: 0.018',\n'1.17':'1.17: TO: 0.0185',\n'1.18':'1.18: TO: 0.019',\n'1.19':'1.19: TO: 0.0195',\n'1.20':'1.20: TO: 0.02',\n'1.23':'1.23: TO: TO = 0.022',\n'1.24':'1.24: TO: TO = 0.024',\n'1.25':'1.25: TO: TO = 0.026',\n'1.26':'1.26: TO: TO = 0.028',\n'1.27':'1.27: TO: TO = 0.03',\n'1.40':'1.40: TO: 0.014603',\n'1.41':'1.41: TO: TO = 0.019661',\n }\n\ncomments_dic['806'] = {\n \"baseline\":\"baseline : 2015\",\n \"1.0\" : \"1.0 : 1990\",\n \"1.1\" : \"1.1 : 1991\",\n \"1.2\" : \"1.2 : 1992\",\n \"1.3\" : \"1.3 : 1993\",\n \"1.4\" : \"1.4 : 1994\",\n \"1.5\" : \"1.5 : 1995\",\n \"1.6\" : \"1.6 : 1996\",\n \"1.7\" : \"1.7 : 1997\",\n \"1.8\" : \"1.8 : 1998\",\n \"1.9\" : \"1.9 : 1999\",\n \"1.10\" : \"1.10 : 2000\",\n \"1.11\" : \"1.11 : 2001\",\n \"1.12\" : \"1.12 : 2002\",\n \"1.13\" : \"1.13 : 2003\",\n \"1.14\" : \"1.14 : 2004\",\n \"1.15\" : \"1.15 : 2005\",\n \"1.16\" : \"1.16 : 2006\",\n \"1.17\" : \"1.17 : 2007\",\n \"1.18\" : \"1.18 : 2008\",\n \"1.19\" : \"1.19 : 2009\",\n \"1.20\" : \"1.20 : 2010\",\n \"1.21\" : \"1.21 : 2011\",\n \"1.22\" : \"1.22 : 2012\",\n \"1.23\" : \"1.23 : 2013\",\n \"1.24\" : \"1.24 : 2014\",\n \"1.25\" : \"1.25 : 2015\",\n \"1.26\" : \"1.26 : 2016\",\n \"1.27\" : \"1.27 : 2017\",\n \"1.28\" : \"1.28 : 2018\",\n}\n\ncomments_dic['807'] = {\n \"baseline\":\"baseline : 2015\",\n \"0.1\" : \"0.1 : dont drop RD in South\",\n \"1.0\" : \"1.0 : ratio loss\",\n \"1.1\" : \"1.1 : ratio loss and dont drop RD in South\",\n \"2.0\" : \"2.0 : no weight on large flows\",\n \"3.0\" : \"3.0 : ratio loss and no weight on large flows\",\n}\n\ncomments_dic['808'] = {\n 'baseline':'baseline',\n '1.0':'1.0:full calibration 2015',\n '1.1':'1.1:full calibration 1992',\n '2.0':'2.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP]',\n '2.1':'2.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP]',\n '3.0':'3.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP], delta_US fixed',\n '3.1':'3.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP], delta_US fixed',\n '4.0':'4.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP], delta,eta_US fixed',\n '4.1':'4.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP], delta,eta_US fixed',\n '5.0':'5.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,KM]',\n '5.1':'5.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,KM]',\n '6.0':'6.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,SINNOVPATUS]',\n '6.1':'6.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,SINNOVPATUS]',\n 
'7.0':'7.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,GROWTH]',\n '7.1':'7.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,GROWTH]',\n '8.0':'8.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,UUPCOST]',\n '8.1':'8.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,UUPCOST]',\n '9.0':'9.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,TO]',\n '9.1':'9.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,TO]',\n '17.0':'17.0:[delta,T,eta,g_0], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,GROWTH]',\n '17.1':'17.1:[delta,T,eta,g_0], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,GROWTH]',\n '18.0':'18.0:[delta,T,eta,fe,fo], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,UUPCOST]',\n '18.1':'18.1:[delta,T,eta,fe,fo], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,UUPCOST]',\n '19.0':'19.0:[delta,T,eta,nu], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,TO]',\n '19.1':'19.1:[delta,T,eta,nu], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,TO]',\n }\n\ncomments_dic['901'] = {\n \"baseline\":\"baseline : 2015\",\n '1.0':'1.0:same as bsln',\n '2.0':'2.0:calibrated theta, new weights',\n '3.0':'3.0:more weights SPFLOW',\n '4.0':'4.0:more weights high SPFLOW',\n '5.0':'5.0:special weight on USA-EUR',\n '6.0':'6.0:more weight on high SPFLOW',\n '7.0':'7.0',\n '8.0':'8.0',\n '9.0':'9.0',\n '10.0':'10.0:doubling eta IDN',\n '11.0':'11.0',\n '12.0':'12.0',\n '13.0':'13.0',\n '14.0':'14.0',\n '15.0':'15.0:only TE, theta',\n '16.0':'16.0',\n '17.0':'17.0',\n '18.0':'18.0',\n '19.0':'19.0',\n '20.0':'20.0',\n '21.0':'21.0',\n }\n\ncomments_dic['902'] = {\n \"baseline\":\"baseline : 2015\",\n \"1.0\" : \"1.0 : 1990\",\n \"1.1\" : \"1.1 : 1991\",\n \"1.2\" : \"1.2 : 1992\",\n \"1.3\" : \"1.3 : 1993\",\n \"1.4\" : \"1.4 : 1994\",\n \"1.5\" : \"1.5 : 1995\",\n \"1.6\" : \"1.6 : 1996\",\n \"1.7\" : \"1.7 : 1997\",\n \"1.8\" : \"1.8 : 1998\",\n \"1.9\" : \"1.9 : 1999\",\n \"1.10\" : \"1.10 : 2000\",\n \"1.11\" : \"1.11 : 2001\",\n \"1.12\" : \"1.12 : 2002\",\n \"1.13\" : \"1.13 : 2003\",\n \"1.14\" : \"1.14 : 2004\",\n \"1.15\" : \"1.15 : 2005\",\n \"1.16\" : \"1.16 : 2006\",\n \"1.17\" : \"1.17 : 2007\",\n \"1.18\" : \"1.18 : 2008\",\n \"1.19\" : \"1.19 : 2009\",\n \"1.20\" : \"1.20 : 2010\",\n \"1.21\" : \"1.21 : 2011\",\n \"1.22\" : \"1.22 : 2012\",\n \"1.23\" : \"1.23 : 2013\",\n \"1.24\" : \"1.24 : 2014\",\n \"1.25\" : \"1.25 : 2015\",\n \"1.26\" : \"1.26 : 2016\",\n \"1.27\" : \"1.27 : 2017\",\n \"1.28\" : \"1.28 : 2018\",\n}\n\ncomments_dic['903'] = {\n \"baseline\":\"baseline : 2015\",\n \"1.0\" : \"1.0 : 1990 smooth 3y\",\n \"1.1\" : \"1.1 : 1991 smooth 3y\",\n \"1.2\" : \"1.2 : 1992 smooth 3y\",\n \"1.3\" : \"1.3 : 1993 smooth 3y\",\n \"1.4\" : \"1.4 : 1994 smooth 3y\",\n \"1.5\" : \"1.5 : 1995 smooth 3y\",\n \"1.6\" : \"1.6 : 1996 smooth 3y\",\n \"1.7\" : \"1.7 : 1997 smooth 3y\",\n \"1.8\" : \"1.8 : 1998 smooth 3y\",\n \"1.9\" : \"1.9 : 1999 smooth 3y\",\n \"1.10\" : \"1.10 : 2000 smooth 3y\",\n \"1.11\" : \"1.11 : 2001 smooth 3y\",\n \"1.12\" : \"1.12 : 2002 smooth 3y\",\n \"1.13\" : \"1.13 : 2003 smooth 3y\",\n \"1.14\" : \"1.14 : 2004 smooth 3y\",\n \"1.15\" : \"1.15 : 2005 smooth 3y\",\n \"1.16\" : \"1.16 : 2006 smooth 3y\",\n \"1.17\" : \"1.17 : 2007 smooth 3y\",\n \"1.18\" : \"1.18 : 2008 smooth 3y\",\n \"1.19\" : \"1.19 : 2009 smooth 3y\",\n \"1.20\" : \"1.20 : 2010 smooth 3y\",\n \"1.21\" : \"1.21 : 2011 smooth 3y\",\n \"1.22\" : \"1.22 : 2012 smooth 3y\",\n \"1.23\" : \"1.23 : 2013 smooth 3y\",\n \"1.24\" : \"1.24 : 2014 smooth 3y\",\n \"1.25\" : \"1.25 : 2015 smooth 3y\",\n \"1.26\" : \"1.26 : 2016 smooth 3y\",\n \"1.27\" : \"1.27 : 2017 
smooth 3y\",\n \"1.28\" : \"1.28 : 2018 smooth 3y\",\n}\n\ncomments_dic['1001'] = {\n \"baseline\":\"baseline : 2015\",\n \"1.0\":\"1.0:same as bsln\",\n \"2.0\":\"2.0:less weights on big flows\",\n \"3.0\":\"3.0:1 weight on all moments\",\n \"4.0\":\"4.0:increase weight on SPFLOW\",\n \"5.0\":\"5.0:corect RD\",\n \"6.0\":\"6.0:no weight on high pflow\",\n}\n\ncomments_dic['1002'] = {\n \"baseline\":\"baseline : 2015\",\n \"1.0\" : \"1.0 : 1990 smooth 3y\",\n \"1.1\" : \"1.1 : 1991 smooth 3y\",\n \"1.2\" : \"1.2 : 1992 smooth 3y\",\n \"1.3\" : \"1.3 : 1993 smooth 3y\",\n \"1.4\" : \"1.4 : 1994 smooth 3y\",\n \"1.5\" : \"1.5 : 1995 smooth 3y\",\n \"1.6\" : \"1.6 : 1996 smooth 3y\",\n \"1.7\" : \"1.7 : 1997 smooth 3y\",\n \"1.8\" : \"1.8 : 1998 smooth 3y\",\n \"1.9\" : \"1.9 : 1999 smooth 3y\",\n \"1.10\" : \"1.10 : 2000 smooth 3y\",\n \"1.11\" : \"1.11 : 2001 smooth 3y\",\n \"1.12\" : \"1.12 : 2002 smooth 3y\",\n \"1.13\" : \"1.13 : 2003 smooth 3y\",\n \"1.14\" : \"1.14 : 2004 smooth 3y\",\n \"1.15\" : \"1.15 : 2005 smooth 3y\",\n \"1.16\" : \"1.16 : 2006 smooth 3y\",\n \"1.17\" : \"1.17 : 2007 smooth 3y\",\n \"1.18\" : \"1.18 : 2008 smooth 3y\",\n \"1.19\" : \"1.19 : 2009 smooth 3y\",\n \"1.20\" : \"1.20 : 2010 smooth 3y\",\n \"1.21\" : \"1.21 : 2011 smooth 3y\",\n \"1.22\" : \"1.22 : 2012 smooth 3y\",\n \"1.23\" : \"1.23 : 2013 smooth 3y\",\n \"1.24\" : \"1.24 : 2014 smooth 3y\",\n \"1.25\" : \"1.25 : 2015 smooth 3y\",\n \"1.26\" : \"1.26 : 2016 smooth 3y\",\n \"1.27\" : \"1.27 : 2017 smooth 3y\",\n \"1.28\" : \"1.28 : 2018 smooth 3y\",\n}\n\ncomments_dic['1005'] = comments_dic['1002']\ncomments_dic['1011'] = comments_dic['1002']\n\ncomments_dic['1003'] = {\n \"baseline\":\"baseline : 2015\",\n # '0.1':'0.1:better RD targeting',\n # '0.2':'0.2:better RD and GROWTH targeting',\n # '0.3':'0.3:better RD and GROWTH/TO/TE targeting',\n '0.4':'0.4:better RD targeting',\n '0.5':'0.5:0.4 with different UUPCOST/DOMPATINUS tension',\n '1.0':'1.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP],delta_US fixed',\n '1.1':'1.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP],delta_US fixed',\n '2.0':'2.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP]',\n '2.1':'2.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP]',\n '3.0':'3.0:full calibration',\n '3.1':'3.1:full calibration',\n '4.0':'4.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,TP]',\n '4.1':'4.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,TP]',\n '5.0':'5.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,inter-TP]',\n '5.1':'5.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,inter-TP]',\n }\n\ncomments_dic['1004'] = {\n \"baseline\":\"baseline : 2015, same as 1003_0.4\",\n '1.0':'1.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP],delta_US fixed',\n '1.1':'1.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP],delta_US fixed',\n '2.0':'2.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP]',\n '2.1':'2.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP]',\n '3.0':'3.0:full calibration',\n '3.1':'3.1:full calibration',\n '4.0':'4.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,TP]',\n '4.1':'4.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,TP]',\n '5.0':'5.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,inter-TP]',\n '5.1':'5.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,inter-TP]',\n '6.0':'6.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP],delta_North fixed',\n '6.1':'6.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP],delta_North fixed',\n '8.0':'8.0:[delta,T,eta], 
[SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP],delta_US bertolotti',\n '8.1':'8.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP],delta_US bertolotti',\n '9.0':'9.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,UUPCOST]',\n '9.1':'9.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,UUPCOST]',\n '9.2':'9.2:1995',\n '10.0':'10.0:full calibration, delta_US fixed',\n '10.1':'10.1:full calibration, delta_US fixed',\n '11.0':'11.0:[delta,T,eta,nu], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,TO(updated)]',\n '11.1':'11.1:[delta,T,eta,nu], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,TO(updated)]',\n '12.0':'12.0:full calibration except delta_US fixed, KM and TO not targeted',\n '12.1':'12.1:full calibration except delta_US fixed, KM and TO not targeted',\n '13.0':'13.0:full calibration except delta_US and nu fixed, KM and TO not targeted',\n '13.1':'13.1:full calibration except delta_US and nu fixed, KM and TO not targeted',\n '14.0':'14.0:[delta,T,eta,fe,fo], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,UUPCOST], d_US fixed',\n '14.1':'14.1:[delta,T,eta,fe,fo], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,UUPCOST], d_US fixed',\n '15.0':'15.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,UUPCOST], d_US fixed',\n '15.1':'15.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,UUPCOST], d_US fixed',\n }\n\ncomments_dic['1006'] = {\n \"baseline\":\"baseline : 2015, same as 1004\",\n '1.0':'1.0:SPFLOWDOM instead of SPFLOW',\n '2.0':'2.0:DOMPATUS instead of DOMPATINUS',\n '2.1':'2.1:1992 partial calibration',\n '3.0':'3.0:DOMPATUS and DOMPATINUS',\n '3.1':'3.1:1992 partial calibration',\n '4.0':'4.0:2.0 with higher weight on DOMPATUS',\n '4.1':'4.1:1992 partial calibration',\n '5.0':'5.0:3.0 with higher weight on DOMPAT(IN)US',\n '5.1':'5.1:1992 partial calibration',\n }\n\ncomments_dic['1010'] = {\n \"baseline\":\"baseline : 2015, new correction US flows and new TO\",\n '2.0':'2.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP]',\n '2.1':'2.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP]',\n '3.0':'3.0:full calibration 2015',\n '3.1':'3.1:full calibration 1992',\n '9.0':'9.0:[delta,T,eta],[SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,UUPCOST] 2015',\n '9.1':'9.1:[delta,T,eta],[SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,UUPCOST] 1992',\n '9.2':'9.2:same conditions, 3-year smoothed out data 1992',\n '10.0':'10.0 corrected mistake denominator in the Gamma function',\n }\n\ncomments_dic['1020'] = {\n \"baseline\":\"baseline : 2015, with corrected term in Gamma function\",\n \"1.0\" : \"1.0 : 1990 smooth 3y\",\n \"1.1\" : \"1.1 : 1991 smooth 3y\",\n \"1.2\" : \"1.2 : 1992 smooth 3y\",\n \"1.3\" : \"1.3 : 1993 smooth 3y\",\n \"1.4\" : \"1.4 : 1994 smooth 3y\",\n \"1.5\" : \"1.5 : 1995 smooth 3y\",\n \"1.6\" : \"1.6 : 1996 smooth 3y\",\n \"1.7\" : \"1.7 : 1997 smooth 3y\",\n \"1.8\" : \"1.8 : 1998 smooth 3y\",\n \"1.9\" : \"1.9 : 1999 smooth 3y\",\n \"1.10\" : \"1.10 : 2000 smooth 3y\",\n \"1.11\" : \"1.11 : 2001 smooth 3y\",\n \"1.12\" : \"1.12 : 2002 smooth 3y\",\n \"1.13\" : \"1.13 : 2003 smooth 3y\",\n \"1.14\" : \"1.14 : 2004 smooth 3y\",\n \"1.15\" : \"1.15 : 2005 smooth 3y\",\n \"1.16\" : \"1.16 : 2006 smooth 3y\",\n \"1.17\" : \"1.17 : 2007 smooth 3y\",\n \"1.18\" : \"1.18 : 2008 smooth 3y\",\n \"1.19\" : \"1.19 : 2009 smooth 3y\",\n \"1.20\" : \"1.20 : 2010 smooth 3y\",\n \"1.21\" : \"1.21 : 2011 smooth 3y\",\n \"1.22\" : \"1.22 : 2012 smooth 3y\",\n \"1.23\" : \"1.23 : 2013 smooth 3y\",\n \"1.24\" : \"1.24 : 2014 smooth 3y\",\n \"1.25\" : \"1.25 : 2015 smooth 3y\",\n \"1.26\" : \"1.26 : 2016 smooth 3y\",\n \"1.27\" : \"1.27 : 2017 smooth 
3y\",\n \"1.28\" : \"1.28 : 2018 smooth 3y\",\n '2.0':'2.0:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP]',\n '2.1':'2.1:[delta,T,eta], [SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP]',\n '3.0':'3.0:full calibration 2015',\n '3.1':'3.1:full calibration 1992',\n '9.0':'9.0:[delta,T,eta],[SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,UUPCOST] 2015',\n '9.1':'9.1:[delta,T,eta],[SPFLOW,DOMPATINUS,OUT,RD,RP,SRGDP,UUPCOST] 1992',\n '9.2':'9.2:same conditions, 3-year smoothed out data 1992',\n }\n\nbaselines_dic_param = {}\nbaselines_dic_mom = {}\nbaselines_dic_sol_qty = {}\n\n# baseline_list = ['311','312','401','402','403'] \n# baseline_list = ['402','403','404'] \n# baseline_list = ['403','404','405'] \n# baseline_list = ['501','607','608','609','610','614','615','616','617'] \n# baseline_list = ['618','701','702'] \n# baseline_list = ['901','803','806','808'] \nbaseline_list = ['1020'] \nbaseline_mom = '1020'\n\ndef section(s):\n return [int(_) for _ in s.split(\".\")]\n \nfor baseline_nbr in baseline_list:\n print(baseline_nbr)\n print(time.perf_counter() - start)\n baseline_path = results_path+baseline_nbr+'/'\n baseline_variations_path = results_path+'baseline_'+baseline_nbr+'_variations/'\n p_baseline,m_baseline,sol_baseline = load(baseline_path,data_path = data_path,\n dir_path=dir_path)\n # print(baseline_nbr)\n baselines_dic_param[baseline_nbr], baselines_dic_mom[baseline_nbr], baselines_dic_sol_qty[baseline_nbr]\\\n = init_dic_of_dataframes_with_baseline(p_baseline,m_baseline,sol_baseline,list_of_moments)\n try:\n files_in_dir = next(os.walk(baseline_variations_path))[1]\n run_list = [f for f in files_in_dir if f[0].isnumeric()]\n # lists = sorted([s.split('.') for s in run_list], key=lambda x:map(int, x)) \n # run_list#.sort()\n run_list = sorted(run_list, key=section)\n \n for run in run_list:\n if run not in ['2.1.9','99']:\n p_to_add,m_to_add,sol_to_add = load(baseline_variations_path+run+'/',\n data_path = data_path,\n dir_path=dir_path)\n a, b, c = append_dic_of_dataframes_with_variation(baselines_dic_param[baseline_nbr], \n baselines_dic_mom[baseline_nbr], \n baselines_dic_sol_qty[baseline_nbr],\n p_to_add, \n m_to_add, \n sol_to_add,\n run)\n baselines_dic_param[baseline_nbr] = a\n baselines_dic_mom[baseline_nbr] = b\n baselines_dic_sol_qty[baseline_nbr] = c\n except:\n pass\n\n# gather full list run\nfull_run_list = []\nfor baseline_nbr in baseline_list:\n baseline_path = results_path+baseline_nbr+'/'\n baseline_variations_path = results_path+'baseline_'+baseline_nbr+'_variations/'\n files_in_dir = next(os.walk(baseline_variations_path))[1]\n for f in files_in_dir:\n if f[0].isnumeric() and f not in full_run_list:\n full_run_list.append(f)\nfull_run_list = ['target','baseline']+sorted(full_run_list,key = section)\n#add empty columns to dfs\nfor baseline_nbr in baseline_list:\n for df_name in baselines_dic_mom[baseline_nbr].keys():\n baselines_dic_mom[baseline_nbr][df_name] = baselines_dic_mom[baseline_nbr][df_name].reindex(columns=full_run_list)\n for df_name in baselines_dic_param[baseline_nbr].keys():\n baselines_dic_param[baseline_nbr][df_name] = baselines_dic_param[baseline_nbr][df_name].reindex(columns=full_run_list[1:])\n for df_name in baselines_dic_sol_qty[baseline_nbr].keys():\n baselines_dic_sol_qty[baseline_nbr][df_name] = baselines_dic_sol_qty[baseline_nbr][df_name].reindex(columns=full_run_list[1:])\n\ncountries = p_baseline.countries\n\nTOOLS=\"pan,wheel_zoom,box_zoom,reset,save\"\n\n# baseline_mom = '101'\n# baseline_mom = '618'\n\nmom = 'SPFLOW'\n\nbaseline_mom_select = 
Select(value=baseline_mom, title='Baseline', options=sorted(baselines_dic_mom.keys()))\nmom_select = Select(value=mom, title='Quantity', options=sorted(baselines_dic_mom[baseline_mom].keys()))\nx_mom_select = Select(value='baseline', title='x-axis target', options=list(comments_dic[baseline_mom].keys()))\nlabels_mom_toggle = Toggle(label=\"Labels On/Off\",align='end')\n\ndef update_x_axis_mom_matching_options(attr, old, new):\n x_mom_select.options = list(comments_dic[new].keys())\n\nds_mom = ColumnDataSource(baselines_dic_mom[baseline_mom][mom])\np_mom = figure(title=\"Moment matching\", \n width = 1200,\n height = 875,\n x_axis_type=\"log\",\n y_axis_type=\"log\",\n x_axis_label='Target', \n y_axis_label='Model implied',\n tools = TOOLS)\nhover_tool_mom = HoverTool()\nhover_tool_mom.tooltips = [\n (\"index\", \"@x\"),\n (\"(target,value)\", \"($x,$y)\"),\n ]\nlabels_mom = LabelSet(x='target', y='baseline', text='x',\n x_offset=2, y_offset=2, source=ds_mom, text_font_size=\"7pt\")\np_mom.add_layout(labels_mom)\np_mom.add_tools(hover_tool_mom)\nslope1 = Slope(gradient=1, y_intercept=0,\n line_color='black', line_dash='dashed', line_width=1)\n# slope2 = Slope(gradient=1.4876, y_intercept=0,\n# line_color='black', line_dash='dashed', line_width=0.25)\n# slope3 = Slope(gradient=0.5124, y_intercept=0,\n# line_color='black', line_dash='dashed', line_width=0.25)\n# slope4 = Slope(gradient=0.756198, y_intercept=0,\n# line_color='black', line_dash='dashed', line_width=0.25)\n# slope5 = Slope(gradient=1.546, y_intercept=0,\n# line_color='black', line_dash='dashed', line_width=0.25)\n# slope6 = Slope(gradient=2.20, y_intercept=0,\n# line_color='black', line_dash='dashed', line_width=0.25)\n\nfor slope in [slope1]:\n# for slope in [slope1,slope2,slope3,slope4,slope5,slope6]:\n p_mom.add_layout(slope)\n \n# slope2.visible = False\n# slope3.visible = False\n# slope4.visible = False\n# slope5.visible = False\n# slope6.visible = False\n\ncolors_mom = itertools.cycle(Category18)\n\nlines_mom = {}\n# for i,col in enumerate(ds_mom.data.keys()):\nfor i,col in enumerate(ds_mom.data.keys()):\n if col not in ['x','target']:\n lines_mom[col] = p_mom.circle('target', col, \n source = ds_mom, \n size=5, color=next(colors_mom))\n if col != 'baseline':\n lines_mom[col].visible = False\n \nlegend_items_mom = [LegendItem(label=comments_dic[baseline_mom][col], renderers=[lin_mom]) \n for col, lin_mom in lines_mom.items() if col in comments_dic[baseline_mom]]\n# legend_items_mom = [LegendItem(label=comments_dic[baseline_mom][col], renderers=[lines_mom[i]]) for i,col in enumerate(ds_mom.data)]\nlegend_mom = Legend(items=legend_items_mom, click_policy=\"hide\", \n label_text_font_size=\"8pt\",\n spacing = 0)\np_mom.add_layout(legend_mom, 'right')\n\n# legend_mom_split_1 = Legend(items=legend_items_mom[:round((len(legend_items_mom)+1)/2)], click_policy=\"hide\", \n# label_text_font_size=\"8pt\",\n# spacing = 0, \n# # location=(10, -60)\n# )\n# legend_mom_split_2 = Legend(items=legend_items_mom[round((len(legend_items_mom)+1)/2):], click_policy=\"hide\", \n# label_text_font_size=\"8pt\",\n# spacing = 0\n# # , location=(10, -60)\n# )\n# p_mom.add_layout(legend_mom_split_1, 'right')\n# p_mom.add_layout(legend_mom_split_2, 'right')\n# columns_mom = [TableColumn(field=col) for col in list(ds_mom.data.keys())]\ncolumns_mom = [\n TableColumn(field=\"x\"),\n ]+[TableColumn(field=col) for col in ['target']+list(comments_dic[baseline_mom].keys())]\ndata_table_mom = DataTable(source=ds_mom, columns = columns_mom, width=1200, 
height=400)\n \ndef update_baseline_mom(attrname, old, new):\n mom = mom_select.value\n ds_mom.data = baselines_dic_mom[new][mom]\n \n # legend_items_mom = [LegendItem(label=comments_dic[new][col], \n # renderers=[lines_mom[i]]) for i,col in enumerate(ds_mom.data) if col not in ['x','target']]\n legend_items_mom = [LegendItem(label=comments_dic[new][col], renderers=[lines_mom[col]]) \n for col in ds_mom.data if col in comments_dic[new]]\n legend_mom.items = legend_items_mom\n # legend_mom_split_1.items = legend_items_mom[:round((len(legend_items_mom)+1)/2)]\n # legend_mom_split_2.items = legend_items_mom[round((1+len(legend_items_mom))/2):]\n data_table_mom.columns = [\n TableColumn(field=\"x\"),\n ]+[TableColumn(field=col) for col in ['target']+list(comments_dic[new].keys())]\n x_mom_select.value = 'baseline'\n \ndef update_mom(attrname, old, new):\n baseline_mom = baseline_mom_select.value\n ds_mom.data = baselines_dic_mom[baseline_mom][new]\n # if new == 'scalars':\n # slope2.visible = True\n # slope3.visible = True\n # slope4.visible = True\n # else:\n # slope2.visible = False\n # slope3.visible = False\n # slope4.visible = False\n x_mom_select.value = 'baseline'\n \ndef update_x_axis_target(attrname, old, new):\n baseline_mom = baseline_mom_select.value\n mom = mom_select.value\n df_temp = ds_mom.data.copy()\n if new == 'baseline':\n path_x_axis = results_path+baseline_mom+'/'\n else:\n path_x_axis = results_path+'baseline_'+baseline_mom+'_variations/'+new+'/'\n if mom != 'scalars':\n m_temp = moments()\n m_temp.load_run(path_x_axis,\n dir_path=dir_path)\n df_temp['target'] = getattr(m_temp,mom+'_target').ravel()\n # df_temp['target'] = pd.read_csv(path_x_axis+mom)['target']\n else:\n m_temp = moments()\n m_temp.load_run(path_x_axis,\n dir_path=dir_path)\n for i,x in enumerate(df_temp['x']):\n if x != 'objective':\n df_temp['target'][i] = float(getattr(m_temp,x+'_target'))\n ds_mom.data = df_temp\n \ndef toggle_labels(event):\n labels_mom.visible = not labels_mom.visible\n \ncontrols_mom = row(baseline_mom_select, mom_select, x_mom_select, labels_mom_toggle)\n\nbaseline_mom_select.on_change('value', update_baseline_mom)\nbaseline_mom_select.on_change('value', update_x_axis_mom_matching_options)\nlabels_mom_toggle.on_click(toggle_labels)\n\nmom_select.on_change('value', update_mom)\nx_mom_select.on_change('value', update_x_axis_target)\n\nbaseline_par = baseline_mom\npar = 'delta'\n\nbaseline_par_select = Select(value=baseline_par, title='Baseline', options=sorted(baselines_dic_param.keys()))\npar_select = Select(value=par, title='Quantity', options=sorted(baselines_dic_param[baseline_par].keys()))\n\ncountry_sort = {\n 'USA':\t1,\n 'JAP':\t2,\n 'CAN':\t3,\n 'AUS':\t13,\n 'EUR':\t5,\n 'KOR':\t6,\n 'MEX':\t7,\n 'RUS':\t8,\n 'BRA':\t9,\n 'ROW':\t10,\n 'CHN':\t11,\n 'IND':\t12,\n 'IDN':\t14\n }\n\nx_range = baselines_dic_param[baseline_par][par_select.value].index.to_list()\nx_range = sorted(x_range, key = country_sort.get)\nds_par = ColumnDataSource(baselines_dic_param[baseline_par][par].loc[x_range])\np_par = figure(title=\"Parameters\", \n width = 1200,\n height = 875,\n x_range = x_range,\n y_axis_label='Model implied',\n tools = TOOLS)\nhover_tool_par = HoverTool()\nhover_tool_par.tooltips = [\n (\"index\", \"@x\"),\n (\"value\", \"$y\")\n ]\n\np_par.add_tools(hover_tool_par)\ncolors_par = itertools.cycle(Category18)\nlines_par = {}\n\nfor col in baselines_dic_param[baseline_par][par].columns:\n lines_par[col] = p_par.line(x='x', y=col, source = ds_par, color=next(colors_par),\n 
line_width = 2)\n if col != 'baseline':\n lines_par[col].visible = False\n\nlegend_items_par = [LegendItem(label=comments_dic[baseline_par][col], renderers=[lin_par])\n for col, lin_par in lines_par.items() if col in comments_dic[baseline_par]]\nlegend_par = Legend(items=legend_items_par, click_policy=\"hide\", \n label_text_font_size=\"8pt\",\n spacing = 0, \n )\np_par.add_layout(legend_par, 'right')\n\n# legend_par_split_1 = Legend(items=legend_items_par[:round((len(legend_items_par)+1)/2)], click_policy=\"hide\", \n# label_text_font_size=\"8pt\",\n# spacing = 0, \n# )\n# legend_par_split_2 = Legend(items=legend_items_par[round((1+len(legend_items_par))/2):], click_policy=\"hide\", \n# label_text_font_size=\"8pt\",\n# spacing = 0\n# )\n# p_par.add_layout(legend_par_split_1, 'right')\n# p_par.add_layout(legend_par_split_2, 'right')\n\ncolumns_par = [\n TableColumn(field=\"x\"),\n ]+[TableColumn(field=col) for col in list(comments_dic[baseline_par].keys())]\n\ndata_table_par = DataTable(source=ds_par, columns = columns_par, width=1200, height=400)\n\ndef update_baseline_par(attrname, old, new):\n par = par_select.value\n x_range_factors = baselines_dic_param[new][par].index.to_list()\n if new != 'scalars':\n x_range_factors = sorted(x_range_factors, key = country_sort.get)\n ds_par.data = baselines_dic_param[new][par].loc[x_range_factors]\n legend_items_par = [LegendItem(label=comments_dic[new][col], renderers=[lines_par[col]])\n for col in ds_par.data if col in comments_dic[new]]\n legend_par.items = legend_items_par\n # legend_par_split_1.items = legend_items_par[:round((1+len(legend_items_par))/2)]\n # legend_par_split_2.items = legend_items_par[round((len(legend_items_par)+1)/2):]\n \n data_table_par.columns = [\n TableColumn(field=\"x\"),\n ]+[TableColumn(field=col) for col in list(comments_dic[new].keys())]\n\ndef update_par(attrname, old, new):\n baseline_par = baseline_par_select.value\n x_range_factors = baselines_dic_param[baseline_par][new].index.to_list()\n if new != 'scalars':\n x_range_factors = sorted(x_range_factors, key = country_sort.get)\n p_par.x_range.factors = x_range_factors\n ds_par.data = baselines_dic_param[baseline_par][new].loc[x_range_factors]\n\ncontrols_par = row(baseline_par_select, par_select)\n\nbaseline_par_select.on_change('value', update_baseline_par)\npar_select.on_change('value', update_par)\n\nbaseline_sol_qty = baseline_mom\nsol_qty = 'psi_o_star'\n\nbaseline_sol_qty_select = Select(value=baseline_sol_qty, title='Baseline', options=sorted(baselines_dic_sol_qty.keys()))\nsol_qty_select = Select(value=sol_qty, title='Quantity', options=sorted(baselines_dic_sol_qty[baseline_sol_qty].keys()))\nx_range_par = baselines_dic_sol_qty[baseline_sol_qty][sol_qty_select.value].index.to_list()\nx_range_par = sorted(x_range_par, key = country_sort.get)\nds_sol_qty = ColumnDataSource(baselines_dic_sol_qty[baseline_sol_qty][sol_qty].loc[x_range_par])\np_sol_qty = figure(title=\"Solution quantities\", \n width = 1200,\n height = 875,\n x_range = x_range,\n y_axis_label='Model implied',\n tools = TOOLS)\nhover_tool_sol_qty = HoverTool()\nhover_tool_sol_qty.tooltips = [\n (\"index\", \"@x\"),\n (\"value\", \"$y\")\n ]\n\np_sol_qty.add_tools(hover_tool_sol_qty)\ncolors_sol_qty = itertools.cycle(Category18)\nlines_sol_qty = {}\n\nfor col in baselines_dic_sol_qty[baseline_sol_qty][sol_qty].columns:\n lines_sol_qty[col] = p_sol_qty.line(x='x', y=col, source = ds_sol_qty, \n color=next(colors_sol_qty),\n line_width = 2)\n if col != 'baseline':\n lines_sol_qty[col].visible = 
False\n\nlegend_items_sol_qty = [LegendItem(label=comments_dic[baseline_sol_qty][col], renderers=[lin_sol_qty]) \n for col, lin_sol_qty in lines_sol_qty.items() if col in comments_dic[baseline_sol_qty]]\n\nlegend_sol_qty = Legend(items=legend_items_sol_qty, click_policy=\"hide\", \n label_text_font_size=\"8pt\",\n spacing = 0, \n )\np_sol_qty.add_layout(legend_sol_qty, 'right')\n\n# legend_sol_qty_split_1 = Legend(items=legend_items_sol_qty[:round((len(legend_items_sol_qty)+1)/2)], click_policy=\"hide\", \n# label_text_font_size=\"8pt\",\n# spacing = 0, \n# )\n# legend_sol_qty_split_2 = Legend(items=legend_items_sol_qty[round((len(legend_items_sol_qty)+1)/2):], click_policy=\"hide\", \n# label_text_font_size=\"8pt\",\n# spacing = 0\n# )\n# p_sol_qty.add_layout(legend_sol_qty_split_1, 'right')\n# p_sol_qty.add_layout(legend_sol_qty_split_2, 'right')\n\n\ncolumns_sol_qty = [\n TableColumn(field=\"x\"),\n ]+[TableColumn(field=col) for col in list(comments_dic[baseline_sol_qty].keys())]\n\ndata_table_sol_qty = DataTable(source=ds_sol_qty, columns = columns_sol_qty, width=1200, height=400)\n\ndef update_baseline_sol_qty(attrname, old, new):\n sol_qty = sol_qty_select.value\n x_range_factors = baselines_dic_sol_qty[new][sol_qty].index.to_list()\n if new != 'scalars':\n x_range_factors = sorted(x_range_factors, key = country_sort.get)\n ds_sol_qty.data = baselines_dic_sol_qty[new][sol_qty].loc[x_range_factors]\n legend_items_sol_qty = [LegendItem(label=comments_dic[new][col], renderers=[lines_sol_qty[col]]) \n for col in ds_sol_qty.data if col in comments_dic[new]]\n legend_sol_qty.items = legend_items_sol_qty\n # legend_sol_qty_split_1.items = legend_items_sol_qty[:round((len(legend_items_sol_qty)+1)/2)]\n # legend_sol_qty_split_2.items = legend_items_sol_qty[round((len(legend_items_sol_qty)+1)/2):]\n data_table_sol_qty.columns = [TableColumn(field=col) for col in list(comments_dic[new].keys())]\n \ndef update_sol_qty(attrname, old, new):\n baseline_sol_qty = baseline_sol_qty_select.value\n # p_sol_qty.x_range.factors = baselines_dic_sol_qty[baseline_sol_qty][new].index.to_list()\n ds_sol_qty.data = baselines_dic_sol_qty[baseline_sol_qty][new].loc[x_range_par]\n\ncontrols_sol_qty = row(baseline_sol_qty_select, sol_qty_select)\n\nbaseline_sol_qty_select.on_change('value', update_baseline_sol_qty)\nsol_qty_select.on_change('value', update_sol_qty)\n\nmoment_report = column(controls_mom,p_mom,data_table_mom)\nparam_report = column(controls_par, p_par, data_table_par)\nsol_qty_report = column(controls_sol_qty, p_sol_qty, data_table_sol_qty)\n\n#!!! 
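\n\n# --- Hedged aside (added): minimal, self-contained sketch of the Select -> ColumnDataSource\n# update pattern that the three report blocks above all follow. Everything named demo_*\n# is illustrative only (not part of the app); kept commented out so the server app is unchanged.\n# demo_frames = {'a': pd.DataFrame({'x': [1, 2], 'y': [3, 4]}),\n#                'b': pd.DataFrame({'x': [1, 2], 'y': [5, 6]})}\n# demo_ds = ColumnDataSource(demo_frames['a'])\n# demo_fig = figure(width=300, height=300, tools=TOOLS)\n# demo_fig.line(x='x', y='y', source=demo_ds)\n# demo_select = Select(value='a', title='Frame', options=sorted(demo_frames))\n# def demo_update(attr, old, new):\n#     demo_ds.data = demo_frames[new]  # reassigning .data redraws every glyph bound to the source\n# demo_select.on_change('value', demo_update)\n\n#!!! 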
first panel\nfirst_panel = row(moment_report,param_report,sol_qty_report)\n# first_panel = row(moment_report,param_report)\nprint(time.perf_counter() - start)\n\n#%% Time series\n\nbaseline_time = '1020'\n# baseline_time_list = ['607','608','609','610','614','615','616','617'] \n# baseline_time_list = ['607','806','903']\nbaseline_time_list = ['1020']\npar_time = 'delta'\npar_time_select = Select(value=par_time, title='Quantity', options=sorted(baselines_dic_param[baseline_time].keys()))\nbaseline_time_select = Select(value=baseline_time, title='Baseline', options=baseline_time_list)\n\n\nyears_time = [y for y in range(1990,2019)]\nruns_time = ['1.'+str(i) for i in range(29)]\n\ndef build_time_series(baseline_time,par_time):\n # df = baselines_dic_param[baseline_time][par_time].T.reindex(\n # columns=countries+baselines_dic_param[baseline_time]['scalars'].index.to_list()\n # )\n df = baselines_dic_param[baseline_time][par_time].copy()\n df = df[runs_time]\n # print(df)\n df.columns = years_time\n df = df.T\n df = df.reindex(\n columns=countries+baselines_dic_param[baseline_time]['scalars'].index.to_list()\n )\n df.index.name = 'year'\n return df\n\ndf_par_time = build_time_series(baseline_time,par_time)\nds_par_time = ColumnDataSource(df_par_time)\np_par_time = figure(title=\"Time series\", \n width = 1500,\n height = 850,\n y_axis_label='Parameter',\n tools = TOOLS)\nhover_tool_par_time = HoverTool()\nhover_tool_par_time.tooltips = [\n (\"Year\", \"@year\"),\n (\"value\", \"$y\")\n ]\n\np_par_time.add_tools(hover_tool_par_time)\ncolors_par_time = itertools.cycle(Category18)\nlines_par_time = {}\n\nfor col in df_par_time.columns:\n if col != 'kappa':\n lines_par_time[col] = p_par_time.line(x='year', y=col, \n source = ds_par_time, \n color=next(colors_par_time),\n line_width = 2,\n # legend_label=col\n )\n\nlegend_items_par_time = [LegendItem(label=col, renderers=[lines_par_time[col]]) \n for col in countries]\nlegend_par_time = Legend(items=legend_items_par_time, click_policy=\"hide\", \n label_text_font_size=\"10pt\",\n )\np_par_time.add_layout(legend_par_time , 'right')\n \ndef update_par_time(attrname, old, new):\n df_par_time = build_time_series(baseline_time_select.value,new)\n ds_par_time.data = df_par_time\n if new!='scalars':\n legend_items_par_time = [LegendItem(label=col, renderers=[lines_par_time[col]]) \n for col in countries]\n else:\n legend_items_par_time = [LegendItem(label=col, renderers=[lines_par_time[col]]) \n for col in baselines_dic_param[baseline_time]['scalars'].index.to_list() if col != 'kappa']\n legend_par_time.items = legend_items_par_time\n \ndef update_baseline_time(attrname, old, new):\n df_par_time = build_time_series(new,par_time_select.value)\n ds_par_time.data = df_par_time\n if new!='scalars':\n legend_items_par_time = [LegendItem(label=col, renderers=[lines_par_time[col]]) \n for col in countries]\n else:\n legend_items_par_time = [LegendItem(label=col, renderers=[lines_par_time[col]]) \n for col in baselines_dic_param[baseline_time]['scalars'].index.to_list() if col != 'kappa']\n legend_par_time.items = legend_items_par_time\n\ncontrols_par_time = row(baseline_time_select,par_time_select)\n\npar_time_select.on_change('value', update_par_time)\nbaseline_time_select.on_change('value', update_baseline_time)\n\npar_time_report = column(controls_par_time, p_par_time) \n\n# explication_calib_params = Div(text=\n# \"607 variations :
<br>\\\n# calibrated parameters : eta,k,fe,T,zeta,g_0,delta,nu,fo,theta<br>
\\\n# targeted moments : GPDIFF,GROWTH,KM,OUT,RD,RP,SRDUS,SRGDP,SINNOVPATUS,\\\n# TO,SPFLOW,UUPCOST,SINNOVPATEU,DOMPATINUS,DOMPATINEU,TE<br>
\\\n# 608 variations :<br>
\\\n# calibrated parameters : eta,fe,T,delta,fo<br>
\\\n# targeted moments : OUT,RD,RP,SRGDP,SINNOVPATUS,\\\n# SPFLOW,UUPCOST,SINNOVPATEU,DOMPATINUS,DOMPATINEU<br>
\\\n# 609 variations :<br>
\\\n# calibrated parameters : eta,T,delta<br>
\\\n# targeted moments : OUT,RD,RP,SRGDP,SINNOVPATUS,\\\n# SPFLOW,SINNOVPATEU,DOMPATINUS,DOMPATINEU<br>
\\\n# 610 variations :<br>
\\\n# calibrated parameters : eta,T,delta<br>
\\\n# targeted moments : OUT,RD,RP,SRDUS,SRGDP,SINNOVPATUS,\\\n# SPFLOW,SINNOVPATEU,DOMPATINUS,DOMPATINEU<br>
\\\n# \")\n\n#!!! second_panel\n# second_panel = row(par_time_report, explication_calib_params)\nsecond_panel = row(par_time_report)\n\n\n#%% counterfactuals\n\n# baseline_cf = '101'\nbaseline_cf = '1020'\ncountry_cf = 'USA'\n\ndef section_end(s):\n return [int(_) for _ in s.split(\"_\")[-1].split(\".\")]\n# cf_list = sorted([s for s in os.listdir(cf_path) \n# if s[9:].startswith('604') and s.startswith('baseline')], key=section_end)+\\\ncf_list = sorted([s for s in os.listdir(cf_path) \n if s[9:].startswith('1020') and s.startswith('baseline')], key=section_end)#+\\\n # sorted([s for s in os.listdir(cf_path) \n # if s[9:].startswith('803') and s.startswith('baseline')], key=section_end)+\\\n # sorted([s for s in os.listdir(cf_path) \n # if s[9:].startswith('804') and s.startswith('baseline')], key=section_end)+\\\n # sorted([s for s in os.listdir(cf_path) \n # if s[9:].startswith('805') and s.startswith('baseline')], key=section_end)#+\\\n # sorted([s for s in os.listdir(cf_path) \n # if s[9:].startswith('608') and s.startswith('baseline')], key=section_end)+\\\n # sorted([s for s in os.listdir(cf_path) \n # if s[9:].startswith('609') and s.startswith('baseline')], key=section_end)+\\\n # sorted([s for s in os.listdir(cf_path) \n # if s[9:].startswith('618') and s.startswith('baseline')], key=section_end)+\\\n # sorted([s for s in os.listdir(cf_path) \n # if s[9:].startswith('501') and s.startswith('baseline')], key=section_end)#+\\\n # sorted([s for s in os.listdir(cf_path) \n # if s[9:].startswith('601') and s.startswith('baseline')], key=section_end)+\\\n # sorted([s for s in os.listdir(cf_path) \n # if s[9:].startswith('602') and s.startswith('baseline')], key=section_end)+\\\n # sorted([s for s in os.listdir(cf_path) \n # if s[9:].startswith('603') and s.startswith('baseline')], key=section_end)+\\\n # sorted([s for s in os.listdir(cf_path) \n # if s[9:].startswith('404') and s.startswith('baseline')], key=section_end)#+\\\n # sorted([s for s in os.listdir(cf_path) \n # if s[9:].startswith('312') and s.startswith('baseline')], key=section_end)+\\\n # sorted([s for s in os.listdir(cf_path) \n # if s[9:].startswith('311') and s.startswith('baseline')], key=section_end)\n\nbaseline_cf_select = Select(value=baseline_cf, title='Baseline', options=[s[9:] for s in cf_list])\ncountry_cf_select = Select(value=country_cf, \n title='Country', \n options=countries+['World','Harmonizing','Upper_harmonizing',\n 'Uniform_delta','Upper_uniform_delta'])\n\ndef get_data_cf(baseline,country):\n df_cf = pd.read_csv(cf_path+'baseline_'+baseline+'/'+country+'.csv')\n if country != 'Harmonizing':\n df_cf['Growth rate'] = df_cf['growth']/df_cf.loc[np.argmin(np.abs(df_cf.delt-1))].growth\n if country == 'Harmonizing':\n df_cf['Growth rate'] = df_cf['growth']/df_cf.loc[np.argmin(np.abs(df_cf.delt))].growth\n df_cf.set_index('delt',inplace=True)\n return df_cf\n\ndef build_max(df_cf):\n df_max = pd.concat([df_cf.idxmax(),df_cf.max()],axis=1)\n df_max.index.name = 'label'\n df_max.columns = ['xmax','max'] \n df_max = df_max.loc[countries]\n df_max['colors'] = Category18[:len(df_max)]\n return df_max\n\ndf_cf = get_data_cf(baseline_cf,country_cf)\nds_cf = ColumnDataSource(df_cf)\ndf_cf_max = build_max(df_cf)\nds_cf_max = ColumnDataSource(df_cf_max)\n\ncolors_cf = itertools.cycle(Category18)\ncolors_cf_max = itertools.cycle(Category18)\n\np_cf = figure(title=\"Patent protection counterfactual\", \n width = 1200,\n height = 850,\n x_axis_label='Change in delta',\n y_axis_label='Normalized Consumption equivalent 
welfare / Growth rate',\n x_axis_type=\"log\",\n tools = TOOLS) \n\nfor col in df_cf.columns:\n if col not in [0,'delt','growth']:\n p_cf.line(x='delt', y=col, source = ds_cf, color=next(colors_cf),line_width = 2, legend_label=col)\n\np_cf.circle(x = 'xmax', y = 'max', source = ds_cf_max, size=4, color='colors')\n \np_cf.legend.click_policy=\"hide\"\np_cf.legend.label_text_font_size = '8pt'\np_cf.add_layout(p_cf.legend[0], 'right')\n\ndef update_baseline_cf(attrname, old, new):\n country_cf = country_cf_select.value\n ds_cf.data = get_data_cf(new,country_cf)\n df_cf = get_data_cf(new,country_cf)\n ds_cf.data = df_cf\n ds_cf_max.data = build_max(df_cf)\n \ndef update_country_cf(attrname, old, new):\n baseline_cf = baseline_cf_select.value\n df_cf = get_data_cf(baseline_cf,new)\n ds_cf.data = df_cf\n ds_cf_max.data = build_max(df_cf)\n \ncontrols_cf = row(baseline_cf_select, country_cf_select)\n\nbaseline_cf_select.on_change('value', update_baseline_cf)\ncountry_cf_select.on_change('value', update_country_cf)\n\ncounterfactuals_report = column(controls_cf,p_cf)\n\n#%% counterfactuals 805 TO target\n\n# country_to_cf = 'USA'\n# to_target = 0.0155\n# baseline_to_cf = '804'\n\n# # list_of_to_targets = np.linspace(0.01,0.03,41)\n# list_of_to_targets = np.array(np.linspace(0.01,0.02,21).tolist()\n# +[0.022,0.024,0.026,0.028,0.03])\n\n# def section_end(s):\n# return [int(_) for _ in s.split(\"_\")[-1].split(\".\")]\n# cf_to_list = {'804':sorted([s for s in os.listdir(cf_path) \n# if s[9:].startswith('804') and s.startswith('baseline')], key=section_end),\n# '805':sorted([s for s in os.listdir(cf_path) \n# if s[9:].startswith('805') and s.startswith('baseline')], key=section_end)}\n\n# def get_data_to_cf(to_target,country,baseline_to_cf):\n# idx_to_cf = np.argmin(np.abs(list_of_to_targets-to_target))\n# df_to_cf = pd.read_csv(cf_path+cf_to_list[baseline_to_cf][min(idx_to_cf,len(cf_to_list[baseline_to_cf])-1)]+'/'+country+'.csv')\n# if country == 'Harmonizing':\n# df_to_cf['Growth rate'] = df_to_cf['growth']/df_to_cf.loc[np.argmin(np.abs(df_to_cf.delt))].growth\n# elif country == 'Uniform_delta':\n# df_to_cf['Growth rate'] = np.nan\n# else:\n# df_to_cf['Growth rate'] = df_to_cf['growth']/df_to_cf.loc[np.argmin(np.abs(df_to_cf.delt-1))].growth\n# df_to_cf.set_index('delt',inplace=True)\n# return df_to_cf\n\n# def build_max(df_to_cf):\n# df_max = pd.concat([df_to_cf.idxmax(),df_to_cf.max()],axis=1)\n# df_max.index.name = 'label'\n# df_max.columns = ['xmax','max'] \n# df_max = df_max.loc[countries]\n# df_max['colors'] = Category18[:len(df_max)]\n# return df_max\n\n# baseline_to_cf_select = Select(value=baseline_to_cf, title='Baseline', options=['804','805'])\n# country_to_cf_select = Select(value=country_to_cf, \n# title='Country', \n# options=countries+['World','Harmonizing','Uniform_delta'])\n\n# df_to_cf = get_data_to_cf(to_target,country_to_cf,baseline_to_cf)\n# ds_to_cf = ColumnDataSource(df_to_cf)\n# df_to_cf_max = build_max(df_to_cf)\n# ds_to_cf_max = ColumnDataSource(df_to_cf_max)\n\n# colors_to_cf = itertools.cycle(Category18)\n# colors_to_cf_max = itertools.cycle(Category18)\n\n# p_to_cf = figure(title=\"Patent protection counterfactual as function of TO target, baselines 804(2005) and 805 (2015)\", \n# width = 1200,\n# height = 850,\n# x_axis_label='Change in delta',\n# y_axis_label='Normalized Consumption equivalent welfare / Growth rate',\n# x_axis_type=\"log\",\n# tools = TOOLS) \n\n# for col in df_to_cf.columns:\n# if col not in [0,'delt','growth']:\n# p_to_cf.line(x='delt', y=col, 
source = ds_to_cf, color=next(colors_to_cf),line_width = 2, legend_label=col)\n\n# p_to_cf.circle(x = 'xmax', y = 'max', source = ds_to_cf_max, size=4, color='colors')\n \n# p_to_cf.legend.click_policy=\"hide\"\n# p_to_cf.legend.label_text_font_size = '8pt'\n# p_to_cf.add_layout(p_to_cf.legend[0], 'right')\n\n# def update_target_to_cf(attrname, old, new):\n# country_to_cf = country_to_cf_select.value\n# baseline_to_cf = baseline_to_cf_select.value\n# df_to_cf = get_data_to_cf(new/100,country_to_cf,baseline_to_cf)\n# ds_to_cf.data = df_to_cf\n# ds_to_cf_max.data = build_max(df_to_cf)\n \n# def update_baseline_to_cf(attrname, old, new):\n# country_to_cf = country_to_cf_select.value\n# to_target = slider_to_cf.value/100\n# df_to_cf = get_data_to_cf(to_target,country_to_cf,new)\n# ds_to_cf.data = df_to_cf\n# ds_to_cf_max.data = build_max(df_to_cf)\n \n# def update_country_to_cf(attrname, old, new):\n# to_target = slider_to_cf.value/100\n# baseline_to_cf = baseline_to_cf_select.value\n# df_to_cf = get_data_to_cf(to_target,new,baseline_to_cf)\n# ds_to_cf.data = df_to_cf\n# ds_to_cf_max.data = build_max(df_to_cf)\n \n# slider_to_cf = Slider(start=1, end=3, value=1.55, step=0.05, title=\"Turnover target in %\") \n \n# controls_to_cf = row(baseline_to_cf_select, slider_to_cf, country_to_cf_select)\n# country_to_cf_select.on_change('value', update_country_to_cf)\n# slider_to_cf.on_change('value', update_target_to_cf)\n# baseline_to_cf_select.on_change('value', update_baseline_to_cf)\n\n# counterfactuals_to_report = column(controls_to_cf,p_to_cf)\n\n#%% dynamic counterfactuals\n\nbaseline_dyn_cf = '1020'\ncountry_dyn_cf = 'USA'\n\nbaseline_dyn_cf_select = Select(value=baseline_dyn_cf, title='Baseline', options=['1020'])\ncountry_dyn_cf_select = Select(value=country_dyn_cf, \n title='Country', \n options=countries+['World','Harmonizing','Upper_harmonizing',\n 'Uniform_delta','Upper_uniform_delta'])\n\ndef get_data_dyn_cf(baseline,country):\n df_dyn_cf = pd.read_csv(cf_path+'baseline_'+baseline+'/dyn_'+country+'.csv')\n df_dyn_cf.set_index('delt',inplace=True)\n return df_dyn_cf\n\ndef build_max(df_dyn_cf):\n df_max = pd.concat([df_dyn_cf.idxmax(),df_dyn_cf.max()],axis=1)\n df_max.index.name = 'label'\n df_max.columns = ['xmax','max'] \n df_max = df_max.loc[countries]\n df_max['colors'] = Category18[:len(df_max)]\n return df_max\n\ndf_dyn_cf = get_data_dyn_cf(baseline_dyn_cf,country_dyn_cf)\nds_dyn_cf = ColumnDataSource(df_dyn_cf)\ndf_dyn_cf_max = build_max(df_dyn_cf)\nds_dyn_cf_max = ColumnDataSource(df_dyn_cf_max)\n\ncolors_dyn_cf = itertools.cycle(Category18)\ncolors_dyn_cf_max = itertools.cycle(Category18)\n\np_dyn_cf = figure(title=\"With transitional dynamics patent protection counterfactual\", \n width = 1200,\n height = 850,\n x_axis_label='Change in delta',\n y_axis_label='Normalized Consumption equivalent welfare / Growth rate',\n x_axis_type=\"log\",\n tools = TOOLS) \n\nfor col in df_dyn_cf.columns:\n if col not in [0,'delt']:\n p_dyn_cf.line(x='delt', y=col, source = ds_dyn_cf, \n color=next(colors_dyn_cf),line_width = 2, legend_label=col)\n\np_dyn_cf.circle(x = 'xmax', y = 'max', source = ds_dyn_cf_max, size=4, color='colors')\n \np_dyn_cf.legend.click_policy=\"hide\"\np_dyn_cf.legend.label_text_font_size = '8pt'\np_dyn_cf.add_layout(p_dyn_cf.legend[0], 'right')\n\ndef update_baseline_dyn_cf(attrname, old, new):\n country_dyn_cf = country_dyn_cf_select.value\n ds_dyn_cf.data = get_data_dyn_cf(new,country_dyn_cf)\n df_dyn_cf = get_data_dyn_cf(new,country_dyn_cf)\n ds_dyn_cf.data = 
df_dyn_cf\n ds_dyn_cf_max.data = build_max(df_dyn_cf)\n \ndef update_country_dyn_cf(attrname, old, new):\n baseline_dyn_cf = baseline_dyn_cf_select.value\n df_dyn_cf = get_data_dyn_cf(baseline_dyn_cf,new)\n ds_dyn_cf.data = df_dyn_cf\n ds_dyn_cf_max.data = build_max(df_dyn_cf)\n \ncontrols_dyn_cf = row(baseline_dyn_cf_select, country_dyn_cf_select)\n\nbaseline_dyn_cf_select.on_change('value', update_baseline_dyn_cf)\ncountry_dyn_cf_select.on_change('value', update_country_dyn_cf)\n\ncounterfactuals_dyn_report = column(controls_dyn_cf,p_dyn_cf)\n\n#%% counterfactuals 405 TO target with dynamics\n\n# country_to_cf_dyn = 'USA'\n# to_target_dyn = 0.016\n\n# list_of_to_targets_dyn = np.linspace(0.01,0.03,41)\n\n# def section_end(s):\n# return [int(_) for _ in s.split(\"_\")[-1].split(\".\")]\n# cf_to_list = sorted([s for s in os.listdir(cf_path) \n# if s[9:].startswith('405') and s.startswith('baseline')], key=section_end)\n\n# def get_data_to_cf_dyn(to_target_dyn,country):\n# idx_to_cf_dyn = np.argmin(np.abs(list_of_to_targets_dyn-to_target_dyn))\n# df_to_cf_dyn = pd.read_csv(cf_path+cf_to_list[min(idx_to_cf_dyn,len(cf_to_list)-1)]+'/dyn_'+country+'.csv')\n# df_to_cf_dyn.set_index('delt',inplace=True)\n# if country not in ['World','Harmonizing']:\n# df_to_cf_dyn['static_for_main_country'] = pd.read_csv(\n# cf_path+cf_to_list[min(idx_to_cf_dyn,len(cf_to_list)-1)]+'/'+country+'.csv'\n# )[country].values\n# else:\n# df_to_cf_dyn['static_for_main_country'] = np.nan\n# return df_to_cf_dyn\n\n# def build_max(df_to_cf):\n# df_max = pd.concat([df_to_cf.idxmax(),df_to_cf.max()],axis=1)\n# df_max.index.name = 'label'\n# df_max.columns = ['xmax','max'] \n# df_max = df_max.loc[countries]\n# df_max['colors'] = Category18[:len(df_max)]\n# return df_max\n\n# country_to_cf_dyn_select = Select(value=country_to_cf_dyn, \n# title='Country', \n# options=countries+['World','Harmonizing'])\n\n# df_to_cf_dyn = get_data_to_cf_dyn(to_target_dyn,country_to_cf_dyn)\n# ds_to_cf_dyn = ColumnDataSource(df_to_cf_dyn)\n# df_to_cf_dyn_max = build_max(df_to_cf_dyn)\n# ds_to_cf_dyn_max = ColumnDataSource(df_to_cf_dyn_max)\n\n# colors_to_cf_dyn = itertools.cycle(Category18)\n# colors_to_cf_dyn_max = itertools.cycle(Category18)\n\n# p_to_cf_dyn = figure(title=\"With transitional dynamics patent protection counterfactual as function of TO target, baseline 405\", \n# width = 1200,\n# height = 850,\n# x_axis_label='Change in delta',\n# y_axis_label='Normalized Consumption equivalent welfare',\n# x_axis_type=\"log\",\n# tools = TOOLS) \n\n# for col in df_to_cf_dyn.columns:\n# if col not in [0,'delt','static_for_main_country']:\n# p_to_cf_dyn.line(x='delt', y=col, source = ds_to_cf_dyn, \n# color=next(colors_to_cf_dyn),line_width = 2, legend_label=col)\n# if col == 'static_for_main_country':\n# p_to_cf_dyn.line(x='delt', y=col, source = ds_to_cf_dyn, \n# color='grey',line_width = 2, legend_label=col, \n# line_dash = 'dashed')\n\n# p_to_cf_dyn.circle(x = 'xmax', y = 'max', source = ds_to_cf_dyn_max, size=4, color='colors')\n\n# p_to_cf_dyn.legend.click_policy=\"hide\"\n# p_to_cf_dyn.legend.label_text_font_size = '8pt'\n# p_to_cf_dyn.add_layout(p_to_cf_dyn.legend[0], 'right')\n\n# def update_baseline_to_cf_dyn(attrname, old, new):\n# country_to_cf_dyn = country_to_cf_dyn_select.value\n# df_to_cf_dyn = get_data_to_cf_dyn(new/100,country_to_cf_dyn)\n# ds_to_cf_dyn.data = df_to_cf_dyn\n# ds_to_cf_dyn_max.data = build_max(df_to_cf_dyn)\n \n# def update_country_to_cf_dyn(attrname, old, new):\n# to_target_dyn = slider_to_cf_dyn.value/100\n# 
df_to_cf_dyn = get_data_to_cf_dyn(to_target_dyn,new)\n# ds_to_cf_dyn.data = df_to_cf_dyn\n# ds_to_cf_dyn_max.data = build_max(df_to_cf_dyn)\n \n# slider_to_cf_dyn = Slider(start=1, end=3, value=1.85, step=0.05, title=\"Turnover target in %\") \n \n# controls_to_cf_dyn = row(slider_to_cf_dyn, country_to_cf_dyn_select)\n# country_to_cf_dyn_select.on_change('value', update_country_to_cf_dyn)\n# slider_to_cf_dyn.on_change('value', update_baseline_to_cf_dyn)\n\n# counterfactuals_to_dyn_report = column(controls_to_cf_dyn,p_to_cf_dyn)\n\n#!!! third panel\n# third_panel = row(counterfactuals_dyn_report, counterfactuals_to_dyn_report, dyn_report)\nthird_panel = row(counterfactuals_dyn_report,counterfactuals_report)\n\n#%% Dynamic Nash / coop equilibrium and deviations from it\n\nbaseline_dyn_nash_coop = '1020'\nvariation_dyn_nash_coop = 'baseline'\nequilibrium_type ='Nash'\n\nbaseline_dyn_nash_coop_select = Select(value=baseline_dyn_nash_coop, title='Baseline', options=[\n # '607','501'\n '1020'\n ])\ndic_of_possible_variations_dyn_nash_coop = {\n # '1003':['baseline','0.4'],\n '1020':['baseline'],\n # '607':['baseline'],\n # '501':['1.0','2.0']\n }\nvariation_dyn_nash_coop_select = Select(value=variation_dyn_nash_coop, \n title='Variation', \n options=dic_of_possible_variations_dyn_nash_coop[baseline_dyn_nash_coop])\nequilibrium_type_select = Select(value=equilibrium_type, title='Equilibrium', options=['Nash','Coop eq','Coop negishi'])\n\ndef get_dyn_eq_deltas_welfares(baseline_dyn_nash_coop,variation_dyn_nash_coop,equilibrium_type):\n if equilibrium_type == 'Nash':\n deltas = pd.read_csv(nash_eq_path+'dyn_deltas.csv'\n ,index_col=0\n ,dtype={'baseline':str,'variation':str}).drop_duplicates(['baseline','variation','method'],keep='last')\n eq_deltas = deltas.loc[\n (deltas.baseline == baseline_dyn_nash_coop)\n & (deltas.variation == variation_dyn_nash_coop)\n ][countries].values.squeeze()\n welfares = pd.read_csv(nash_eq_path+'dyn_cons_eq_welfares.csv'\n ,index_col=0\n ,dtype={'baseline':str,'variation':str}).drop_duplicates(['baseline','variation','method'],keep='last')\n eq_welfares = welfares.loc[\n (welfares.baseline == baseline_dyn_nash_coop)\n & (welfares.variation == variation_dyn_nash_coop)\n ][countries].values.squeeze()\n \n if equilibrium_type == 'Coop eq':\n deltas = pd.read_csv(coop_eq_path+'dyn_deltas.csv'\n ,index_col=0\n ,dtype={'baseline':str,'variation':str}).drop_duplicates(['baseline','variation','aggregation_method'],keep='last')\n eq_deltas = deltas.loc[\n (deltas.baseline == baseline_dyn_nash_coop)\n & (deltas.variation == variation_dyn_nash_coop)\n & (deltas.aggregation_method == 'pop_weighted')\n ][countries].values.squeeze()\n welfares = pd.read_csv(coop_eq_path+'dyn_cons_eq_welfares.csv'\n ,index_col=0\n ,dtype={'baseline':str,'variation':str}).drop_duplicates(['baseline','variation','aggregation_method'],keep='last')\n eq_welfares = welfares.loc[\n (welfares.baseline == baseline_dyn_nash_coop)\n & (welfares.variation == variation_dyn_nash_coop)\n & (welfares.aggregation_method == 'pop_weighted')\n ][countries].values.squeeze()\n \n if equilibrium_type == 'Coop negishi':\n deltas = pd.read_csv(coop_eq_path+'dyn_deltas.csv'\n ,index_col=0\n ,dtype={'baseline':str,'variation':str}).drop_duplicates(['baseline','variation','aggregation_method'],keep='last')\n eq_deltas = deltas.loc[\n (deltas.baseline == baseline_dyn_nash_coop)\n & (deltas.variation == variation_dyn_nash_coop)\n & (deltas.aggregation_method == 'negishi')\n ][countries].values.squeeze()\n welfares = 
pd.read_csv(coop_eq_path+'dyn_cons_eq_welfares.csv'\n ,index_col=0\n ,dtype={'baseline':str,'variation':str}).drop_duplicates(['baseline','variation','aggregation_method'],keep='last')\n eq_welfares = welfares.loc[\n (welfares.baseline == baseline_dyn_nash_coop)\n & (welfares.variation == variation_dyn_nash_coop)\n & (welfares.aggregation_method == 'negishi')\n ][countries].values.squeeze()\n \n df = pd.DataFrame(index = pd.Index(countries,name='country'))\n df['deltas'] = eq_deltas\n df['welfares'] = eq_welfares\n df['colors'] = Category18[:len(df)]\n \n return df\n\ndef get_dyn_deviation_recap(baseline_dyn_nash_coop,variation_dyn_nash_coop,equilibrium_type):\n if variation_dyn_nash_coop == 'baseline':\n temp_run = baseline_dyn_nash_coop\n else:\n temp_run = baseline_dyn_nash_coop+'_'+variation_dyn_nash_coop\n \n if equilibrium_type == 'Nash':\n dyn_deviation_recap = pd.read_csv(around_dyn_eq_path+f'around_dyn_nash_eq/baseline_{temp_run}/all_countries.csv')\n \n if equilibrium_type == 'Coop eq':\n dyn_deviation_recap = pd.read_csv(around_dyn_eq_path+f'around_dyn_coop_equal_eq/baseline_{temp_run}/all_countries.csv')\n \n if equilibrium_type == 'Coop negishi':\n dyn_deviation_recap = pd.read_csv(around_dyn_eq_path+f'around_dyn_coop_negishi_eq/baseline_{temp_run}/all_countries.csv')\n\n return dyn_deviation_recap\n \nds_dyn_eq = ColumnDataSource(get_dyn_eq_deltas_welfares(baseline_dyn_nash_coop,variation_dyn_nash_coop,equilibrium_type))\n# ds_dyn_eq_dev = ColumnDataSource(get_dyn_deviation_recap(baseline_dyn_nash_coop,variation_dyn_nash_coop,equilibrium_type))\n\ncolors_dyn_eq_dev = itertools.cycle(Category18)\n\np_dyn_eq_dev = figure(title=\"With transitional dynamics Equilibria and unilateral deviations from it\", \n width = 1200,\n height = 850,\n x_axis_label='Delta',\n y_axis_label='Normalized Consumption equivalent welfare change',\n x_axis_type=\"log\",\n tools = TOOLS) \n\n# for country_eq_dev in countries:\n# color = next(colors_dyn_eq_dev)\n# p_dyn_eq_dev.line(x=country_eq_dev+'_delta', \n# y=country_eq_dev+'_welfare', \n# source = ds_dyn_eq_dev, \n# color=color,\n# line_width = 2, \n# legend_label=country_eq_dev+'_welfare')\n# p_dyn_eq_dev.line(x=country_eq_dev+'_delta', \n# y=country_eq_dev+'_world_negishi', \n# source = ds_dyn_eq_dev, \n# color=color,\n# line_width = 2, \n# line_dash='dashed',\n# legend_label=country_eq_dev+'_world_negishi')\n# p_dyn_eq_dev.line(x=country_eq_dev+'_delta', \n# y=country_eq_dev+'_world_equal', \n# source = ds_dyn_eq_dev, \n# color=color,\n# line_width = 2, \n# line_dash='dotted',\n# legend_label=country_eq_dev+'_world_equal')\n\np_dyn_eq_dev.circle(x = 'deltas', y = 'welfares', source = ds_dyn_eq, size=4,color = 'colors')\n\n# p_dyn_eq_dev.legend.click_policy=\"hide\"\n# p_dyn_eq_dev.legend.label_text_font_size = '8pt'\n# p_dyn_eq_dev.add_layout(p_dyn_eq_dev.legend[0], 'right')\n\nhover_tool_eq = HoverTool()\nhover_tool_eq.tooltips = [\n (\"delta\", \"$x\"),\n (\"welfare\", \"$y\")\n ] \np_dyn_eq_dev.add_tools(hover_tool_eq)\n\n\nlabels_dyn_eq_dev = LabelSet(x='deltas', y='welfares', text='country',\n x_offset=2, y_offset=2, source=ds_dyn_eq, text_font_size=\"7pt\")\n\np_dyn_eq_dev.add_layout(labels_dyn_eq_dev)\n\n\ndef update_baseline_dyn_nash(attrname, old, new):\n variation_dyn_nash_coop_select.value = dic_of_possible_variations_dyn_nash_coop[new][0]\n variation_dyn_nash_coop_select.options = dic_of_possible_variations_dyn_nash_coop[new]\n ds_dyn_eq.data = get_dyn_eq_deltas_welfares(new,\n variation_dyn_nash_coop_select.value,\n 
equilibrium_type_select.value)\n # ds_dyn_eq_dev.data = get_dyn_deviation_recap(new,\n # variation_dyn_nash_coop_select.value,\n # equilibrium_type_select.value)\n \ndef update_variation_dyn_nash_coop(attrname, old, new):\n ds_dyn_eq.data = get_dyn_eq_deltas_welfares(baseline_dyn_nash_coop_select.value,\n new,\n equilibrium_type_select.value)\n # ds_dyn_eq_dev.data = get_dyn_deviation_recap(baseline_dyn_nash_coop_select.value,\n # new,\n # equilibrium_type_select.value)\n \ndef update_equilibrium_type(attrname, old, new):\n ds_dyn_eq.data = get_dyn_eq_deltas_welfares(baseline_dyn_nash_coop_select.value,\n variation_dyn_nash_coop_select.value,\n new)\n # ds_dyn_eq_dev.data = get_dyn_deviation_recap(baseline_dyn_nash_coop_select.value,\n # variation_dyn_nash_coop_select.value,\n # new)\n\ncontrols_dyn_eq_dev = row(baseline_dyn_nash_coop_select, variation_dyn_nash_coop_select, equilibrium_type_select)\n\nbaseline_dyn_nash_coop_select.on_change('value', update_baseline_dyn_nash)\nvariation_dyn_nash_coop_select.on_change('value', update_variation_dyn_nash_coop)\nequilibrium_type_select.on_change('value', update_equilibrium_type)\n\ndyn_eq_dev_report = column(controls_dyn_eq_dev,p_dyn_eq_dev)\n\n\n#%% Nash / coop equilibrium\ndef section_ser(s):\n return pd.Series([[int(_) for _ in s_e.split(\".\")] for s_e in s])\n\nbaseline_nash_coop = '1020'\n\ndic_change_labels_for_405 = {'405, '+k:comments_dic['403'][k] for k in comments_dic['405']}\n\ndef get_data_nash_coop(baseline_nash_number):\n\n welf_coop = pd.read_csv(coop_eq_path+'cons_eq_welfares.csv',index_col=0).drop_duplicates(['baseline', \n 'variation','aggregation_method'],keep='last').sort_values(['baseline','variation'])\n welf_nash = pd.read_csv(nash_eq_path+'cons_eq_welfares.csv',index_col=0).drop_duplicates(['baseline', \n 'variation'],keep='last').sort_values(['baseline','variation'])\n \n welf_coop['run'] = welf_coop['baseline'].astype('str')+', '+welf_coop['variation']\n welf_nash['run'] = welf_nash['baseline'].astype('str')+', '+welf_nash['variation']\n\n welf_coop['run'] = welf_coop['run'].replace(dic_change_labels_for_405)\n welf_nash['run'] = welf_nash['run'].replace(dic_change_labels_for_405)\n \n welf_coop['sorting'] = welf_coop['variation'].str.replace('baseline','0')#.astype(float)\n welf_nash['sorting'] = welf_nash['variation'].str.replace('baseline','0')#.astype(float)\n \n welf_coop = welf_coop.sort_values('sorting',key=section_ser)#.sort_values('baseline')\n welf_nash = welf_nash.sort_values('sorting',key=section_ser)#.sort_values('baseline')\n \n welf_coop = welf_coop[welf_coop['baseline'].isin([int(baseline_nash_number)])]\n welf_nash = welf_nash[welf_nash['baseline'].isin([int(baseline_nash_number)])]\n \n welf_negishi = welf_coop[welf_coop['aggregation_method'] == 'negishi']\n welf_pop_weighted = welf_coop[welf_coop['aggregation_method'] == 'pop_weighted']\n \n return welf_pop_weighted, welf_negishi, welf_nash\n\nbaseline_nash_coop_select = Select(value=baseline_nash_coop, title='Baseline', \n # options=['404','405','501','601'])\n # options=['501','607','618','619'])\n # options=['802','803','804','805','806'])\n options=['1020'])\n\nwelf_pop_weighted, welf_negishi, welf_nash = get_data_nash_coop(baseline_nash_coop)\n \nds_pop_weighted = ColumnDataSource(welf_pop_weighted)\nds_negishi = ColumnDataSource(welf_negishi)\nds_nash = ColumnDataSource(welf_nash)\n\ncolors_pop_weighted = itertools.cycle(Category18)\ncolors_negishi = itertools.cycle(Category18)\ncolors_nash = itertools.cycle(Category18)\n\nx_range_nash = 
welf_nash['run'].to_list()\n\np_eq = figure(title=\"Static cooperative and Nash equilibrium\", \n              width = 1200,\n              height = 900,\n              x_range = x_range_nash,\n              # x_axis_label='Run',\n              y_axis_label='Consumption equivalent welfare change',\n              tools = TOOLS\n              ) \np_eq.xaxis.major_label_orientation = 3.14/3\n\nlines_nash = {}\nfor col in p_baseline.countries+['Equal']+['Negishi']:\n    lines_nash[col+' Nash'] = p_eq.line(x='run', y=col, source = ds_nash, color=next(colors_nash),line_width = 2, legend_label=col+' Nash')\n    lines_nash[col+' coop equal'] = p_eq.line(x='run', y=col, source = ds_pop_weighted, color=next(colors_pop_weighted), line_dash='dashed', line_width = 2, legend_label=col+' coop equal')\n    lines_nash[col+' coop negishi'] = p_eq.line(x='run', y=col, source = ds_negishi, color=next(colors_negishi), line_dash='dotted', line_width = 2, legend_label=col+' coop negishi')\n    if col != 'Negishi' and col != 'Equal':\n        lines_nash[col+' Nash'].visible = False\n        lines_nash[col+' coop equal'].visible = False\n        lines_nash[col+' coop negishi'].visible = False\n    \n    \np_eq.legend.click_policy=\"hide\"\np_eq.legend.label_text_font_size = '8pt'\np_eq.legend.spacing = 0\np_eq.add_layout(p_eq.legend[0], 'right') \n\nhover_tool_eq = HoverTool()\nhover_tool_eq.tooltips = [\n    (\"run\", \"@run\"),\n    (\"value\", \"$y\")\n    ] \np_eq.add_tools(hover_tool_eq)\n\ncolumns = [\n        TableColumn(field=\"runs\", title=\"Runs\"),\n        TableColumn(field=\"comments\", title=\"Description\"),\n    ]\n\nexplication = Div(text=\"In the legend, first is the quantity displayed and last\\\n                 is the quantity maximized<br>
'Negishi coop equal' means that:<br>
\\\n - we display the Change in cons equivalent of world welfare<br>
                 according to Negishi weights aggregation<br>
\\\n - we maximize according to the Change in cons equivalent of world welfare<br>
according to equal weights aggregation\\\n \")\n\ndata_table_welfares = pd.concat([welf_nash.set_index('run'),\n welf_negishi.set_index('run'),\n welf_pop_weighted.set_index('run')],\n axis=0,\n keys=['Nash','Coop Negishi','Coop equal'],\n names=['type','run'],\n sort=False\n ).reset_index().sort_values('sorting',key=section_ser)[['run','type']+p_baseline.countries+['Equal']+['Negishi']]\n\nsource_table_welfares = ColumnDataSource(data_table_welfares)\ncolumns_welf = [TableColumn(field=col) for col in ['run','type']+p_baseline.countries+['Equal']+['Negishi']]\n\ntable_widget_welfares = DataTable(source=source_table_welfares, columns=columns_welf, width=1100, height=400,\n )\n\ndef get_delta_nash_coop(baseline_number):\n deltas_coop = pd.read_csv(coop_eq_path+'deltas.csv',index_col=0).drop_duplicates(['baseline', \n 'variation','aggregation_method'],keep='last').sort_values(['baseline','variation'])\n deltas_nash = pd.read_csv(nash_eq_path+'deltas.csv',index_col=0).drop_duplicates(['baseline', \n 'variation'],keep='last').sort_values(['baseline','variation'])\n \n deltas_coop['run'] = deltas_coop['baseline'].astype('str')+', '+deltas_coop['variation']\n deltas_nash['run'] = deltas_nash['baseline'].astype('str')+', '+deltas_nash['variation']\n \n deltas_coop['run'] = deltas_coop['run'].replace(dic_change_labels_for_405)\n deltas_nash['run'] = deltas_nash['run'].replace(dic_change_labels_for_405)\n \n deltas_coop['sorting'] = deltas_coop['variation'].str.replace('baseline','0')#.astype(float)\n deltas_nash['sorting'] = deltas_nash['variation'].str.replace('baseline','0')#.astype(float)\n \n deltas_coop = deltas_coop.sort_values('sorting',key=section_ser)#.sort_values('baseline')\n deltas_nash = deltas_nash.sort_values('sorting',key=section_ser)#.sort_values('baseline')\n \n deltas_coop = deltas_coop[deltas_coop['baseline'].isin([int(baseline_number)])]\n deltas_nash = deltas_nash[deltas_nash['baseline'].isin([int(baseline_number)])]\n \n deltas_negishi = deltas_coop[deltas_coop['aggregation_method'] == 'negishi']\n deltas_pop_weighted = deltas_coop[deltas_coop['aggregation_method'] == 'pop_weighted']\n \n return deltas_pop_weighted, deltas_negishi, deltas_nash\n\ndeltas_pop_weighted, deltas_negishi, deltas_nash = get_delta_nash_coop(baseline_nash_coop)\n\nds_deltas_negishi = ColumnDataSource(deltas_negishi)\nds_deltas_pop_weighted = ColumnDataSource(deltas_pop_weighted)\nds_deltas_nash = ColumnDataSource(deltas_nash)\n\ncolors_deltas_negishi = itertools.cycle(Category18)\ncolors_deltas_pop_weighted = itertools.cycle(Category18)\ncolors_deltas_nash = itertools.cycle(Category18)\n\np_deltas_eq = figure(title=\"Static cooperative and Nash equilibrium\", \n width = 1200,\n height = 900,\n x_range = x_range_nash,\n y_axis_type=\"log\",\n y_axis_label='Delta',\n tools = TOOLS\n ) \np_deltas_eq.xaxis.major_label_orientation = 3.14/3\n\nlines_delta={}\nfor col in p_baseline.countries:\n lines_delta[col+' Nash'] = p_deltas_eq.line(x='run', y=col, \n source = ds_deltas_nash, color=next(colors_deltas_nash),\n line_width = 2, legend_label=col+' Nash')\n lines_delta[col+' coop equal'] = p_deltas_eq.line(x='run', y=col, \n source = ds_deltas_pop_weighted, color=next(colors_deltas_pop_weighted), line_dash='dashed', \n line_width = 2, legend_label=col+' coop equal')\n lines_delta[col+' coop negishi'] = p_deltas_eq.line(x='run', y=col, \n source = ds_deltas_negishi, color=next(colors_deltas_negishi), line_dash='dotted', \n line_width = 2, legend_label=col+' coop negishi')\n lines_delta[col+' coop 
equal'].visible = False\n lines_delta[col+' coop negishi'].visible = False\n \np_deltas_eq.legend.click_policy=\"hide\"\np_deltas_eq.legend.label_text_font_size = '8pt'\np_deltas_eq.legend.spacing = 0\np_deltas_eq.add_layout(p_deltas_eq.legend[0], 'right') \nhover_tool_deltas_eq = HoverTool()\nhover_tool_deltas_eq.tooltips = [\n (\"run\", \"@run\"),\n (\"value\", \"$y\")\n ] \np_deltas_eq.add_tools(hover_tool_deltas_eq)\n\ndata_table_deltas = pd.concat([deltas_nash.set_index('run'),\n deltas_negishi.set_index('run'),\n deltas_pop_weighted.set_index('run')],\n axis=0,\n keys=['Nash','Coop Negishi','Coop equal'],\n names=['type','run'],\n sort=False\n ).reset_index().sort_values('sorting',key=section_ser)[['run','type']+p_baseline.countries]\n\nsource_table_deltas = ColumnDataSource(data_table_deltas)\ncolumns_deltas = [TableColumn(field=col) for col in ['run','type']+p_baseline.countries+['Equal']+['Negishi']]\n\ntable_widget_deltas = DataTable(source=source_table_deltas, columns=columns_deltas, width=1100, height=400,\n )\n\ndef update_baseline_nash(attrname, old, new):\n baseline_nash_number = new\n welf_pop_weighted, welf_negishi, welf_nash = get_data_nash_coop(baseline_nash_number)\n \n ds_pop_weighted.data = welf_pop_weighted\n ds_negishi.data = welf_negishi\n ds_nash.data = welf_nash\n \n deltas_pop_weighted, deltas_negishi, deltas_nash = get_delta_nash_coop(baseline_nash_number)\n\n ds_deltas_negishi.data = deltas_negishi\n ds_deltas_pop_weighted.data = deltas_pop_weighted\n ds_deltas_nash.data = deltas_nash\n \n p_eq.x_range.factors = welf_nash['run'].to_list()\n p_deltas_eq.x_range.factors = welf_nash['run'].to_list()\n\nbaseline_nash_coop_select.on_change('value', update_baseline_nash)\n\nnash_coop_welfare_report = column(baseline_nash_coop_select,p_eq,table_widget_welfares)\nnash_coop_deltas_report = column(p_deltas_eq,table_widget_deltas)\n\n#!!! 
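\n\n# --- Hedged aside (added): why runs are ordered with section()/section_ser() instead of a\n# plain string sort -- version-like labels must compare numerically, piece by piece:\n# sorted(['1.10', '1.2', '1.9'])               # -> ['1.10', '1.2', '1.9']  (lexicographic, wrong)\n# sorted(['1.10', '1.2', '1.9'], key=section)  # -> ['1.2', '1.9', '1.10']  (numeric, intended)\n# Relatedly, the drop_duplicates(..., keep='last') calls above keep only the most recently\n# written row per (baseline, variation), since the equilibrium csv files accumulate reruns.\n\n#!!! 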
fourth panel\nfourth_panel = row(dyn_eq_dev_report, nash_coop_welfare_report, nash_coop_deltas_report)\n# fourth_panel = row(nash_coop_welfare_report, nash_coop_deltas_report)\n\n#%% dynamic solver\n\nbaseline_dyn = '1020'\ncountry_dyn = 'USA'\nsector_dyn = 'Patent'\n\nbaseline_dyn_select = Select(value=baseline_dyn, title='Baseline', \n # options=['501','604','607','608','609','610']\n options=['1020']\n )\n\nbaseline_dyn_path = results_path+'baseline_'+baseline_dyn+'_variations/'\nfiles_in_dir = next(os.walk(baseline_dyn_path))[1]\nrun_list = [f for f in files_in_dir if f[0].isnumeric()]\nrun_list = sorted(run_list, key=section)\nvariation_dyn_select = Select(value='baseline', title='Variation', \n options=['baseline']+run_list)\n\ndef update_list_of_runs_dyn(attr, old, new):\n baseline_dyn_path = results_path+'baseline_'+new+'_variations/'\n files_in_dir = next(os.walk(baseline_dyn_path))[1]\n run_list = [f for f in files_in_dir if f[0].isnumeric()]\n run_list = sorted(run_list, key=section)\n variation_dyn_select.options = ['baseline']+run_list\n\ncountry_dyn_select = Select(value='USA', title='Country delta to change', options=['USA', 'EUR', 'JAP', 'CHN', 'BRA', \n 'IND','CAN','KOR','RUS','AUS',\n 'MEX', 'ROW','World'])\nslider_dyn = Slider(start=-1, end=0.5, value=0, step=0.01, title=\"Log change of delta\") \n\nstate_computation = Div(text=\"Done\")\n\ndef make_time_evolution_df(dyn_sol):\n qties = ['w','l_R','l_Ae','l_Ao','price_indices','Z','g','r','profit']\n df = pd.DataFrame(index = pd.Index(qties,name='Quantity'), \n columns = ['Initial jump mean','Initial jump median',\n 'Typical time of evolution\\nmean','Typical time of evolution\\nmedian'])\n for qty in qties:\n a = dyn_sol.get_jump(qty)\n df.loc[qty,'Initial jump mean'] = a[0].round(2)\n df.loc[qty,'Initial jump median'] = a[1].round(2)\n b = dyn_sol.get_typical_time_evolution(qty)\n df.loc[qty,'Typical time of evolution\\nmean'] = b[0].round(2)\n df.loc[qty,'Typical time of evolution\\nmedian'] = b[1].round(2)\n return df\n\ndef fit_and_eval(vec,dyn_sol):\n fit = np.polyval(np.polyfit(dyn_sol.t_real,\n vec,\n dyn_sol.Nt),np.linspace(0,dyn_sol.t_inf,2001))\n return fit\n\ndef create_column_data_source_from_dyn_sol(dyn_sol):\n data_dyn = {}\n data_dyn['time'] = np.linspace(0,dyn_sol.t_inf,2001)\n for agg_qty in ['g']:\n data_dyn[agg_qty] = fit_and_eval(getattr(dyn_sol,agg_qty),dyn_sol)\n for c_qty in ['Z','r','price_indices','w','nominal_final_consumption','ratios_of_consumption_levels_change_not_normalized',\n 'integrand_welfare','second_term_sum_welfare','integral_welfare']:\n for i,c in enumerate(dyn_sol.countries):\n data_dyn[c_qty+c] = fit_and_eval(getattr(dyn_sol,c_qty)[i,:].ravel(),dyn_sol)\n for c_s_qty in ['l_R','psi_o_star','PSI_CD','l_Ao']:\n for i,c in enumerate(dyn_sol.countries):\n if c_s_qty in ['PSI_CD']:\n data_dyn[c_s_qty+c] = fit_and_eval(\n (getattr(dyn_sol,c_s_qty)+getattr(dyn_sol,c_s_qty+'_0')[...,None])[i,1,:].ravel(),dyn_sol)\n else:\n data_dyn[c_s_qty+c] = fit_and_eval(\n getattr(dyn_sol,c_s_qty)[i,1,:].ravel(),dyn_sol)\n for c_c_s_qty in ['l_Ae','PSI_MPD','PSI_MPND','PSI_MNP','profit']:\n if c_c_s_qty in ['PSI_MPD','PSI_MPND','PSI_MNP']:\n temp_sum_n = (getattr(dyn_sol,c_c_s_qty)+getattr(dyn_sol,c_c_s_qty+'_0')[...,None]).sum(axis=0)\n temp_sum_i = (getattr(dyn_sol,c_c_s_qty)+getattr(dyn_sol,c_c_s_qty+'_0')[...,None]).sum(axis=1)\n else:\n temp_sum_n = getattr(dyn_sol,c_c_s_qty).sum(axis=0)\n temp_sum_i = getattr(dyn_sol,c_c_s_qty).sum(axis=1)\n for i,c in enumerate(dyn_sol.countries):\n 
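# hedged note (added): 'sum_n_*' series aggregate the bilateral arrays over their first\n            # country axis and 'sum_i_*' over the second, at sector index 1 (patenting); for the\n            # PSI_M* state variables the *_0 initial levels are added back before summing.\n            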
data_dyn['sum_n_'+c_c_s_qty+c] = fit_and_eval(temp_sum_n[i,1,:].ravel(),dyn_sol)\n data_dyn['sum_i_'+c_c_s_qty+c] = fit_and_eval(temp_sum_i[i,1,:].ravel(),dyn_sol)\n for i,c in enumerate(dyn_sol.countries):\n data_dyn['real_final_consumption'+c] = fit_and_eval((getattr(dyn_sol,'nominal_final_consumption')[i,:]\n /getattr(dyn_sol,'price_indices')[i,:]).ravel(),dyn_sol)\n \n data_dyn_init = {}\n data_dyn_init['time'] = [0]\n for agg_qty in ['g']:\n data_dyn_init[agg_qty] = [getattr(dyn_sol.sol_init,agg_qty)]\n for c_qty in ['Z','price_indices','w','nominal_final_consumption']:\n for i,c in enumerate(dyn_sol.countries):\n data_dyn_init[c_qty+c] = [getattr(dyn_sol.sol_init,c_qty)[i]]\n for c_s_qty in ['l_R','psi_o_star','PSI_CD','l_Ao']:\n for i,c in enumerate(dyn_sol.countries):\n data_dyn_init[c_s_qty+c] = [getattr(dyn_sol.sol_init,c_s_qty)[i,1]]\n for c_c_s_qty in ['l_Ae','PSI_MPD','PSI_MPND','PSI_MNP','profit']:\n if c_c_s_qty == 'profit':\n temp_sum_n = (getattr(dyn_sol.sol_init,c_c_s_qty)*getattr(dyn_sol.sol_init,'w')[None,:,None]).sum(axis=0)[:,1]\n temp_sum_i = (getattr(dyn_sol.sol_init,c_c_s_qty)*getattr(dyn_sol.sol_init,'w')[None,:,None]).sum(axis=1)[:,1]\n else:\n temp_sum_n = getattr(dyn_sol.sol_init,c_c_s_qty).sum(axis=0)[:,1]\n temp_sum_i = getattr(dyn_sol.sol_init,c_c_s_qty).sum(axis=1)[:,1]\n for i,c in enumerate(dyn_sol.countries):\n data_dyn_init['sum_n_'+c_c_s_qty+c] = [temp_sum_n[i]]\n data_dyn_init['sum_i_'+c_c_s_qty+c] = [temp_sum_i[i]]\n for i,c in enumerate(dyn_sol.countries):\n data_dyn_init['real_final_consumption'+c] = [getattr(dyn_sol.sol_init,'nominal_final_consumption')[i]/getattr(dyn_sol.sol_init,'price_indices')[i]]\n data_dyn_init['r'+c] = [getattr(dyn_sol.sol_init,'r')]\n data_dyn_init['integrand_welfare'+c] = [None]\n data_dyn_init['integral_welfare'+c] = [None]\n data_dyn_init['second_term_sum_welfare'+c] = [None]\n data_dyn_init['ratios_of_consumption_levels_change_not_normalized'+c] = [None]\n \n data_dyn_fin = {}\n data_dyn_fin['time'] = [dyn_sol.t_inf]\n for agg_qty in ['g']:\n data_dyn_fin[agg_qty] = [getattr(dyn_sol.sol_fin,agg_qty)]\n for c_qty in ['Z','price_indices','w','nominal_final_consumption']:\n for i,c in enumerate(dyn_sol.countries):\n data_dyn_fin[c_qty+c] = [getattr(dyn_sol.sol_fin,c_qty)[i]]\n for c_s_qty in ['l_R','psi_o_star','PSI_CD','l_Ao']:\n for i,c in enumerate(dyn_sol.countries):\n data_dyn_fin[c_s_qty+c] = [getattr(dyn_sol.sol_fin,c_s_qty)[i,1]]\n for c_c_s_qty in ['l_Ae','PSI_MPD','PSI_MPND','PSI_MNP','profit']:\n if c_c_s_qty == 'profit':\n temp_sum_n = (getattr(dyn_sol.sol_fin,c_c_s_qty)*getattr(dyn_sol.sol_fin,'w')[None,:,None]).sum(axis=0)[:,1]\n temp_sum_i = (getattr(dyn_sol.sol_fin,c_c_s_qty)*getattr(dyn_sol.sol_fin,'w')[None,:,None]).sum(axis=1)[:,1]\n else:\n temp_sum_n = getattr(dyn_sol.sol_fin,c_c_s_qty).sum(axis=0)[:,1]\n temp_sum_i = getattr(dyn_sol.sol_fin,c_c_s_qty).sum(axis=1)[:,1]\n for i,c in enumerate(dyn_sol.countries):\n data_dyn_fin['sum_n_'+c_c_s_qty+c] = [temp_sum_n[i]]\n data_dyn_fin['sum_i_'+c_c_s_qty+c] = [temp_sum_i[i]]\n for i,c in enumerate(dyn_sol.countries):\n data_dyn_fin['real_final_consumption'+c] = [getattr(dyn_sol.sol_fin,'nominal_final_consumption')[i]/getattr(dyn_sol.sol_fin,'price_indices')[i]]\n data_dyn_fin['r'+c] = [getattr(dyn_sol.sol_fin,'r')]\n data_dyn_fin['integrand_welfare'+c] = [None]\n data_dyn_fin['integral_welfare'+c] = [None]\n data_dyn_fin['second_term_sum_welfare'+c] = [None]\n data_dyn_fin['ratios_of_consumption_levels_change_not_normalized'+c] = [None]\n \n return 
data_dyn, data_dyn_init, data_dyn_fin\n\ndef compute_dyn(event):\n if variation_dyn_select.value == 'baseline':\n path = results_path+baseline_dyn_select.value+'/'\n else:\n path = results_path+'baseline_'+baseline_dyn_select.value+'_variations/'+variation_dyn_select.value+'/'\n p_dyn, m_dyn, sol_dyn = load(path, data_path=data_path,\n dir_path=dir_path)\n p_dyn_cf = p_dyn.copy()\n if country_dyn_select.value != 'World':\n p_dyn_cf.delta[p_dyn.countries.index(country_dyn_select.value),1] = p_dyn_cf.delta[p_dyn.countries.index(country_dyn_select.value),1]*(10**slider_dyn.value)\n else:\n p_dyn_cf.delta[:,1] = p_dyn_cf.delta[:,1]*(10**slider_dyn.value)\n start = time.perf_counter()\n dyn_sol, sol_c, convergence = rough_dyn_fixed_point_solver(p_dyn_cf, sol_dyn, sol_fin = None,Nt=25,\n t_inf=500, x0=None, tol = 1e-14, max_count=1e6, safe_convergence=0.1,damping=50, damping_post_acceleration=10)\n end = time.perf_counter()\n if country_dyn_select.value == 'World':\n message = 'Done, computation for all deltas multiplied by a factor '+str(10**slider_dyn.value)+'
<br>Convergence : '+str(convergence)+'<br>
Computation time : '+str(end-start)\n else:\n message = 'Done, computation for delta '+country_dyn_select.value+' = '+str(p_dyn_cf.delta[p_dyn.countries.index(country_dyn_select.value),1])+'
<br>Convergence : '+str(convergence)+'<br>
Computation time : '+str(end-start)\n    state_computation.text = message\n    temp = create_column_data_source_from_dyn_sol(dyn_sol)\n    ds_dyn.data = temp[0]\n    ds_dyn_init.data = temp[1]\n    ds_dyn_fin.data = temp[2]\n    source_table_time_evol.data = make_time_evolution_df(dyn_sol)\n    \nif variation_dyn_select.value == 'baseline':\n    path = results_path+baseline_dyn_select.value+'/'\nelse:\n    path = results_path+'baseline_'+baseline_dyn_select.value+'_variations/'+variation_dyn_select.value+'/'\np_dyn, m_dyn, sol_dyn = load(path, data_path=data_path,\n                             dir_path=dir_path)\np_dyn_cf = p_dyn.copy()\nif country_dyn_select.value != 'World':\n    p_dyn_cf.delta[p_dyn.countries.index(country_dyn_select.value),1] = p_dyn_cf.delta[p_dyn.countries.index(country_dyn_select.value),1]*10**slider_dyn.value\nelse:\n    # fixed: scale by 10**slider value (the slider is a log change), as in compute_dyn above;\n    # multiplying by the raw slider value would zero out all deltas at the default value 0\n    p_dyn_cf.delta[:,1] = p_dyn_cf.delta[:,1]*(10**slider_dyn.value)\ndyn_sol, sol_c, convergence = rough_dyn_fixed_point_solver(p_dyn_cf, sol_dyn, sol_fin = None,Nt=25,\n                      t_inf=500, x0=None, tol = 1e-14, max_count=1e6, safe_convergence=0.1,damping=50, damping_post_acceleration=10)\n\nsource_table_time_evol = ColumnDataSource(make_time_evolution_df(dyn_sol))\ncolumns_time_evol = [TableColumn(field=col) for col in \n                     ['Quantity','Initial jump mean','Initial jump median',\n                      'Typical time of evolution\\nmean','Typical time of evolution\\nmedian']]\ntable_widget_time_evol = DataTable(source=source_table_time_evol, columns=columns_time_evol, width=600, height=750)\n\nbutton_compute_dyn = Button(label=\"Compute\",align='end')\nbutton_compute_dyn.on_event(ButtonClick, compute_dyn)\n\nqty_dyn_display_select = Select(value='g', title='Quantity', options=['g','Z','r','price_indices','w','nominal_final_consumption',\n                       'real_final_consumption','ratios_of_consumption_levels_change_not_normalized',\n                       'l_R','l_Ao','psi_o_star',\n                       'PSI_CD','integrand_welfare','integral_welfare','second_term_sum_welfare',\n                       'sum_n_l_Ae','sum_n_PSI_MPD','sum_n_PSI_MPND','sum_n_PSI_MNP','sum_n_profit',\n                       'sum_i_l_Ae','sum_i_PSI_MPD','sum_i_PSI_MPND','sum_i_PSI_MNP','sum_i_profit'])\ncountry_dyn_display_select = Select(value='USA', title='Country', options=['USA', 'EUR', 'JAP', 'CHN', 'BRA', \n                                    'IND','CAN','KOR','RUS','AUS',\n                                    'MEX', 'ROW'])\n\ntemp = create_column_data_source_from_dyn_sol(dyn_sol)\ndata_dyn_default = temp[0]\ndata_dyn_init_default = temp[1]\ndata_dyn_fin_default = temp[2]\nds_dyn = ColumnDataSource(data_dyn_default)\nds_dyn_init = ColumnDataSource(data_dyn_init_default)\nds_dyn_fin = ColumnDataSource(data_dyn_fin_default)\nup_max = max([max(ds_dyn.data['g']), max(ds_dyn_fin.data['g']), max(ds_dyn_init.data['g'])])\ndown_min = min([min(ds_dyn.data['g']), min(ds_dyn_fin.data['g']), min(ds_dyn_init.data['g'])])\ndelta = up_max-down_min\nif delta == 0:\n    delta = 1\np_dyn_figure = figure(title=\"Dynamic solver\",\n                      width = 1200,\n                      height = 750,\n                      x_axis_label='Time',\n                      y_axis_label='Value',\n                      tools = TOOLS,\n                      x_range = (-20,dyn_sol.t_inf+20),\n                      y_range=(down_min-delta*0.1,up_max+delta*0.1)\n                      )\n\nhover_tool_eq = HoverTool()\nhover_tool_eq.tooltips = [\n    (\"Time\", \"$x\"),\n    (\"value\", \"$y\")\n    ] \np_dyn_figure.add_tools(hover_tool_eq)\n\nlines_dyn = {}\nfor col in data_dyn_default.keys():\n    if col != 'time':  # fixed: compare to the string key, not the time module\n        lines_dyn[col] = p_dyn_figure.line(x='time', y=col, source = ds_dyn)\n        if col != 'g':\n            lines_dyn[col].visible = False\n\ninit_dyn = {}\nfor col in data_dyn_init_default.keys():\n    if col != 'time':  # fixed: same module-vs-string comparison\n        init_dyn[col] = p_dyn_figure.circle(x='time', y=col, source = ds_dyn_init, color='red',size=8)\n        if col != 'g':\n            init_dyn[col].visible = 
False\n \nfin_dyn = {}\nfor col in data_dyn_fin_default.keys():\n if col != time:\n fin_dyn[col] = p_dyn_figure.circle(x='time', y=col, source = ds_dyn_fin, color='red',size=8)\n if col != 'g':\n fin_dyn[col].visible = False\n\ndef update_graph_dyn(event):\n if qty_dyn_display_select.value in ['g']:\n col = qty_dyn_display_select.value\n elif qty_dyn_display_select.value in ['Z','r','price_indices','w','nominal_final_consumption','real_final_consumption',\n 'ratios_of_consumption_levels_change_not_normalized',\n 'l_R','l_Ao','psi_o_star','PSI_CD','integrand_welfare','integral_welfare','second_term_sum_welfare',\n 'sum_n_l_Ae','sum_n_PSI_MPD','sum_n_PSI_MPND','sum_n_PSI_MNP','sum_n_profit',\n 'sum_i_l_Ae','sum_i_PSI_MPD','sum_i_PSI_MPND','sum_i_PSI_MNP','sum_i_profit']:\n col = qty_dyn_display_select.value+country_dyn_display_select.value\n lines_dyn[col].visible = True\n if qty_dyn_display_select.value not in ['integrand_welfare','integral_welfare',\n 'second_term_sum_welfare','ratios_of_consumption_levels_change_not_normalized']:\n init_dyn[col].visible = True\n fin_dyn[col].visible = True\n else:\n init_dyn[col].visible = False\n fin_dyn[col].visible = False\n\n for other_column in lines_dyn:\n if other_column != col:\n lines_dyn[other_column].visible = False\n init_dyn[other_column].visible = False\n fin_dyn[other_column].visible = False\n try:\n up_max = max([max(ds_dyn.data[col]), max(ds_dyn_fin.data[col]), max(ds_dyn_init.data[col])])\n down_min = min([min(ds_dyn.data[col]), min(ds_dyn_fin.data[col]), min(ds_dyn_init.data[col])])\n except:\n up_max = max(ds_dyn.data[col])\n down_min = min(ds_dyn.data[col])\n delta = up_max-down_min\n if delta == 0:\n delta = 1\n p_dyn_figure.y_range.start=down_min-delta*0.1\n p_dyn_figure.y_range.end=up_max+delta*0.1\n p_dyn_figure.x_range.start=-20\n p_dyn_figure.x_range.end=dyn_sol.t_inf+20\n \nbutton_display_dyn = Button(label=\"Display\",align='end')\nbutton_display_dyn.on_event(ButtonClick, update_graph_dyn)\n\ncontrols_dyn = row(baseline_dyn_select, variation_dyn_select, country_dyn_select, slider_dyn, button_compute_dyn, state_computation)\ncontrols_display_dyn = row(qty_dyn_display_select, \n country_dyn_display_select,\n button_display_dyn)\n\nbaseline_dyn_select.on_change('value', update_list_of_runs_dyn)\n\ndyn_report = column(controls_dyn,controls_display_dyn,p_dyn_figure)\n\n\n#!!! 
fifth_panel\n# fifth_panel = row(counterfactuals_report, counterfactuals_to_report)\nfifth_panel = row(dyn_report,table_widget_time_evol)\n\n#%% sensitivities\n\nbaselines_dic_sensi = {}\n\nfor baseline_nbr in ['1004']:\n baselines_dic_sensi[baseline_nbr] = {} \n baseline_sensi_path = results_path+'baseline_'+baseline_nbr+'_sensitivity_tables/'\n files_in_dir = os.listdir(baseline_sensi_path)\n files_in_dir = [ filename for filename in files_in_dir if filename.endswith('.csv') ]\n for f in files_in_dir:\n baselines_dic_sensi[baseline_nbr][f[:-4]] = pd.read_csv(baseline_sensi_path+f,index_col = 0)\n \nbaseline_sensi = '1004'\nqty_sensi = 'objective'\n\nbaseline_sensi_select = Select(value=baseline_sensi, title='Baseline', options=sorted(baselines_dic_sensi.keys()))\nqty_sensi_select = Select(value=qty_sensi, title='Quantity', options=sorted(baselines_dic_sensi[baseline_sensi].keys()))\n\nds_sensi = ColumnDataSource(baselines_dic_sensi[baseline_sensi][qty_sensi])\np_sensi = figure(title=\"Sensitivity\", \n width = 1200,\n height = 850,\n x_axis_label='Change in moment or parameter',\n y_axis_label='Value',\n tools = TOOLS)\n\ncolors_sensi = itertools.cycle(Category18)\n\nfor col in baselines_dic_sensi[baseline_sensi][qty_sensi].columns[1:]:\n if col!='zeta':\n p_sensi.line(x='Change', y=col, source = ds_sensi, color=next(colors_sensi),line_width = 2, legend_label=col)\n\np_sensi.legend.click_policy=\"hide\"\np_sensi.legend.label_text_font_size = '8pt'\np_sensi.add_layout(p_sensi.legend[0], 'right')\n\ndef update_baseline_sensi(attrname, old, new):\n qty_sensi = qty_sensi_select.value\n ds_sensi.data = baselines_dic_sensi[new][qty_sensi]\n \ndef update_qty_sensi(attrname, old, new):\n baseline_sensi = baseline_sensi_select.value\n ds_sensi.data = baselines_dic_sensi[baseline_sensi][new]\n\ncontrols_sensi = row(baseline_sensi_select, qty_sensi_select)\n\nbaseline_sensi_select.on_change('value', update_baseline_sensi)\nqty_sensi_select.on_change('value', update_qty_sensi)\n\nsensitivity_report = column(controls_sensi,p_sensi)\n\n# %% weights sensitivities\n\nbaselines_dic_sensi_weights = {}\n\n# for baseline_nbr in ['101','102','104']:\nfor baseline_nbr in ['802']:\n baselines_dic_sensi_weights[baseline_nbr] = {}\n baseline_sensi_weights_path = results_path+'baseline_'+baseline_nbr+'_sensitivity_weights_tables/'\n files_in_dir = os.listdir(baseline_sensi_weights_path)\n files_in_dir = [ filename for filename in files_in_dir if filename.endswith('.csv') ]\n for f in files_in_dir:\n # if f not in ['GPDIFF.csv','GROWTH.csv']:\n baselines_dic_sensi_weights[baseline_nbr][f[:-4]] = pd.read_csv(baseline_sensi_weights_path+f,index_col = 0)\n \nbaseline_sensi_weights = '802'\nqty_sensi_weights = 'objective'\n\nbaseline_sensi_weights_select = Select(value=baseline_sensi_weights, title='Baseline', options=sorted(baselines_dic_sensi_weights.keys()))\nqty_sensi_weights_select = Select(value=qty_sensi_weights, title='Quantity', options=sorted(baselines_dic_sensi_weights[baseline_sensi_weights].keys()))\n\nds_sensi_weights = ColumnDataSource(baselines_dic_sensi_weights[baseline_sensi_weights][qty_sensi_weights])\np_sensi_weights = figure(title=\"Sensitivity to the weights\", \n width = 1200,\n height = 850,\n x_axis_label='Change in weight',\n y_axis_label='Objective function or contribution to objective function: loss(moment,target)',\n y_axis_type=\"log\",\n tools = TOOLS)\n\ncolors_sensi_weights = itertools.cycle(Category18)\n\nfor col in 
baselines_dic_sensi_weights[baseline_sensi_weights][qty_sensi_weights].columns[1:]:\n # if col not in ['zeta','GPDIFF_weight','GROWTH_weight']:\n p_sensi_weights.line(x='Change', y=col, source = ds_sensi_weights, color=next(colors_sensi_weights),line_width = 2, \n legend_label=col)\n\np_sensi_weights.legend.click_policy=\"hide\"\np_sensi_weights.legend.label_text_font_size = '8pt'\np_sensi_weights.add_layout(p_sensi_weights.legend[0], 'right')\n\ndef update_baseline_sensi_weights(attrname, old, new):\n qty_sensi_weights = qty_sensi_weights_select.value\n ds_sensi_weights.data = baselines_dic_sensi_weights[new][qty_sensi_weights]\n \ndef update_qty_sensi_weights(attrname, old, new):\n baseline_sensi_weights = baseline_sensi_weights_select.value\n ds_sensi_weights.data = baselines_dic_sensi_weights[baseline_sensi_weights][new]\n\ncontrols_sensi_weights = row(baseline_sensi_weights_select, qty_sensi_weights_select)\n\nbaseline_sensi_weights_select.on_change('value', update_baseline_sensi_weights)\nqty_sensi_weights_select.on_change('value', update_qty_sensi_weights)\n\nsensitivity_weights_report = column(controls_sensi_weights,p_sensi_weights)\n\n#%% Jacobian panel\n\n# baseline_jac = '1010'\n# country_jac = 'USA'\n# sector_jac = 'Patent'\n\n# # baseline_jac_select = Select(value=baseline_jac, title='Baseline', options=['501','604','607','608','609','610'])\n# baseline_jac_select = Select(value=baseline_jac, title='Baseline', options=['1010'])\n\n# baseline_jac_path = results_path+'baseline_'+baseline_jac+'_variations/'\n# files_in_dir = next(os.walk(baseline_jac_path))[1]\n# run_list = [f for f in files_in_dir if f[0].isnumeric()]\n# run_list = sorted(run_list, key=section)\n# variation_jac_select = Select(value='baseline', title='Variation', \n# options=['baseline']+run_list)\n\n# def update_list_of_runs_jac(attr, old, new):\n# baseline_jac_path = results_path+'baseline_'+new+'_variations/'\n# files_in_dir = next(os.walk(baseline_jac_path))[1]\n# run_list = [f for f in files_in_dir if f[0].isnumeric()]\n# run_list = sorted(run_list, key=section)\n# variation_jac_select.options = ['baseline']+run_list\n\n# if variation_jac_select.value == 'baseline':\n# path = results_path+baseline_jac_select.value+'/'\n# else:\n# path = results_path+'baseline_'+baseline_jac_select.value+'_variations/'+variation_jac_select.value+'/'\n \n# p_jac, m_jac, sol_jac = load(path, data_path=data_path,\n# dir_path=dir_path)\n\n# qty_jac_select = Select(value='delta', title='Parameter', options=p_jac.calib_parameters)\n# country_jac_select = Select(value='USA', title='Country', options=p_jac.countries)\n# sector_jac_select = Select(value='Patent', title='Sector', options=p_jac.sectors)\n\n# if qty_jac_select.value in ['eta','T','delta','nu']:\n# idx_to_change_jac = p_jac.countries.index(country_jac_select.value),p_jac.sectors.index(sector_jac_select.value)\n# if qty_jac_select.value in ['fe','zeta','nu', 'fo']:\n# idx_to_change_jac = 0,p_jac.sectors.index(sector_jac_select.value)\n# if qty_jac_select.value in ['k','g_0']:\n# idx_to_change_jac = 0\n\n# qty_to_change_jac = qty_jac_select.value\n\n# x_jac = compute_rough_jacobian(p_jac, m_jac, qty_to_change_jac, idx_to_change_jac, \n# change_by = 0.25, tol = 1e-14, damping = 5,\n# max_count = 5e3)\n\n# p_jac_fig = figure(title=\"Rough jacobian computation\", \n# y_range=FactorRange(factors=m_jac.get_signature_list()),\n# width = 1500,\n# height = 1200,\n# x_axis_label='Change in contribution to objective function',\n# y_axis_label='Moment',\n# tools = TOOLS) \n\n# 
data_jac = pd.DataFrame(columns = ['Moment','Contribution'], data=np.array([np.array(m_jac.get_signature_list()),x_jac]).T)\n# src_jac = ColumnDataSource(data_jac)\n\n# p_jac_fig.hbar(y = 'Moment',right = 'Contribution', source = src_jac)\n\n# hover_tool_jac = HoverTool()\n# hover_tool_jac.tooltips = [\n# (\"(Moment)\", \"(@Moment)\"),\n# ]\n# p_jac_fig.add_tools(hover_tool_jac)\n\n\n# def update_jac(event):\n# if variation_jac_select.value == 'baseline':\n# path = results_path+baseline_jac_select.value+'/'\n# else:\n# path = results_path+'baseline_'+baseline_jac_select.value+'_variations/'+variation_jac_select.value+'/'\n# par_jac, m_jac, sol_jac = load(path, data_path=data_path,\n# dir_path=dir_path)\n# if qty_jac_select.value in ['eta','T','delta','nu']:\n# idx_to_change_jac = par_jac.countries.index(country_jac_select.value),par_jac.sectors.index(sector_jac_select.value)\n# if qty_jac_select.value in ['fe','zeta','nu', 'fo']:\n# idx_to_change_jac = par_jac.sectors.index(sector_jac_select.value)\n# if qty_jac_select.value in ['k','g_0']:\n# idx_to_change_jac = None\n# x_jac = compute_rough_jacobian(par_jac, m_jac, qty_jac_select.value, idx_to_change_jac, \n# change_by = 0.1, tol = 1e-14, damping = 5,\n# max_count = 5e3)\n# data_jac = pd.DataFrame(columns = ['Moment','Contribution'], data=np.array([np.array(m_jac.get_signature_list()),x_jac]).T)\n# src_jac.data = data_jac\n# p_jac_fig.y_range.factors = m_jac.get_signature_list()\n\n# button_jac = Button(label=\"Compute\")\n# button_jac.on_event(ButtonClick, update_jac)\n\n# controls_jac = row(baseline_jac_select, variation_jac_select, qty_jac_select, \n# country_jac_select, sector_jac_select, button_jac)\n\n# baseline_jac_select.on_change('value', update_list_of_runs_jac)\n\n# jac_report = column(controls_jac,p_jac_fig)\n\n#!!! 
sixth panel\n# sixth_panel = row(sensitivity_report,sensitivity_weights_report,jac_report)\nsixth_panel = row(sensitivity_report,sensitivity_weights_report)\n\n#%% Kogan paper\n\ncolors_kog = itertools.cycle(Category18)\n\ndf_kog = pd.read_csv(data_path+'koga_updated.csv')\nds_kog = ColumnDataSource(df_kog)\n\np_kog = figure(title=\"Kogan moment updated / extrapolated\", \n width = 1200,\n height = 850,\n x_axis_label='Issue Date',\n y_axis_type=\"log\",\n tools = TOOLS) \n\nl_kog = {}\n\nfor i,col in enumerate(df_kog.columns):\n if col not in ['issue_date']:\n l_kog[i] = p_kog.line(x='issue_date', y=col, \n source = ds_kog, \n line_width = 2, legend_label=col, color=next(colors_kog),\n name = col)\n\nhover_tool_kog = HoverTool(\n tooltips = [\n (\"Issue date\", \"$x\"),\n ('ValuePerPatent', '@ValuePerPatent'),\n ('CostPerPatent', '@CostPerPatent'),\n ('KM_article', '@KM_article'),\n ('ValuePerPatentUpdated', '@ValuePerPatentUpdated'),\n ('CostPerPatentExtrapolated', '@CostPerPatentExtrapolated'),\n ('KM_extrapolatedCost', '@KM_extrapolatedCost')\n ],\n mode='vline',\n renderers = [l_kog[4]]\n)\np_kog.add_tools(hover_tool_kog)\n\np_kog.legend.click_policy=\"hide\"\np_kog.legend.label_text_font_size = '8pt'\np_kog.add_layout(p_kog.legend[0], 'right')\n\n\n# colors_kog2 = itertools.cycle(Category18)\n\n# df_kog2 = pd.read_csv(data_path+'KM_prior.csv')\n# ds_kog2 = ColumnDataSource(df_kog2)\n\n# p_kog2 = figure(title=\"Kogan moment\", \n# width = 1200,\n# height = 850,\n# x_axis_label='Market Prior',\n# tools = TOOLS) \n\n# l_kog2 = {}\n\n# for i,col in enumerate(df_kog2.columns):\n# if col not in ['market prior']:\n# l_kog2[i] = p_kog2.line(x='market prior', y=col, \n# source = ds_kog2, \n# line_width = 2, legend_label=col, color=next(colors_kog2))\n\n# hover_tool_kog2 = HoverTool(\n# tooltips = [\n# (\"market prior\", \"$x\"),\n# ('1950 to 2007', '@from1950to2007'),\n# ('1980 to 2007', '@from1980to2007'),\n# ('1995 to 2007', '@from1995to2007'),\n# ('2002 to 2007', '@from2002to2007'),\n# ('1950 to 2020', '@from1950to2020'),\n# ('1980 to 2020', '@from1980to2020'),\n# ('1995 to 2020', '@from1995to2020'),\n# ('2002 to 2020', '@from2002to2020'),\n# ],\n# mode='vline',\n# renderers = [l_kog2[4]]\n# )\n\n# p_kog2.legend.click_policy=\"hide\"\n# p_kog2.legend.label_text_font_size = '8pt'\n# p_kog2.add_layout(p_kog2.legend[0], 'right')\n# p_kog2.add_tools(hover_tool_kog2)\n\n\ncolors_to_data = itertools.cycle(Category18)\n\ndf_to_data = pd.read_csv(data_path+'turnover_imports_weighted_11_countries.csv'\n )[['year','HS_digits','A3']].pivot(\n columns= 'HS_digits',\n index = 'year',\n values = 'A3'\n )[[6,8,10]]\ndf_to_data = df_to_data.rename(columns={6:'6',\n 8:'8',\n 10:'10'})\nds_to_data = ColumnDataSource(df_to_data)\n\np_to_data = figure(title=\"Turnover moment for rule A3, time window (y,y+5)\", \n width = 1200,\n height = 850,\n x_axis_label='Year',\n # y_axis_type=\"log\",\n tools = TOOLS) \n\nl_to_data = {}\n\nfor i,col in enumerate(['6','8','10']):\n if col not in ['year']:\n l_to_data[i] = p_to_data.line(x='year', y=col, \n source = ds_to_data, \n line_width = 2, legend_label=col, color=next(colors_to_data),\n name = col)\n\nhover_tool_to_data = HoverTool(\n tooltips = [\n (\"Year\", \"$x\"),\n ('HS6', '@6'),\n ('HS8', '@8'),\n ('HS10', '@10'),\n ],\n mode='vline',\n renderers = [l_to_data[1]]\n)\np_to_data.add_tools(hover_tool_to_data)\n\np_to_data.legend.click_policy=\"hide\"\np_to_data.legend.label_text_font_size = '8pt'\np_to_data.add_layout(p_to_data.legend[0], 'right')\n\n#!!! 
seventh_panel\n# seventh_panel = row(p_kog,p_kog2)\nseventh_panel = row(p_kog,p_to_data)\n\n#%% 7 countries comparison of patent flows data\n\n# # labels_leg_patstat = {\n# # 'baseline':'pre IN treatment',\n# # 'calibration data':'calibration data',\n# # 'WIPO data':'WIPO data',\n# # 'alternative 1':'alt 1 : no sector filtering',\n# # 'alternative 2':'alt 2 : first applicant only',\n# # 'alternative 3':'alt 3 : diff origin weight',\n# # 'alternative 4':'alt 4 : no domestic allocation',\n# # 'alternative 5':'alt 5 : only granted patents',\n# # 'alternative 6':'alt 6 : no ML predi for EPO',\n# # 'alternative 7':'alt 7 : with ML predi for WIPO',\n# # 'after IN treatment':'baseline',\n# # 'julian latest code':'julian latest code',\n# # }\n# # tot = pd.read_csv(join(dirname(__file__),'patstat_compar.csv')).set_index(\n# # ['destination_code','origin_code']\n# # ).sort_index(\n# # ).round()\n\n# # ds_patstat = ColumnDataSource(tot)\n# # # TOOLS=\"pan,wheel_zoom,box_zoom,reset,save\"\n# # p_patstat = figure(title=\"Patent flows\", \n# # width = 1200,\n# # height = 850,\n# # x_axis_type=\"log\",\n# # y_axis_type=\"log\",\n# # x_axis_label='Baseline', \n# # # y_axis_label='Model implied',\n# # tools = TOOLS)\n# # hover_tool = HoverTool()\n# # hover_tool.tooltips = [\n# # (\"index\", \"@x\"),\n# # (\"(baseline,alternative)\", \"($x,$y)\"),\n# # ]\n# # # labels_patstat = LabelSet(x='calibration data', y='baseline', text='x',\n# # labels_patstat = LabelSet(y='WIPO data', x='after IN treatment', text='x',\n# # x_offset=2, y_offset=2, source=ds_patstat, text_font_size=\"7pt\")\n# # p_patstat.add_layout(labels_patstat)\n# # p_patstat.add_tools(hover_tool)\n\n# # slope_patstat = Slope(gradient=1, y_intercept=0,\n# # line_color='black', line_dash='dashed', line_width=1)\n# # p_patstat.add_layout(slope_patstat)\n# # lines_patstat = {}\n# # colors_patstat = itertools.cycle(Category18)\n# # for i,col in enumerate(tot.columns):\n# # if col not in ['x','after IN treatment']:\n# # # lines_patstat[col] = p_patstat.circle('calibration data', col, \n# # lines_patstat[col] = p_patstat.circle('after IN treatment', col, \n# # source = ds_patstat, \n# # size=5, color=next(colors_patstat))\n# # if col != 'WIPO data':\n# # lines_patstat[col].visible = False\n \n# # legend_items = [LegendItem(label=labels_leg_patstat[col], renderers=[lin_par])\n# # for col, lin_par in lines_patstat.items() if col not in \n# # # ['x','calibration data']]\n# # ['x','after IN treatment']]\n\n# # legend = Legend(items=legend_items, click_policy=\"hide\", \n# # label_text_font_size=\"8pt\",\n# # spacing = 0, \n# # )\n# # p_patstat.add_layout(legend, 'right')\n\n# # columns_patstat = [\n# # TableColumn(field=\"x\"),\n# # ]+[TableColumn(field=col) for col in tot.columns]\n# # data_table_patstat = DataTable(source=ds_patstat, columns = columns_patstat, width=1200, height=400)\n\n# # #%% 13 countries comparison of patent flows data\n\n# # tot_13 = pd.read_csv(join(dirname(__file__),'patstat_compar_13.csv')).set_index(\n# # ['destination_code','origin_code']\n# # ).sort_index(\n# # ).round()\n\n# # ds_patstat_13 = ColumnDataSource(tot_13)\n# # # TOOLS=\"pan,wheel_zoom,box_zoom,reset,save\"\n# # p_patstat_13 = figure(title=\"Patent flows\", \n# # width = 1200,\n# # height = 850,\n# # x_axis_type=\"log\",\n# # y_axis_type=\"log\",\n# # x_axis_label='Baseline', \n# # # y_axis_label='Model implied',\n# # tools = TOOLS)\n# # hover_tool = HoverTool()\n# # hover_tool.tooltips = [\n# # (\"index\", \"@x\"),\n# # (\"(baseline,alternative)\", \"($x,$y)\"),\n# 
# ]\n# # # labels_patstat = LabelSet(x='calibration data', y='baseline', text='x',\n# # labels_patstat_13 = LabelSet(y='WIPO data', x='baseline', text='x',\n# # x_offset=2, y_offset=2, source=ds_patstat_13, text_font_size=\"7pt\")\n# # p_patstat_13.add_layout(labels_patstat_13)\n# # p_patstat_13.add_tools(hover_tool)\n\n# # slope_patstat_13 = Slope(gradient=1, y_intercept=0,\n# # line_color='black', line_dash='dashed', line_width=1)\n# # p_patstat_13.add_layout(slope_patstat_13)\n# # lines_patstat_13 = {}\n# # colors_patstat_13 = itertools.cycle(Category18)\n# # for i,col in enumerate(tot_13.columns):\n# # if col not in ['x','baseline']:\n# # # lines_patstat[col] = p_patstat.circle('calibration data', col, \n# # lines_patstat_13[col] = p_patstat_13.circle('baseline', col, \n# # source = ds_patstat_13, \n# # size=5, color=next(colors_patstat_13))\n# # legend_items_13 = [LegendItem(label=labels_leg_patstat[col], renderers=[lin_par])\n# # for col, lin_par in lines_patstat_13.items() if col not in \n# # # ['x','calibration data']]\n# # ['x','baseline']]\n\n# # legend_13 = Legend(items=legend_items_13, click_policy=\"hide\", \n# # label_text_font_size=\"8pt\",\n# # spacing = 0, \n# # )\n# # p_patstat_13.add_layout(legend_13, 'right')\n\n# # columns_patstat_13 = [\n# # TableColumn(field=\"x\"),\n# # ]+[TableColumn(field=col) for col in tot_13.columns]\n# # data_table_patstat_13 = DataTable(source=ds_patstat_13, columns = columns_patstat_13, width=1200, height=400)\n\n\n#!!! eigth_panel\n# # eigth_panel = row(column(p_patstat,data_table_patstat),\n# # column(p_patstat_13,data_table_patstat_13))\n\n#%% build curdoc\nprint(time.perf_counter() - start)\ncurdoc().add_root(column(\n first_panel, \n second_panel, \n third_panel, \n fourth_panel, \n fifth_panel, \n sixth_panel,\n seventh_panel,\n # eigth_panel\n )\n )\n", "repo_name": "todortodor/pyTRIPS", "sub_path": "bokeh-app/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 133340, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "bokeh.palettes.Category10", "line_number": 14, "usage_type": "name"}, {"api_name": "bokeh.palettes.Dark2", "line_number": 14, "usage_type": "name"}, {"api_name": "warnings.simplefilter", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.RankWarning", "line_number": 17, "usage_type": "attribute"}, {"api_name": "warnings.filterwarnings", "line_number": 19, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 21, "usage_type": "call"}, {"api_name": "classes.parameters", "line_number": 26, "usage_type": "call"}, {"api_name": "classes.var.var_from_vector", "line_number": 29, "usage_type": "call"}, {"api_name": "classes.var", "line_number": 29, "usage_type": "name"}, {"api_name": "classes.moments", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 67, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 71, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 74, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 83, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 96, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 108, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 116, "usage_type": 
"call"}, {"api_name": "pandas.DataFrame", "line_number": 124, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 192, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 197, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 197, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 198, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 198, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 200, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 200, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 201, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 201, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 971, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 980, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 1009, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 1032, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 1033, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 1034, "usage_type": "call"}, {"api_name": "bokeh.models.Toggle", "line_number": 1035, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 1040, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 1041, "usage_type": "call"}, {"api_name": "bokeh.models.HoverTool", "line_number": 1049, "usage_type": "call"}, {"api_name": "bokeh.models.LabelSet", "line_number": 1054, "usage_type": "call"}, {"api_name": "bokeh.models.Slope", "line_number": 1058, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 1081, "usage_type": "call"}, {"api_name": "bokeh.models.LegendItem", "line_number": 1093, "usage_type": "call"}, {"api_name": "bokeh.models.Legend", "line_number": 1096, "usage_type": "call"}, {"api_name": "bokeh.models.TableColumn", "line_number": 1115, "usage_type": "call"}, {"api_name": "bokeh.models.TableColumn", "line_number": 1116, "usage_type": "call"}, {"api_name": "bokeh.models.DataTable", "line_number": 1117, "usage_type": "call"}, {"api_name": "bokeh.models.LegendItem", "line_number": 1125, "usage_type": "call"}, {"api_name": "bokeh.models.TableColumn", "line_number": 1131, "usage_type": "call"}, {"api_name": "bokeh.models.TableColumn", "line_number": 1132, "usage_type": "call"}, {"api_name": "classes.moments", "line_number": 1157, "usage_type": "call"}, {"api_name": "classes.moments", "line_number": 1163, "usage_type": "call"}, {"api_name": "bokeh.layouts.row", "line_number": 1174, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 1186, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 1187, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 1207, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 1208, "usage_type": "call"}, {"api_name": "bokeh.models.HoverTool", "line_number": 1214, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 1221, "usage_type": "call"}, {"api_name": 
"bokeh.models.LegendItem", "line_number": 1230, "usage_type": "call"}, {"api_name": "bokeh.models.Legend", "line_number": 1232, "usage_type": "call"}, {"api_name": "bokeh.models.TableColumn", "line_number": 1250, "usage_type": "call"}, {"api_name": "bokeh.models.TableColumn", "line_number": 1251, "usage_type": "call"}, {"api_name": "bokeh.models.DataTable", "line_number": 1253, "usage_type": "call"}, {"api_name": "bokeh.models.LegendItem", "line_number": 1261, "usage_type": "call"}, {"api_name": "bokeh.models.TableColumn", "line_number": 1268, "usage_type": "call"}, {"api_name": "bokeh.models.TableColumn", "line_number": 1269, "usage_type": "call"}, {"api_name": "bokeh.layouts.row", "line_number": 1279, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 1287, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 1288, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 1291, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 1292, "usage_type": "call"}, {"api_name": "bokeh.models.HoverTool", "line_number": 1298, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 1305, "usage_type": "call"}, {"api_name": "bokeh.models.LegendItem", "line_number": 1315, "usage_type": "call"}, {"api_name": "bokeh.models.Legend", "line_number": 1318, "usage_type": "call"}, {"api_name": "bokeh.models.TableColumn", "line_number": 1337, "usage_type": "call"}, {"api_name": "bokeh.models.TableColumn", "line_number": 1338, "usage_type": "call"}, {"api_name": "bokeh.models.DataTable", "line_number": 1340, "usage_type": "call"}, {"api_name": "bokeh.models.LegendItem", "line_number": 1348, "usage_type": "call"}, {"api_name": "bokeh.models.TableColumn", "line_number": 1353, "usage_type": "call"}, {"api_name": "bokeh.layouts.row", "line_number": 1360, "usage_type": "call"}, {"api_name": "bokeh.layouts.column", "line_number": 1365, "usage_type": "call"}, {"api_name": "bokeh.layouts.column", "line_number": 1366, "usage_type": "call"}, {"api_name": "bokeh.layouts.column", "line_number": 1367, "usage_type": "call"}, {"api_name": "bokeh.layouts.row", "line_number": 1370, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 1372, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 1381, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 1382, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 1404, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 1405, "usage_type": "call"}, {"api_name": "bokeh.models.HoverTool", "line_number": 1410, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 1417, "usage_type": "call"}, {"api_name": "bokeh.models.LegendItem", "line_number": 1429, "usage_type": "call"}, {"api_name": "bokeh.models.Legend", "line_number": 1431, "usage_type": "call"}, {"api_name": "bokeh.models.LegendItem", "line_number": 1440, "usage_type": "call"}, {"api_name": "bokeh.models.LegendItem", "line_number": 1443, "usage_type": "call"}, {"api_name": "bokeh.models.LegendItem", "line_number": 1451, "usage_type": "call"}, {"api_name": "bokeh.models.LegendItem", "line_number": 1454, "usage_type": "call"}, {"api_name": "bokeh.layouts.row", "line_number": 1458, "usage_type": "call"}, {"api_name": "bokeh.layouts.column", "line_number": 1463, "usage_type": "call"}, {"api_name": "bokeh.layouts.row", "line_number": 1486, "usage_type": "call"}, {"api_name": "os.listdir", 
"line_number": 1499, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 1528, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 1529, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1535, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 1537, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 1537, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 1539, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 1539, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 1544, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 1552, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 1554, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 1556, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 1557, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 1559, "usage_type": "call"}, {"api_name": "bokeh.layouts.row", "line_number": 1590, "usage_type": "call"}, {"api_name": "bokeh.layouts.column", "line_number": 1595, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 1700, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 1701, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1707, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 1712, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 1720, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 1722, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 1724, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 1725, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 1727, "usage_type": "call"}, {"api_name": "bokeh.layouts.row", "line_number": 1759, "usage_type": "call"}, {"api_name": "bokeh.layouts.column", "line_number": 1764, "usage_type": "call"}, {"api_name": "bokeh.layouts.row", "line_number": 1855, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 1863, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 1873, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 1876, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1880, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1887, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1896, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1904, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1914, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1922, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1931, "usage_type": "call"}, {"api_name": "pandas.Index", "line_number": 1931, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1945, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1948, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1951, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 1955, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 1958, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 1960, "usage_type": "call"}, {"api_name": "bokeh.models.HoverTool", "line_number": 
1997, "usage_type": "call"}, {"api_name": "bokeh.models.LabelSet", "line_number": 2005, "usage_type": "call"}, {"api_name": "bokeh.layouts.row", "line_number": 2037, "usage_type": "call"}, {"api_name": "bokeh.layouts.column", "line_number": 2043, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 2048, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 2056, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 2058, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 2081, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 2089, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 2090, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 2091, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 2093, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 2094, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 2095, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 2099, "usage_type": "call"}, {"api_name": "bokeh.models.HoverTool", "line_number": 2125, "usage_type": "call"}, {"api_name": "bokeh.models.TableColumn", "line_number": 2133, "usage_type": "call"}, {"api_name": "bokeh.models.TableColumn", "line_number": 2134, "usage_type": "call"}, {"api_name": "bokeh.models.Div", "line_number": 2137, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 2143, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 2152, "usage_type": "call"}, {"api_name": "bokeh.models.TableColumn", "line_number": 2153, "usage_type": "call"}, {"api_name": "bokeh.models.DataTable", "line_number": 2155, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 2159, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 2161, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 2186, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 2187, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 2188, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 2190, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 2191, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 2192, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 2194, "usage_type": "call"}, {"api_name": "bokeh.models.HoverTool", "line_number": 2222, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 2229, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 2238, "usage_type": "call"}, {"api_name": "bokeh.models.TableColumn", "line_number": 2239, "usage_type": "call"}, {"api_name": "bokeh.models.DataTable", "line_number": 2241, "usage_type": "call"}, {"api_name": "bokeh.layouts.column", "line_number": 2263, "usage_type": "call"}, {"api_name": "bokeh.layouts.column", "line_number": 2264, "usage_type": "call"}, {"api_name": "bokeh.layouts.row", "line_number": 2267, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 2276, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 2282, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 2285, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 2290, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 
2295, "usage_type": "call"}, {"api_name": "bokeh.models.Slider", "line_number": 2298, "usage_type": "call"}, {"api_name": "bokeh.models.Div", "line_number": 2300, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 2304, "usage_type": "call"}, {"api_name": "pandas.Index", "line_number": 2304, "usage_type": "call"}, {"api_name": "numpy.polyval", "line_number": 2317, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 2317, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 2319, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 2324, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 2423, "usage_type": "call"}, {"api_name": "data_funcs.rough_dyn_fixed_point_solver", "line_number": 2424, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 2426, "usage_type": "call"}, {"api_name": "data_funcs.rough_dyn_fixed_point_solver", "line_number": 2449, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 2452, "usage_type": "call"}, {"api_name": "bokeh.models.TableColumn", "line_number": 2453, "usage_type": "call"}, {"api_name": "bokeh.models.DataTable", "line_number": 2456, "usage_type": "call"}, {"api_name": "bokeh.models.Button", "line_number": 2458, "usage_type": "call"}, {"api_name": "bokeh.events.ButtonClick", "line_number": 2459, "usage_type": "argument"}, {"api_name": "bokeh.models.Select", "line_number": 2461, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 2467, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 2475, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 2476, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 2477, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 2483, "usage_type": "call"}, {"api_name": "bokeh.models.HoverTool", "line_number": 2493, "usage_type": "call"}, {"api_name": "bokeh.models.Button", "line_number": 2558, "usage_type": "call"}, {"api_name": "bokeh.events.ButtonClick", "line_number": 2559, "usage_type": "argument"}, {"api_name": "bokeh.layouts.row", "line_number": 2561, "usage_type": "call"}, {"api_name": "bokeh.layouts.row", "line_number": 2562, "usage_type": "call"}, {"api_name": "bokeh.layouts.column", "line_number": 2568, "usage_type": "call"}, {"api_name": "bokeh.layouts.row", "line_number": 2573, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 2582, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 2585, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 2590, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 2591, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 2593, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 2594, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 2601, "usage_type": "call"}, {"api_name": "bokeh.layouts.row", "line_number": 2619, "usage_type": "call"}, {"api_name": "bokeh.layouts.column", "line_number": 2624, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 2634, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 2638, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 2643, "usage_type": "call"}, {"api_name": "bokeh.models.Select", "line_number": 2644, "usage_type": "call"}, {"api_name": 
"bokeh.models.ColumnDataSource", "line_number": 2646, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 2647, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 2655, "usage_type": "call"}, {"api_name": "bokeh.layouts.row", "line_number": 2674, "usage_type": "call"}, {"api_name": "bokeh.layouts.column", "line_number": 2679, "usage_type": "call"}, {"api_name": "bokeh.layouts.row", "line_number": 2781, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 2785, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 2787, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 2788, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 2790, "usage_type": "call"}, {"api_name": "bokeh.models.HoverTool", "line_number": 2806, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 2867, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 2869, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 2878, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 2880, "usage_type": "call"}, {"api_name": "bokeh.models.HoverTool", "line_number": 2896, "usage_type": "call"}, {"api_name": "bokeh.layouts.row", "line_number": 2914, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 3049, "usage_type": "call"}, {"api_name": "bokeh.io.curdoc", "line_number": 3050, "usage_type": "call"}, {"api_name": "bokeh.layouts.column", "line_number": 3050, "usage_type": "call"}]}
+{"seq_id": "39563526664", "text": "import random\nimport cv2\nimport ctypes\nfrom PIL import Image\nimport numpy as np\nimport re\nimport math\n\nSET_WIDTH = 2560\nSET_HEIGHT = 1440\n\n\ndef preprocess(img):\n img = img.convert(\"RGB\")\n\n # Convert the image to a NumPy array\n img_np = np.array(img)\n\n # Define the color codes to filter for\n color_codes = [\n (255, 255, 255),\n ]\n\n # Define the color similarity threshold\n threshold = 60 # Adjust this value to control the leeway in color matching\n\n # Filter for colors in the screenshot using NumPy operations\n filtered_mask = np.zeros_like(img_np[:, :, 0], dtype=bool)\n for color_code in color_codes:\n color_code_np = np.array(color_code)\n color_difference = np.sum(np.abs(img_np[:, :, :3] - color_code_np), axis=2)\n matches = color_difference <= threshold\n filtered_mask |= matches\n\n # Expand the filtered mask to have the same shape as the screenshot image array\n filtered_mask_expanded = np.expand_dims(filtered_mask, axis=2)\n filtered_mask_expanded = np.repeat(filtered_mask_expanded, 3, axis=2)\n\n # Create a filtered image using the filtered mask\n filtered_image_np = np.where(filtered_mask_expanded, img_np, 0)\n thres_img = cv2.threshold(filtered_image_np, 127, 255, cv2.THRESH_BINARY_INV)[1]\n\n return Image.fromarray(thres_img)\n\n\ndef get_left_top_width_height(pos):\n \"\"\"\n Returns a tuple of four values representing the left, top, width, and height of a rectangle\n defined by the two corners of a rectangle given as the argument 'pos'.\n\n Args:\n pos (tuple): A tuple of two tuples, representing the top-left and bottom-right corners of a rectangle.\n\n Returns:\n tuple: A tuple of four integers representing the left, top, width, and height of the rectangle.\n\n Example:\n If pos = ((10, 20), (50, 80)), the function returns (10, 20, 40, 60), which represents a rectangle\n with top-left corner at (10, 20), width 40, and height 60.\n \"\"\"\n l_x = pos[0][0]\n r_x = pos[1][0]\n\n l_y = pos[0][1]\n r_y = pos[1][1]\n\n return (l_x, l_y, r_x - l_x, r_y - l_y)\n\n\ndef get_monitor_resolution():\n \"\"\"\n Returns the resolution of the primary monitor in pixels as a tuple (width, height).\n This function uses the `GetSystemMetrics()` function from the Windows user32.dll library,\n so it is only compatible with Windows operating systems.\n\n Returns:\n - tuple: A tuple of integers representing the width and height of the primary monitor's resolution.\n \"\"\"\n user32 = ctypes.windll.user32\n return user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)\n\n\ndef get_scalars():\n \"\"\"\n Returns the scaling factors to fit the desired width and height\n (SET_WIDTH and SET_HEIGHT) into the user's monitor resolution.\n\n Returns:\n - w_scalar (float): The scaling factor for the width dimension.\n - h_scalar (float): The scaling factor for the height dimension.\n \"\"\"\n u_width, u_height = get_monitor_resolution()\n\n w_scalar = SET_WIDTH / u_width\n h_scalar = SET_HEIGHT / u_height\n\n if u_width < SET_WIDTH:\n w_scalar = u_width / SET_WIDTH\n\n if u_height < SET_HEIGHT:\n h_scalar = u_height / SET_HEIGHT\n\n return w_scalar, h_scalar\n\n\ndef get_scaled_position(x, y):\n \"\"\"\n Takes in the x and y coordinates and returns their scaled position on the user's screen, based on the user's monitor resolution and the desired width and height set in SET_WIDTH and SET_HEIGHT constants.\n\n Args:\n x (int): The x-coordinate of the position to be scaled.\n y (int): The y-coordinate of the position to be scaled.\n\n Returns:\n A tuple 
(x, y) containing the scaled position of the input coordinates on the user's screen.\n    \"\"\"\n    w_scalar, h_scalar = get_scalars()\n\n    x *= w_scalar\n    y *= h_scalar\n\n    return (x, y)\n\n\ndef get_scaled_pos(pos):\n    \"\"\"\n    Scales the position of a rectangular region by the user's monitor resolution to ensure\n    that the screenshot is taken with the same aspect ratio regardless of the user's screen resolution.\n\n    Args:\n        pos (tuple): A tuple of four coordinates (x1, y1, x2, y2) defining a rectangular region.\n\n    Returns:\n        list: A list of two tuples containing the scaled coordinates of the top-left and bottom-right corners\n        of the rectangular region.\n    \"\"\"\n    scaled_pos = []\n\n    for corner in pos:\n        corner_pos = get_scaled_position(*corner)\n        scaled_pos.append(corner_pos)\n\n    return scaled_pos\n\n\ndef clean_string(str):\n    \"\"\"\n    Returns the first run of alphanumeric characters, spaces and periods in a given string.\n\n    Args:\n        str (str): The string to be cleaned.\n\n    Returns:\n        str: The first matching run, or an empty string if there is no match.\n\n    Example:\n        >>> clean_string(\"Hi! This is a string with (a lot) of [punctuation] and $ymbols.\")\n        'Hi'\n    \"\"\"\n    res = re.findall(r\"([\\d\\w\\s\\.]+)\\W\", str)\n    return res[0].strip(\"\\n\") if len(res) else \"\"\n\n\ndef get_pos_in_area(area):\n    \"\"\"\n    Generate a random position within the given area.\n\n    Args:\n        area: A tuple containing two tuples representing the top-left and bottom-right corners\n        of the area, respectively. Each corner is represented by a tuple of (x, y) coordinates.\n\n    Returns:\n        A tuple containing the x and y coordinates of the randomly generated position within the area.\n    \"\"\"\n    x = random.randint(area[0][0], area[1][0])\n    y = random.randint(area[0][1], area[1][1])\n    return x, y\n\n\ndef get_centre_pos_from_box(box):\n    \"\"\"Calculate the center position of a bounding box object.\n\n    Args:\n        box (pyautogui.Box): A bounding box object.\n\n    Returns:\n        tuple: A tuple of the x and y coordinates of the center of the box.\n    \"\"\"\n    return (box.left + box.width // 2, box.top + box.height // 2)\n\n\ndef get_centre_pos(left, top, width, height):\n    \"\"\"Calculate the center position of a rectangle given its coordinates.\n\n    Args:\n        left (int): The x-coordinate of the top-left corner of the rectangle.\n        top (int): The y-coordinate of the top-left corner of the rectangle.\n        width (int): The width of the rectangle.\n        height (int): The height of the rectangle.\n\n    Returns:\n        tuple: A tuple of the x and y coordinates of the center of the rectangle.\n    \"\"\"\n    return (left + width // 2, top + height // 2)\n\n\ndef get_distance(pos1, pos2):\n    x1, y1 = pos1\n    x2, y2 = pos2\n    return math.sqrt(((x1 - x2) ** 2) + ((y1 - y2) ** 2))\n\n\n", "repo_name": "lindenhutchinson/overwatch-settings-sync", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 6516, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "numpy.array", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 37, "usage_type": "call"}, {"api_name": 
"numpy.where", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY_INV", "line_number": 41, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 43, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 43, "usage_type": "name"}, {"api_name": "ctypes.windll", "line_number": 79, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 160, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 175, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 176, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 210, "usage_type": "call"}]}
+{"seq_id": "1549436510", "text": "import json\nimport time\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef yahoo_scraper():\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582\"}\n\n tickers = []\n exchange = 'amex'\n with open(f'../02_data_ticker/{exchange}.json', encoding='utf-8') as file:\n tickers += json.load(file)\n\n url = 'https://finance.yahoo.com/quote/{}/profile?p={}'\n data = []\n count = 1\n\n for row in tickers:\n try:\n symbol = row[1]\n response = requests.get(url.format(symbol, symbol), headers=headers) # url profile website is scraped\n\n soup = BeautifulSoup(response.text, 'html.parser')\n pattern = re.compile(r'\\s--\\sData\\s--\\s')\n script_data = soup.find('script', text=pattern).contents[0]\n start = script_data.find(\"context\")-2\n json_data = json.loads(script_data[start:-12])\n\n sector = json_data['context']['dispatcher']['stores']['QuoteSummaryStore']['assetProfile']['sector']\n description = (json_data['context']['dispatcher']['stores']['QuoteSummaryStore']\n ['assetProfile']['longBusinessSummary'])\n\n data.append({\"name\": row[0], 'sector': sector, \"description\": description})\n print(\"added: \", row[0])\n count += 1\n\n except KeyError as e:\n print(\"not found: \", row[0], str(e))\n\n print(\"Results:\", len(tickers), \"tickers\", count, \"added and\", len(tickers)-count, \"not found\")\n\n # save the data list into a JSON file\n with open('sync_data.json', 'w') as f:\n json.dump(data, f)\n\n\ndef main():\n start = time.time()\n yahoo_scraper()\n end = time.time()\n print(\"Took {} seconds.\".format(end - start))\n\n\nif __name__ == \"__main__\":\n main()", "repo_name": "commutativity/project_NLP", "sub_path": "06_sync_async_comparison/1_sync_scraping.py", "file_name": "1_sync_scraping.py", "file_ext": "py", "file_size_in_byte": 1916, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "json.load", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 24, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 26, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 27, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 30, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 51, "usage_type": "call"}, {"api_name": "time.time", "line_number": 53, "usage_type": "call"}]}
+{"seq_id": "35629294061", "text": "from mrjob.job import MRJob \n\nclass MovingAverage(MRJob):\n window = 3\n\n def mapper(self, _, line):\n company, timestamp, value = line.split(',')\n\n yield company, (timestamp, float(value))\n\n def reducer(self, key, values):\n items = sorted(list(values))\n\n sum = 0.0\n for i in range(len(items)):\n item = items[i]\n timestamp = item[0]\n value = item[1]\n\n sum += value\n\n if(i + 1) > self.window:\n sum -= items[i-self.window][1]\n \n q = min(i + 1, self.window)\n \n moving = sum / q\n\n yield key, (item, moving)\n\nif __name__ == '__main__':\n MovingAverage.run()", "repo_name": "synara/map-reduce", "sub_path": "202104-09-10/movingavg.py", "file_name": "movingavg.py", "file_ext": "py", "file_size_in_byte": 721, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "mrjob.job.MRJob", "line_number": 3, "usage_type": "name"}]}
+{"seq_id": "32232130789", "text": "import itertools\nN, M = map(int, input().split())\nC = [0] * M\na = []\nfor i in range(M):\n C[i] = int(input())\n a.append(list(map(int, input().split())))\n\nnum = [i for i in range(1, N+1)]\nS = list(itertools.product([0, 1], repeat=M))\ncnt = 0\nfor i in range(len(S)):\n tmp = set()\n for j in range(len(S[i])):\n if S[i][j] == 1:\n tmp = tmp | set(a[j])\n if len(tmp) == N:\n cnt += 1\n\nprint(cnt)", "repo_name": "arakisota/kyopro", "sub_path": "ABC/第289回/C.py", "file_name": "C.py", "file_ext": "py", "file_size_in_byte": 426, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "itertools.product", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "14882768516", "text": "import yaml\nimport random\nwords = {}\n\nclass WordPair(object):\n def __init__(self, ger, eng):\n self.ger = ger\n self.eng = eng\n\ndef load_words():\n words = list()\n with open('words.yaml') as f:\n d = yaml.load(f)\n for k, v in d.items():\n words.append(WordPair(k,v))\n return words\n\n\ndef main():\n d = load_words()\n limit = len (d)\n for i in range(10):\n print(\"\\n - - - - - - - - - - -\")\n print(i+1)\n x = random.randrange(0, limit)\n print(\"->Eng: \" + d[x].eng)\n input(\"Guess: \")\n print(\"-= threshold:\n child = Node(itemset=next_itemset, freq=freq, tail=i)\n child._recurse_all(next_data, threshold, num_items)\n self.children.append(child)\n\n def _recurse_closed(self, database, threshold, num_items):\n \"\"\" Find ONLY closed Itemsets. \"\"\"\n for i in range(self.tail+1, num_items):\n next_data = database[database[:,i]==1,:]\n freq = len(next_data)\n if freq >= threshold:\n add_itemset = i+np.where(np.all(next_data[:,i:], axis=0))[0]\n next_itemset = self.itemset + add_itemset.tolist()\n child = Node(itemset=next_itemset, freq=freq, tail=max(add_itemset))\n child._recurse_closed(next_data, threshold, num_items)\n self.children.append(child)\n\nclass FrequentSet():\n def __init__(self, threshold):\n self.root = None\n self.threshold = threshold\n self.freq_sets = []\n\n def fit(self, database, method=\"closed\"):\n \"\"\"\n @param database: Binary Matrix. shape=(num_transactions, num_items)\n \"\"\"\n method = method.lower()\n handleKeyError(lst=ITEM_MINING_METHODS, method=method)\n num_transactions, num_items = database.shape\n self.root = Node(itemset=[], freq=num_transactions, tail=-1)\n self.root.__getattribute__({\n \"all\" : \"_recurse_all\",\n \"closed\" : \"_recurse_closed\",\n }[method]).__call__(database, self.threshold, num_items)\n self.num_items = num_items\n self.all = self.get_itemsets(self.root)\n\n def get_itemsets(self, node):\n freq_sets = [node.itemset]\n for child in node.children:\n freq_sets.extend(self.get_itemsets(node=child))\n return freq_sets\n\n def export_graphviz(self, out_file=None, feature_names=None,\n class_names=None, cmap=\"jet\", filled=True,\n rounded=True, precision=3):\n if class_names is None:\n class_names = np.arange(self.num_items)\n exporter = ItemsetTreeDOTexporter(\n cmap=cmap, class_names=class_names,\n filled=filled, rounded=rounded, precision=precision\n )\n return DOTexporterHandler(exporter, root=self.root, out_file=out_file)\n", "repo_name": "iwasakishuto/Kerasy", "sub_path": "kerasy/search/itemset.py", "file_name": "itemset.py", "file_ext": "py", "file_size_in_byte": 3973, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "76", "api": [{"api_name": "utils.flatten_dual", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 61, "usage_type": "call"}, {"api_name": "utils.handleKeyError", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 98, "usage_type": "call"}, {"api_name": "utils.ItemsetTreeDOTexporter", "line_number": 99, "usage_type": "call"}, {"api_name": "utils.DOTexporterHandler", "line_number": 103, "usage_type": "call"}]}
+{"seq_id": "43281913590", "text": "'''\nName: PrintLoss\nDesriptption: Experiment One \nEmail: yesunhuang@mail.ustc.edu.cn\nOpenSource: https://github.com/yesunhuang\nMsg: Experiment One\nAuthor: YesunHuang\nDate: 2022-04-17 20:40:50\n'''\n#import all the things we need\nimport os\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\nimport torch\nimport matplotlib.pyplot as plt\n\nDRAW_LOSS=True\n #Save path:\nif __name__=='__main__':\n currentPath=os.getcwd()\n netSavepath=os.path.join(currentPath,'TrainedNet','Exp')\n figSavepath=os.path.join(currentPath,'data','figures')\n\nif __name__=='__main__':\n QFPM_filename='QExpF1.pt'\n QFPM_FT_filename='QExpFT1.pt'\n CSM_filename='CExpFN.pt'\n CQCM_filename='CExpFQC.pt'\n QISMM_filename='QExpFM.pt'\n QFPMD_filename='QExpFD.pt'\n\n# Load loss\nif __name__=='__main__':\n loss_QFPM=torch.load(os.path.join(netSavepath,QFPM_filename))['Loss']\n trainLoss_QFPM=[l[0] for l in loss_QFPM];testLoss_QFPM=[l[1] for l in loss_QFPM]\n loss_QFPM_FT=torch.load(os.path.join(netSavepath,QFPM_FT_filename))['Loss']\n trainLoss_QFPM_FT=[l[0] for l in loss_QFPM_FT];testLoss_QFPM_FT=[l[1] for l in loss_QFPM_FT]\n loss_CSM=torch.load(os.path.join(netSavepath,CSM_filename))['Loss']\n trainLoss_CSM=[l[0] for l in loss_CSM];testLoss_CSM=[l[1] for l in loss_CSM]\n loss_CQCM=torch.load(os.path.join(netSavepath,CQCM_filename))['Loss']\n trainLoss_CQCM=[l[0] for l in loss_CQCM];testLoss_CQCM=[l[1] for l in loss_CQCM]\n loss_QISMM=torch.load(os.path.join(netSavepath,QISMM_filename))['Loss']\n trainLoss_QISMM=[l[0] for l in loss_QISMM];testLoss_QISMM=[l[1] for l in loss_QISMM]\n loss_QFPMD=torch.load(os.path.join(netSavepath,QFPMD_filename))['Loss']\n trainLoss_QFPMD=[l[0] for l in loss_QFPMD];testLoss_QFPMD=[l[1] for l in loss_QFPMD]\n \n# Draw loss\nif DRAW_LOSS and __name__=='__main__':\n figName='TrainLoss'\n fig,axes=plt.subplots(1,1,figsize=(8,6))\n axes.set_xlabel('Epoch')\n axes.set_ylabel('Train Loss')\n #axes.set_xlim(0,300)\n cor={'QDMM':'lightskyblue','QDMM_FT':'limegreen','CSM':'lightcoral',\\\n 'CQCM':'khaki','QDMMM':'orange','QDMMD':'violet'}\n axes.plot(range(0,len(trainLoss_QFPM)),trainLoss_QFPM,color=cor['QDMM'],linestyle='-',label='QDMM')\n axes.plot(range(0,len(trainLoss_QFPMD)),trainLoss_QFPMD,color=cor['QDMMD'],linestyle='-',label='QDMMD')\n axes.plot(range(0,len(trainLoss_QISMM)),trainLoss_QISMM,color=cor['QDMMM'],linestyle='-',label='QDMMM')\n axes.plot(range(0,len(trainLoss_QFPM_FT[302:])),trainLoss_QFPM_FT[302:],color=cor['QDMM_FT'],linestyle='-',label='QDMM(FT)')\n axes.plot(range(0,len(trainLoss_CSM)),trainLoss_CSM,color=cor['CSM'],linestyle='-',label='CSM')\n axes.plot(range(0,len(trainLoss_CQCM)),trainLoss_CQCM,color=cor['CQCM'],linestyle='-',label='CQCM')\n plt.legend()\n plt.show()\n fig.savefig(os.path.join(figSavepath,figName+'.svg'),dpi=600,format='svg',bbox_inches='tight')\n fig.savefig(os.path.join(figSavepath,figName+'.pdf'),dpi=600,format='pdf',bbox_inches='tight')", "repo_name": "yesunhuang/QRNNs_Memory", "sub_path": "src/PrintLoss.py", "file_name": "PrintLoss.py", "file_ext": "py", "file_size_in_byte": 2986, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", 
"line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}]}
+{"seq_id": "38101161893", "text": "# us_census_data.py\n# Youyou Tian\n# SDS293\n# Pulls from the US 2010-2014 census api\n\nimport requests\n\napi_key = \"12c3359bc69c96ac440e62d8d521d5919f6eb842\"\n\n# Given the zipcode, returns the:\n# total population\n# percentage white population\n# average household size\n# median household income\n# as a dictionary\ndef getDataFromUSCensusWithZipcode(zip_code):\n # total pop\n # white pop not hispanic not latino\n # average household size\n # median household income\n # median household income white homeowner\n # median household income asian homeowner\n # http://api.census.gov/data/2014/acs5/variables.html\n # http://api.census.gov/data/2014/acs5/examples.html\n\n parameters = {\"get\":\"B01001_001E,B01001H_001E,B25010_001E,B19013_001E\", #,B19013A_001E,B19013D_001E\",\n \"for\": \"zip code tabulation area:\" + zip_code}#,\n #\"in\": \"state:\" + names_to_states_dict[name]}\n\t\t\t\t\n r = requests.get(\"http://api.census.gov/data/2014/acs5?key=\" + \\\n api_key, params = parameters)\n\n # error handling if no data or incorrect data\n if r.status_code != 200:\n print(\"error in census data\")\n return {}\n\n if r.json()[1][2] == None:\n print(\"error in census data\")\n return {}\n\n data = {}\n predictors = [\"total_population\", \"percent_white_people\", \"people_per_household\", \"yearly_household_income\"]\n print(zip_code)\n float_data = [float(i) for i in r.json()[1]]\n # need to fix but ok for now\n float_data[1] = round(float_data[1]/float_data[0], 10)\n predictors_dict = dict(zip(predictors, float_data))\n data[zip_code] = predictors_dict\n return data\n \n\ngetDataFromUSCensusWithZipcode(\"89109\")\n\n# first creaet a dictionary matching state name to state id\n#parameters = {\"get\": \"NAME\",\n# \"for\": \"state\"}\n#r = requests.get(\"http://api.census.gov/data/2014/acs5?key=\" + \\\n# api_key, params = parameters)\n#states_json = r.json()[1:]\n#names_to_states_dict = dict(map(lambda x: [x[0], x[1]], r.json()[1:]))\n\n \n#for zip_code in zips: #name, zip_code in zips:\n # pulled from the 2010 census data based on a tutorial Alice gave me from\n # the data visualization class that works\n #parameters2010 = {\"get\": \"P0010001,P0080003\",\n # \"for\": \"zip code tabulation area:\" + zip_code,\n # \"in\": \"state:\" + names_to_states_dict[name]}\n\n # total pop\n # white pop not hispanic not latino\n # average household size\n # median household income\n # median household income white homeowner\n # median household income asian homeowner\n # http://api.census.gov/data/2014/acs5/variables.html\n # http://api.census.gov/data/2014/acs5/examples.html\n #print(names_to_states_dict[name])\n #parameters = {\"get\":\"B01001_001E,B01001H_001E,B25010_001E,B19013_001E,B19013A_001E,B19013D_001E\",\n # \"for\": \"zip code tabulation area:\" + zip_code}#,\n # #\"in\": \"state:\" + names_to_states_dict[name]}\n\t\t\t\t\n #r = requests.get(\"http://api.census.gov/data/2014/acs5?key=\" + \\\n # api_key, params = parameters)\n\n #print(r.text)\n", "repo_name": "AliceFanYang/MachineLearning-FinalProject", "sub_path": "us_census_data.py", "file_name": "us_census_data.py", "file_ext": "py", "file_size_in_byte": 3136, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "requests.get", "line_number": 30, "usage_type": "call"}]}
+{"seq_id": "25365050229", "text": "import json\n\nfrom classes.room import Room\nfrom classes.teacher import Teacher\n\n\nwith open(\"data/teachers.json\", \"r\") as read_file:\n teacher_data = json.load(read_file)\n\nwith open(\"data/rooms.json\", \"r\") as read_file:\n room_data = json.load(read_file)\n\nwith open(\"data/class_sets.json\", \"r\") as read_file:\n class_set_data = json.load(read_file)\n\nTEACHERS = [\n Teacher(v[\"Name\"], v[\"Subjects\"], v[\"PreferredRoom\"]) for v in teacher_data.values()\n]\n\nROOMS = [\n Room(v[\"RoomNumber\"], v[\"Available\"], v[\"Subjects\"], v[\"Capacity\"])\n for v in room_data.values()\n]\n\nCLASS_SETS = [v for v in class_set_data.values()]\n", "repo_name": "braddotcoffee/timetablegenerator", "sub_path": "controllers/data_controller.py", "file_name": "data_controller.py", "file_ext": "py", "file_size_in_byte": 627, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "json.load", "line_number": 8, "usage_type": "call"}, {"api_name": "json.load", "line_number": 11, "usage_type": "call"}, {"api_name": "json.load", "line_number": 14, "usage_type": "call"}, {"api_name": "classes.teacher.Teacher", "line_number": 17, "usage_type": "call"}, {"api_name": "classes.room.Room", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "27548305337", "text": "# Librairies\n\nimport json\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n\n\n\n\n# Constantes\n\nN = 3 # nombre de commentaires souhaités\n\n\n\n\n\n\n# Fonctions\n\ndef videosId() :\n\n with open(\"input.json\", 'r') as input_file :\n input_data = json.load(input_file)\n\n return input_data[\"videos_id\"]\n\n\n\ndef videoUrl(video_id) :\n\n url = \"https://www.youtube.com/watch?v=\"\n\n return url + video_id\n\n\n\ndef videoData(video_url) :\n\n video_requete =requests.get(video_url).text\n\n return BeautifulSoup(video_requete, \"html.parser\")\n\n\n\ndef videoTitre(video_data) :\n\n titre = video_data.find(\"meta\", attrs = {\"name\":\"title\"})\n\n return titre.get(\"content\")\n\n\n\ndef videoAuteur(video_data) :\n\n auteur = video_data.find(\"link\", attrs = {\"itemprop\":\"name\"})\n\n return auteur.get(\"content\")\n\n\n\ndef videoPoucesBleus(video_data) :\n\n scripts = video_data.find_all(\"script\") \n for script in scripts :\n if \"clics\" in script.get_text() :\n position_debut = script.string.index(\"LIKE\") + 69\n position_fin = script.string.index(\"clics\") - 1\n pouces_bleus = script.string[position_debut:position_fin].replace(\"\\u202f\", \"\")\n break\n\n return int(pouces_bleus)\n\n\n\ndef videoDescription(video_data) :\n\n scripts = video_data.find_all(\"script\")\n for script in scripts :\n if \"shortDescription\" in script.get_text() :\n position_debut = script.string.index(\"shortDescription\") + 19\n position_fin = script.string.index(\"isCrawlable\") - 3\n description = script.string[position_debut:position_fin]\n break\n\n return description\n\n\n\ndef videoLiensTimestamp(video_data) :\n\n scripts = video_data.find_all(\"script\")\n nombre = 0\n liens = []\n for script in scripts :\n if \"continuePlayback\" in script.get_text() :\n nombre = script.string.count(\"continuePlayback\")//4\n marqueur = 0\n for i in range(nombre) :\n position = script.string[marqueur:].index(\"continuePlayback\") + marqueur\n position_debut = script.string[:position].rindex(\"url\") + 6\n position_fin = script.string[:position].rindex(\"webPageType\") - 3\n liens.append(\"https://www.youtube.com\" + script.string[position_debut:position_fin].replace(\"\\\\u0026\", \"&\"))\n marqueur = position + 1\n break\n\n return liens\n\n\n\ndef videoLiensAutres(video_data) :\n\n scripts = video_data.find_all(\"script\")\n liens_https = []\n liens_http = []\n for script in scripts :\n if \"shortDescription\" in script.get_text() :\n position = script.string.index(\"shortDescription\")\n marqueur = script.string.index(\"isCrawlable\")\n nombre_https = script.string[position:marqueur].count(\"https://\")\n while len(liens_https) < nombre_https :\n position_debut = script.string[position:].index(\"https://\") + position\n position_fin = script.string[position_debut:].index(\"\\\\n\") + position_debut\n liens_https.append(script.string[position_debut:position_fin])\n position = position_fin + 1\n position = script.string.index(\"shortDescription\")\n marqueur = script.string.index(\"isCrawlable\")\n nombre_http = script.string[position:marqueur].count(\"http://\")\n while len(liens_http) < nombre_http :\n position_debut = script.string[position:].index(\"http://\") + position\n position_fin = script.string[position_debut:].index(\"\\\\n\") + position_debut\n liens_http.append(script.string[position_debut:position_fin])\n position = position_fin + 1\n break\n\n return liens_https + liens_http\n\n\n\ndef videoCommentaires(video_data, n = N) :\n\n return 
0\n\n\n\n\n\n\n# Script\n\nids = videosId()\nurls = list(map(videoUrl, ids))\nvideos = list(map(videoData, urls))\ntitres = list(map(videoTitre, videos))\nauteurs = list(map(videoAuteur, videos))\nlikes = list(map(videoPoucesBleus, videos))\ndescriptions = list(map(videoDescription, videos))\nliens_timestamp = list(map(videoLiensTimestamp, videos))\nliens_autres = list(map(videoLiensAutres, videos))\ncommentaires = list(map(videoCommentaires, videos))\n\nclass VideoYoutube :\n\n def __init__(self, i) :\n self.titre = titres[i]\n self.auteur = auteurs[i]\n self.likes = likes[i]\n self.description = descriptions[i]\n self.liens_timestamp = liens_timestamp[i]\n self.liens_autres = liens_autres[i]\n self.id = ids[i]\n self.commentaires = commentaires[i]\n\n def dictionnaire(self) :\n return {\"titre\":self.titre,\n \"auteur\":self.auteur,\n \"likes\":self.likes,\n \"description\":self.description,\n \"liens\":(self.liens_timestamp + self.liens_autres),\n \"id\":self.id,\n \"commentaires\":self.commentaires}\n\nvideos_youtube = dict()\nfor i in range(len(ids)) :\n video_youtube = VideoYoutube(i)\n videos_youtube[\"video_{}\".format(i)] = video_youtube.dictionnaire()\n\nwith open(\"output.json\", 'w') as output_file :\n json.dump(videos_youtube, output_file)", "repo_name": "Wael382/TP_Youtube_Scraper", "sub_path": "script.py", "file_name": "script.py", "file_ext": "py", "file_size_in_byte": 5195, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "json.load", "line_number": 26, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 42, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 44, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 191, "usage_type": "call"}]}
+{"seq_id": "31171740861", "text": "#对于部分预训练的VGG16风格的网络进行微调\r\n\r\n\r\nimport argparse\r\nfrom os.path import join\r\n\r\nimport json\r\n\r\n\r\n# mean = (0.485, 0.456, 0.406)\r\n# std = (0.229, 0.224, 0.225)#这是归一化之后的数值\r\n# mean = (123.6800, 116.7790, 103.9390)\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--datasetdir', default=r'C:\\Users\\于涵\\Desktop\\Caltech-UCSD Birds-200 2011\\Caltech-UCSD Birds-200-2011\\CUB_200_2011', help=\"path to cub200_2011 dir\")\r\nparser.add_argument('--imgdir', default='images', help=\"path to train img dir\")\r\nparser.add_argument('--tr_te_split_txt', default=r'train_test_split.txt', help=\"关于训练集与测试集的划分,0代表测试集,1代表训练集\")\r\nparser.add_argument('--tr_te_image_name_txt', default=r'images.txt', help=\"关于训练集与测试集的图片的相对路径名字\")\r\nparser.add_argument('--image_labels_txt', default=r'image_class_labels.txt', help=\"图像的类别标签标记\")\r\nparser.add_argument('--class_name_txt', default=r'classes.txt', help=\"图像的200个类别名称\")\r\nargs = parser.parse_args()\r\n\r\n\r\ntrain_index=[]\r\ntest_index=[]\r\n# print(join(args.datasetdir,args.tr_te_split_txt))\r\nwith open(join(args.datasetdir,args.tr_te_split_txt)) as miki:\r\n for line in miki:\r\n # print(line.rstrip())\r\n line=line.rstrip()\r\n index,tr_te_type=line.split(' ')\r\n index=int(index)\r\n tr_te_type=int(tr_te_type)\r\n if tr_te_type==0:\r\n test_index.append(index)\r\n else:\r\n train_index.append(index)\r\n\r\n\r\ntrain_labels=[]\r\ntest_labels=[]\r\nwith open(join(args.datasetdir,args.image_labels_txt)) as lisalisa:\r\n for line in lisalisa:\r\n # print(line.rstrip())\r\n line=line.rstrip()\r\n index,label=line.split(' ')\r\n index=int(index)\r\n label=int(label)-1 #类别索引是0~199,用于在loss部分自动生成one_hot\r\n tr_te_type=int(tr_te_type)\r\n if index in train_index:\r\n train_labels.append(label)\r\n else:\r\n test_labels.append(label)\r\n# print(len(train_labels))\r\n# print(len(test_labels))\r\nfilename1='./datafile/train_labels.json'\r\nfilename2='./datafile/test_labels.json'\r\nwith open(filename1,'w') as f_obj:\r\n json.dump(train_labels,f_obj)\r\nwith open(filename2,'w') as f_obj:\r\n json.dump(test_labels,f_obj)\r\n\r\n\r\ntrain_paths=[]\r\ntest_paths=[]\r\nwith open(join(args.datasetdir,args.tr_te_image_name_txt)) as iggy:\r\n for line in iggy:\r\n # print(line.rstrip())\r\n line = line.rstrip()\r\n index, img_name = line.split(' ')\r\n index = int(index)\r\n img_name=img_name.strip()\r\n # print(index)\r\n # print(img_name)\r\n if index in train_index:\r\n train_paths.append(join(args.datasetdir,args.imgdir,img_name))\r\n else:\r\n test_paths.append(join(args.datasetdir,args.imgdir,img_name))\r\n# print(len(train_paths))\r\n# print(len(test_paths))\r\nfilename3='./datafile/train_paths.json'\r\nfilename4='./datafile/test_paths.json'\r\nwith open(filename3,'w') as f_obj:\r\n json.dump(train_paths,f_obj)\r\nwith open(filename4,'w') as f_obj:\r\n json.dump(test_paths,f_obj)\r\n\r\nindex_class_dict={}\r\nwith open(join(args.datasetdir,args.class_name_txt)) as hata:\r\n for line in hata:\r\n # print(line.rstrip())\r\n line=line.rstrip()\r\n index,class_name=line.split(' ')\r\n index=int(index)\r\n index_class_dict[str(index)]=class_name\r\n# print(index_class_dict)\r\n", "repo_name": "mikiyukio/SCDA_pytorch", "sub_path": "SCDA_pytorch/SCDA_for_LL/files.py", "file_name": "files.py", "file_ext": "py", "file_size_in_byte": 3478, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "76", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 14, 
"usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 59, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 84, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}]}
+{"seq_id": "15875733674", "text": "from typing import Literal\r\n\r\nimport rev\r\nimport wpilib\r\nimport wpilib.drive\r\nimport wpiutil\r\nfrom photonvision import PhotonCamera, SimVisionSystem, SimVisionTarget\r\nfrom robotpy_apriltag import AprilTagField, loadAprilTagLayoutField\r\nfrom wpilib import DriverStation\r\nfrom wpilib import RobotBase, RobotController\r\nfrom wpilib.simulation import DifferentialDrivetrainSim\r\nfrom wpimath.estimator import DifferentialDrivePoseEstimator\r\nfrom wpimath.geometry import Pose2d, Rotation3d, Translation3d, Transform3d\r\nfrom wpimath.kinematics import DifferentialDriveKinematics\r\nfrom wpimath.system import LinearSystemId\r\nfrom wpimath.system.plant import DCMotor\r\n\r\nimport ports\r\nfrom gyro import NavX, ADIS16448, ADIS16470, ADXRS, Empty\r\nfrom utils.property import autoproperty, defaultSetter\r\nfrom utils.safesubsystem import SafeSubsystem\r\nfrom utils.sparkmaxsim import SparkMaxSim\r\nfrom utils.sparkmaxutils import configureFollower, configureLeader\r\n\r\n\r\nselect_gyro: Literal[\"navx\", \"adis16448\", \"adis16470\", \"adxrs\", \"empty\"] = \"adis16470\"\r\napril_tag_field = loadAprilTagLayoutField(AprilTagField.k2023ChargedUp)\r\ncam_to_robot = Transform3d(Translation3d(-0.375, 0.0, -0.165), Rotation3d(0, 0, 0))\r\n\r\n\r\nclass Drivetrain(SafeSubsystem):\r\n encoder_conversion_factor = autoproperty(0.056)\r\n\r\n def __init__(self) -> None:\r\n super().__init__()\r\n\r\n # Motors\r\n self._motor_left = rev.CANSparkMax(ports.drivetrain_motor_front_left, rev.CANSparkMax.MotorType.kBrushless)\r\n configureLeader(self._motor_left, \"brake\")\r\n\r\n self._motor_left_follower = rev.CANSparkMax(ports.drivetrain_motor_rear_left,\r\n rev.CANSparkMax.MotorType.kBrushless)\r\n configureFollower(self._motor_left_follower, self._motor_left, \"brake\")\r\n\r\n self._motor_right = rev.CANSparkMax(ports.drivetrain_motor_front_right,\r\n rev.CANSparkMax.MotorType.kBrushless)\r\n configureLeader(self._motor_right, \"brake\", True)\r\n\r\n self._motor_right_follower = rev.CANSparkMax(ports.drivetrain_motor_rear_right,\r\n rev.CANSparkMax.MotorType.kBrushless)\r\n configureFollower(self._motor_right_follower, self._motor_right, \"brake\")\r\n\r\n self._drive = wpilib.drive.DifferentialDrive(self._motor_left, self._motor_right)\r\n self.addChild(\"DifferentialDrive\", self._drive)\r\n\r\n # Photon Vision\r\n self.latest = None\r\n\r\n # Encoders\r\n self._encoder_left = self._motor_left.getEncoder()\r\n self._encoder_right = self._motor_right.getEncoder()\r\n self._left_encoder_offset = self._encoder_left.getPosition()\r\n self._right_encoder_offset = self._encoder_right.getPosition()\r\n\r\n # Gyro\r\n self._gyro = {\r\n \"navx\": NavX,\r\n \"adis16448\": ADIS16448,\r\n \"adis16470\": ADIS16470,\r\n \"adxrs\": ADXRS,\r\n \"empty\": Empty,\r\n }[select_gyro]()\r\n\r\n # Odometry\r\n self._kinematics = DifferentialDriveKinematics(trackWidth=0.56)\r\n self._estimator = DifferentialDrivePoseEstimator(self._kinematics, self._gyro.getRotation2d(), 0, 0,\r\n initialPose=Pose2d(0, 0, 0))\r\n\r\n self._field = wpilib.Field2d()\r\n wpilib.SmartDashboard.putData(\"Field\", self._field)\r\n\r\n self.alliance = DriverStation.getAlliance()\r\n\r\n self.addChild(\"Gyro\", self._gyro)\r\n\r\n if RobotBase.isReal():\r\n self.cam = PhotonCamera(\"mainCamera\")\r\n PhotonCamera.setVersionCheckEnabled(False)\r\n else: # sim\r\n self._motor_left_sim = SparkMaxSim(self._motor_left)\r\n self._motor_right_sim = SparkMaxSim(self._motor_right)\r\n self._system = 
LinearSystemId.identifyDrivetrainSystem(1.98, 0.2, 5, 0.3)\r\n self._drive_sim = DifferentialDrivetrainSim(self._system, 0.64, DCMotor.NEO(4), 1.5, 0.08, [\r\n 0.001, 0.001, 0.001, 0.1, 0.1, 0.005, 0.005])\r\n\r\n # Cam sim\r\n cam_diag_fov = 75.0\r\n max_led_range = 20\r\n cam_resolution_width = 320\r\n cam_resolution_height = 240\r\n min_target_area = 10\r\n self.sim_vision = SimVisionSystem(\"cam\", cam_diag_fov, cam_to_robot, max_led_range,\r\n cam_resolution_width, cam_resolution_height, min_target_area)\r\n for i in range(1, 9):\r\n self.sim_vision.addSimVisionTarget(SimVisionTarget(april_tag_field.getTagPose(i), 8, 8, i))\r\n self.cam = self.sim_vision.cam\r\n\r\n self.use_vision = True\r\n\r\n\r\n def arcadeDrive(self, forward: float, rotation: float) -> None:\r\n self._drive.arcadeDrive(forward, rotation, False)\r\n\r\n def tankDrive(self, left: float, right: float) -> None:\r\n self._drive.tankDrive(left, right, False)\r\n\r\n def simulationPeriodic(self):\r\n self._drive_sim.setInputs(\r\n self._motor_left.get() * RobotController.getInputVoltage(),\r\n self._motor_right.get() * RobotController.getInputVoltage())\r\n self._drive_sim.update(0.02)\r\n self._motor_left_sim.setPosition(self._drive_sim.getLeftPosition() / self.encoder_conversion_factor + self._left_encoder_offset)\r\n self._motor_left_sim.setVelocity(self._drive_sim.getLeftVelocity())\r\n self._motor_right_sim.setPosition(self._drive_sim.getRightPosition() / self.encoder_conversion_factor + self._right_encoder_offset)\r\n self._motor_right_sim.setVelocity(self._drive_sim.getRightVelocity())\r\n self._gyro.setSimAngle(self._drive_sim.getHeading().degrees())\r\n self.sim_vision.processFrame(self._drive_sim.getPose())\r\n\r\n def getRotation(self):\r\n return self._gyro.getRotation2d()\r\n\r\n def getPitch(self):\r\n return self._gyro.getPitch()\r\n\r\n def getLeftEncoderPosition(self):\r\n return (self._encoder_left.getPosition() - self._left_encoder_offset) * self.encoder_conversion_factor\r\n\r\n def getRightEncoderPosition(self):\r\n return (self._encoder_right.getPosition() - self._right_encoder_offset) * self.encoder_conversion_factor\r\n\r\n def getAverageEncoderPosition(self):\r\n return (self.getLeftEncoderPosition() + self.getRightEncoderPosition()) / 2\r\n\r\n def getPose(self):\r\n return self._estimator.getEstimatedPosition()\r\n\r\n def getField(self):\r\n return self._field\r\n\r\n def periodic(self):\r\n self._estimator.update(self._gyro.getRotation2d(), self.getLeftEncoderPosition(),\r\n self.getRightEncoderPosition())\r\n\r\n self.latest = self.cam.getLatestResult()\r\n if self.use_vision and self.latest.hasTargets():\r\n img_capture_time = self.latest.getTimestamp()\r\n cam_to_target = self.latest.getBestTarget().getBestCameraToTarget()\r\n target_to_cam = cam_to_target.inverse()\r\n target_on_field = april_tag_field.getTagPose(self.latest.getBestTarget().getFiducialId())\r\n if target_on_field is not None:\r\n camera_on_field = target_on_field.transformBy(target_to_cam)\r\n robot_on_field = camera_on_field.transformBy(cam_to_robot).toPose2d()\r\n self._estimator.addVisionMeasurement(robot_on_field, img_capture_time)\r\n\r\n self._field.setRobotPose(self._estimator.getEstimatedPosition())\r\n\r\n def initSendable(self, builder: wpiutil.SendableBuilder) -> None:\r\n super().initSendable(builder)\r\n builder.addDoubleProperty(\"Left motor\", lambda: self._motor_left.get() or -999.0, defaultSetter)\r\n builder.addDoubleProperty(\"Right Motor\", lambda: self._motor_right.get() or -999.0, defaultSetter)\r\n 
builder.addDoubleProperty(\"Left Encoder Position\", self.getLeftEncoderPosition, defaultSetter)\r\n builder.addDoubleProperty(\"Right Encoder Position\", self.getRightEncoderPosition, defaultSetter)\r\n\r\n", "repo_name": "Ultime5528/FRC2023", "sub_path": "subsystems/drivetrain.py", "file_name": "drivetrain.py", "file_ext": "py", "file_size_in_byte": 7979, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "typing.Literal", "line_number": 26, "usage_type": "name"}, {"api_name": "robotpy_apriltag.loadAprilTagLayoutField", "line_number": 27, "usage_type": "call"}, {"api_name": "robotpy_apriltag.AprilTagField.k2023ChargedUp", "line_number": 27, "usage_type": "attribute"}, {"api_name": "robotpy_apriltag.AprilTagField", "line_number": 27, "usage_type": "name"}, {"api_name": "wpimath.geometry.Transform3d", "line_number": 28, "usage_type": "call"}, {"api_name": "wpimath.geometry.Translation3d", "line_number": 28, "usage_type": "call"}, {"api_name": "wpimath.geometry.Rotation3d", "line_number": 28, "usage_type": "call"}, {"api_name": "utils.safesubsystem.SafeSubsystem", "line_number": 31, "usage_type": "name"}, {"api_name": "utils.property.autoproperty", "line_number": 32, "usage_type": "call"}, {"api_name": "rev.CANSparkMax", "line_number": 38, "usage_type": "call"}, {"api_name": "ports.drivetrain_motor_front_left", "line_number": 38, "usage_type": "attribute"}, {"api_name": "utils.sparkmaxutils.configureLeader", "line_number": 39, "usage_type": "call"}, {"api_name": "rev.CANSparkMax", "line_number": 41, "usage_type": "call"}, {"api_name": "ports.drivetrain_motor_rear_left", "line_number": 41, "usage_type": "attribute"}, {"api_name": "rev.CANSparkMax", "line_number": 42, "usage_type": "attribute"}, {"api_name": "utils.sparkmaxutils.configureFollower", "line_number": 43, "usage_type": "call"}, {"api_name": "rev.CANSparkMax", "line_number": 45, "usage_type": "call"}, {"api_name": "ports.drivetrain_motor_front_right", "line_number": 45, "usage_type": "attribute"}, {"api_name": "rev.CANSparkMax", "line_number": 46, "usage_type": "attribute"}, {"api_name": "utils.sparkmaxutils.configureLeader", "line_number": 47, "usage_type": "call"}, {"api_name": "rev.CANSparkMax", "line_number": 49, "usage_type": "call"}, {"api_name": "ports.drivetrain_motor_rear_right", "line_number": 49, "usage_type": "attribute"}, {"api_name": "rev.CANSparkMax", "line_number": 50, "usage_type": "attribute"}, {"api_name": "utils.sparkmaxutils.configureFollower", "line_number": 51, "usage_type": "call"}, {"api_name": "wpilib.drive.DifferentialDrive", "line_number": 53, "usage_type": "call"}, {"api_name": "wpilib.drive", "line_number": 53, "usage_type": "attribute"}, {"api_name": "gyro.NavX", "line_number": 67, "usage_type": "name"}, {"api_name": "gyro.ADIS16448", "line_number": 68, "usage_type": "name"}, {"api_name": "gyro.ADIS16470", "line_number": 69, "usage_type": "name"}, {"api_name": "gyro.ADXRS", "line_number": 70, "usage_type": "name"}, {"api_name": "gyro.Empty", "line_number": 71, "usage_type": "name"}, {"api_name": "wpimath.kinematics.DifferentialDriveKinematics", "line_number": 75, "usage_type": "call"}, {"api_name": "wpimath.estimator.DifferentialDrivePoseEstimator", "line_number": 76, "usage_type": "call"}, {"api_name": "wpimath.geometry.Pose2d", "line_number": 77, "usage_type": "call"}, {"api_name": "wpilib.Field2d", "line_number": 79, "usage_type": "call"}, {"api_name": "wpilib.SmartDashboard.putData", "line_number": 80, "usage_type": 
"call"}, {"api_name": "wpilib.SmartDashboard", "line_number": 80, "usage_type": "attribute"}, {"api_name": "wpilib.DriverStation.getAlliance", "line_number": 82, "usage_type": "call"}, {"api_name": "wpilib.DriverStation", "line_number": 82, "usage_type": "name"}, {"api_name": "wpilib.RobotBase.isReal", "line_number": 86, "usage_type": "call"}, {"api_name": "wpilib.RobotBase", "line_number": 86, "usage_type": "name"}, {"api_name": "photonvision.PhotonCamera", "line_number": 87, "usage_type": "call"}, {"api_name": "photonvision.PhotonCamera.setVersionCheckEnabled", "line_number": 88, "usage_type": "call"}, {"api_name": "photonvision.PhotonCamera", "line_number": 88, "usage_type": "name"}, {"api_name": "utils.sparkmaxsim.SparkMaxSim", "line_number": 90, "usage_type": "call"}, {"api_name": "utils.sparkmaxsim.SparkMaxSim", "line_number": 91, "usage_type": "call"}, {"api_name": "wpimath.system.LinearSystemId.identifyDrivetrainSystem", "line_number": 92, "usage_type": "call"}, {"api_name": "wpimath.system.LinearSystemId", "line_number": 92, "usage_type": "name"}, {"api_name": "wpilib.simulation.DifferentialDrivetrainSim", "line_number": 93, "usage_type": "call"}, {"api_name": "wpimath.system.plant.DCMotor.NEO", "line_number": 93, "usage_type": "call"}, {"api_name": "wpimath.system.plant.DCMotor", "line_number": 93, "usage_type": "name"}, {"api_name": "photonvision.SimVisionSystem", "line_number": 102, "usage_type": "call"}, {"api_name": "photonvision.SimVisionTarget", "line_number": 105, "usage_type": "call"}, {"api_name": "wpilib.RobotController.getInputVoltage", "line_number": 119, "usage_type": "call"}, {"api_name": "wpilib.RobotController", "line_number": 119, "usage_type": "name"}, {"api_name": "wpilib.RobotController.getInputVoltage", "line_number": 120, "usage_type": "call"}, {"api_name": "wpilib.RobotController", "line_number": 120, "usage_type": "name"}, {"api_name": "wpiutil.SendableBuilder", "line_number": 167, "usage_type": "attribute"}, {"api_name": "utils.property.defaultSetter", "line_number": 169, "usage_type": "argument"}, {"api_name": "utils.property.defaultSetter", "line_number": 170, "usage_type": "argument"}, {"api_name": "utils.property.defaultSetter", "line_number": 171, "usage_type": "argument"}, {"api_name": "utils.property.defaultSetter", "line_number": 172, "usage_type": "argument"}]}
+{"seq_id": "36807492545", "text": "from torch.utils.data import Dataset\nimport numpy as np\nimport cv2\n\nfrom PIL import Image\n\nfrom utils.proc import sample_cnt, sample_cnt_with_idx \n\n\nclass ValDataset(Dataset):\n def __init__(self, dataset, opt):\n self.dataset = dataset\n self.opt = opt\n\n if self.dataset.anno_type == 'pointset':\n self.read_anno = self._read_pointset\n elif self.dataset.anno_type == 'mask':\n self.read_anno = self._read_mask\n \n def __getitem__(self, idx):\n seq = self.dataset[idx]\n\n img_files, anno_files, others = seq\n if 'init_path' in others:\n init_path = others['init_path']\n\n if 'idx_path' in others:\n idx_path = others['idx_path']\n idx = np.loadtxt(idx_path)\n else:\n idx = None\n \n init = self.read_anno(init_path)\n\n if not idx is None:\n init, idx = sample_cnt_with_idx(init, idx, self.opt.num_cp)\n else:\n init = sample_cnt(init, self.opt.num_cp)\n\n imgs = []\n annos = []\n\n for img_path in img_files:\n img = Image.open(img_path)\n img = np.array(img)\n imgs.append(img)\n\n for anno_path in anno_files:\n if anno_path:\n anno = self.read_anno(anno_path)\n else:\n anno = []\n annos.append(anno)\n\n others['img_files'] = img_files\n others['anno_files'] = anno_files \n\n return imgs, annos, init, idx, others\n\n\n @staticmethod\n def _read_pointset(txt_path):\n return np.loadtxt(txt_path)\n\n @staticmethod\n def _read_mask(self, png_path):\n mask = Image.open(png_path)\n cnt, _ = cv2.findContours(mask, \n cv2.RETR_EXTERNAL, \n cv2.CHAIN_APPROX_NONE)\n if not cnt:\n return []\n\n if len(cnt) > 1 or len(cnt[0]) > 1:\n raise NotImplementedError('more than 1 contour in a mask is not supported')\n \n return cnt[0][0]\n\n def __len__(self):\n return len(self.dataset)\n\n \n\n\n\n", "repo_name": "ghnam-ken/PoST", "sub_path": "dataset/val_dataset.py", "file_name": "val_dataset.py", "file_ext": "py", "file_size_in_byte": 2130, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "76", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 10, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 29, "usage_type": "call"}, {"api_name": "utils.proc.sample_cnt_with_idx", "line_number": 36, "usage_type": "call"}, {"api_name": "utils.proc.sample_cnt", "line_number": 38, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 44, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 44, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 67, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 67, "usage_type": "name"}, {"api_name": "cv2.findContours", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 69, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_NONE", "line_number": 70, "usage_type": "attribute"}]}
+{"seq_id": "33784621251", "text": "import streamlit as st\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport backend as bd\n\n# LAYOUT SETTINGS\npage_title = \"Air composition\"\npage_title_2 = \"Air Composition Forecast\"\npage_icon = \":mask:\"\nlayout = \"wide\"\n\n# APP LAYOUT\nst.set_page_config(page_title=page_title,\n page_icon=page_icon,\n layout=layout)\nst.title(page_title_2)\nst.info(\"\"\"\n**_Note:_** You can turn off and turn on lines in each graph by clicking \non the plot legend.\n\"\"\")\n\ntry:\n # CURRENT TIME\n current_time = st.session_state['current_time']\n\n # DATA\n location = st.session_state['location']\n latitude = location[0]\n longitude = location[1]\n\n # Data for graph of meteo forecast\n air_data = bd.get_air_composition_data(latitude, longitude)\n air_dates = air_data['hourly']['time']\n pm10 = air_data['hourly']['pm10']\n pm2_5 = air_data['hourly']['pm2_5']\n carbon_monoxide = air_data['hourly']['carbon_monoxide']\n nitrogen_dioxide = air_data['hourly']['nitrogen_dioxide']\n sulphur_dioxide = air_data['hourly']['sulphur_dioxide']\n ozone = air_data['hourly']['ozone']\n pollen_dates = air_data['hourly']['time'][:96]\n alder_pollen = air_data['hourly']['alder_pollen']\n birch_pollen = air_data['hourly']['birch_pollen']\n grass_pollen = air_data['hourly']['grass_pollen']\n mugwort_pollen = air_data['hourly']['mugwort_pollen']\n olive_pollen = air_data['hourly']['olive_pollen']\n ragweed_pollen = air_data['hourly']['ragweed_pollen']\n european_aqi = air_data['hourly']['european_aqi']\n\n # Create air composition plot\n air_fig = go.Figure()\n # Add pm10\n air_fig.add_trace(\n go.Scatter(x=air_dates, y=pm10,\n name=\"pm10\",\n marker_color=px.colors.qualitative.G10[3]\n )\n )\n # Add pm2.5\n air_fig.add_trace(\n go.Scatter(x=air_dates, y=pm2_5,\n name=\"pm2.5\",\n marker_color=px.colors.qualitative.G10[7]\n )\n )\n air_fig.add_trace(\n go.Scatter(x=air_dates, y=carbon_monoxide,\n name=\"carbon monoxide\",\n marker_color=px.colors.qualitative.G10[5]\n )\n )\n air_fig.add_trace(\n go.Scatter(x=air_dates, y=nitrogen_dioxide,\n name=\"nitrogen dioxide\",\n marker_color=px.colors.qualitative.G10[6]\n )\n )\n air_fig.add_trace(\n go.Scatter(x=air_dates, y=sulphur_dioxide,\n name=\"sulphur dioxide\",\n marker_color=px.colors.qualitative.G10[2]\n )\n )\n air_fig.add_trace(\n go.Scatter(x=air_dates, y=ozone,\n name=\"ozone\",\n marker_color=px.colors.qualitative.G10[9]\n )\n )\n # Add current time\n air_fig.add_vline(x=current_time,\n line_width=1,\n line_dash=\"dot\",\n line_color=\"red\"\n )\n # Update axis names, hover, legend\n air_fig.update_layout(\n title=dict(text=\"Air composition (μg/m³)\"),\n xaxis_title=\"Date\",\n yaxis_title=\"μg/m³\",\n hovermode='x',\n legend=dict(orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1\n )\n )\n\n # Create pollen situation plot\n pollen_fig = go.Figure()\n pollen_fig.add_trace(\n go.Scatter(x=pollen_dates, y=alder_pollen,\n name=\"alder pollen\",\n marker_color=px.colors.qualitative.Dark2[7]\n )\n )\n pollen_fig.add_trace(\n go.Scatter(x=pollen_dates, y=birch_pollen,\n name=\"birch pollen\",\n marker_color=px.colors.qualitative.Dark2[6]\n )\n )\n pollen_fig.add_trace(\n go.Scatter(x=pollen_dates, y=grass_pollen,\n name=\"grass pollen\",\n marker_color=px.colors.qualitative.Dark2[0]\n )\n )\n pollen_fig.add_trace(\n go.Scatter(x=pollen_dates, y=olive_pollen,\n name=\"olive pollen\",\n marker_color=px.colors.qualitative.Dark2[4]\n )\n )\n pollen_fig.add_trace(\n 
go.Scatter(x=pollen_dates, y=mugwort_pollen,\n name=\"mugwort pollen\",\n marker_color=px.colors.qualitative.Dark2[1]\n )\n )\n pollen_fig.add_trace(\n go.Scatter(x=pollen_dates, y=ragweed_pollen,\n name=\"ragweed pollen\",\n marker_color=px.colors.qualitative.Dark2[3]\n )\n )\n # Add current time\n pollen_fig.add_vline(x=current_time,\n line_width=1,\n line_dash=\"dot\",\n line_color=\"red\"\n )\n # Update axis names, hover, legend\n pollen_fig.update_layout(\n title=dict(text=\"Pollen in the Air (grains/m³)\"),\n xaxis_title=\"Date\",\n yaxis_title=\"grains/m³\",\n hovermode='x',\n legend=dict(orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1\n )\n )\n\n # Show plot in app and set auto resize with browser window\n st.plotly_chart(air_fig, use_container_width=True)\n st.info(\"\"\"\n Pollen forecast is only available for Europe during pollen season.\"\"\")\n st.plotly_chart(pollen_fig, use_container_width=True)\nexcept (KeyError) as error:\n st.markdown(\"Go to the main page and **select location**!\")\n\n", "repo_name": "ivan4an/web-forecast-app", "sub_path": "pages/3_Air_composition_forecast.py", "file_name": "3_Air_composition_forecast.py", "file_ext": "py", "file_size_in_byte": 6024, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "streamlit.set_page_config", "line_number": 13, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 16, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 17, "usage_type": "call"}, {"api_name": "streamlit.session_state", "line_number": 24, "usage_type": "attribute"}, {"api_name": "streamlit.session_state", "line_number": 27, "usage_type": "attribute"}, {"api_name": "backend.get_air_composition_data", "line_number": 32, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 50, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 50, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 53, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 53, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 55, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 55, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 60, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 60, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 62, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 62, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 66, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 66, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 68, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 68, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 72, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 72, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 74, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 74, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 78, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 78, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 80, "usage_type": 
"attribute"}, {"api_name": "plotly.express", "line_number": 80, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 84, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 84, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 86, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 86, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 110, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 110, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 112, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 112, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 114, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 114, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 118, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 118, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 120, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 120, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 124, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 124, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 126, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 126, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 130, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 130, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 132, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 132, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 136, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 136, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 138, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 138, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 142, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 142, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 144, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 144, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 168, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 169, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 171, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 173, "usage_type": "call"}]}
+{"seq_id": "8031809410", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 2 19:28:53 2015\n\n@author: florian\n\"\"\"\n\n\"\"\"\nL'objectif est de générer un fichier de données sur le prix des Renault Zoé \nsur le marché de l'occasion en Ile de France, PACA et Aquitaine. \nVous utiliserezleboncoin.fr comme source. Le fichier doit être propre et contenir \nles infos suivantes : version ( il y en a 3), année, kilométrage, prix, \ntéléphone du propriétaire, est ce que la voiture est vendue par un professionnel ou un particulier.\nVous ajouterez une colonne sur le prix de l'Argus du modèle \nque vous récupérez sur ce site http://www.lacentrale.fr/cote-voitures-renault-zoe--2013-.html.\n\nLes données quanti (prix, km notamment) devront être manipulables (pas de string, pas d'unité).\nVous ajouterez une colonne si la voiture est plus chere ou moins chere que sa cote moyenne.\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\ndef getSoupFromUrl(url):\n #Execute q request toward Youtube\n request = requests.get(url)\n #parse the restult of the request\n soup = BeautifulSoup(request.text, 'html.parser')\n return soup\n \ndef extractIntFromText(text):\n return int(text.replace(\" \",\"\"))\n\nZoeLeBoncoin = pd.DataFrame(columns = ['titre', 'reg', 'version', 'année','km','prix','tél','vendeur','argus'])\n\ntypeAcheteur = {\"professionnel\":\"c\",\"particulier\":\"p\"}\nregion = [\"ile_de_france\",\"provence_alpes_cote_d_azur\",\"aquitaine\"]\n \nurl_liste = 'http://www.leboncoin.fr/voitures/offres/ile_de_france/?q=zoe&f=c'\nsoup = getSoupFromUrl(url_liste)\nuneAnnonce={}\nuneAnnonceList=[]\n\nentries = soup.find(\"div\", { \"class\" : \"list-lbc\" }).find_all(\"a\") \nfor entry in entries:\n if \"zoe\" in entry['title'].lower():\n uneAnnonce[\"titre\"]=entry['title']\n uneAnnonce[\"region\"]=\"ile de France\"\n uneAnnonce[\"type vendeur\"]=\"Pro\"\n url_annonce = entry['href']\n soupAnnonce = getSoupFromUrl(url_annonce)\n uneAnnonce[\"prix en euros\"] = int(soupAnnonce.find(\"div\", { \"class\" : \"lbcParams withborder\" }).find(\"span\", { \"class\" : \"price\" })['content']) \n uneAnnonce[\"année\"] = int(soupAnnonce.find(\"div\", { \"class\" : \"lbcParams criterias\" }).find(\"td\", { \"itemprop\" : \"releaseDate\" }).get_text())\n uneAnnonce[\"kimometrage\"] = extractIntFromText(soupAnnonce.find(\"div\", { \"class\" : \"lbcParams criterias\" }).select(\"tr:nth-of-type(3) > td:nth-of-type(1)\")[0].text[:-3])\n \n uneAnnonce[\"description\"]=soupAnnonce.find(\"div\", { \"itemprop\" : \"description\"}).get_text() \n if \"life\" in uneAnnonce[\"description\"].lower():\n uneAnnonce[\"version\"]=\"LIFE CHARGE RAPIDE\"\n elif \"intens\" in uneAnnonce[\"description\"].lower():\n uneAnnonce[\"version\"]=\"INTENS CHARGE RAPIDE\" \n elif \"zen\" in uneAnnonce[\"description\"].lower():\n uneAnnonce[\"version\"]=\"ZEN CHARGE RAPIDE\"\n else:\n uneAnnonce[\"version\"] = \"VERSION INCONNUE\"\n \n uneAnnonceList = pd.DataFrame({'titre':uneAnnonce[\"titre\"],'reg':uneAnnonce[\"region\"],'version':uneAnnonce[\"version\"],'année':uneAnnonce[\"année\"],'km':uneAnnonce[\"kimometrage\"],'prix':uneAnnonce[\"prix en euros\"],'tél':\"\",'vendeur':uneAnnonce[\"type vendeur\"],'argus':\"\"},index=[0]) \n ZoeLeBoncoin=pd.concat([ZoeLeBoncoin,uneAnnonceList])\n ZoeLeBoncoin.reset_index()\n\n\n# GESTION DE L4ARGUS\nurlargus=\"http://www.lacentrale.fr/cote-voitures-renault-zoe--2013-.html\"\nsoupargus = getSoupFromUrl(urlargus)\ndicoargus={}\nentries=soupargus.find(\"div\",{\"id\" : 
\"listing_quot\"}).find_all(\"a\",{\"style\" : \"color:#007EFF; text-decoration:underline\"})\nfor entry in entries:\n soupargusmodele = getSoupFromUrl(\"http://www.lacentrale.fr/\"+entry[\"href\"])\n coteargus=extractIntFromText(soupargusmodele.find(\"span\",{\"class\":\"Result_Cote arial tx20\"}).text[:-2])\n dicoargus[entry.text]=coteargus\ncoteArgusData=pd.DataFrame(dicoargus,index=[\"cote argus\"]).T\n\netudefinale = pd.merge(ZoeLeBoncoin,coteArgusData,left_on=\"version\",right_index=True,how=\"left\" )\netudefinale['position_argus']=etudefinale.apply(lambda x:\"supérieur à l'argus\" if x[\"prix\"]>x[\"cote argus\"] else \"inferieur à l'argus\",axis=1)\nprint (etudefinale)\n \n \n\n \n \n \n \n ", "repo_name": "rachidalili/MS-BGD2015", "sub_path": "florian-firmin/Lesson4/exo_dom_Lesson04.py", "file_name": "exo_dom_Lesson04.py", "file_ext": "py", "file_size_in_byte": 4227, "program_lang": "python", "lang": "fr", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "76", "api": [{"api_name": "requests.get", "line_number": 27, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 67, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 68, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 81, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 83, "usage_type": "call"}]}
+{"seq_id": "9120315149", "text": "import configparser\nfrom colorama import Fore, Back, Style\nimport os\n\ndef get_key():\n script_dir = os.path.dirname(os.path.abspath(__file__))\n config_path = os.path.join(script_dir, 'config.ini')\n # 读取INI文件\n config = configparser.ConfigParser()\n config.read(config_path)\n # config.read('config.ini')\n\n # 获取key变量\n key = config.get('Section1', 'key', fallback='')\n\n # 如果key为空,则要求用户输入并保存到INI文件\n if not key:\n key = input('Please enter your api-key:')\n config.set('Section1', 'key', key)\n with open('config.ini', 'w') as configfile:\n config.write(configfile)\n\n print('api-key set successfully')\n\n return key\n\ndef change_key(newKey):\n config = configparser.ConfigParser()\n config.read('config.ini')\n config.set('Section1', 'key', newKey)\n with open('config.ini', 'w') as configfile:\n config.write(configfile)\n print( Fore.YELLOW +'api-key changed successfully' + Style.RESET_ALL)", "repo_name": "ForestTrees/TerminalGPT", "sub_path": "keyConfig.py", "file_name": "keyConfig.py", "file_ext": "py", "file_size_in_byte": 1021, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 9, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 28, "usage_type": "call"}, {"api_name": "colorama.Fore.YELLOW", "line_number": 33, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 33, "usage_type": "name"}, {"api_name": "colorama.Style.RESET_ALL", "line_number": 33, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 33, "usage_type": "name"}]}
+{"seq_id": "9353801842", "text": "import math\nimport pandas as pd\nimport numpy as np\nimport csv\nimport os\nimport peakutils\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport sys\nfrom scipy.signal import find_peaks, peak_prominences, filtfilt, butter\nfrom matplotlib.backend_bases import MouseButton\n\n\n\"\"\"Reads in all csv files in folder and creates an array of pandas dataframes\n returns array of dfs\"\"\"\ndef read_csvs():\n cellData = []\n # when we have a folder of, files, read in from directory path\n filename = \"realResults.csv\" #file outputted by ROI manager\n path = \"plugins/CalciumSignal/pythonscript/cell_data/\"\n df = pd.read_csv((path + filename))\n df = df.filter(regex=\"Mean\")\n df = df.dropna(axis=\"columns\") ##eliminate columns with NaN values\n return df\n\n\n\n\"\"\"Stores all relevant graph data to a csv for the ImageJ plugin to use\"\"\"\ndef write_csv(df):\n # when we have a folder of, files, read in from directory path\n path = \"plugins/CalciumSignal/pythonscript/cell_data/\"\n df.to_csv(os.path.join(path, \"graph_data.csv\"))\n return\n\n\n\n\"\"\" Stores peak locations at the correct frame # in dataframe.\nIf there is a peak, value will be 1, if no peak detected, value is -1.\"\"\"\ndef writePeaksToDf(peakIndx,df, cellnum):\n peaks = [-1] * len(df)\n colName = \"Cell\" + str(cellnum+1) + \"_Peaks\"\n for peakFrame in peakIndx:\n peaks[peakFrame] = 1\n newDf = df.copy()\n newDf[colName] = peaks\n return newDf\n\n\n\n\"\"\"Finds first rough baseline from data\n Looks at all elements below the average and averages them\n returns the base\"\"\"\ndef findBaseline(avg, intensities, cellDf):\n for elem in intensities:\n if elem > avg:\n intensities.remove(elem)\n base = sum(intensities) / len(intensities)\n cellDf['baseline'] = base\n return (base)\n\n\n\n\"\"\"Creates a new df column with normalized data\"\"\"\n\"\"\"TODO: maybe later changed so can use findBaseline function instead\"\"\"\n\"\"\"For now, only use when passing in normalized data\"\"\"\ndef findNormalizedBase(ndata, df):\n #ndata -> normalized data\n average = ndata.mean()\n df[\"ndata\"] = ndata\n baselineArray = df[\"ndata\"].values.tolist()\n for elem in baselineArray:\n if elem > average:\n baselineArray.remove(elem)\n newBase = sum(baselineArray) / len(baselineArray)\n df['normalbaseline'] = newBase\n\n\n\n\"\"\"ONLY USE WHEN NOT SMOOTHING THE DATA\"\"\"\n\"\"\"Normalizes the baseline for the original data\"\"\"\ndef normalizeData(base1, df, cellMean):\n y = df[cellMean] #list of intensities\n base2 = peakutils.baseline(y, math.floor(base1))\n normalizedData = y - base2\n findNormalizedBase(normalizedData, df) #new normbaseline column created\n return base2\n\n\n\"\"\"Smooths data points so signal is more clean\nCalls findNormalizedBase to find baseline of smoothed data\n\"\"\"\ndef smoothDataPoints(normalBase, df, cellMean):\n data = df[cellMean].values.tolist()\n c, d = butter(3, 0.1, 'lowpass') #.3 for less smoothed data\n filteredLowPass = filtfilt(c, d, data)\n newbase = peakutils.baseline(filteredLowPass, math.floor(normalBase))\n findNormalizedBase(filteredLowPass-newbase,df)\n return filteredLowPass, newbase\n\n\n\n\"\"\"\nThis function is for testing only\n\"\"\"\ndef plotPeakCellData(x,y,df):\n plt.figure()\n plt.xlabel(\"Video Frame (#)\")\n plt.ylabel(\"Normalized Calcium Intensity\")\n plt.title(\"Calcium Intensity Over Time; Normalized and Smoothed Data with Peaks\")\n plt.plot(y)\n plt.plot(x,y[x],\"x\")\n 
plt.plot(df[\"normalbaseline\"],color='red',label=\"baseline\")\n\n\n\"\"\"\nPlots the calcium signaling data on a graph\n\"\"\"\ndef plotOriginalCellData(y, figure):\n plt.title(\"Original Calcium Intensity Over Time\")\n plt.xlabel(\"Video Frame (#)\")\n plt.ylabel(\"Calcium Intensity\")\n figure.gca().plot(y)\n\n\n\n\"\"\"\nFunction matches the peak detected on the smoothed graph to the correct frame in original data\n\"\"\"\ndef matchRefinedPeakToActualPeak(peaks, originalData):\n # since data was smoothed when peaks were detected, look for highest point around frame\n # where peak was detected in the original data based on an error deviation\n peakIndices = []\n for peak in peaks:\n highPointIndex = peak\n for value in range(peak - 30, peak + 30):\n if originalData[value] > originalData[highPointIndex]:\n highPointIndex = value\n peakIndices.append(highPointIndex)\n return peakIndices\n\n##GLOBAL VARIABLES###\ncellData = read_csvs()\ncellID = 0\nfig = plt.figure()\nmax = len(cellData.columns)\n##GLOBAL VARIABLES###\n\n\n\n\"\"\"\nPlots peaks on a graph with the original data (peaks are marked with \"x\")\n\"\"\"\ndef plotPeaksOnOriginalData(peaks,data,cellnum,figure):\n plt.title(\"Original Calcium Intensity Over Time with Peaks\")\n plt.xlabel(\"Video Frame (#)\")\n plt.ylabel(\"Calcium Intensity\")\n\n for idx in peaks:\n figure.gca().plot(idx, data[idx],\"x\")\n\n\n\n\"\"\"\nFunction rechecks peak columns to figure out where the peaks are and replots them on the current figure.\nMainly used after user_addPeak or user_removePeak to replot and properly display figure with the new additions or new\nremovals\n\"\"\"\ndef replot_cell(figure):\n # print(\"HERE\")\n figure.canvas.manager.set_window_title(\"Cell %d\" %(cellID + 1))\n peakCol = \"Cell\" + str(cellID + 1) + \"_Peaks\"\n dataCol = \"Mean\" + str(cellID + 1)\n\n plotOriginalCellData(cellData[dataCol].values.tolist(), figure)\n if peakCol in cellData.columns:\n for i in range(0,len(cellData[peakCol])):\n if cellData[peakCol][i] == 1: #1 signifies there is a peak, -1 means there is no peak\n # print(\"peak\")\n figure.gca().plot(i,cellData[dataCol][i],marker=\"x\",color=\"red\")\n\n\n\n\"\"\"\nFunction does main calculations for cell data. Calls other functions to determine baseline,\nget smoothed/refined data values, and determine peaks. Once it's finished, it saves the\npeaks to the dataframe.\n\"\"\"\ndef cell_calculations():\n global cellID\n global cellData\n\n # we're really starting from Cell 0 because of indices. 
but it's easier for the client to start from 1\n #figure.canvas.manager.set_window_title(\"Cell %d\" %(cellID + 1))\n # figure.canvas.toolbar.pack_forget()\n cell = cellData.columns[cellID]\n videoFrames = len(cellData)\n average = cellData[cell].mean()\n originalIntensities = cellData[cell].values.tolist()\n # find baseline\n firstBaseline = findBaseline(average, list(originalIntensities), cellData)\n # normalize Data - don't need to use for now\n # normalBase = normalizeData(firstBaseline, cell, cellMean)\n smoothedData, smoothedBase = smoothDataPoints(firstBaseline,cellData,cell)\n # plot graph\n refinedData = smoothedData - smoothedBase\n\n peaks, properties = find_peaks(refinedData, prominence=(5))\n #plotOriginalCellData(originalIntensities, figure)\n #plotPeakCellData(peaks,refinedData,cell)\n peakIndices = matchRefinedPeakToActualPeak(peaks,originalIntensities)\n #plotPeaksOnOriginalData(peakIndices,originalIntensities,cellID,figure)\n cellData = writePeaksToDf(peakIndices,cellData,cellID)\n #return cellData\n\n\n\n\n# key event listener for switching between cell graphs\ndef on_press(event):\n global cellData\n global cellID\n global fig\n global max\n\n # right arrow key to advance, left to go back (WASD scheme used as a backup)\n # graphs should wrap if you go past the last cell or before the first one -- hence, \"carousel view\"\n if event.key in ['right', 'left', 'd', 'a']:\n if event.key == 'right' or event.key == 'd':\n cellID += 1\n if cellID >= max:\n cellID = 0\n if event.key == 'left' or event.key == 'a':\n if cellID > 0:\n cellID -= 1\n elif cellID <= 0:\n cellID = max - 1\n\n fig.clear()\n event.canvas.figure.clear()\n replot_cell(event.canvas.figure)\n event.canvas.draw()\n\n\n\n\"\"\"\nFunction calls the proper add/remove peak function depending on type of mouse click\n\"\"\"\ndef on_click(event):\n # print(\"on_click\")\n if event.button is MouseButton.LEFT:\n # print(\"LEFT\") # normal click\n # call add peak function\n user_addPeak(event)\n elif event.button is MouseButton.RIGHT:\n # print(\"Right\") # right click - remove\n # call remove peak function\n user_removePeak(event)\n\n\n\n\"\"\"\nFunction removes peak from the graph\n -registers x,y coordinate of mouse click\n -determines closest peak in df (based on frame range) to mouse click\n -removes this point from df (make it -1)\n -replot any peaks\n\"\"\"\ndef user_removePeak(event):\n global cellData\n global fig\n\n # print(\"remove peak from graph function\")\n peakCol = \"Cell\" + str(cellID + 1) + \"_Peaks\"\n dataCol = \"Mean\" + str(cellID + 1)\n\n if event.inaxes: # checks to see if user clicked on the plotted graph\n ax = event.inaxes # the axes instance\n x = int(event.xdata)\n y = int(event.ydata)\n # print('data coords %f %f' % (x, y))\n\n #finding the closest already defined peak (if there is any) to the mouseclick so we can remove it\n removeIdx = x\n diff = x\n for data in range(x - 10, x + 10): # original was 30\n try:\n if cellData[peakCol][data] == 1:\n if abs(cellData[dataCol][data] - cellData[dataCol][removeIdx]) < diff:\n removeIdx = data\n diff = abs(cellData[dataCol][data] - cellData[dataCol][removeIdx])\n except:\n continue # ignore indexes that are out of range\n\n #cellData[peakCol][removeIdx] = -1\n cellData.loc[removeIdx,peakCol] = -1\n\n fig.clear()\n event.canvas.figure.clear()\n replot_cell(event.canvas.figure)\n\n event.canvas.draw()\n\n # print(\"DONE\")\n plt.show()\n\n\n\n\"\"\"\nFunction adds peak to the graph\n -registers x,y coordinate of mouse click\n -determines 
relatively highest y value in df (based on frame range) to mouse click\n -adds this point as peak (make it 1)\n -replot any peaks\n\"\"\"\ndef user_addPeak(event):\n global cellData\n global fig\n\n # print(\"add peak to graph function\")\n peakCol = \"Cell\" + str(cellID + 1) + \"_Peaks\"\n dataCol = \"Mean\" + str(cellID + 1)\n\n if event.inaxes: # checks to see if user clicked on the plotted graph\n ax = event.inaxes # the axes instance\n x = int(event.xdata)\n y = int(event.ydata)\n # print('data coords %f %f' % (x, y))\n\n maxValIdx = x\n for data in range(x - 10, x + 10): # original was 30\n try:\n if cellData[dataCol][data] > cellData[dataCol][maxValIdx]:\n maxValIdx = data\n except:\n continue # ignore indexes that are out of range\n\n cellData.loc[maxValIdx,peakCol] = 1\n # print(cellData.loc[maxValIdx, peakCol])\n # print(\"x: \" + str(maxValIdx))\n # print(\"y: \" + str(cellData[dataCol][maxValIdx]))\n\n fig.clear()\n event.canvas.figure.clear()\n replot_cell(event.canvas.figure)\n event.canvas.draw()\n\n # print(\"DONE\")\n plt.show()\n\n\n\ndef main():\n # uncomment below line for debugging only (and be sure to close stdout at the end)\n # this redirects print() output to output.txt, which you will find in the Fiji.app directory after program finishes\n # sys.stdout = open('output.txt', 'w')\n\n # sorry about the globals. it's for a good cause, I promise.\n global cellData\n global cellID\n global fig\n\n fig.canvas.mpl_connect('key_press_event', on_press)\n fig.canvas.mpl_connect('button_press_event', on_click)\n\n numColumns = len(cellData.columns)\n for col in range(0,numColumns):\n cellID = col\n cell_calculations()\n\n cellID = 0\n write_csv(cellData)\n path = \"plugins/CalciumSignal/pythonscript/cell_data/\"\n cellData = pd.read_csv((path + \"graph_data.csv\"))\n\n #plot cells and open carousel view for user to click through\n replot_cell(fig)\n plt.show()\n\n # write to csv at the end (after window is closed)!\n write_csv(cellData)\n\n # uncomment below for debugging only (also see output.txt at the start of main)\n # sys.stdout.close()\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "Eoldham/CISC498Project-Group17", "sub_path": "release/peakscript.py", "file_name": "peakscript.py", "file_ext": "py", "file_size_in_byte": 12271, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "76", "api": [{"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "peakutils.baseline", "line_number": 83, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 83, "usage_type": "call"}, {"api_name": "scipy.signal.butter", "line_number": 94, "usage_type": "call"}, {"api_name": "scipy.signal.filtfilt", "line_number": 95, "usage_type": "call"}, {"api_name": "peakutils.baseline", "line_number": 96, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "scipy.signal.find_peaks", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.backend_bases.MouseButton.LEFT", "line_number": 250, "usage_type": "attribute"}, {"api_name": "matplotlib.backend_bases.MouseButton", "line_number": 250, "usage_type": "name"}, {"api_name": "matplotlib.backend_bases.MouseButton.RIGHT", "line_number": 254, "usage_type": "attribute"}, {"api_name": "matplotlib.backend_bases.MouseButton", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 304, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 304, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 348, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 348, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 373, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 377, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 377, "usage_type": "name"}]}
+{"seq_id": "5479663774", "text": "import os\nimport time\nimport pandas as pd\nfrom datetime import datetime\n\nfrom dash import Dash, no_update, ctx, Output, Input, State\nimport dash_bootstrap_components as dbc\nfrom dash.exceptions import PreventUpdate\n\nfrom src.market_data import update_market_data\nfrom src.components.table_cards import get_row_highlight_condition\nfrom src.components.figures import get_candlestick_figure, get_bar_figure\nfrom src.utils import filter_df, add_emas\n\n\ndef register_callbacks(app: Dash):\n\n @app.long_callback(\n Output(\"timestamp\", \"data\"),\n Input(\"update_button\", \"n_clicks\"),\n running=[\n (Output(\"update_button\", \"disabled\"), True, False),\n (Output(\"update_button\", \"children\"), [dbc.Spinner(size=\"sm\"), \" Updating...\"], \"Update Data\"),\n ]\n )\n def update_data(n_clicks):\n \"\"\" \n Update all market data on startup or when the update button was clicked. \n Once the data is ready, the timestamp is updated, which triggers other callbacks.\n \"\"\"\n timestamp = int(time.time())\n update_market_data()\n return timestamp\n\n\n @app.callback(\n Output(\"last_update_text\", \"children\"),\n Input(\"timestamp\", \"data\"),\n prevent_initial_call=True,\n )\n def set_last_update_text(timestamp):\n \"\"\" Display the time of the last update once the new data is available. \"\"\"\n return f\"Last update: {datetime.fromtimestamp(timestamp).strftime('%d.%m.%Y, %H:%M')}\"\n\n \n @app.callback(\n Output(\"trend_table\", \"data\"),\n Input(\"timestamp\", \"data\"),\n Input(\"radio_trend\", \"value\"),\n prevent_initial_call=True,\n )\n def update_trend_table(timestamp, filter):\n \"\"\" Update the data table of the uptrend screener whenever the data was updated or another filter was selected. \"\"\"\n df = pd.read_csv(os.path.join(\"data\", \"market_data.csv\"), index_col=\"name\")\n df = df.drop([\"BTC\"]) # only keep altcoins\n df[\"id\"] = df.index\n df = filter_df(df, filter)\n df = df[[\"id\", \"trend_strength\", \"gain_1d\", \"gain_1w\", \"gain_1m\"]]\n\n return df.to_dict(\"records\")\n\n\n @app.callback(\n Output(\"pump_table\", \"data\"),\n Input(\"timestamp\", \"data\"),\n Input(\"radio_pump\", \"value\"),\n prevent_initial_call=True,\n )\n def update_pump_table(timestamp, filter):\n \"\"\" Update the data table of the pump screener whenever the data was updated or another filter was selected. \"\"\"\n df = pd.read_csv(os.path.join(\"data\", \"market_data.csv\"), index_col=\"name\")\n df = df.drop([\"BTC\"]) # only keep altcoins\n df[\"id\"] = df.index\n df = filter_df(df, filter)\n df = df.loc[df[\"pump_strength\"] > 2]\n df = df[[\"id\", \"pump_strength\", \"gain_1d\", \"gain_1w\", \"gain_1m\"]] \n df = df.sort_values(by=[\"pump_strength\"], ascending=False)\n\n return df.to_dict(\"records\")\n\n\n @app.callback(\n Output(\"trend_table\", \"page_current\"),\n Output(\"pump_table\", \"page_current\"),\n Input(\"timestamp\", \"data\"),\n Input(\"trend_table\", \"sort_by\"),\n )\n def reset_to_first_page(timestamp, sort_by):\n \"\"\" \n Go to the first page of both data tables whenever the data was updated. 
\n Go to the first page of the uptrend data table whenever the user changes the sorting.\n \"\"\"\n if ctx.triggered_id == \"timestamp\":\n return 0, 0\n return 0, no_update\n\n\n @app.callback(\n Output(\"altcoin\", \"data\"),\n Output(\"trend_table\", \"active_cell\"), Output(\"trend_table\", \"selected_cells\"), Output(\"trend_table\", \"style_data_conditional\"),\n Output(\"pump_table\", \"active_cell\"), Output(\"pump_table\", \"selected_cells\"), Output(\"pump_table\", \"style_data_conditional\"),\n Input(\"trend_table\", \"active_cell\"), Input(\"pump_table\", \"active_cell\"), Input(\"timestamp\", \"data\"),\n Input(\"radio_trend\", \"value\"), Input(\"radio_pump\", \"value\"),\n State(\"trend_table\", \"style_data_conditional\"), State(\"pump_table\", \"style_data_conditional\"),\n prevent_initial_call=True,\n )\n def select_altcoin(active_cell_trend, active_cell_pump, timestamp, filter_trend, filter_pump, style_trend, style_pump):\n \"\"\" Highlight the table row of the currently selected altcoin. \"\"\"\n # remove highlighting when reloading or applying filters\n if ctx.triggered_id in [\"timestamp\", \"radio_trend\", \"radio_pump\"]:\n style_trend[1] = {}\n style_pump[1] = {}\n return no_update, None, [], style_trend, None, [], style_pump\n\n altcoin = no_update\n if ctx.triggered_id == \"trend_table\":\n if active_cell_trend:\n condition = get_row_highlight_condition(active_cell_trend[\"row\"])\n style_trend[1] = condition\n style_pump[1] = {}\n altcoin = active_cell_trend[\"row_id\"]\n else:\n style_trend[1] = {}\n style_pump = no_update\n else:\n if active_cell_pump:\n condition = get_row_highlight_condition(active_cell_pump[\"row\"])\n style_pump[1] = condition\n style_trend[1] = {}\n altcoin = active_cell_pump[\"row_id\"]\n else:\n style_pump[1] = {}\n style_trend = no_update\n \n return altcoin, None, [], style_trend, None, [], style_pump\n\n \n @app.callback(\n Output(\"bar_chart\", \"children\"),\n Input(\"timestamp\", \"data\"),\n Input(\"radio_overview_filter\", \"value\"),\n Input(\"radio_overview_timeframe\", \"value\"),\n prevent_initial_call=True,\n )\n def update_overview_card(timestamp, filter, timeframe):\n \"\"\" \n Update the bar figure containing the top gainers whenever the data was updated \n or another filter or timeframe was selected. \n \"\"\"\n df = pd.read_csv(os.path.join(\"data\", \"market_data.csv\"), index_col=\"name\")\n col = f\"gain_{timeframe.lower()}\"\n btc_gain = df.loc[\"BTC\", col]\n df = df.drop([\"BTC\"]) # only keep altcoins\n df = filter_df(df, filter)\n df = df.sort_values(by=[col], ascending=False).iloc[:30]\n\n return get_bar_figure(names=df.index, gains=df[col], btc_gain=btc_gain, timeframe=timeframe)\n \n\n @app.callback(\n Output(\"bitcoin_chart\", \"children\"),\n Input(\"timestamp\", \"data\"),\n Input(\"radio_btc_chart\", \"value\"),\n prevent_initial_call=True,\n )\n def update_bitcoin_chart(timestamp, timeframe):\n \"\"\" Update the Bitcoin chart whenever the data was updated or another timeframe was selected. 
\"\"\"\n klines = pd.read_csv(os.path.join(\"data\", \"klines\", \"BTC.csv\"), index_col=\"timestamp\")\n klines = add_emas(klines=klines, ema_lengths=[12, 21, 50])\n\n if timeframe == \"1W\":\n klines = klines.iloc[-42:]\n else:\n klines = klines.iloc[-186:]\n\n return get_candlestick_figure(title=\"BTC / USD\", klines=klines)\n \n\n @app.callback(\n Output(\"altcoin_usd_chart\", \"children\"), \n Output(\"altcoin_btc_chart\", \"children\"),\n Input(\"timestamp\", \"data\"),\n Input(\"altcoin\", \"data\"),\n Input(\"radio_altcoin_chart\", \"value\"), \n prevent_initial_call=True,\n )\n def update_altcoin_charts(timestamp, altcoin, timeframe):\n \"\"\" Update both altcoin charts whenever the data was updated or another timeframe was selected. \"\"\"\n if altcoin in [None, \"\"]:\n raise PreventUpdate\n \n btc_klines = pd.read_csv(os.path.join(\"data\", \"klines\", \"BTC.csv\"), index_col=\"timestamp\")\n usd_denom_klines = pd.read_csv(os.path.join(\"data\", \"klines\", f\"{altcoin}.csv\"), index_col=\"timestamp\")\n btc_denom_klines = pd.DataFrame(\n index=usd_denom_klines.index,\n data={\n \"open\": usd_denom_klines[\"open\"] / btc_klines[\"open\"], \n \"high\": usd_denom_klines[\"high\"] / btc_klines[\"close\"],\n \"low\": usd_denom_klines[\"low\"] / btc_klines[\"close\"], \n \"close\": usd_denom_klines[\"close\"] / btc_klines[\"close\"],\n },\n ).dropna()\n\n usd_denom_klines = add_emas(klines=usd_denom_klines, ema_lengths=[12, 21, 50])\n btc_denom_klines = add_emas(klines=btc_denom_klines, ema_lengths=[12, 21, 50])\n\n if timeframe == \"1W\":\n usd_denom_klines = usd_denom_klines.iloc[-42:]\n btc_denom_klines = btc_denom_klines.iloc[-42:]\n else:\n usd_denom_klines = usd_denom_klines.iloc[-186:]\n btc_denom_klines = btc_denom_klines.iloc[-186:]\n\n usd_chart = get_candlestick_figure(title=f\"{altcoin} / USD\", klines=usd_denom_klines)\n btc_chart = get_candlestick_figure(title=f\"{altcoin} / BTC\", klines=btc_denom_klines)\n\n return usd_chart, btc_chart\n\n\n @app.callback(\n Output(\"bitcoin_tradingview\", \"children\"),\n Output(\"bitcoin_exchanges\", \"children\"),\n Input(\"timestamp\", \"data\"),\n prevent_initial_call=True,\n )\n def update_bitcoin_links(timestamp):\n \"\"\" Update the TradingView and exchange links for Bitcoin whenever the data was updated. \"\"\"\n df = pd.read_csv(os.path.join(\"data\", \"config.csv\"), index_col=\"name\")\n tradingview_link = dbc.CardLink(\"TradingView\", target=\"_blank\", href=df.loc[\"BTC\", \"chart_usd\"])\n \n exchange_links = []\n if type(df.loc[\"BTC\", \"spot_usd\"]) == str:\n exchange_links.append(dbc.CardLink(\"Spot (USD)\", target=\"_blank\", href=df.loc[\"BTC\", \"spot_usd\"]))\n if type(df.loc[\"BTC\", \"perps\"]) == str:\n exchange_links.append(dbc.CardLink(\"Perpetuals\", target=\"_blank\", href=df.loc[\"BTC\", \"perps\"]))\n\n return tradingview_link, exchange_links\n\n\n @app.callback(\n Output(\"altcoin_tradingview\", \"children\"),\n Output(\"altcoin_exchanges\", \"children\"),\n Input(\"altcoin\", \"data\"),\n prevent_initial_call=True,\n )\n def update_altcoin_links(altcoin):\n \"\"\" Update the TradingView and exchange links for the current altcoin whenever a new altcoin was selected. 
\"\"\"\n df = pd.read_csv(os.path.join(\"data\", \"config.csv\"), index_col=\"name\")\n\n tradingview_links = []\n if type(df.loc[altcoin, \"chart_usd\"]) == str:\n tradingview_links.append(dbc.CardLink(\"TradingView (USD)\", target=\"_blank\", href=df.loc[altcoin, \"chart_usd\"]))\n if type(df.loc[altcoin, \"chart_btc\"]) == str:\n tradingview_links.append(dbc.CardLink(\"TradingView (BTC)\", target=\"_blank\", href=df.loc[altcoin, \"chart_btc\"]))\n\n exchange_links = []\n if type(df.loc[altcoin, \"spot_usd\"]) == str:\n exchange_links.append(dbc.CardLink(\"Spot (USD)\", target=\"_blank\", href=df.loc[altcoin, \"spot_usd\"]))\n if type(df.loc[altcoin, \"spot_btc\"]) == str:\n exchange_links.append(dbc.CardLink(\"Spot (BTC)\", target=\"_blank\", href=df.loc[altcoin, \"spot_btc\"]))\n if type(df.loc[altcoin, \"perps\"]) == str:\n exchange_links.append(dbc.CardLink(\"Perpetuals\", target=\"_blank\", href=df.loc[altcoin, \"perps\"]))\n\n return tradingview_links, exchange_links\n", "repo_name": "fyangch/crypto-dashboard", "sub_path": "src/callbacks.py", "file_name": "callbacks.py", "file_ext": "py", "file_size_in_byte": 11195, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "76", "api": [{"api_name": "dash.Dash", "line_number": 16, "usage_type": "name"}, {"api_name": "time.time", "line_number": 31, "usage_type": "call"}, {"api_name": "src.market_data.update_market_data", "line_number": 32, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 19, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 20, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 22, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 23, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Spinner", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "name"}, {"api_name": "dash.Output", "line_number": 37, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "src.utils.filter_df", "line_number": 57, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 47, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 48, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "src.utils.filter_df", "line_number": 74, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 64, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 65, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 66, "usage_type": "call"}, {"api_name": "dash.ctx.triggered_id", "line_number": 93, "usage_type": "attribute"}, {"api_name": "dash.ctx", "line_number": 93, "usage_type": "name"}, {"api_name": "dash.no_update", "line_number": 95, "usage_type": "name"}, {"api_name": "dash.Output", "line_number": 83, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 84, "usage_type": "call"}, {"api_name": 
"dash.Input", "line_number": 85, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 86, "usage_type": "call"}, {"api_name": "dash.ctx.triggered_id", "line_number": 110, "usage_type": "attribute"}, {"api_name": "dash.ctx", "line_number": 110, "usage_type": "name"}, {"api_name": "dash.no_update", "line_number": 113, "usage_type": "name"}, {"api_name": "dash.no_update", "line_number": 115, "usage_type": "name"}, {"api_name": "dash.ctx.triggered_id", "line_number": 116, "usage_type": "attribute"}, {"api_name": "dash.ctx", "line_number": 116, "usage_type": "name"}, {"api_name": "src.components.table_cards.get_row_highlight_condition", "line_number": 118, "usage_type": "call"}, {"api_name": "dash.no_update", "line_number": 124, "usage_type": "name"}, {"api_name": "src.components.table_cards.get_row_highlight_condition", "line_number": 127, "usage_type": "call"}, {"api_name": "dash.no_update", "line_number": 133, "usage_type": "name"}, {"api_name": "dash.Output", "line_number": 99, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 100, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 101, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 102, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 103, "usage_type": "call"}, {"api_name": "dash.State", "line_number": 104, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "src.utils.filter_df", "line_number": 154, "usage_type": "call"}, {"api_name": "src.components.figures.get_bar_figure", "line_number": 157, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 139, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 140, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 141, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 142, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "src.utils.add_emas", "line_number": 169, "usage_type": "call"}, {"api_name": "src.components.figures.get_candlestick_figure", "line_number": 176, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 161, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 162, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 163, "usage_type": "call"}, {"api_name": "dash.exceptions.PreventUpdate", "line_number": 190, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 192, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 192, "usage_type": "call"}, {"api_name": "os.path", "line_number": 192, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path", "line_number": 193, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 194, "usage_type": "call"}, {"api_name": "src.utils.add_emas", "line_number": 204, "usage_type": "call"}, {"api_name": "src.utils.add_emas", "line_number": 205, "usage_type": "call"}, {"api_name": "src.components.figures.get_candlestick_figure", "line_number": 214, "usage_type": "call"}, {"api_name": 
"src.components.figures.get_candlestick_figure", "line_number": 215, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 180, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 181, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 182, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 183, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 184, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path", "line_number": 228, "usage_type": "attribute"}, {"api_name": "dash_bootstrap_components.CardLink", "line_number": 229, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardLink", "line_number": 233, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardLink", "line_number": 235, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 221, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 222, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 223, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 248, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 248, "usage_type": "call"}, {"api_name": "os.path", "line_number": 248, "usage_type": "attribute"}, {"api_name": "dash_bootstrap_components.CardLink", "line_number": 252, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardLink", "line_number": 254, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardLink", "line_number": 258, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardLink", "line_number": 260, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardLink", "line_number": 262, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 241, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 242, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 243, "usage_type": "call"}]}
+{"seq_id": "27517327370", "text": "import os\nimport re\nimport requests\n\nfrom .._cache import _test_cache, _insert_cache\nfrom ._string_aware_strip import (\n _string_aware_comment_strip,\n _string_aware_generic_strip\n)\n\n\n_CACHE_DIR = os.path.abspath(os.path.join(__file__, \"..\", \".cache\", \"js\"))\n_TOPTAL_CACHE_DIR = os.path.join(_CACHE_DIR, \"toptal\")\n\n_SINGLE_LINE_COMMENT_RE = re.compile(r\"//.*\")\n_MULTI_LINE_COMMENT_RE = re.compile(r\"/\\*[\\s\\S]*?\\*/\")\n_EXTRA_NEWLINES_RE = re.compile(r\"\\n{2,}\")\n_EXTRA_SPACES_RE = re.compile(r\" {2,}\")\n_EXTRA_TABS_RE = re.compile(r\"\\t{2,}\")\n\n\ndef _strip_comments(source):\n \"\"\"Strip comments from Javascript source.\n\n Args:\n source (str): Input Javascript.\n\n Returns:\n str: Stripped source.\n \"\"\"\n source = _string_aware_comment_strip(_SINGLE_LINE_COMMENT_RE, source)\n source = _string_aware_comment_strip(_MULTI_LINE_COMMENT_RE, source)\n return source\n\n\ndef _strip_whitespace(source, has_consistent_semicolons=True):\n \"\"\"Strip excessive whitespace.\n\n Args:\n source (str): Input Javascript.\n has_consistent_semicolons (bool): If the source code\n has consistent semicolons. (Default: True)\n\n Returns:\n str: Stripped source.\n \"\"\"\n base_op_characters = \"[+-*^|&<>{;:?=,(!~/\"\n semicolon_fragile_characters = \")]\"\n if has_consistent_semicolons:\n base_op_characters += \"}\"\n else:\n semicolon_fragile_characters += \"}\"\n all_op_characters = semicolon_fragile_characters + base_op_characters\n\n # 1 + 2 + 3 => 1+2+3\n base_strip = re.compile(\n r\"\\s*([\" + re.escape(base_op_characters) + r\"])\\s*\"\n )\n\n # hello ] => hello]\n prefix_fragile_strip = re.compile(\n r\"\\s+([\" + re.escape(semicolon_fragile_characters) + r\"])\"\n )\n\n # [ [ ] ] => [[]]\n sequential_operator_strip = re.compile(\n r\"([\"+ re.escape(all_op_characters) + r\"])\"\n r\"\\s+(?=[\" + re.escape(all_op_characters) + r\"])\"\n )\n # )\\n\\n\\nthing => )\\nthing\n fragile_semicolon_strip = re.compile(\n r\"\\s*([\"+ re.escape(semicolon_fragile_characters) + r\"])\\s+\"\n )\n\n source = _string_aware_generic_strip(base_strip, lambda x: x.group(1), source)\n source = _string_aware_generic_strip(sequential_operator_strip, lambda x: x.group(1), source)\n source = _string_aware_generic_strip(prefix_fragile_strip, lambda x: \"{0}\\n\".format(x.group(1)), source)\n source = _string_aware_generic_strip(fragile_semicolon_strip, lambda x: \"{0}\\n\".format(x.group(1)), source)\n source = _string_aware_generic_strip(_EXTRA_NEWLINES_RE, lambda x: \"\\n\", source)\n source = _string_aware_generic_strip(_EXTRA_SPACES_RE, lambda x: \" \", source)\n source = _string_aware_generic_strip(_EXTRA_TABS_RE, lambda x: \"\\t\", source)\n source = source.strip()\n return source\n\n\ndef _request_toptal_minify_js(source):\n \"\"\"Request minifaction from Toptal.\n\n Args:\n source (str): Javascript to minify.\n\n Returns:\n tuple(bool, str): Success flag, minified text.\n \"\"\"\n try:\n found, cached = _test_cache(_TOPTAL_CACHE_DIR, source)\n if found:\n return True, cached\n response = requests.post(\n \"https://www.toptal.com/developers/javascript-minifier/api/raw\",\n data={\"input\": source}\n )\n if response.ok:\n _insert_cache(_TOPTAL_CACHE_DIR, source, response.text)\n return True, response.text\n except Exception:\n pass\n return False, \"\"\n\n\ndef _minify_js(source, has_consistent_semicolons=True):\n \"\"\"Minify Javascript source with custom minifier.\n\n Args:\n source (str): Javascript to minify.\n has_consistent_semicolons (bool): 
If the source code\n has consistent semicolons. (Default: True)\n\n Returns:\n str: Minified Javascript.\n \"\"\"\n found, cached = _test_cache(_CACHE_DIR, source)\n if found:\n return cached\n minified = source\n minified = _strip_comments(minified)\n minified = _strip_whitespace(minified, has_consistent_semicolons)\n _insert_cache(_CACHE_DIR, source, minified)\n return minified\n\n\ndef minify_js(source, allow_toptol=True, has_consistent_semicolons=True):\n \"\"\"Minify Javascript source.\n\n Args:\n source (str): Javascript to minify.\n allow_toptol (bool): Allow the use of www.toptol.com to\n minify (Default: True)\n has_consistent_semicolons (bool): If the source code\n has consistent semicolons. (Default: True)\n\n Returns:\n str: Minified Javascript.\n \"\"\"\n source = source.strip()\n if not source:\n return source\n if allow_toptol:\n ok, minified = _request_toptal_minify_js(source)\n if ok:\n return minified\n return _minify_js(source, has_consistent_semicolons)\n", "repo_name": "alister-chowdhury/alister-chowdhury.github.io", "sub_path": "builder/minify/minify_js.py", "file_name": "minify_js.py", "file_ext": "py", "file_size_in_byte": 4763, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "os.path.abspath", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 15, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 16, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 17, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 18, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 19, "usage_type": "call"}, {"api_name": "_string_aware_strip._string_aware_comment_strip", "line_number": 31, "usage_type": "call"}, {"api_name": "_string_aware_strip._string_aware_comment_strip", "line_number": 32, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 56, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 57, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 61, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 62, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 66, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 67, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 68, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 71, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 72, "usage_type": "call"}, {"api_name": "_string_aware_strip._string_aware_generic_strip", "line_number": 75, "usage_type": "call"}, {"api_name": "_string_aware_strip._string_aware_generic_strip", "line_number": 76, "usage_type": "call"}, {"api_name": "_string_aware_strip._string_aware_generic_strip", "line_number": 77, "usage_type": "call"}, {"api_name": "_string_aware_strip._string_aware_generic_strip", "line_number": 78, "usage_type": "call"}, {"api_name": "_string_aware_strip._string_aware_generic_strip", "line_number": 79, "usage_type": "call"}, {"api_name": "_string_aware_strip._string_aware_generic_strip", "line_number": 80, "usage_type": "call"}, {"api_name": "_string_aware_strip._string_aware_generic_strip", "line_number": 81, "usage_type": 
"call"}, {"api_name": "_cache._test_cache", "line_number": 96, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 99, "usage_type": "call"}, {"api_name": "_cache._insert_cache", "line_number": 104, "usage_type": "call"}, {"api_name": "_cache._test_cache", "line_number": 122, "usage_type": "call"}, {"api_name": "_cache._insert_cache", "line_number": 128, "usage_type": "call"}]}
+{"seq_id": "6331632571", "text": "\"\"\"Motif object definition.\n\nA single Motif object stores important values, used during the following steps of \nGRAFIMO's analysis, such as motif PSSM, p-value matrix, scaling factor, offset,\nmotif information, etc.\n\"\"\"\n\n\nfrom grafimo.grafimo_errors import NotValidMotifMatrixError \nfrom grafimo.utils import isListEqual, DNA_ALPHABET \n\nfrom typing import List, Optional, Dict \n\nimport pandas as pd\nimport numpy as np\n\n\nclass Motif(object):\n \"\"\"\n This class defines a DNA motif object.\n\n In a single object we carry: \n * the original count matrix or probability matrix \n * the motif scaled scoring matrix \n * the P-value matrix used to assign a P-value to each motif \n occurrence candidate score \n * the parameters used to scale the matrix (to revert the scaled \n score to the log-odds score) \n * the background probability distribution used, while processing the\n PWM values \n * the motif width\n * the minimum value in the scoring matrix\n * the maximum value in the scoring matrix\n * the motif name (both ID and extended name)\n * the motif alphabet \n \n ...\n\n Attributes\n ----------\n _count_matrix : numpy.ndarray\n motif probability matrix\n _score_matrix : numpy.ndarray\n scaled motif scoring matrix\n _min_val : int\n minimum value of the scaled scoring matrix\n _max_value : int\n maximum value of the scaled scoring matrix\n _scale : int\n scaling value\n _offset : numpy.double\n offset used during motif matrix scaling\n _bg : dict\n background probability distribution\n _width : int\n motif width\n _motif_id : str\n motif ID\n _motif_name : str\n motif extended name\n _alphabet : list()\n DNA motif alphabet\n _isScaled : bool\n flag value to state if the scoring matrix has been scaled\n\n Methods\n -------\n setMotif_matrix(motif_matrix : pandas.DataFrame)\n set the count matrix\n setMotif_scoreMatrix(score_matrix : numpy.ndarray)\n set the scoring matrix\n setMotif_pval_matrix(pval_mat : numpy.array)\n set the P-value matrix\n setMin_val(min_val : int)\n set the scoring matrix minimum value\n setMax_val(max_val : int)\n set the scoring matrix maximum value\n setScale(scale : int)\n set the scoring matrix scaling factor\n setOffset(offset : numpy.double)\n set the scaling offset\n setBg(bgs : dict)\n set the background probability distribution\n setWidth(width : int)\n set motif width\n setMotifID(motif_id : str)\n set motif ID\n setMotifName(motif_name : str)\n set motif extended name\n setAlphabet(alphabet : list)\n set DNA motif alphabet\n setIsScaled(isScaled : bool)\n set the isScaled flag value\n getMotif_matrix()\n return the motif count matrix\n getMotif_scoreMatrix()\n return the motif scaled scoring matrix\n getMotif_pval_mat()\n return the P-value matrix\n getMin_val()\n return the scoring matrix minimum value\n getMax_val()\n return the scoring matrix maximum value\n getScale()\n return the matrix scaling factor\n getOffset()\n return the offset used while scaling the motif scoring matrix\n getBg()\n return the background probability distribution\n getWidth():\n return motif width\n getMotifID()\n return the motif ID\n getMotifName()\n return the motif extended name\n getAlphabet()\n return the DNA motif alphabet\n getIsScaled()\n return the isScaled flag value\n compute_minValue()\n compute the minimum value of the scaled scoring motif matrix\n print()\n print one matrix among the counts one, the scoring one or the \n P-value one \n \"\"\"\n\n # class attributes value initialization\n _min_val = -np.inf\n 
_max_val = np.inf\n _scale = -1\n _offset = 0\n _width = -1\n _is_scaled = False\n #-------------------------------------------------------------------\n # Motif methods\n # \n # these errors should never appear --> no need for error formatting\n # can assume that debug mode == True\n def __init__(\n self,\n count_matrix: np.ndarray,\n width: int,\n alphabet: List[str],\n motif_id: str,\n motif_name: str,\n nucsmap: dict\n ):\n if not isinstance(count_matrix, np.ndarray):\n errmsg = f\"\\n\\nERROR: Expected {type(np.ndarray).__name__}, got {type(count_matrix).__name__}.\\n\"\n raise TypeError(errmsg)\n if count_matrix.size == 0 or sum(sum(count_matrix)) == 0:\n errmsg = \"\\n\\nERROR: Empty motif count matrix.\\n\"\n raise NotValidMotifMatrixError(errmsg)\n if not isinstance(width, int):\n errmsg = f\"\\n\\nERROR: Expected {int.__name__}, got {type(width).__name__}.\\n\"\n raise TypeError(errmsg)\n if width <= 0:\n errmsg = f\"\\n\\nERROR: Forbidden motif width ({width}).\\n\"\n raise ValueError(errmsg)\n if not isinstance(motif_id, str):\n errmsg = f\"\\n\\nERROR: Expected {str.__name__}, got {type(motif_id).__name__}.\\n\"\n raise TypeError(errmsg)\n if not motif_id:\n errmsg = \"\\n\\nERROR: Not valid motif ID.\\n\"\n raise ValueError(errmsg)\n if not isinstance(motif_name, str):\n errmsg = f\"\\n\\nERROR: Expected {str.__name__}, got {type(motif_name).__name__}.\\n\"\n raise TypeError(errmsg)\n if not motif_name:\n errmsg = \"\\n\\nERROR: Not valid motif name.\\n\"\n raise ValueError(errmsg)\n if not isinstance(alphabet, list):\n errmsg = f\"\\n\\nERROR: Expected {list.__name__}, got {type(alphabet).__name__}.\\n\"\n raise TypeError(errmsg)\n if not isListEqual(alphabet, DNA_ALPHABET):\n errmsg = \"\\n\\nERROR: The motif is not built on DNA alphabet.\\n\"\n raise ValueError(errmsg)\n if not isinstance(nucsmap, dict):\n errmsg = f\"\\n\\nERROR: Expected {dict.__name__}, got {type(nucsmap).__name__}.\\n\"\n raise TypeError(errmsg)\n self._count_matrix = count_matrix\n self._width = width\n self._motif_id = motif_id\n self._motif_name = motif_name\n self._alphabet = alphabet\n self._nucsmap = nucsmap\n\n\n def set_motif_matrix(self, motif_matrix: pd.DataFrame) -> None:\n if not isinstance(motif_matrix, pd.DataFrame):\n errmsg = f\"\\n\\nERROR: Expected {type(pd.DataFrame).__name__}, got {type(motif_matrix).__name__}.\\n\"\n raise TypeError(errmsg)\n if motif_matrix.empty:\n errmsg = \"\\n\\nERROR: Empty motif matrix.\\n\"\n raise ValueError(errmsg)\n self._count_matrix = motif_matrix\n\n\n def set_motif_score_matrix(self, score_matrix: np.ndarray) -> None:\n if not isinstance(score_matrix, np.ndarray):\n errmsg = f\"\\n\\nERROR: Expected {type(np.ndarray).__name__}, got {type(score_matrix).__name__}.\\n\"\n raise TypeError(errmsg)\n if score_matrix.size == 0 or sum(sum(score_matrix)) == 0:\n errmsg = \"\\n\\nERROR: Empty motif score matrix.\\n\"\n raise ValueError(errmsg)\n self._score_matrix = score_matrix\n\n\n def set_motif_pval_matrix(self, pval_mat: np.array) -> None:\n if not isinstance(pval_mat, np.ndarray):\n errmsg = f\"\\n\\nERROR: Expected {type(np.array).__name__}, got {type(pval_mat).__name__}.\\n\"\n raise TypeError(errmsg)\n if len(pval_mat) == 0:\n errmsg = \"\\n\\nERROR: Empty motif p-value matrix.\\n\"\n raise ValueError(errmsg)\n if sum(pval_mat) == 0:\n errmsg = \"\\n\\nERROR: Not valid motif p-value matrix.\\n\"\n raise ValueError(errmsg)\n self._pval_matrix = pval_mat\n\n\n def set_min_val(self, min_val: int) -> None:\n if not isinstance(min_val, int):\n errmsg = f\"\\n\\nERROR: Expected {int.__name__}, got {type(min_val).__name__}.\\n\"\n raise TypeError(errmsg)\n if min_val <= -np.inf:\n errmsg = f\"\\n\\nERROR: Forbidden value {min_val}.\\n\"\n raise ValueError(errmsg)\n self._min_val = min_val\n\n\n def set_max_val(self, max_val: int) -> None:\n if not isinstance(max_val, int):\n errmsg = f\"\\n\\nERROR: Expected {int.__name__}, got {type(max_val).__name__}.\\n\"\n raise TypeError(errmsg)\n if max_val >= np.inf:\n errmsg = f\"\\n\\nERROR: Forbidden value ({max_val}).\\n\"\n raise ValueError(errmsg)\n self._max_val = max_val\n\n\n def set_scale(self, scale: int) -> None:\n if not isinstance(scale, int):\n errmsg = f\"\\n\\nERROR: Expected {int.__name__}, got {type(scale).__name__}.\\n\"\n raise TypeError(errmsg)\n if scale <= 0:\n errmsg = \"\\n\\nERROR: Scaling factor must be positive integer number.\\n\"\n raise ValueError(errmsg)\n self._scale = scale\n\n\n def set_offset(self, offset: np.double) -> None: \n if not isinstance(offset, np.double):\n errmsg = f\"\\n\\nERROR: Expected {type(np.double).__name__}, got {type(offset).__name__}.\\n\"\n raise TypeError(errmsg) \n self._offset = offset\n\n\n def set_bg(self, bgs: Dict[str, float]) -> None:\n if not isinstance(bgs, dict):\n errmsg = f\"\\n\\nERROR: Expected {dict.__name__}, got {type(bgs).__name__}.\\n\"\n raise TypeError(errmsg)\n self._bg = bgs\n\n\n def set_width(self, width: int) -> None:\n if not isinstance(width, int):\n errmsg = f\"\\n\\nERROR: Expected {int.__name__}, got {type(width).__name__}.\\n\"\n raise TypeError(errmsg)\n if width <= 0:\n errmsg = \"\\n\\nERROR: Not valid motif width.\\n\"\n raise ValueError(errmsg)\n self._width = width\n\n\n def set_motif_id(self, motif_id: str) -> None:\n if not isinstance(motif_id, str):\n errmsg = f\"\\n\\nERROR: Expected {str.__name__}, got {type(motif_id).__name__}.\\n\"\n raise TypeError(errmsg)\n if not motif_id:\n errmsg = \"\\n\\nERROR: Not valid motif ID.\\n\"\n raise ValueError(errmsg)\n self._motif_id = motif_id\n\n\n def set_motif_name(self, motif_name: str) -> None:\n if not isinstance(motif_name, str):\n errmsg = f\"\\n\\nERROR: Expected {str.__name__}, got {type(motif_name).__name__}.\\n\"\n raise TypeError(errmsg)\n if not motif_name:\n errmsg = \"\\n\\nERROR: Not valid motif name.\\n\"\n raise ValueError(errmsg)\n self._motif_name = motif_name\n\n\n def set_alphabet(self, alphabet: List[str]) -> None:\n if not isinstance(alphabet, list):\n errmsg = f\"\\n\\nERROR: Expected {list.__name__}, got {type(alphabet).__name__}.\\n\"\n raise TypeError(errmsg)\n if len(alphabet) == 0:\n errmsg = \"\\n\\nERROR: Empty motif alphabet.\\n\"\n raise ValueError(errmsg)\n if not isListEqual(alphabet, DNA_ALPHABET):\n errmsg = \"\\n\\nERROR: The motif is not built on DNA alphabet.\\n\"\n raise ValueError(errmsg)\n self._alphabet = alphabet\n\n\n def set_is_scaled(self) -> None:\n if self._is_scaled:\n errmsg = \"\\n\\nERROR: The motif matrix has already been scaled.\\n\"\n raise AssertionError(errmsg)\n self._is_scaled = True\n\n\n def _get_motif_matrix(self) -> np.ndarray:\n if self._count_matrix.size == 0 or sum(sum(self._count_matrix)) == 0:\n errmsg = \"\\n\\nERROR: \\\"self._count_matrix\\\" is empty.\\n\"\n raise AttributeError(errmsg)\n else:\n return self._count_matrix\n \n @property\n def count_matrix(self):\n return self._get_motif_matrix()\n \n\n def _get_motif_score_matrix(self) -> np.ndarray:\n if self._score_matrix.size == 0 or sum(sum(self._score_matrix)) == 0:\n errmsg = \"\\n\\nERROR: \\\"self._score_matrix\\\" is empty.\\n\"\n raise AttributeError(errmsg)\n else:\n return self._score_matrix\n\n @property\n def score_matrix(self):\n return self._get_motif_score_matrix()\n\n\n def _get_motif_pval_mat(self) -> np.ndarray:\n if self._pval_matrix.size == 0 or sum(self._pval_matrix) == 0:\n errmsg = \"\\n\\nERROR: \\\"self._pval_matrix\\\" is empty.\\n\"\n raise AttributeError(errmsg)\n else:\n return self._pval_matrix\n \n @property\n def pval_matrix(self):\n return self._get_motif_pval_mat()\n\n\n def _get_min_val(self) -> int:\n return self._min_val\n\n @property\n def min_val(self):\n return self._get_min_val()\n\n\n def _get_max_val(self) -> int:\n return self._max_val\n\n @property\n def max_val(self):\n return self._get_max_val()\n\n\n def _get_scale(self) -> int:\n return self._scale\n\n @property\n def scale(self):\n return self._get_scale()\n\n\n def _get_nucsmap(self):\n if not bool(self._nucsmap):\n errmsg = \"\\n\\nERROR: \\\"self._nucsmap\\\" is empty.\\n\"\n raise AttributeError(errmsg)\n else:\n return self._nucsmap\n \n @property\n def nucsmap(self):\n return self._get_nucsmap()\n\n\n def _get_offset(self) -> np.double:\n return self._offset\n\n @property\n def offset(self):\n return self._get_offset()\n\n\n def _get_bg(self) -> dict:\n if not bool(self._bg):\n errmsg = \"\\n\\nERROR: \\\"self._bg\\\" is empty.\\n\"\n raise AttributeError(errmsg)\n else:\n return self._bg\n\n @property\n def bg(self):\n return self._get_bg()\n\n\n def _get_width(self) -> int:\n return self._width\n\n @property\n def width(self):\n return self._get_width()\n\n\n def _get_motif_id(self) -> str:\n if not self._motif_id:\n errmsg = \"\\n\\nERROR: \\\"self._motif_id\\\" is empty.\\n\"\n raise AttributeError(errmsg)\n else:\n return self._motif_id\n\n @property\n def motif_id(self):\n return self._get_motif_id()\n\n\n def _get_motif_name(self) -> str:\n if not self._motif_name:\n errmsg = \"\\n\\nERROR: \\\"self._motif_name\\\" is empty.\\n\"\n raise AttributeError(errmsg)\n else:\n return self._motif_name\n\n @property\n def motif_name(self):\n return self._get_motif_name()\n\n\n def _get_alphabet(self) -> List[str]:\n if not self._alphabet:\n errmsg = \"\\n\\nERROR: \\\"self._alphabet\\\" is empty.\\n\"\n raise AttributeError(errmsg)\n else:\n return self._alphabet\n \n @property\n def alphabet(self):\n return self._get_alphabet()\n\n\n def _get_is_scaled(self) -> bool:\n return self._is_scaled\n\n @property\n def is_scaled(self):\n return self._get_is_scaled()\n\n\n def compute_min_value(self) -> None:\n min_value = self._score_matrix.min()\n self._min_val = min_value\n\n\n def print(self, matrix: str) -> None:\n if not isinstance(matrix, str):\n errmsg = \"\\n\\nERROR: Expected str, got {}.\\n\"\n raise TypeError(errmsg.format(type(matrix).__name__))\n if not matrix:\n errmsg = \"\\n\\nERROR: Unable to guess what should be printed.\\n\"\n raise ValueError(errmsg)\n available_matrices = [\"raw_counts\", \"score_matrix\", \"pval_matrix\"]\n if matrix not in available_matrices:\n errmsg = \"\\n\\nERROR: Unknown motif matrix.\\n\"\n raise ValueError(errmsg)\n if matrix == \"raw_counts\": print(self._count_matrix)\n elif matrix == \"score_matrix\": print(self._score_matrix)\n elif matrix == \"pval_matrix\": print(self._pval_matrix)\n else: # we should never reach this point\n errmsg = \"\\n\\nERROR: Unknown motif matrix.\\n\"\n raise ValueError(errmsg)\n \n# end of Motif\n\n", "repo_name": "pinellolab/GRAFIMO", "sub_path": "src/grafimo/motif.py", "file_name": "motif.py", "file_ext": "py", "file_size_in_byte": 
15872, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 24, "dataset": "github-code", "pt": "76", "api": [{"api_name": "numpy.inf", "line_number": 128, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 129, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 141, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 143, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 148, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 149, "usage_type": "attribute"}, {"api_name": "grafimo.grafimo_errors.NotValidMotifMatrixError", "line_number": 153, "usage_type": "call"}, {"api_name": "grafimo.utils.isListEqual", "line_number": 175, "usage_type": "call"}, {"api_name": "grafimo.utils.DNA_ALPHABET", "line_number": 175, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 189, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 190, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 191, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 199, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 200, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 201, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 209, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 210, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 211, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 226, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 236, "usage_type": "attribute"}, {"api_name": "numpy.double", "line_number": 252, "usage_type": "attribute"}, {"api_name": "numpy.double", "line_number": 253, "usage_type": "attribute"}, {"api_name": "numpy.double", "line_number": 254, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 259, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 296, "usage_type": "name"}, {"api_name": "grafimo.utils.isListEqual", "line_number": 303, "usage_type": "call"}, {"api_name": "grafimo.utils.DNA_ALPHABET", "line_number": 303, "usage_type": "argument"}, {"api_name": "numpy.ndarray", "line_number": 316, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 328, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 340, "usage_type": "attribute"}, {"api_name": "numpy.double", "line_number": 388, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 440, "usage_type": "name"}]}
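For orientation, the matrices a `Motif` object carries correspond to the standard count-to-log-odds transform; the class docstring names the log-odds score that the scaled matrix reverts to. A generic sketch under a uniform background (this is not GRAFIMO's exact computation; its integer `scale`/`offset` parameters and the P-value matrix are produced elsewhere in the package, and the 0.1 pseudocount and log base 2 here are illustrative assumptions):

```python
import numpy as np

# Toy 4 x 6 count matrix, rows in A/C/G/T order (10 aligned sites per column).
counts = np.array([
    [8, 0, 0, 9, 1, 2],
    [1, 9, 0, 0, 2, 3],
    [0, 1, 10, 0, 3, 2],
    [1, 0, 0, 1, 4, 3],
], dtype=float)
bg = np.array([0.25, 0.25, 0.25, 0.25])              # uniform background

probs = (counts + 0.1) / (counts + 0.1).sum(axis=0)  # small pseudocount avoids log(0)
logodds = np.log2(probs / bg[:, None])               # per-position log-odds scores
print(logodds.round(2))
```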
+{"seq_id": "28405269475", "text": "\"\"\"ManhuaKO site downloader\"\"\"\n\nimport time\nfrom typing import Iterable, Set\nfrom urllib.parse import quote\n\nfrom bs4 import BeautifulSoup\n\nfrom .base import Chapter, ChapterImage, Language, Manga, Site\n\n\nclass ManhuaKO(Site):\n @property\n def name(self) -> str:\n return \"ManhuaKO\"\n\n @property\n def url(self) -> str:\n return \"https://manhuako.com\"\n\n @property\n def supported_languages(self) -> Set[Language]:\n return {Language.es}\n\n def search(self, query: str, lang: Language = None) -> Iterable[Manga]:\n with self.session.get(f\"{self.url}/home/search\", params={\"mq\": query}) as resp:\n resp.raise_for_status()\n soup = BeautifulSoup(resp.text, \"html.parser\")\n pages = [soup]\n pagelist = soup.find(\"ul\", class_=\"pagination\")\n if pagelist:\n # get only the second page\n for page in pagelist(\"a\")[1:2]:\n with self.session.get(page[\"href\"]) as resp:\n resp.raise_for_status()\n soup = BeautifulSoup(resp.text, \"html.parser\")\n pages.append(soup)\n\n for page in pages:\n for card in page(\"div\", {\"class\": \"card\"}):\n if card.findNext(\"p\", {\"class\": \"type\"}).text == \"Novela\":\n continue\n anchor = card.findNext(\"a\", {\"class\": \"white-text\"})\n yield Manga(\n url=anchor[\"href\"],\n name=anchor.text.strip(),\n cover=card.findNext(\"img\")[\"src\"],\n )\n\n def get_chapters(self, manga: Manga) -> Iterable[Chapter]:\n with self.session.get(manga.url) as resp:\n resp.raise_for_status()\n soup = BeautifulSoup(resp.text, \"html.parser\")\n pages = [soup]\n pagelist = soup.find(\"ul\", class_=\"pagination\")\n if pagelist:\n last_page = int(\n pagelist(\"a\")[-1][\"href\"].strip(\"/\").rsplit(\"/\", maxsplit=1)[-1]\n )\n for page_number in range(2, last_page + 1):\n with self.session.get(f\"{manga.url}/page/{page_number}\") as resp:\n resp.raise_for_status()\n soup = BeautifulSoup(resp.text, \"html.parser\")\n pages.append(soup)\n time.sleep(0.1)\n for page in pages:\n page = page.find(\"table\", {\"class\": \"table-chapters\"})\n for item in page(\"tr\"):\n item = item.findNext(\"a\")\n yield Chapter(name=item.text.strip(), url=item[\"href\"])\n\n def get_images(self, chapter: Chapter) -> Iterable[ChapterImage]:\n with self.session.get(chapter.url) as resp:\n resp.raise_for_status()\n soup = BeautifulSoup(resp.text, \"html.parser\")\n soup = soup.find(\"div\", {\"id\": \"pantallaCompleta\"})\n for img in soup(\"img\"):\n yield ChapterImage(url=quote(img[\"src\"], safe=\":/%\"))\n", "repo_name": "adbenitez/simplebot_manga", "sub_path": "simplebot_manga/manga_api/manhuako.py", "file_name": "manhuako.py", "file_ext": "py", "file_size_in_byte": 2933, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "base.Site", "line_number": 12, "usage_type": "name"}, {"api_name": "base.Language.es", "line_number": 23, "usage_type": "attribute"}, {"api_name": "base.Language", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 22, "usage_type": "name"}, {"api_name": "base.Language", "line_number": 22, "usage_type": "name"}, {"api_name": "base.Language", "line_number": 25, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 28, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 36, "usage_type": "call"}, {"api_name": "base.Manga", "line_number": 44, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 25, "usage_type": "name"}, {"api_name": "base.Manga", 
"line_number": 25, "usage_type": "name"}, {"api_name": "base.Manga", "line_number": 50, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 53, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 63, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 65, "usage_type": "call"}, {"api_name": "base.Chapter", "line_number": 70, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 50, "usage_type": "name"}, {"api_name": "base.Chapter", "line_number": 50, "usage_type": "name"}, {"api_name": "base.Chapter", "line_number": 72, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 75, "usage_type": "call"}, {"api_name": "base.ChapterImage", "line_number": 78, "usage_type": "call"}, {"api_name": "urllib.parse.quote", "line_number": 78, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 72, "usage_type": "name"}, {"api_name": "base.ChapterImage", "line_number": 72, "usage_type": "name"}]}
+{"seq_id": "38218631636", "text": "# file_name:html_parse.py\n# 解析方法一\nfrom bs4 import BeautifulSoup\nimport urllib3\nimport json\nfrom get_html import get_html\nfrom Paper_class import security_Paper\nfrom utils import validateTitle,save_to_file\nimport pandas as pd\nimport warnings\nimport requests\nimport io\nimport os\n\nwarnings.filterwarnings('ignore')\nroot_dir = os.getcwd()\n\ndef list2csv(columns = [\"paper_title\",\"author\",\"paper_link\",\"pdf_link\",\"slides_link\",\"abstract\"],list = None,name=None):\n pd_DataFrame = pd.DataFrame(columns=columns, data=list)\n pd_DataFrame.to_csv(root_dir+name+\".csv\",encoding='utf-8')\n\n\ndef download_content(url):\n \"\"\"\n 第一个函数,用来下载网页,返回网页内容\n 参数 url 代表所要下载的网页网址。\n 整体代码和之前类似\n \"\"\"\n http = urllib3.PoolManager()\n response = http.request(\"GET\", url)\n response_data = response.data\n html_content = response_data.decode()\n return html_content\n\n# 输入参数为要分析的 html 文件名,返回值为对应的 BeautifulSoup 对象\ndef create_doc_from_html(html_content):\n doc = BeautifulSoup(html_content)\n return doc\n\ndef create_doc_from_filename(filename):\n with open(root_dir + \"/\"+ filename, \"r\", encoding='utf-8') as f:\n html_content = f.read()\n doc = BeautifulSoup(html_content)\n return doc\n\ndef security_list_parse(doc,tag=\"h2\",class_=\"node-title\"):\n link_list = doc.body.find_all(tag,class_=class_)\n link_paper = []\n for link in link_list:\n temp = link.find_all(\"a\")\n if len(temp)!=0:\n link_paper.append((paper_dict[\"security\"]+temp[0]['href'],temp[0].text))\n\n return link_paper\n\ndef security_single_parse(link):\n temp_content = create_doc_from_html(download_content(link))\n temp_content = create_doc_from_html(str(temp_content.find_all(\"section\", id=\"content\")))\n title = temp_content.find_all(\"h1\", id=\"page-title\")\n author = temp_content.find_all(\"div\", class_=\"field-name-field-paper-people-text\")\n abstract = temp_content.find_all(\"div\", class_=\"field-name-field-paper-description\")\n pdf_link = temp_content.find_all(\"div\", class_=\"field-name-field-presentation-pdf\")\n slide_link = temp_content.find_all(\"div\", class_=\"field-name-field-paper-slides-file\")\n\n return security_Paper(paper_title=title,abstract=abstract,pdf_link=pdf_link,author=author,slides_link=slide_link,paper_link=link)\n\n\ndef download_pdf(save_path,pdf_name,pdf_url):\n send_headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36\",\n \"Connection\": \"keep-alive\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n \"Accept-Language\": \"zh-CN,zh;q=0.8\"}\n response = requests.get(pdf_url, headers=send_headers)\n bytes_io = io.BytesIO(response.content)\n os.chdir(save_path)\n if not os.path.exists(\"%s.PDF\" % pdf_name):\n with open(\"%s.PDF\" % pdf_name, mode='wb') as f:\n f.write(bytes_io.getvalue())\n else:\n print(\"已存在\")\n os.chdir(root_dir)\n print('%s.PDF,下载成功!' 
% (pdf_name))\n\ndef new_file(root_dir,name):\n os.chdir(root_dir)\n if not os.path.exists(name):\n os.mkdir(name)\n os.chdir(name)\n path = os.getcwd()\n os.chdir(root_dir)\n return path\n\n\n# security的历年论文https://www.usenix.org/conferences/byname/108\n\npaper_dict = {\"security\":\"https://www.usenix.org/\"}\nconference_url = {\"security22_fall\":\"https://www.usenix.org/conference/usenixsecurity22/fall-accepted-papers\",\n \"security22_summer\":\"https://www.usenix.org/conference/usenixsecurity22/summer-accepted-papers\",\n \"security21_fall\":\"https://www.usenix.org/conference/usenixsecurity21/fall-accepted-papers\",\n \"security21_summer\":\"https://www.usenix.org/conference/usenixsecurity21/summer-accepted-papers\",\n \"security20_fall\":\"https://www.usenix.org/conference/usenixsecurity20/fall-accepted-papers\",\n \"security20_summer\":\"https://www.usenix.org/conference/usenixsecurity20/summer-accepted-papers\",\n \"security20_spring\":\"https://www.usenix.org/conference/usenixsecurity20/spring-accepted-papers\",\n \"security19_fall\":\"https://www.usenix.org/conference/usenixsecurity19/fall-accepted-papers\",\n }\n\nif __name__ == '__main__':\n use_history = True\n name = \"security21\"\n pdf_save_path = new_file(root_dir=root_dir+\"/pdf_info\",name=name)\n url = conference_url[name]\n\n if not use_history:\n get_html(name=name, url=url)\n result = download_content(url)\n save_to_file(root_dir + \"/history_file/\" + name + \".html\", result)\n doc = create_doc_from_html(result)\n else:\n doc = create_doc_from_filename(\"history_file/\"+name+\".html\")\n\n link_paper = security_list_parse(doc)\n paper_info_list = []\n pdf_link_list = []\n for link, paper_name in link_paper:\n print(paper_name)\n paper = security_single_parse(link)\n if paper.paper_title !=\"\":\n paper_info_list.append([paper.paper_title,paper.author,\n paper.paper_link,paper.pdf_link,\n paper.slides_link,paper.abstract])\n if paper.pdf_link != None:\n download_pdf(pdf_save_path, validateTitle(paper.paper_title), paper.pdf_link)\n\n # list2csv(list=paper_info_list,name=\"/paper_info/\"+name)\n # for title, link in pdf_link_list:\n # download_pdf(pdf_save_path,title,link)\n\n", "repo_name": "xaddwell/paper_crawler", "sub_path": "html_parse.py", "file_name": "html_parse.py", "file_ext": "py", "file_size_in_byte": 5625, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "warnings.filterwarnings", "line_number": 15, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 19, "usage_type": "call"}, {"api_name": "urllib3.PoolManager", "line_number": 29, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 37, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 43, "usage_type": "call"}, {"api_name": "Paper_class.security_Paper", "line_number": 65, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 74, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 75, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 82, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 87, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 88, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 89, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 90, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 91, "usage_type": "call"}, {"api_name": "get_html.get_html", "line_number": 115, "usage_type": "call"}, {"api_name": "utils.save_to_file", "line_number": 117, "usage_type": "call"}, {"api_name": "utils.validateTitle", "line_number": 133, "usage_type": "call"}]}
+{"seq_id": "24714648651", "text": "from draw import draw_pixbuf, propagate_expose, draw_vlinear, cairo_state\nfrom skin_config import skin_config\nfrom utils import get_window_shadow_size\nimport gobject\nimport gtk\n\nclass EventBox(gtk.EventBox):\n '''Event box.'''\n\t\n def __init__(self):\n '''Init event box.'''\n gtk.EventBox.__init__(self)\n self.set_visible_window(False)\n \nclass ImageBox(gtk.EventBox):\n '''Box just contain image.'''\n\t\n def __init__(self, image_dpixbuf):\n '''Init image box.'''\n # Init.\n gtk.EventBox.__init__(self)\n self.set_visible_window(False)\n self.image_dpixbuf = image_dpixbuf\n \n # Set size.\n pixbuf = self.image_dpixbuf.get_pixbuf()\n self.set_size_request(pixbuf.get_width(), pixbuf.get_height())\n \n # Connect expose signal.\n self.connect(\"expose-event\", self.expose_image_box)\n \n def expose_image_box(self, widget, event):\n '''Expose image box.'''\n # Init.\n cr = widget.window.cairo_create()\n rect = widget.allocation\n pixbuf = self.image_dpixbuf.get_pixbuf()\n \n # Draw.\n draw_pixbuf(cr, pixbuf, rect.x, rect.y)\n \n # Propagate expose.\n propagate_expose(widget, event)\n \n return True\n \ngobject.type_register(ImageBox)\n\nclass BackgroundBox(gtk.VBox):\n '''Box to expande background.'''\n\t\n def __init__(self):\n '''Init background box.'''\n # Init.\n gtk.VBox.__init__(self)\n self.set_can_focus(True)\n \n self.connect(\"expose-event\", self.expose_background_box)\n \n def draw_mask(self, cr, x, y, w, h):\n '''Draw mask.'''\n draw_vlinear(cr, x, y, w, h,\n [(0, (\"#FF0000\", 1)),\n (1, (\"#FF0000\", 1))]\n )\n \n def expose_background_box(self, widget, event):\n '''Expose background box.'''\n cr = widget.window.cairo_create()\n rect = widget.allocation\n toplevel = widget.get_toplevel()\n coordinate = widget.translate_coordinates(toplevel, rect.x, rect.y)\n (offset_x, offset_y) = coordinate\n \n with cairo_state(cr):\n cr.rectangle(rect.x, rect.y, rect.width, rect.height)\n cr.clip()\n \n (shadow_x, shadow_y) = get_window_shadow_size(toplevel)\n skin_config.render_background(cr, widget, shadow_x, shadow_y)\n \n self.draw_mask(cr, rect.x, rect.y, rect.width, rect.height) \n\n return False\n \ngobject.type_register(BackgroundBox)\n\n", "repo_name": "netphi/deepin-ui", "sub_path": "dtk/ui/box.py", "file_name": "box.py", "file_ext": "py", "file_size_in_byte": 2603, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "76", "api": [{"api_name": "gtk.EventBox", "line_number": 7, "usage_type": "attribute"}, {"api_name": "gtk.EventBox.__init__", "line_number": 12, "usage_type": "call"}, {"api_name": "gtk.EventBox", "line_number": 12, "usage_type": "attribute"}, {"api_name": "gtk.EventBox", "line_number": 15, "usage_type": "attribute"}, {"api_name": "gtk.EventBox.__init__", "line_number": 21, "usage_type": "call"}, {"api_name": "gtk.EventBox", "line_number": 21, "usage_type": "attribute"}, {"api_name": "draw.draw_pixbuf", "line_number": 40, "usage_type": "call"}, {"api_name": "draw.propagate_expose", "line_number": 43, "usage_type": "call"}, {"api_name": "gobject.type_register", "line_number": 47, "usage_type": "call"}, {"api_name": "gtk.VBox", "line_number": 49, "usage_type": "attribute"}, {"api_name": "gtk.VBox.__init__", "line_number": 55, "usage_type": "call"}, {"api_name": "gtk.VBox", "line_number": 55, "usage_type": "attribute"}, {"api_name": "draw.draw_vlinear", "line_number": 62, "usage_type": "call"}, {"api_name": "draw.cairo_state", "line_number": 75, "usage_type": "call"}, 
{"api_name": "utils.get_window_shadow_size", "line_number": 79, "usage_type": "call"}, {"api_name": "skin_config.skin_config.render_background", "line_number": 80, "usage_type": "call"}, {"api_name": "skin_config.skin_config", "line_number": 80, "usage_type": "name"}, {"api_name": "gobject.type_register", "line_number": 86, "usage_type": "call"}]}
+{"seq_id": "43754686613", "text": "# users/models.py\nfrom django.contrib.auth.models import AbstractUser\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.db import models\n\n\nclass CustomUser(AbstractUser):\n email = models.EmailField(_(\"email\"), max_length=255, unique=True)\n first_name = models.CharField(_(\"first_name\"), max_length=30)\n last_name = models.CharField(_(\"last_name\"), max_length=30)\n\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = ['first_name', 'last_name', 'username']\n\n def __str__(self):\n return self.email\n\n def get_full_name(self):\n return self.first_name + ' ' + self.last_name\n\n\nSTATES = (\n ('AL', 'Alabama'),\n ('AK', 'Alaska'),\n ('AZ', 'Arizona'),\n ('AR', 'Arkansas'),\n ('CA', 'California'),\n ('CO', 'Colorado'),\n ('CT', 'Connecticut'),\n ('DE', 'Delaware'),\n ('FL', 'Florida'),\n ('GA', 'Georgia'),\n ('HI', 'Hawaii'),\n ('ID', 'Idaho'),\n ('IL', 'Illinois'),\n ('IN', 'Indiana'),\n ('IA', 'Iowa'),\n ('KS', 'Kansas'),\n ('KY', 'Kentucky'),\n ('LA', 'Louisiana'),\n ('ME', 'Maine'),\n ('MD', 'Maryland'),\n ('MA', 'Massachusetts'),\n ('MI', 'Michigan'),\n ('MN', 'Minnesota'),\n ('MS', 'Mississippi'),\n ('MO', 'Missouri'),\n ('MT', 'Montana'),\n ('NE', 'Nebraska'),\n ('NV', 'Nevada'),\n ('NH', 'New Hampshire'),\n ('NJ', 'New Jersey'),\n ('NM', 'New Mexico'),\n ('NY', 'New York'),\n ('NC', 'North Carolina'),\n ('ND', 'North Dakota'),\n ('OH', 'Ohio'),\n ('OK', 'Oklahoma'),\n ('OR', 'Oregon'),\n ('PA', 'Pennsylvania'),\n ('RI', 'Rhode Island'),\n ('SC', 'South Carolina'),\n ('SD', 'South Dakota'),\n ('TN', 'Tennessee'),\n ('TX', 'Texas'),\n ('UT', 'Utah'),\n ('VT', 'Vermont'),\n ('VA', 'Virginia'),\n ('WA', 'Washington'),\n ('WV', 'West Virginia'),\n ('WI', 'Wisconsin'),\n ('WY', 'Wyoming'),\n)\n\n\nclass Address(models.Model):\n street = models.CharField(max_length=45)\n city = models.CharField(max_length=45)\n state = models.CharField(max_length=2, choices=STATES)\n zip = models.IntegerField()\n\n def __str__(self):\n return \"%s, %s, %s, %s\" % (self.street, self.city, self.state,\n self.zipcode)\n", "repo_name": "jjacobson/Sahara", "sub_path": "users/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2207, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.contrib.auth.models.AbstractUser", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.EmailField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 76, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 76, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 77, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 77, "usage_type": "name"}, {"api_name": 
"django.db.models.CharField", "line_number": 78, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 78, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 79, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 79, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 80, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 80, "usage_type": "name"}]}
+{"seq_id": "4903183322", "text": "import numpy as np\nimport time as t\n\n# my own modules\nimport molecule\n\n# create class objects\nm = molecule.Molecule()\nnm = molecule.Normal_modes()\nsp = molecule.Structure_pool_method()\n# define stuff\n#natom = 3\nnatom = 14\n#xyzheader, comment, atomlist, xyz = m.read_xyz(\"xyz/test.xyz\")\nxyzheader, comment, atomlist, xyz = m.read_xyz(\"xyz/chd.xyz\")\natomic_numbers = [m.periodic_table(symbol) for symbol in atomlist]\n#dim = 3\n#tcm, fcm = m.triangle_cm(atomic_numbers, xyz, dim)\n\n# normal mode definitions\n#nmfile = \"nm/test_normalmodes.txt\"\nnmfile = \"nm/chd_normalmodes.txt\"\ndisplacements = nm.read_nm_displacements(nmfile, natom)\ndisplacement = displacements[0, :, :] # 1st mode displacements\nfactor = 1\n\n# xray testing\nx = molecule.Xray()\nqlen = 101\nqvector = np.linspace(0, 10, qlen, endpoint=True) # q probably in a.u.\n\n\ndef test_read_xyz():\n assert xyzheader == 3, \"xyzheader should be 3\"\n assert comment.__contains__(\"test\"), \"comment should be 'test'\"\n assert atomlist[0] == \"O\", \"1st atom should be O\"\n assert atomic_numbers[0] == 8, \"1st atomic charge should be 8\"\n assert xyz[0, 0] == 0.0, \"Upper left coordinate should be 0.0\"\n\n\ndef test_write_xyz():\n fname = \"xyz/out.xyz\"\n comment = \"test\"\n m.write_xyz(fname, comment, atomlist, xyz)\n with open(fname) as out:\n assert out.readline() == \"3\\n\", \"1st line of out.xyz != 3\"\n assert out.readline() == \"test\\n\", \"2nd line of out.xyz != 'test'\"\n\ndef test_read_xyz_traj():\n natoms, comment, atomlist, xyz_traj = m.read_xyz_traj('xyz/chd_target_traj.xyz', 12)\n fname = 'out.xyz'\n m.write_xyz_traj(fname, atomlist, xyz_traj)\n\ndef test_sort_array():\n print(atomic_numbers)\n print(xyz)\n xyz_sorted = m.sort_array(xyz, atomic_numbers)\n print(xyz_sorted)\n print(atomlist)\n atoms = m.sort_array(atomlist, atomic_numbers)\n print(atoms)\n # add assertion ...\n\n\ndef test_periodic_table():\n h = m.periodic_table(\"H\")\n he = m.periodic_table(\"He\")\n c = m.periodic_table(\"C\")\n assert h == 1, \"H should have atom number 1\"\n assert he == 2, \"He should have atom number 2\"\n assert c == 6, \"C should have atom number 2\"\n\n\ndef test_triangle_cm():\n print(\"tcm\")\n print(tcm)\n assert round(tcm[0, 0]) == 74, \"rounded [0, 0] element != 74\"\n assert tcm[0, 1] == 8, \"[0, 1] element not != 8\"\n assert tcm[-1, -1] == 0.5, \"bottom right element != 0.5\"\n assert tcm[1, 0] == 0, \"bottom left diagonal != 0\"\n\n\ndef test_full_cm():\n print(\"fcm\")\n print(fcm)\n assert fcm[1, 0] == fcm[0, 1], \"upper diagonal != lower diagonal\"\n assert fcm[2, 0] == fcm[0, 2], \"upper diagonal != lower diagonal\"\n assert fcm[2, 1] == fcm[1, 2], \"upper diagonal != lower diagonal\"\n\n\ndef test_read_nm_displacements():\n assert displacements[0, 0, 1] == 0.07049, \"displacements[0, 0, 1] != 0.07049\"\n assert displacements[1, 1, 0] == 0.58365, \"displacements[1, 1, 0] != 0.58365\"\n\n\ndef test_displace_xyz():\n displaced_xyz = nm.displace_xyz(xyz, displacement, factor)\n assert displaced_xyz[1, 0] == 0.57028, (\n \"displaced_xyz[1, 0] !== 0.57028, for factor %d\" % factor\n )\n\n\ndef test_displace_write_xyz():\n displacement = displacements[0, :, :] # 1st mode displacements\n factor = 1\n displaced_xyz = nm.displace_xyz(xyz, displacement, factor)\n fname = \"xyz/displaced.xyz\"\n comment = \"displaced\"\n m.write_xyz(fname, comment, atomlist, displaced_xyz)\n with open(fname) as out:\n assert out.readline() == \"3\\n\", \"1st line of %s != 3\" % fname\n 
assert out.readline() == \"displaced\\n\", \"2nd line of %s != %s\" % (\n fname,\n comment,\n )\n\n\ndef test_nm_displacer():\n factors = [1, 1, 1]\n modes = [0, 1, 2]\n displaced_xyz = nm.nm_displacer(xyz, displacements, modes, factors)\n assert round(displaced_xyz[0, 1], 5) == round(\n xyz[0, 1] + 0.07049 + 0.05016 + 0.00003, 5\n ), \"displaced xyz error\"\n assert round(displaced_xyz[1, 0], 5) == round(\n xyz[1, 0] - 0.42972 + 0.58365 - 0.55484, 5\n ), \"displaced xyz error\"\n\n\ndef test_atomic_factor():\n atom_number = 1 # atom_number = 1 is hydrogen, etc.\n atom_factor = x.atomic_factor(atom_number, qvector)\n assert round(atom_factor[0], 3) == 1.0, \"H atomic factor (q = 0) != 1\"\n assert (\n round(x.atomic_factor(2, qvector)[0], 3) == 2.0\n ), \"He atomic factor (q = 0) != 2\"\n\n\ndef test_iam_calc():\n #compton_array = x.compton_spline(\n # atomic_numbers, qvector\n #) # atomic compton factors\n iam = x.iam_calc(atomic_numbers, xyz, qvector)\n np.savetxt('iam.dat', iam)\n #assert round(iam[0], 1) == 100.0, \"H2O molecular factor (q = 0) != 100\"\n\n#test_iam_calc()\n\ndef test_iam_calc_2d():\n #compton_array = x.compton_spline(\n # atomic_numbers, qvector\n #) # atomic compton factors\n atomic, molecular, rotavg = x.iam_calc_2d(atomic_numbers, xyz, qvector)\n iam = atomic + molecular\n np.savetxt('atomic2d.dat', atomic)\n np.savetxt('molecular2d.dat', molecular)\n np.savetxt('rotavg.dat', rotavg)\n np.savetxt('iam2d.dat', iam)\n\ntest_iam_calc_2d()\n\ndef test_iam_calc_3d():\n atomic, molecular = x.iam_calc_3d(atomic_numbers, xyz, qvector)\n iam = atomic + molecular\n np.savetxt('atomic3d.dat', atomic)\n np.savetxt('molecular3d.dat', molecular)\n #np.savetxt('rotavg.dat', rotavg)\n np.savetxt('iam3d.dat', iam)\n\n#test_iam_calc_3d()\n\ndef test_distances_array():\n dist_array = m.distances_array(xyz)\n assert dist_array[1, 2] == 2, \"distance between hydrogens != 2\"\n\ndef test_simulate_trajectory():\n xyzheader, comment, atomlist, xyz = m.read_xyz(\"xyz/nmm.xyz\")\n starting_xyz = xyz\n natom = xyz.shape[0]\n nsteps = 100\n step_size = 0.5\n wavenumbers = np.loadtxt('quantum/nmm_wavenumbers.dat')[:, 1]\n nmfile = \"nm/nmm_normalmodes.txt\"\n displacements = nm.read_nm_displacements(nmfile, natom)\n xyz_traj = sp.simulate_trajectory(starting_xyz, displacements, wavenumbers, nsteps, step_size)\n sp.xyz_traj_to_file(atomlist, xyz_traj)\n\n#test_simulate_trajectory()\n\ndef test_simulated_annealing():\n _, _, atomlist, xyz = m.read_xyz(\"xyz/nmm.xyz\")\n atomic_numbers = [m.periodic_table(symbol) for symbol in atomlist]\n starting_iam = x.iam_calc(atomic_numbers, xyz, qvector)\n starting_xyz = xyz\n wavenumbers = np.loadtxt('quantum/nmm_wavenumbers.dat')[:, 1]\n nmfile = \"nm/nmm_normalmodes.txt\"\n natom = 18\n displacements = nm.read_nm_displacements(nmfile, natom)\n # experiment percent diff\n _, _, _, xyz_displaced = m.read_xyz(\"xyz/nmm_displaced.xyz\")\n displaced_iam = x.iam_calc(atomic_numbers, xyz_displaced, qvector)\n experiment_pcd = 100 * (displaced_iam/starting_iam - 1)\n # run sim annealing\n nsteps = 10\n convergence_value = 0.001\n cooling_rate=4.0\n step_size=0.1\n save_xyz_path=True\n xyz_min_traj, chi2_path = sp.simulated_annealing(\n starting_xyz,\n displacements,\n wavenumbers,\n experiment_pcd,\n qvector,\n nsteps,\n convergence_value,\n cooling_rate,\n step_size,\n save_xyz_path,\n )\n save_xyz_traj_file = True\n if save_xyz_traj_file:\n fname = 'data/min_traj.xyz'\n sp.xyz_traj_to_file(atomlist, xyz_min_traj, fname)\n\ndef test_simulated_annealing_v4():\n 
_, _, atomlist, starting_xyz = m.read_xyz(\"xyz/chd.xyz\")\n atomic_numbers = [m.periodic_table(symbol) for symbol in atomlist]\n nmfile = \"nm/chd_normalmodes.txt\"\n natoms = 14\n displacements = nm.read_nm_displacements(nmfile, natoms)\n qlen = 99\n qvector = np.linspace(0, 12, qlen, endpoint=True)\n starting_iam = x.iam_calc(atomic_numbers, starting_xyz, qvector)\n # \"experiment\" target percent diff\n tlen = 18\n target_pcd_array = np.zeros((qlen, tlen))\n _, _, _, target_xyz_array = m.read_xyz_traj(\"xyz/chd_target_traj.xyz\", tlen)\n for t in range(tlen):\n target_iam = x.iam_calc(atomic_numbers, target_xyz_array[:, : , t], qvector)\n target_pcd_array[:, t] = 100 * (target_iam / starting_iam - 1)\n target_pcd_array[:, t] /= np.max(np.abs(target_pcd_array[:, t])) # normalise abs. max value to 1\n target_pcd = target_pcd_array[:, 0]\n\n starting_temp = 0.2\n nsteps = 10000\n step_size = 0.1\n chi2_best, pcd_best, xyz_best = sp.simulated_annealing_v4(\n displacements,\n target_pcd,\n qvector,\n starting_temp,\n nsteps,\n step_size,\n )\n print(chi2_best)\n print(pcd_best)\n print(xyz_best)\n\n#start = t.time()\n#test_simulated_annealing_v4()\n#end = t.time()\n#total = float(end - start)\n#print('time taken: %f' % total)\n\ndef test_gradient_d():\n xyzheader, comment, atomlist, xyz = m.read_xyz(\"xyz/target.xyz\")\n qlen = 81\n qvector = np.linspace(0.1, 8, qlen, endpoint=True)\n target_iam = x.iam_calc(atomic_numbers, xyz, qvector)\n nsteps=1000\n step_size=5e-9\n chi2_best, iam_best, rk_best = sp.gradient_d( target_iam, qvector, nsteps, step_size )\n np.savetxt('iam_target.dat', target_iam)\n np.savetxt('iam_best_%10.8f.dat' % chi2_best, iam_best)\n\ntest_gradient_d()\n\n", "repo_name": "tnorthey/molecule", "sub_path": "test_functions.py", "file_name": "test_functions.py", "file_ext": "py", "file_size_in_byte": 9071, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "molecule.Molecule", "line_number": 8, "usage_type": "call"}, {"api_name": "molecule.Normal_modes", "line_number": 9, "usage_type": "call"}, {"api_name": "molecule.Structure_pool_method", "line_number": 10, "usage_type": "call"}, {"api_name": "molecule.Xray", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.savetxt", 
"line_number": 275, "usage_type": "call"}]}
+{"seq_id": "14176923284", "text": "#!/usr/bin/env python\n\n# Cutter Coryell\n# Ay 190\n# WS3 Problem 2\n\nimport numpy as np\nimport scipy.special as sp\nimport matplotlib.pyplot as pl\nimport plot_defaults\n\n# Part A\n\n# parameters\nmax_exponent = 8\n\nnumber_density_coeff = 1.05495e35 # cm^(-3)\n\nns = [2**p for p in range(1,max_exponent)]\n\ndef f(x):\n return x * x * np.exp(x) / (np.exp(x) + 1)\n\n[xs, ws] = sp.l_roots(ns[-1], 0)\n\nQs = np.array([np.sum(ws[:n] * f(xs)[:n]) for n in ns])\nnumber_densities = number_density_coeff * Qs\n\nprint(\"\\nPart A\\n\")\nprint(\"Number of nodes:\\n{}\".format(ns))\nprint(\"Number density [cm^(-3)]:\\n{}\".format(number_densities))\nprint(\"Change in number density [cm^(-3)]:\\n{}\".format(number_densities[1:]\n - number_densities[:-1]))\n\n# Answer: 1.902*10^35 cm^(-3)\n\n# Part B\n\n# parameters\nn = 100 # number of nodes in Legendre Quadrature\ndE = 5 # energy bin size (MeV)\nmax_E = 155 # energy cutoff (MeV)\n\nEs = np.arange(0, 155, dE) # energies\nxs = Es / 20.0 # x parameter, energy / temperature (20 MeV)\n\ndef x(y, a, b):\n return 0.5 * (y + 1) * (b - a) + a\n\ndef f(y, a, b):\n x_ = x(y, a, b)\n return 0.5 * (b - a) * x_ * x_ / (np.exp(x_) + 1)\n\n[ys, ws] = sp.p_roots(n, 0)\n\nQs = np.array([np.sum(ws * f(ys, xs[i], xs[i+1])) for i in range(len(xs) - 1)])\n\nprint(\"\\nPart B\\n\")\nprint(\"Total number density: {}\".format(number_density_coeff * np.sum(Qs)))\n\nmyfig = pl.figure(figsize=(10,8))\nmyfig.subplots_adjust(left=0.13)\nmyfig.subplots_adjust(bottom=0.14)\nmyfig.subplots_adjust(top=0.90)\nmyfig.subplots_adjust(right=0.95)\npl.bar(Es[:-1], number_density_coeff * Qs / 10**34, color='c', width=5)\npl.xlim(0, max_E - dE)\npl.xlabel(\"Energy bin [MeV]\")\npl.ylabel(r\"Number density [$\\times 10^{34}$ cm$^{-3}$]\")\npl.title(\"Number Density versus Energy\", fontsize=30)\npl.savefig(\"problem2b.pdf\")\npl.show()\n", "repo_name": "savione/cutter-ay190", "sub_path": "ws3/problem2.py", "file_name": "problem2.py", "file_ext": "py", "file_size_in_byte": 1827, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "numpy.exp", "line_number": 22, "usage_type": "call"}, {"api_name": "scipy.special.l_roots", "line_number": 24, "usage_type": "call"}, {"api_name": "scipy.special", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 52, "usage_type": "call"}, {"api_name": "scipy.special.p_roots", "line_number": 54, "usage_type": "call"}, {"api_name": "scipy.special", "line_number": 54, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 
68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}]}
+{"seq_id": "23872175618", "text": "\"\"\"Определяет схемы URL для pixlands.\"\"\"\n\nfrom django.conf.urls import url\n\nfrom . import views\n\napp_name = 'pixlands'\n\nurlpatterns = [\n # Домашняя страница\n url(r'^$', views.index, name='index'),\n # Вывод публичных тем\n url(r'^topics/$', views.topics, name='topics'),\n # Вывод личных тем\n url(r'^profile/$', views.profile, name='profile'),\n # Страница с подробной информацией по отдельной теме\n url(r'^topics/(?P\\d+)/$', views.topic, name='topic'),\n # Страница для добавления новой темы\n url(r'^new_topic/$', views.new_topic, name='new_topic'),\n # Страница для добавления нового изображения\n url(r'^add_image/(?P\\d+)/$', views.add_image, name='add_image'),\n # Страница для добавления изображения пользователя\n url(r'^add_profile_pic/$', views.add_profile_pic, name='add_profile_pic'),\n # Страница для редактирования темы\n url(r'^edit_topic/(?P\\d+)/$', views.edit_topic, name='edit_topic'),\n # Страница редактирования текста изображения и удаления\n url(r'^edit_image/(?P\\d+)/$', views.edit_image, name='edit_image'),\n # Страница для удаления темы\n url(r'^delete_topic/(?P\\d+)/$', views.delete_topic, name='delete_topic'),\n # Страница для удаления фото\n url(r'^delete_image/(?P\\d+)/$', views.delete_image, name='delete_image'),\n # Страница изображения\n url(r'^image/(?P\\d+)/$', views.image, name='image'),\n # Страница добавления комментария\n url(r'^add_comment/(?P\\d+)/$', views.add_comment, name='add_comment'),\n # Страница лайка на странице изображеия\n url(r'^like_on_image/(?P\\d+)/$', views.like_on_image, name='like_on_image'),\n # Страница лайка на странице топика\n url(r'^like_on_topic/(?P\\d+)/$', views.like_on_topic, name='like_on_topic'),\n # Страница лайка на странице поиска\n url(r'^like_on_search/(?P\\d+)/$', views.like_on_search, name='like_on_search'),\n # Страница для удаления комментария\n url(r'^delete_comment/(?P\\d+)/$', views.delete_comment, name='delete_comment'),\n # Страница поиска\n url(r'^search/$', views.search, name='search'),\n]", "repo_name": "renenoir/pixland", "sub_path": "pixlands/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2708, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 37, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 39, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 41, 
"usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 43, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 45, "usage_type": "call"}]}
+{"seq_id": "42771241368", "text": "import random\nimport requests\n\nimport time\n\nstarttime = time.time()\n\nwhile True:\n print(\"tick\")\n\n soundVal = random.randint(0, 2000)\n motionVal = random.randint(0, 60)\n\n response = requests.post(\"http://127.0.0.1:5000/api/sound\",\n headers={\"Content-Type\": \"application/json\"},\n json={'value': soundVal,\n 'sensorId': 'SoundTest'})\n\n print(response.text)\n\n response2 = requests.post(\"http://127.0.0.1:5000/api/motion\",\n headers={\"Content-Type\": \"application/json\"},\n json={'value': motionVal,\n 'sensorId': 'MotionTest'})\n\n print(response2.text)\n\n time.sleep(60.0 - ((time.time() - starttime) % 60.0))\n", "repo_name": "DuncanBH/IoT-Security-Backend", "sub_path": "DummyDataCreator.py", "file_name": "DummyDataCreator.py", "file_ext": "py", "file_size_in_byte": 806, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "time.time", "line_number": 6, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 11, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 12, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "time.time", "line_number": 28, "usage_type": "call"}]}
+{"seq_id": "11472671664", "text": "import os\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import filedialog\r\nfrom tkinter import messagebox \r\nfrom datetime import datetime\r\n\r\n\r\ngui = Tk()\r\ngui.geometry(\"350x100\")\r\ngui.title(\"VLC OneClick Screen Capture !\")\r\n\r\n\r\ndef getFolderPath():\r\n folder_selected = filedialog.asksaveasfilename(initialdir = \"/\",title = \"Save file as...\",defaultextension='.mp4',filetype=[('*.mp4', 'MP4 Files')])\r\n folderPath.set(folder_selected)\r\n\r\ndef GO():\r\n start_time = datetime.now()\r\n folder = folderPath.get()\r\n \r\n if folder == '':\r\n messagebox.showerror(title=\"Error...!\", message=\"Empty Path, Please check your Path.\")\r\n \r\n else:\r\n os.system('cmd /c \"\"C:/Program Files/VideoLAN/VLC/vlc.exe\" screen:// --qt-start-minimized :screen-fps=25 :run-time=9999 :quiet :sout=#transcode{vcodec=h264,vb072}:standard{access=file,mux=mp4,dst='+folder+'}\"')\r\n end_time = datetime.now()\r\n messagebox.showinfo(title=\"Done\", message=('Duration: {}'.format(end_time - start_time)))\r\ndef ABOUT():\r\n messagebox.showinfo(\"ALU DEV TEAM @ 2022\", \"by nikkpap (nikkpap@gmail.com)\")\r\n\r\nfolderPath = StringVar()\r\n\r\nlbl1 = Label(gui ,text=\"Save as capture...\").grid(row=0,column = 0)\r\nentry1 = Entry(gui,textvariable=folderPath, state=DISABLED).grid(row=0,column=1)\r\n\r\nbtnBrowse = ttk.Button(gui, text=\"Browse\",command=getFolderPath).grid(row=0,column=2)\r\nbtnGO = ttk.Button(gui ,text=\"Go\", command=GO).grid(row=4,column=0)\r\nbtnAbout = ttk.Button(gui ,text=\"About\", command=ABOUT).grid(row=4,column=1)\r\n\r\ngui.mainloop()\r\n", "repo_name": "nikkpap/VLC-OneClick-Screen-Capture", "sub_path": "VLC OneClick Screen Capture.pyw", "file_name": "VLC OneClick Screen Capture.pyw", "file_ext": "pyw", "file_size_in_byte": 1560, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "tkinter.filedialog.asksaveasfilename", "line_number": 15, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 15, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 23, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 23, "usage_type": "name"}, {"api_name": "os.system", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 28, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 28, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 30, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 30, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 37, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 37, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 38, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 38, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 39, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 39, "usage_type": "name"}]}
+{"seq_id": "24293545362", "text": "import os\nimport re\nimport unicodedata\nimport string\nimport random\nimport base64\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\nfrom mongoengine import DoesNotExist\nfrom werkzeug._compat import text_type\nfrom werkzeug._compat import PY2\nfrom pypinyin import pinyin, Style\n\nuid_chars = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '0')\n\n_ascii_strip_re = {\n 'user': re.compile(r'[^A-Za-z0-9]'),\n 'app': re.compile(r'[^A-Za-z0-9-]'),\n 'module': re.compile(r'[^A-Za-z0-9]'),\n 'dataset': re.compile(r'[^A-Za-z0-9-]'),\n}\n\nsplit_re = {\n 'user': '[^A-Za-z0-9]',\n 'app': '[^A-Za-z0-9-]',\n 'module': '[^A-Za-z0-9]',\n 'dataset': '[^A-Za-z0-9-]',\n}\nconnector_re = {\n 'user': '',\n 'app': '-',\n 'module': '',\n 'dataset': '-',\n}\nAKEY = '27cfbc4d262403839797636105d0a476' # AES key must be either 16, 24, or 32 bytes long\n\n# iv = Random.new().read(AES.block_size)\niv = 'This is an IV456'\n\n\ndef encode(message):\n obj = AES.new(AKEY.encode(\"utf8\"), AES.MODE_CFB, iv.encode(\"utf8\"))\n message = bytes(message, encoding=\"utf8\")\n return base64.urlsafe_b64encode(obj.encrypt(message)).decode(\"utf-8\")\n\n\ndef decode(cipher):\n obj2 = AES.new(AKEY, AES.MODE_CFB, iv)\n if not isinstance(cipher, str):\n cipher = cipher.encode(\"uft-8\")\n return obj2.decrypt(base64.urlsafe_b64decode(cipher))\n\n\ndef generate_args_str(args):\n array = [\"%s=%s\" % (k, (v if not isinstance(v, str) else \"'%s'\" % v))\n for k, v in args.items()]\n return ', '.join(array)\n\n\n# def remove_dot(string):\n# string.replace('.', '')\n# return string\n\n\ndef slugify(value, allow_unicode=False):\n \"\"\"\n Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.\n Remove characters that aren't alphanumerics, underscores, or hyphens.\n Convert to lowercase. Also strip leading and trailing whitespace.\n \"\"\"\n if value == '':\n value = 'field' + rand_str(3)\n\n value = str(value)\n if allow_unicode:\n value = unicodedata.normalize('NFKC', value)\n else:\n value = unicodedata.normalize('NFKD', value).encode(\n 'ascii', 'ignore').decode('ascii')\n value = re.sub(r'[^\\w\\s-]', '', value).strip()\n return re.sub(r'[-\\s]+', '-', value)\n\n\ndef rand_str(length):\n return ''.join(\n random.choices(string.ascii_uppercase + string.digits, k=length))\n\n\ndef split_without_empty(string):\n return [x.strip() for x in string.split(',') if x]\n\n\n# werkzeug.utils\ndef secure_name(filename, type='user'):\n r\"\"\"Pass it a filename and it will return a secure version of it. This\n filename can then safely be stored on a regular file system and passed\n to :func:`os.path.join`. The filename returned is an ASCII only string\n for maximum portability.\n On windows systems the function also makes sure that the file is not\n named after one of the special device files.\n >> secure_filename(\"My cool movie.mov\")\n 'My_cool_movie.mov'\n >> secure_filename(\"../../../etc/passwd\")\n 'etc_passwd'\n >> secure_filename(u'i contain cool \\xfcml\\xe4uts.txt')\n 'i_contain_cool_umlauts.txt'\n The function might return an empty filename. It's your responsibility\n to ensure that the filename is unique and that you abort or\n generate a random filename if the function returned an empty one.\n .. 
versionadded:: 0.5\n :param filename: the filename to secure\n :param type: ['user', 'app', 'module', 'dataset']\n \"\"\"\n if isinstance(filename, text_type):\n filename = ''.join([p[0] for p in pinyin(filename, style=Style.TONE2)])\n from unicodedata import normalize\n filename = normalize('NFKD', filename).encode('ascii', 'ignore')\n if not PY2:\n filename = filename.decode('ascii')\n for sep in os.path.sep, os.path.altsep:\n if sep:\n filename = filename.replace(sep, ' ')\n filename = str(\n re.compile(r'[^A-Za-z0-9-_]').sub('', connector_re[type].join(\n re.split('[\\-_ ]+', filename)))).strip(\n connector_re[type]).lower()\n\n return filename\n\n\ndef short_uid(uid_length):\n count = len(uid_chars) - 1\n c = ''\n for i in range(0, uid_length):\n c += uid_chars[random.randint(0, count)]\n return c\n\n\ndef gen_rand_name(name, get_func, times=1, **kwargs):\n from server3.constants import RCUserDoesNotExists\n for i in range(times):\n try:\n get_func(name, **kwargs)\n except (DoesNotExist, RCUserDoesNotExists):\n break\n else:\n name += short_uid(2)\n return name\n\n\ndef gen_rand_str(N=8, low=False):\n if low:\n return ''.join(\n random.choices(string.ascii_lowercase + string.digits, k=N))\n return ''.join(random.choices(string.ascii_uppercase + string.digits, k=N))\n", "repo_name": "yssAI/tp_project", "sub_path": "server3/utility/str_utility.py", "file_name": "str_utility.py", "file_ext": "py", "file_size_in_byte": 4767, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "re.compile", "line_number": 17, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 18, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 19, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 20, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 42, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 42, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CFB", "line_number": 42, "usage_type": "attribute"}, {"api_name": "base64.urlsafe_b64encode", "line_number": 44, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 48, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 48, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CFB", "line_number": 48, "usage_type": "attribute"}, {"api_name": "base64.urlsafe_b64decode", "line_number": 51, "usage_type": "call"}, {"api_name": "unicodedata.normalize", "line_number": 76, "usage_type": "call"}, {"api_name": "unicodedata.normalize", "line_number": 78, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 80, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 81, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 86, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 86, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 86, "usage_type": "attribute"}, {"api_name": "string.split", "line_number": 90, "usage_type": "call"}, {"api_name": "werkzeug._compat.text_type", "line_number": 114, "usage_type": "argument"}, {"api_name": "pypinyin.pinyin", "line_number": 115, "usage_type": "call"}, {"api_name": "pypinyin.Style.TONE2", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pypinyin.Style", "line_number": 115, "usage_type": "name"}, {"api_name": "unicodedata.normalize", "line_number": 117, "usage_type": "call"}, {"api_name": "werkzeug._compat.PY2", 
"line_number": 118, "usage_type": "name"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 124, "usage_type": "call"}, {"api_name": "re.split", "line_number": 125, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 135, "usage_type": "call"}, {"api_name": "mongoengine.DoesNotExist", "line_number": 144, "usage_type": "name"}, {"api_name": "server3.constants.RCUserDoesNotExists", "line_number": 144, "usage_type": "name"}, {"api_name": "random.choices", "line_number": 154, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 154, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 154, "usage_type": "attribute"}, {"api_name": "random.choices", "line_number": 155, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 155, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 155, "usage_type": "attribute"}]}
+{"seq_id": "29003772881", "text": "# -*- coding: utf-8 -*-\nimport re\n\nimport scrapy\n\nfrom freelance.items import TaskItem, PRICE_PERIOD_HOURLY, PRICE_PERIOD_PROJECT\n\n\nre_task_id = re.compile(r'\\/(\\d+)\\/')\n\n\nclass FlSpider(scrapy.Spider):\n name = 'fl'\n allowed_domains = ['fl.ru']\n start_urls = ['https://www.fl.ru/projects/']\n\n def parse(self, response):\n for dom_item in response.css('.b-post'):\n task_url = response.urljoin(dom_item.css('.b-post__link::attr(href)').extract_first())\n task_title = dom_item.css('.b-post__link::text').extract_first()\n\n item = TaskItem(\n url=task_url,\n title=task_title,\n )\n\n yield scrapy.Request(url=task_url, callback=self.parse_detail, meta={\"item\": item})\n\n def parse_detail(self, response):\n task_id = self.get_id(response)\n item = response.meta.get('item')\n\n item['description'] = self.get_description(response, task_id)\n\n yield item\n\n\n def get_description(self, response, id):\n try:\n return response.css('#projectp' + str(id)).extract_first().strip()\n except (AttributeError) as e:\n self.logger.warn('get description error (%s) %s' % (response.url, e))\n return ''\n\n @staticmethod\n def get_id(response):\n return re.search(r'/(\\d+)/', response.url).group(1)", "repo_name": "power-freelance/examples", "sub_path": "python/freelance-parser/freelance/spiders/fl.py", "file_name": "fl.py", "file_ext": "py", "file_size_in_byte": 1358, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "re.compile", "line_number": 9, "usage_type": "call"}, {"api_name": "scrapy.Spider", "line_number": 12, "usage_type": "attribute"}, {"api_name": "freelance.items.TaskItem", "line_number": 22, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 27, "usage_type": "call"}, {"api_name": "re.search", "line_number": 47, "usage_type": "call"}]}
+{"seq_id": "69824383286", "text": "import argparse\nimport glob\nimport io\nimport json\nimport multiprocessing as mp\nimport os\nfrom os.path import basename, exists\n\nfrom cytoolz import curry\nimport numpy as np\nfrom tqdm import tqdm\nimport lmdb\n\nimport msgpack\nimport msgpack_numpy\nmsgpack_numpy.patch()\n\n\ndef _compute_nbb(img_dump, conf_th, max_bb, min_bb, num_bb):\n num_bb = max(min_bb, (img_dump['conf'] > conf_th).sum())\n num_bb = min(max_bb, num_bb)\n return int(num_bb)\n\n\n@curry\ndef load_npz(conf_th, max_bb, min_bb, num_bb, fname, keep_all=False):\n try:\n img_dump = np.load(fname, allow_pickle=True)\n if keep_all:\n nbb = None\n else:\n nbb = _compute_nbb(img_dump, conf_th, max_bb, min_bb, num_bb)\n dump = {}\n for key, arr in img_dump.items():\n if arr.dtype == np.float32:\n arr = arr.astype(np.float16)\n if arr.ndim == 2:\n dump[key] = arr[:nbb, :]\n elif arr.ndim == 1:\n dump[key] = arr[:nbb]\n else:\n raise ValueError('wrong ndim')\n except Exception as e:\n # corrupted file\n print(f'corrupted file {fname}', e)\n dump = {}\n nbb = 0\n\n name = basename(fname)\n return name, dump, nbb\n\n\ndef dumps_npz(dump, compress=False):\n with io.BytesIO() as writer:\n if compress:\n np.savez_compressed(writer, **dump, allow_pickle=True)\n else:\n np.savez(writer, **dump, allow_pickle=True)\n return writer.getvalue()\n\n\ndef dumps_msgpack(dump):\n return msgpack.dumps(dump, use_bin_type=True)\n\n\ndef main(opts):\n if opts.img_dir[-1] == '/':\n opts.img_dir = opts.img_dir[:-1]\n split = basename(opts.img_dir)\n if opts.keep_all:\n db_name = 'all'\n else:\n if opts.conf_th == -1:\n db_name = f'feat_numbb{opts.num_bb}'\n else:\n db_name = (f'feat_th{opts.conf_th}_max{opts.max_bb}'\n f'_min{opts.min_bb}')\n if opts.compress:\n db_name += '_compressed'\n if not exists(f'{opts.output}/{split}'):\n os.makedirs(f'{opts.output}/{split}')\n env = lmdb.open(f'{opts.output}/{split}/{db_name}', map_size=1024**4)\n txn = env.begin(write=True)\n files = glob.glob(f'{opts.img_dir}/*.npz')\n load = load_npz(opts.conf_th, opts.max_bb, opts.min_bb, opts.num_bb,\n keep_all=opts.keep_all)\n name2nbb = {}\n with mp.Pool(opts.nproc) as pool, tqdm(total=len(files)) as pbar:\n for i, (fname, features, nbb) in enumerate(\n pool.imap_unordered(load, files, chunksize=128)):\n if not features:\n continue # corrupted feature\n if opts.compress:\n dump = dumps_npz(features, compress=True)\n else:\n dump = dumps_msgpack(features)\n txn.put(key=fname.encode('utf-8'), value=dump)\n if i % 1000 == 0:\n txn.commit()\n txn = env.begin(write=True)\n name2nbb[fname] = nbb\n pbar.update(1)\n txn.put(key=b'__keys__',\n value=json.dumps(list(name2nbb.keys())).encode('utf-8'))\n txn.commit()\n env.close()\n if opts.conf_th != -1 and not opts.keep_all:\n with open(f'{opts.output}/{split}/'\n f'nbb_th{opts.conf_th}_'\n f'max{opts.max_bb}_min{opts.min_bb}.json', 'w') as f:\n json.dump(name2nbb, f)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--img_dir\", default=None, type=str,\n help=\"The input images.\")\n parser.add_argument(\"--output\", default=None, type=str,\n help=\"output lmdb\")\n parser.add_argument('--nproc', type=int, default=8,\n help='number of cores used')\n parser.add_argument('--compress', action='store_true',\n help='compress the tensors')\n parser.add_argument('--keep_all', action='store_true',\n help='keep all features, overrides all following args')\n parser.add_argument('--conf_th', type=float, default=0.2,\n help='threshold for dynamic bounding 
boxes '\n '(-1 for fixed)')\n parser.add_argument('--max_bb', type=int, default=100,\n help='max number of bounding boxes')\n parser.add_argument('--min_bb', type=int, default=10,\n help='min number of bounding boxes')\n parser.add_argument('--num_bb', type=int, default=100,\n help='number of bounding boxes (fixed)')\n args = parser.parse_args()\n main(args)\n", "repo_name": "ChenRocks/UNITER", "sub_path": "scripts/convert_imgdir.py", "file_name": "convert_imgdir.py", "file_ext": "py", "file_size_in_byte": 4673, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 752, "dataset": "github-code", "pt": "76", "api": [{"api_name": "msgpack_numpy.patch", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.float16", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 49, "usage_type": "call"}, {"api_name": "cytoolz.curry", "line_number": 25, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.savez_compressed", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.savez", "line_number": 58, "usage_type": "call"}, {"api_name": "msgpack.dumps", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 80, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 81, "usage_type": "call"}, {"api_name": "lmdb.open", "line_number": 82, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 84, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 88, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 88, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 104, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 111, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 115, "usage_type": "call"}]}
+{"seq_id": "12439576161", "text": "#!/usr/bin/env python\n# Written by Miguel Brown, 2016-Apr-27. Takes table with gene metrics and selects for high variability\n'''\nWritten by Miguel Brown, 2016-Apr-27. Bins and normalizes genes for last step in drop-seq training set creation\nPrints to stdout, specify > output_file at end!!!\nUsage: ./3_bin_norm_sel.py \n\nArguments:\n table with gene metrics mean, variance and disperion\n Z score cutoff for selection\n\nOptions:\n-h\n'''\nimport sys\nfrom docopt import docopt\nimport numpy\nfrom scipy import stats\n\nargs = docopt(__doc__)\n\ntable = open(args[''], 'r')\nscore = float(args[''])\nnbins = 20\n\nhead = next(table)\n# get range of dataset to bin\ngenes = []\nmeans = []\ndm = []\nfor line in table:\n data = line.rstrip('\\n').split('\\t')\n genes.append(data[0])\n means.append(float(data[1]))\n dm.append(float(data[-1]))\ntable.close()\n# binning done by means\n(hist, bins) = numpy.histogram(means, nbins)\npos = numpy.digitize(means, bins)\nsys.stderr.write('Bin edges:\\n')\nfor i in bins:\n sys.stderr.write('\\t' + str(i) + '\\n')\n# \"Validation\" done by dispersion metric!\nsys.stdout.write('bin\\tgene\\tdm\\tzscore\\n')\nfor i in xrange(0, len(bins), 1):\n cur = []\n ind = []\n for j in xrange(0, len(pos), 1):\n if pos[j] == i:\n ind.append(j)\n cur.append(dm[j])\n if len(cur) > 1:\n zcur = stats.zscore(cur)\n else:\n sys.stderr.write('Nothing fit into bin ' + str(i) + '\\n')\n continue\n flag = 0\n for j in xrange(0, len(cur), 1):\n if zcur[j] >= score:\n sys.stdout.write(str(i) + '\\t' + '\\t'.join((genes[ind[j]], str(dm[ind[j]]), str(zcur[j]))) + '\\n')\n flag = 1\n if flag == 0:\n sys.stderr.write('Nothing variable enough in bin ' + str(i) + '\\n')", "repo_name": "WhiteLab/dropseq", "sub_path": "3_bin_norm_sel.py", "file_name": "3_bin_norm_sel.py", "file_ext": "py", "file_size_in_byte": 1800, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "docopt.docopt", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.digitize", "line_number": 39, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 42, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 42, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 44, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 44, "usage_type": "attribute"}, {"api_name": "scipy.stats.zscore", "line_number": 53, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 53, "usage_type": "name"}, {"api_name": "sys.stderr.write", "line_number": 55, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 55, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 60, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 60, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 63, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 63, "usage_type": "attribute"}]}
+{"seq_id": "39669117297", "text": "# -*- coding: utf-8 -*-\n# @Time : 2021/7/19 13:45\n# @Author : Yize Wang\n# @File : load_cases.py\n# @Software: AutoBladed\n\nimport numpy as np\nimport pandas as pd\nfrom PyQt5.QtCore import QThread, pyqtSignal\n\nfrom exceptions import SheetNameError\n\n\nclass CasesConfig(QThread):\n\t\"\"\"\n\twhen the cases configuration is large, the parse time would be so long\n\thence, here I use thread to encapsulate this calculations\n\t\"\"\"\n\n\thead = \"head\"\n\titems = \"items\"\n\n\tfinish_signal = pyqtSignal(bool, str)\n\n\tdef __init__(self, filename: str):\n\t\tsuper(CasesConfig, self).__init__()\n\n\t\t# record input\n\t\tself.filename = filename\n\n\t\t# use dictionary to store the cases configurations\n\t\tself.majors = []\t\t\t# major cases\n\t\tself.minors = []\t\t\t# minor cases\n\t\tself.cases_config = []\t\t# their configurations\n\n\t\treturn\n\n\tdef run(self):\n\t\traw_data = pd.read_excel(self.filename, sheet_name=None, engine=\"openpyxl\")\n\n\t\t# parse majors of the cases\n\t\tself.parse_majors(raw_data)\n\n\t\ttry:\n\t\t\t# parse minors of the cases\n\t\t\tself.parse_minors(raw_data)\n\t\texcept Exception as exc:\n\t\t\tself.finish_signal.emit(False, str(exc))\n\t\telse:\n\t\t\t# parse configurations\n\t\t\tself.parse_configs(raw_data)\n\n\t\t\tself.finish_signal.emit(True, \"Successful\")\n\n\t\treturn\n\n\tdef parse_majors(self, raw_data: dict):\n\t\t# all the sheets\n\t\tsheets = raw_data.keys()\n\t\t# at the left of dot\n\t\tself.majors = list(set([sheet.split(\".\")[0].strip() for sheet in sheets]))\n\t\t# sort them\n\t\tself.majors.sort()\n\n\t\treturn\n\n\tdef parse_minors(self, raw_data: dict):\n\t\tnum_majors = len(self.majors)\n\t\t# initialize the minors array\n\t\tself.minors = [[] for i in range(num_majors)]\n\n\t\tsheets = raw_data.keys()\n\t\tfor sheet in sheets:\n\t\t\tmajor_and_minor = sheet.split(\".\")\n\t\t\tif len(major_and_minor) != 2:\n\t\t\t\traise SheetNameError(self.filename)\n\n\t\t\t# loop for each sheet\n\t\t\tidx = self.majors.index(major_and_minor[0].strip())\n\t\t\t# append this sheet\n\t\t\tself.minors[idx].append(major_and_minor[1].strip())\n\n\t\tfor i in range(num_majors):\n\t\t\t# sort them\n\t\t\tself.minors[i].sort()\n\n\t\treturn\n\n\tdef parse_configs(self, raw_data: dict):\n\t\t# initialize the empty configuration list\n\t\tnum_major = len(self.majors)\n\t\tfor i in range(num_major):\n\t\t\ttemp = [1 for j in range(len(self.minors[i]))]\n\t\t\tself.cases_config.append(temp)\n\n\t\tfor sheet, value in raw_data.items():\n\t\t\tmajor, minor = sheet.split(\".\")\n\t\t\tidx_major = self.majors.index(major.strip())\n\t\t\tidx_minor = self.minors[idx_major].index(minor.strip())\n\n\t\t\thead = value.columns\n\t\t\tvalue = np.array(value).tolist()\n\t\t\tself.cases_config[idx_major][idx_minor] = {self.head: head, self.items: value}\n\n\t\treturn\n\n\nif __name__ == '__main__':\n\timport sys\n\tfrom PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton\n\n\t# initialize an application instance\n\tapp = QApplication(sys.argv)\n\n\twindow = QMainWindow()\n\twindow.setFixedSize(800, 300)\n\tbtn = QPushButton(window)\n\n\twork = CasesConfig(\"../../data/LoadCases.xlsx\")\n\tbtn.clicked.connect(work.start)\n\n\twindow.show()\n\n\tsys.exit(app.exec_())", "repo_name": "wangyize0125/AutoBladed", "sub_path": "src/kernels/load_cases.py", "file_name": "load_cases.py", "file_ext": "py", "file_size_in_byte": 2958, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "76", "api": [{"api_name": "PyQt5.QtCore.QThread", "line_number": 14, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 39, "usage_type": "call"}, {"api_name": "exceptions.SheetNameError", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 102, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 113, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 113, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 115, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 117, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 124, "usage_type": "call"}]}
+{"seq_id": "37765885746", "text": "from nltk import CFG, Tree\nimport copy\nfrom pprint import pprint\n\n# question1.py (edited by Pavlos Musenidis)\n# Jonas Kuhn, University of Stuttgart, 2020\n# course \"Parsing\"\n\n# Boolean variable for switching tracing info on and off\ntrace = True # set this to False if you don't want to see intermediate steps\n\n# Boolean variable for running parser interactively on user input or on pre-specified input\ninteractive = False # True\n\n# internal format of cfg production rules with reversed right-hand sides (!)\n\ngrammar = \"\"\"\nS -> NP VP \nNP -> DET N | DET N PP | 'I'\nVP -> V | V NP | V NP PP\nPP -> P NP \nDET -> 'the' | 'an' | 'my' | 'most'\nP -> 'in'\nN -> 'elephant' | 'elephants' | 'mouse' | 'mice' | 'pajamas'\nV -> 'sneezed' | 'giggled' | 'trumpeted' | 'saw' | 'shot'\n\"\"\"\n\n\ndef load_grammar(grammar):\n G = {}\n cfg = CFG.fromstring(grammar)\n for p in cfg.productions():\n p = p.__str__().split()\n for i in range(len(p)):\n p[i] = p[i].strip(\"'\")\n G.setdefault(p[0], [])\n right = p[2:]\n right.reverse()\n G[p[0]].append(right)\n return G\n\n\n# main procedure:\ndef parse(G, tokens):\n # G: dict with list of reversed rhs's for each non-terminal\n # tokens: list of input tokens\n\n if trace: print(\"parsing \", tokens, \"...\")\n\n # initialize data structures:\n stack = ['S']\n inbuffer = tokens\n seq = []\n agenda = []\n solutions = []\n\n # main loop:\n while True:\n if trace: print(' {:<40}{:>40}'.format(str(stack), str(inbuffer)))\n\n # expand\n if stack != [] and inbuffer != [] and stack[-1] in G:\n replace = stack[-1]\n if [inbuffer[0]] in G[replace]:\n if trace: print(\" >expand: \", stack[-1], \" -R-> \", G[stack[-1]][0])\n right = G[replace][G[replace].index([inbuffer[0]])]\n seq.append((stack[-1], len(right)))\n del stack[-1]\n stack += right\n else:\n for production in G[replace]:\n new_seq = copy.deepcopy(seq)\n new_stack = copy.deepcopy(stack)\n new_inbuffer = copy.deepcopy(inbuffer)\n new_seq.append((new_stack[-1], len(production)))\n del new_stack[-1]\n new_stack += production\n last = production\n agenda.append((new_stack, new_inbuffer, new_seq))\n stack = agenda[-1][0]\n inbuffer = agenda[-1][1]\n seq = agenda[-1][2]\n del agenda[-1]\n if trace: print(\" >expand: \", replace, \" -R-> \", last)\n\n\n # match\n elif stack != [] and inbuffer != [] and stack[-1] == inbuffer[0]:\n if trace: print(\" >match: \", stack[-1], \" -R-> \", inbuffer[0])\n seq.append((stack[-1], 0))\n del stack[-1]\n del inbuffer[0]\n\n\n # termination\n elif stack == inbuffer == []:\n if trace: print(' {:<40}{:>40}'.format(str(stack), str(inbuffer)))\n solutions.append(seq)\n print(\"found one solution!\\n\")\n if agenda != []:\n print(\"searching for more solutions...\\n\")\n stack = agenda[-1][0]\n inbuffer = agenda[-1][1]\n seq = agenda[-1][2]\n del agenda[-1]\n else:\n if solutions == []:\n print(\"failure!\\n\\n\\n\\n\\n\\n\\n\")\n else:\n print(\"success!\\n\\n\\n\\n\\n\\n\\n\")\n return solutions\n else:\n if trace: print(\" >dead end!\")\n if agenda != []:\n print(\"searching for more solutions...\\n\")\n stack = agenda[-1][0]\n inbuffer = agenda[-1][1]\n seq = agenda[-1][2]\n del agenda[-1]\n else:\n if solutions == []:\n print(\"failure!\\n\\n\\n\\n\\n\\n\\n\")\n else:\n print(\"success!\\n\\n\\n\\n\\n\\n\\n\")\n return solutions\n\n\ndef build_tree(seq):\n if seq == []:\n return []\n else:\n sub = seq[0]\n del seq[0]\n subtrees = []\n for i in range(sub[1]):\n subtree = build_tree(seq)\n subtrees.append(subtree[0])\n 
return(Tree(sub[0], subtrees), seq)\n\n\ndef demo():\n G = load_grammar(grammar)\n if trace: print(\"Internal grammar representation:\\n\", grammar)\n\n if interactive:\n while True:\n # interactive way of running the parser in user input:\n\n sentence = input('Type sentence or type \"q\" to exit: ') # user can input the string to be parsed\n if sentence != \"q\":\n tokens = sentence.split() # split up string in tokens (using the default separator, i.e. space)sequence = parse(G, tokens)\n solutions = parse(G, tokens)\n for sequence in solutions:\n parsetree = build_tree(sequence)\n parsetree[0].draw()\n else:\n exit()\n else:\n tokens = \"the elephant saw the mouse\".split()\n solutions = parse(G, tokens)\n for sequence in solutions:\n parsetree = build_tree(sequence)\n parsetree[0].draw()\n tokens = \"I shot the elephant shot my pajamas\".split()\n solutions = parse(G, tokens)\n for sequence in solutions:\n parsetree = build_tree(sequence)\n parsetree[0].draw()\n tokens = \"I shot the elephant in my pajamas\".split()\n solutions = parse(G, tokens)\n for sequence in solutions:\n parsetree = build_tree(sequence)\n parsetree[0].draw()\n\n\ndemo()\n", "repo_name": "Pavlos-96/Parsing_Project", "sub_path": "5_2.py", "file_name": "5_2.py", "file_ext": "py", "file_size_in_byte": 5766, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "nltk.CFG.fromstring", "line_number": 31, "usage_type": "call"}, {"api_name": "nltk.CFG", "line_number": 31, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 72, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 73, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 74, "usage_type": "call"}, {"api_name": "nltk.Tree", "line_number": 138, "usage_type": "call"}]}
+{"seq_id": "1289380763", "text": "import numpy as np\nimport os\nimport grn_sim as sim\n\nfrom matplotlib import rc, gridspec\nimport matplotlib.pyplot as plt\n\nrc('font', **{'family':'serif','serif':['Palatino']})\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n\nrc('text',usetex=True)\nrc('text.latex', preamble=r'\\usepackage{amssymb}') \n\n# dynamics\ng1a=lambda g2,m1,m2,tau: m1*tau/(1+g2*g2)\ng1b=lambda g2,m1,m2,tau: np.sqrt(m2*tau/g2-1)\n\ng1dot = lambda g1,g2, m1, m2, tau: m1/(1+g2*g2)-g1/tau\ng2dot = lambda g1,g2, m1, m2, tau: m2/(1+g1*g1)-g2/tau\n\n#solve for stuff\nsn_m1 = np.arange(2,4.1,0.01)\npf_tau = np.arange(0.1,4.2,0.02)\nsn_g1 = [sim.get_yss(3, m1, 1, False) for m1 in sn_m1]\npf_g1 = [sim.get_yss(1,1,tau,False) for tau in pf_tau]\n\nsn_zs = [np.sort(z) for z in sn_g1]\nsn_n0 = np.array([z[0] for z in sn_zs])\nsn_n1 = np.array([z[2] for z in sn_zs if len(z)>1])\nsn_s = np.array([z[1] for z in sn_zs if len(z)>1])\nsn_s_m1 = np.array([sn_m1[i] for i in range(sn_m1.shape[0]) if sn_g1[i].shape[0]>1])\n\n\npf_zs = [np.sort(z) for z in pf_g1]\npf_n0 = np.array([z[0] for z in pf_zs])\npf_n1 = np.array([z[2] for z in pf_zs if len(z)>1])\npf_s = np.array([z[1] for z in pf_zs if len(z)>1])\npf_s_tau = np.array([pf_tau[i] for i in range(pf_tau.shape[0]) if pf_g1[i].shape[0]>1])\n\n#### the figure ####\n### plt.style.reload_library()\nplt.style.use('sty/one_col_fig.mplstyle')\n# nr = 90\n# nc = 45\n\nspc_ht = 3\ntau_ht = 3\nm1_ht = 7\n\nmarg_wd = 3\nm1_wd = 9\nspc_wd = 2\ntau_wd = 9\n\n# row heights\nhts = np.array([\n\n tau_ht,\n tau_ht,\n tau_ht,\n spc_ht,\n m1_ht\n])\n\nwds = np.array([\n marg_wd,\n m1_wd,\n spc_wd,\n tau_wd\n])\n\nrs = np.cumsum(hts) # starting rows\ncs = np.cumsum(wds) # starting cols\n\nnr = np.sum(hts)\nnc = np.sum(wds)\n\nwid = 8.7/2.54\nht = wid*nr/nc\n\nfig = plt.figure(figsize=(wid, ht), dpi=200)\n\ngs = gridspec.GridSpec(nr, nc, hspace=0)\n\n# g1 vs g2\n#axA = plt.subplot( gs[0 :rs[2], cs[0]:cs[1]]) # m1\naxA1 = plt.subplot( gs[0 :rs[0], cs[0]:cs[1]]) # m1\naxA2 = plt.subplot( gs[rs[0]:rs[1], cs[0]:cs[1]]) # m1\naxA3 = plt.subplot( gs[rs[1]:rs[2], cs[0]:cs[1]]) # m1\n\naxB1 = plt.subplot( gs[0 :rs[0], cs[2]:cs[3]]) # tau\naxB2 = plt.subplot( gs[rs[0]:rs[1], cs[2]:cs[3]]) # tau\naxB3 = plt.subplot( gs[rs[1]:rs[2], cs[2]:cs[3]]) # tau\n\naxC = plt.subplot( gs[rs[3]:rs[4], cs[0]:cs[1]]) # g1 vs m1\naxD = plt.subplot( gs[rs[3]:rs[4], cs[2]:cs[3]]) # g1 vs m1\n\ncaps = ['A','B','C','D']\nri = [0,0,rs[3],rs[3]]\nci = [0,cs[1],0,cs[1]]\nys = [1,1,2.5,2.5]\nfor i in range(len(caps)):\n\n cap_ax=plt.subplot(gs[ri[i]:ri[i]+1,ci[i]:ci[i]+1])\n cap_ax.text(s=caps[i],\n x=0,y=ys[i],fontsize=14, verticalalignment='top',horizontalalignment='left')\n cap_ax.axis('off')\n\n\n#########################################\n###### A-B: phase diagrams ##############\n#########################################\ntaus = np.array([[1,1,1],[0.5,2,4]])\nm1s = np.array([[2,3,6],[1,1,1]])\nm2s = np.array([[3,3,3],[1,1,1]])\n\nlss = ['-','--']\n\naxs = [[axA1,axA2,axA3],[axB1, axB2, axB3]]\n\nming1 = 0.1\nmaxg1 = 10\nming2 = 0.03\nmaxg2 = 5\nnv1d = 20\nnpts = 100\n\n# g1s = np.linspace(0.1,4,100)\n# g2s = np.linspace(0.1,4,100)\n\n#g1s = np.linspace(ming1,maxg1,npts)\ng2s = np.linspace(ming2,maxg2,npts)\n\nxs = np.linspace(ming2, maxg2, nv1d)\nys = np.linspace(ming1, maxg1, nv1d)\n\ng2s = np.logspace(np.log10(ming2),np.log10(maxg2),npts)\nxs = np.logspace(np.log10(ming2)-1, np.log10(maxg2)+1, nv1d)\nys = np.logspace(np.log10(ming1)-1, np.log10(maxg1)+1, nv1d)\n\nxx, yy = 
np.meshgrid(xs,ys)\nxxf = xx.reshape(-1)\nyyf = yy.reshape(-1)\nrs = np.vstack([xxf,yyf]).T\nzfills = ['none','none','none']\nzmarks = ['o','s','o']\nfc = ['gray','none','gray']\nfor i in range(len(axs)):\n for j in range(len(axs[i])):\n ax = axs[i][j]\n tau = taus[i,j]\n m1 = m1s[i,j]\n m2 = m2s[i,j]\n g2i = np.where(m2*tau/g2s>=1)[0]\n\n ncg2 = g2s[g2i]\n ncg1a = g1a(ncg2, m1, m2, tau)\n ncg1b = g1b(ncg2, m1, m2, tau)\n\n ax.plot(ncg2, ncg1a, color = 'r', ls = lss[0],lw=1,zorder=1)\n ax.plot(ncg2, ncg1b, color = 'b', ls = lss[1],lw=1,zorder=1)\n\n g2zs = np.sort(sim.get_yss(m1,m2,tau,False))\n g1zs = g1a(g2zs, m1, m2, tau)\n\n # plot the zeros\n for k in range(g2zs.shape[0]):\n ax.plot(g2zs[k],g1zs[k],color='k',marker=zmarks[k],\n markeredgewidth=1,markersize=4,alpha=1,zorder=2,markerfacecolor=fc[k])#fillstyle=zfills[k],\n\n # plot the vector field\n uu = g2dot(yy,xx,m1,m2,tau)\n vv = g1dot(yy,xx,m1,m2,tau)\n uvnorm = np.sqrt(uu*uu + vv*vv)\n uuh = uu/uvnorm\n vvh = vv/uvnorm\n ax.quiver(xxf, yyf, uuh, vvh, color = 'k', width=0.004,\n headwidth=5, headlength=4,alpha=0.55,pivot='tip',scale=20,zorder=0)\n\n skip = 10\n ncg1vs = np.array([ncg1a[::skip], ncg1b[::skip]])\n ncg2v = ncg2[::skip]\n for k in range(ncg1vs.shape[0]):\n ncg1v = ncg1vs[k]\n uu = g2dot(ncg1v,ncg2v,m1,m2,tau)\n vv = g1dot(ncg1v,ncg2v,m1,m2,tau)\n uvnorm = np.sqrt(uu*uu + vv*vv)\n uuh = uu/uvnorm\n vvh = vv/uvnorm\n ax.quiver(ncg2v, ncg1v, uuh, vvh, color = 'k', width=0.004,\n headwidth=5, headlength=4,alpha=1,pivot='tip',scale=20,zorder=3)\n\n # format\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlim(ming2,maxg2)\n ax.set_ylim(ming1,maxg1)\n\n\n if i==1:\n ax.set_yticklabels([])\n label=r'$k_D=${0:.2f}'.format(1/tau)\n else:\n label=r'$m_1=${0}'.format(m1)\n\n if j>0:\n ax.set_yticks([0.1,1])\n ax.set_yticklabels([\"0.1\",\"1\"])\n else:\n ax.set_yticks([0.1,1,10])\n ax.set_yticklabels([\"0.1\",\"1\",\"10\"])\n\n props = dict(boxstyle='round,pad=0.01', facecolor='wheat', alpha=0.5,ec='none')\n# ax.text(x=0.98,y=0.95,s=label,transform=ax.transAxes,\n# verticalalignment='top', horizontalalignment='right',bbox=props,fontsize=6)\n ax.text(x=0.02,y=0.02,s=label,transform=ax.transAxes,\n verticalalignment='bottom', horizontalalignment='left',bbox=props,fontsize=6)\n\n if j==2:\n ax.set_xticks([0.1,1])\n ax.set_xticklabels([\"0.1\",\"1\"])\n\n\n\naxA3.set_xlabel(r'$g_2$',labelpad=-4)\naxB3.set_xlabel(r'$g_2$',labelpad=-4)\naxA2.set_ylabel(r'$g_1$')\n\n########################################\n#######C: saddle node g1################\n########################################\naxC.plot(sn_m1, sn_n0,'ko',markersize=0.5)\naxC.plot(sn_s_m1, sn_n1,'ko', markersize=0.5)\naxC.plot(sn_s_m1, sn_s,'k--',fillstyle='none')\n\naxC.set_xlabel(r'$m_1$')\naxC.set_ylabel(r'$g_1$')\n########################################\n#######D: pitchfork g1################\n########################################\naxD.plot(pf_tau, pf_n0,'ko',markersize=0.5)\naxD.plot(pf_s_tau, pf_n1,'ko',markersize=0.5)\naxD.plot(pf_s_tau, pf_s,'k--',fillstyle='none')\n\naxD.set_xlabel(r'$1/k_D$')\n\nfigdir = 'figs'\nos.makedirs(figdir, exist_ok=True)\nplt.savefig('{0}/figS2_dyn_syss.pdf'.format(figdir), bbox_inches='tight')\n", "repo_name": "Simfreed/sc_bifurc_figs", "sub_path": "python/phase_planes.py", "file_name": "phase_planes.py", "file_ext": "py", "file_size_in_byte": 7176, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "matplotlib.rc", "line_number": 8, "usage_type": 
"call"}, {"api_name": "matplotlib.rc", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.rc", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.rc", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 23, "usage_type": "call"}, {"api_name": "grn_sim.get_yss", "line_number": 24, "usage_type": "call"}, {"api_name": "grn_sim.get_yss", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 42, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.subplot", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 163, "usage_type": "call"}, {"api_name": "grn_sim.get_yss", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 187, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 248, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 249, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 249, "usage_type": "name"}]}
+{"seq_id": "42239678543", "text": "import random\nfrom typing import Dict, Tuple\n\nimport gymnasium as gym\nimport numpy as np\nimport pytest\nfrom gymnasium import spaces\nfrom stable_baselines3.common.callbacks import EventCallback, StopTrainingOnNoModelImprovement\nfrom stable_baselines3.common.env_util import make_vec_env\nfrom stable_baselines3.common.envs import FakeImageEnv, IdentityEnv, IdentityEnvBox\nfrom stable_baselines3.common.monitor import Monitor\nfrom stable_baselines3.common.policies import ActorCriticPolicy\n\nfrom sb3_contrib import MaskablePPO\nfrom sb3_contrib.common.envs import InvalidActionEnvDiscrete, InvalidActionEnvMultiBinary, InvalidActionEnvMultiDiscrete\nfrom sb3_contrib.common.maskable.callbacks import MaskableEvalCallback\nfrom sb3_contrib.common.maskable.evaluation import evaluate_policy\nfrom sb3_contrib.common.maskable.utils import is_masking_supported\nfrom sb3_contrib.common.wrappers import ActionMasker\n\n\ndef make_env():\n return InvalidActionEnvDiscrete(dim=20, n_invalid_actions=10)\n\n\nclass ToDictWrapper(gym.Wrapper):\n \"\"\"\n Simple wrapper to test MultInputPolicy on Dict obs.\n \"\"\"\n\n def __init__(self, env):\n super().__init__(env)\n self.observation_space = spaces.Dict({\"obs\": self.env.observation_space})\n\n def reset(self, **kwargs) -> Tuple[Dict[str, np.ndarray], Dict]:\n return {\"obs\": self.env.reset(seed=kwargs.get(\"seed\", 0))[0]}, {} # type: ignore[dict-item]\n\n def step(self, action):\n obs, reward, terminated, truncated, infos = self.env.step(action)\n return {\"obs\": obs}, reward, terminated, truncated, infos\n\n\ndef test_identity():\n \"\"\"\n Performance test.\n A randomly initialized model cannot solve that task (score ~=6),\n nor a model without invalid action masking (score ~=30 after training)\n which such a low training budget.\n \"\"\"\n env = InvalidActionEnvDiscrete(dim=70, n_invalid_actions=55)\n model = MaskablePPO(\n \"MlpPolicy\",\n env,\n gamma=0.4,\n seed=32,\n verbose=0,\n )\n model.learn(3000)\n evaluate_policy(model, env, n_eval_episodes=20, reward_threshold=90, warn=False)\n\n\ndef test_bootstraping():\n # Max ep length = 100 by default\n env = InvalidActionEnvDiscrete(dim=20, n_invalid_actions=10)\n env = gym.wrappers.TimeLimit(env, 30)\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=64, seed=8)\n model.learn(128)\n\n\ndef test_supports_discrete_action_space():\n \"\"\"\n No errors using algorithm with an env that has a discrete action space\n \"\"\"\n\n env = InvalidActionEnvDiscrete(dim=20, n_invalid_actions=10)\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=64, seed=8)\n model.learn(100)\n evaluate_policy(model, env, warn=False)\n\n # Mask all actions except the good one, a random model should succeed\n env = InvalidActionEnvDiscrete(dim=20, n_invalid_actions=19)\n model = MaskablePPO(\"MlpPolicy\", env, seed=8)\n evaluate_policy(model, env, reward_threshold=99, warn=False)\n\n\ndef test_supports_multi_discrete_action_space():\n \"\"\"\n No errors using algorithm with an env that has a multidiscrete action space\n \"\"\"\n\n env = InvalidActionEnvMultiDiscrete(dims=[2, 3], n_invalid_actions=1)\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=64, seed=8)\n model.learn(100)\n evaluate_policy(model, env, warn=False)\n\n # Mask all actions except the good ones, a random model should succeed\n env = InvalidActionEnvMultiDiscrete(dims=[2, 3], n_invalid_actions=3)\n model = MaskablePPO(\"MlpPolicy\", env, seed=8)\n evaluate_policy(model, env, reward_threshold=99, warn=False)\n\n\ndef 
test_supports_multi_binary_action_space():\n \"\"\"\n No errors using algorithm with an env that has a multibinary action space\n \"\"\"\n\n env = InvalidActionEnvMultiBinary(dims=3, n_invalid_actions=1)\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=64, seed=8)\n model.learn(100)\n evaluate_policy(model, env, warn=False)\n\n # Mask all actions except the good ones, a random model should succeed\n env = InvalidActionEnvMultiBinary(dims=3, n_invalid_actions=3)\n model = MaskablePPO(\"MlpPolicy\", env, seed=8)\n evaluate_policy(model, env, reward_threshold=99, warn=False)\n\n\ndef test_disabling_masking():\n \"\"\"\n Behave like normal PPO if masking is disabled, which allows for envs that don't provide masks\n \"\"\"\n\n env = InvalidActionEnvDiscrete(dim=20, n_invalid_actions=19)\n model = MaskablePPO(\"MlpPolicy\", env, seed=8)\n evaluate_policy(model, env, reward_threshold=99, warn=False)\n\n # With masking disabled, perfect performance disappears\n with pytest.raises(AssertionError):\n evaluate_policy(model, env, reward_threshold=99, warn=False, use_masking=False)\n\n # Without masking disabled, learning/evaluation will fail if the env doesn't provide masks\n env = IdentityEnv(dim=2)\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=64, seed=8)\n with pytest.raises(ValueError):\n model.learn(100)\n with pytest.raises(ValueError):\n evaluate_policy(model, env, warn=False)\n\n model.learn(100, use_masking=False)\n evaluate_policy(model, env, warn=False, use_masking=False)\n\n\ndef test_masked_evaluation():\n \"\"\"\n Masking can be enabled or disabled for evaluation, but masking should perform better.\n \"\"\"\n\n env = InvalidActionEnvDiscrete(dim=20, n_invalid_actions=19)\n model = MaskablePPO(\"MlpPolicy\", env, seed=8)\n masked_avg_rew, _ = evaluate_policy(model, env, warn=False)\n unmasked_avg_rew, _ = evaluate_policy(model, env, warn=False, use_masking=False)\n assert masked_avg_rew > unmasked_avg_rew\n\n\ndef test_supports_multi_envs():\n \"\"\"\n Learning and evaluation works with VecEnvs\n \"\"\"\n\n env = make_vec_env(make_env, n_envs=2)\n assert is_masking_supported(env)\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=256, gamma=0.4, seed=32, verbose=1)\n model.learn(100)\n evaluate_policy(model, env, warn=False)\n\n env = make_vec_env(IdentityEnv, n_envs=2, env_kwargs={\"dim\": 2})\n assert not is_masking_supported(env)\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=256, gamma=0.4, seed=32, verbose=1)\n with pytest.raises(ValueError):\n model.learn(100)\n with pytest.raises(ValueError):\n evaluate_policy(model, env, warn=False)\n model.learn(100, use_masking=False)\n evaluate_policy(model, env, warn=False, use_masking=False)\n\n\ndef test_callback(tmp_path):\n \"\"\"\n No errors using MaskableEvalCallback during learning\n \"\"\"\n\n env = make_env()\n eval_env = make_env()\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=64, gamma=0.4, seed=32, verbose=1)\n model.learn(100, callback=MaskableEvalCallback(eval_env, eval_freq=100, warn=False, log_path=tmp_path))\n\n model.learn(100, callback=MaskableEvalCallback(Monitor(eval_env), eval_freq=100, warn=False), progress_bar=True)\n\n\ndef test_child_callback():\n \"\"\"\n Stop callback and callback on new best rewards\n \"\"\"\n\n env = make_env()\n eval_env = make_env()\n model = MaskablePPO(\"MlpPolicy\", env, n_steps=64, n_epochs=1)\n stop_callback = StopTrainingOnNoModelImprovement(1, 2)\n new_best_mean_callback = EventCallback()\n eval_callback = MaskableEvalCallback(\n Monitor(eval_env),\n eval_freq=64,\n 
callback_after_eval=stop_callback,\n callback_on_new_best=new_best_mean_callback,\n )\n model.learn(128, callback=eval_callback)\n assert new_best_mean_callback.n_calls > 0\n assert stop_callback.n_calls > 0\n assert stop_callback.n_calls >= new_best_mean_callback.n_calls\n\n\ndef test_maskable_policy_required():\n \"\"\"\n MaskablePPO requires a policy that subclasses MaskableActorCriticPolicy\n \"\"\"\n\n env = make_env()\n with pytest.raises(ValueError):\n MaskablePPO(ActorCriticPolicy, env)\n\n\ndef test_discrete_action_space_required():\n \"\"\"\n MaskablePPO requires an env with a discrete (ie non-continuous) action space\n \"\"\"\n\n env = IdentityEnvBox()\n with pytest.raises(AssertionError):\n MaskablePPO(\"MlpPolicy\", env)\n\n\n@pytest.mark.parametrize(\"share_features_extractor\", [True, False])\ndef test_cnn(share_features_extractor):\n def action_mask_fn(env):\n random_invalid_action = random.randrange(env.action_space.n)\n return [i != random_invalid_action for i in range(env.action_space.n)]\n\n env = FakeImageEnv()\n env = ActionMasker(env, action_mask_fn)\n\n model = MaskablePPO(\n \"CnnPolicy\",\n env,\n n_steps=64,\n seed=32,\n verbose=1,\n policy_kwargs=dict(\n features_extractor_kwargs=dict(features_dim=32),\n share_features_extractor=share_features_extractor,\n ),\n )\n model.learn(100)\n evaluate_policy(model, env, warn=False)\n\n\ndef test_dict_obs():\n env = InvalidActionEnvDiscrete(dim=20, n_invalid_actions=10)\n env = ToDictWrapper(env)\n model = MaskablePPO(\"MultiInputPolicy\", env, n_steps=64, seed=8)\n model.learn(64)\n evaluate_policy(model, env, warn=False)\n\n # Mask all actions except the good one, a random model should succeed\n env = InvalidActionEnvDiscrete(dim=20, n_invalid_actions=19)\n env = ToDictWrapper(env)\n model = MaskablePPO(\"MultiInputPolicy\", env, seed=8)\n evaluate_policy(model, env, reward_threshold=99, warn=False)\n # MultiDiscrete\n env = InvalidActionEnvMultiDiscrete(dims=[2, 3], n_invalid_actions=1)\n env = ToDictWrapper(env)\n model = MaskablePPO(\"MultiInputPolicy\", env, n_steps=32, seed=8)\n model.learn(32)\n # MultiBinary\n env = InvalidActionEnvMultiBinary(dims=3, n_invalid_actions=1)\n env = ToDictWrapper(env)\n model = MaskablePPO(\"MultiInputPolicy\", env, n_steps=32, seed=8)\n model.learn(32)\n", "repo_name": "Stable-Baselines-Team/stable-baselines3-contrib", "sub_path": "tests/test_invalid_actions.py", "file_name": "test_invalid_actions.py", "file_ext": "py", "file_size_in_byte": 9645, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 353, "dataset": "github-code", "pt": "76", "api": [{"api_name": "sb3_contrib.common.envs.InvalidActionEnvDiscrete", "line_number": 23, "usage_type": "call"}, {"api_name": "gymnasium.Wrapper", "line_number": 26, "usage_type": "attribute"}, {"api_name": "gymnasium.spaces.Dict", "line_number": 33, "usage_type": "call"}, {"api_name": "gymnasium.spaces", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 35, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 35, "usage_type": "attribute"}, {"api_name": "sb3_contrib.common.envs.InvalidActionEnvDiscrete", "line_number": 50, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 51, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 59, "usage_type": "call"}, {"api_name": "sb3_contrib.common.envs.InvalidActionEnvDiscrete", 
"line_number": 64, "usage_type": "call"}, {"api_name": "gymnasium.wrappers.TimeLimit", "line_number": 65, "usage_type": "call"}, {"api_name": "gymnasium.wrappers", "line_number": 65, "usage_type": "attribute"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 66, "usage_type": "call"}, {"api_name": "sb3_contrib.common.envs.InvalidActionEnvDiscrete", "line_number": 75, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 76, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 78, "usage_type": "call"}, {"api_name": "sb3_contrib.common.envs.InvalidActionEnvDiscrete", "line_number": 81, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 82, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 83, "usage_type": "call"}, {"api_name": "sb3_contrib.common.envs.InvalidActionEnvMultiDiscrete", "line_number": 91, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 92, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 94, "usage_type": "call"}, {"api_name": "sb3_contrib.common.envs.InvalidActionEnvMultiDiscrete", "line_number": 97, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 98, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 99, "usage_type": "call"}, {"api_name": "sb3_contrib.common.envs.InvalidActionEnvMultiBinary", "line_number": 107, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 108, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 110, "usage_type": "call"}, {"api_name": "sb3_contrib.common.envs.InvalidActionEnvMultiBinary", "line_number": 113, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 114, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 115, "usage_type": "call"}, {"api_name": "sb3_contrib.common.envs.InvalidActionEnvDiscrete", "line_number": 123, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 124, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 125, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 128, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 129, "usage_type": "call"}, {"api_name": "stable_baselines3.common.envs.IdentityEnv", "line_number": 132, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 133, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 134, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 136, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 137, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 140, "usage_type": "call"}, {"api_name": "sb3_contrib.common.envs.InvalidActionEnvDiscrete", "line_number": 148, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 149, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 150, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", 
"line_number": 151, "usage_type": "call"}, {"api_name": "stable_baselines3.common.env_util.make_vec_env", "line_number": 160, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.utils.is_masking_supported", "line_number": 161, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 162, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 164, "usage_type": "call"}, {"api_name": "stable_baselines3.common.env_util.make_vec_env", "line_number": 166, "usage_type": "call"}, {"api_name": "stable_baselines3.common.envs.IdentityEnv", "line_number": 166, "usage_type": "argument"}, {"api_name": "sb3_contrib.common.maskable.utils.is_masking_supported", "line_number": 167, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 168, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 169, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 171, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 172, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 174, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 184, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.callbacks.MaskableEvalCallback", "line_number": 185, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.callbacks.MaskableEvalCallback", "line_number": 187, "usage_type": "call"}, {"api_name": "stable_baselines3.common.monitor.Monitor", "line_number": 187, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 197, "usage_type": "call"}, {"api_name": "stable_baselines3.common.callbacks.StopTrainingOnNoModelImprovement", "line_number": 198, "usage_type": "call"}, {"api_name": "stable_baselines3.common.callbacks.EventCallback", "line_number": 199, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.callbacks.MaskableEvalCallback", "line_number": 200, "usage_type": "call"}, {"api_name": "stable_baselines3.common.monitor.Monitor", "line_number": 201, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 218, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 219, "usage_type": "call"}, {"api_name": "stable_baselines3.common.policies.ActorCriticPolicy", "line_number": 219, "usage_type": "argument"}, {"api_name": "stable_baselines3.common.envs.IdentityEnvBox", "line_number": 227, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 228, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 229, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 235, "usage_type": "call"}, {"api_name": "stable_baselines3.common.envs.FakeImageEnv", "line_number": 238, "usage_type": "call"}, {"api_name": "sb3_contrib.common.wrappers.ActionMasker", "line_number": 239, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 241, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 253, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 232, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 232, "usage_type": "attribute"}, {"api_name": "sb3_contrib.common.envs.InvalidActionEnvDiscrete", "line_number": 257, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 259, "usage_type": "call"}, 
{"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 261, "usage_type": "call"}, {"api_name": "sb3_contrib.common.envs.InvalidActionEnvDiscrete", "line_number": 264, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 266, "usage_type": "call"}, {"api_name": "sb3_contrib.common.maskable.evaluation.evaluate_policy", "line_number": 267, "usage_type": "call"}, {"api_name": "sb3_contrib.common.envs.InvalidActionEnvMultiDiscrete", "line_number": 269, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 271, "usage_type": "call"}, {"api_name": "sb3_contrib.common.envs.InvalidActionEnvMultiBinary", "line_number": 274, "usage_type": "call"}, {"api_name": "sb3_contrib.MaskablePPO", "line_number": 276, "usage_type": "call"}]}
+{"seq_id": "71268020087", "text": "import pygame as pg\nfrom random import uniform\nfrom vehicle import Vehicle, mass_to_size_constant, circle_width\n\nboid_uid = 0\n\nclass Boid(Vehicle):\n\n # CONFIG\n debug = False\n min_speed = .001\n max_speed = .02\n max_force = 1\n max_turn = 5\n perception = 60\n # crowding = 15\n low_crowding = 1\n high_crowding = 15\n can_wrap = True\n edge_distance_pct = 5\n ###############\n\n def __init__(self):\n global boid_uid\n self.uid = boid_uid\n boid_uid += 1\n Boid.set_boundary(Boid.edge_distance_pct)\n\n # Randomize starting position and velocity\n start_position = pg.math.Vector2(\n uniform(0, Boid.max_x),\n uniform(0, Boid.max_y))\n start_velocity = pg.math.Vector2(\n uniform(-1, 1) * Boid.max_speed,\n uniform(-1, 1) * Boid.max_speed)\n\n super().__init__(start_position, start_velocity,\n Boid.min_speed, Boid.max_speed,\n Boid.max_force, Boid.can_wrap)\n\n self.rect = self.image.get_rect(center=self.position)\n\n self.debug = Boid.debug\n # self.debug = True\n\n def separation(self, boids):\n steering = pg.Vector2()\n for boid in boids:\n dist = self.position.distance_to(boid.position)\n # Lower crowding based on mass - after the threshold\n if dist < self.high_crowding:\n steering -= boid.position - self.position\n steering = self.clamp_force(steering)\n return steering\n\n def alignment(self, boids):\n steering = pg.Vector2()\n for boid in boids:\n steering += boid.velocity\n steering /= len(boids)\n steering -= self.velocity\n steering = self.clamp_force(steering)\n return steering / 8\n\n def cohesion(self, boids):\n steering = pg.Vector2()\n for boid in boids:\n steering += boid.position\n steering /= len(boids)\n steering -= self.position\n steering = self.clamp_force(steering)\n return steering / 100\n\n def update(self, dt, boids):\n steering = pg.Vector2()\n\n if not self.can_wrap:\n steering += self.avoid_edge()\n\n neighbors = self.get_neighbors(boids)\n if neighbors:\n\n separation = self.separation(neighbors)\n alignment = self.alignment(neighbors)\n cohesion = self.cohesion(neighbors)\n\n # DEBUG\n # separation *= 0\n # alignment *= 0\n # cohesion *= 0\n\n steering += separation + alignment + cohesion\n\n # steering = self.clamp_force(steering)\n\n super().update(dt, steering)\n\n def get_neighbors(self, boids):\n neighbors = []\n for boid in boids:\n if boid != self:\n dist = self.position.distance_to(boid.position)\n # We see in a circle\n if dist < self.perception:\n neighbors.append(boid)\n if dist < self.radius and self.mass > boid.mass:\n # print(f\"boid {self.uid} eating {boid.uid}\")\n self.eat_boid(boid)\n # Eat!\n # pass\n # print(f\"We would eat boid {boid.uid}!\")\n return neighbors\n\n def eat_boid(self, target):\n print(f\"Boid {self.uid} mass {self.mass} eating {target.uid} mass {target.mass}\")\n self.mass += target.mass\n target.mass = 0.\n target.kill()\n # target.delete = True\n # del target\n old_radius = self.radius\n self.radius = pow(self.mass, 0.5) * mass_to_size_constant\n center_offset = (self.radius - old_radius) / 2\n self.position += pg.Vector2(center_offset, center_offset)\n del self.image\n self.image = pg.Surface((self.radius * 2, self.radius * 2), pg.SRCALPHA)\n pg.draw.circle(\n surface = self.image,\n color = pg.Color(\"White\"),\n center = (int(self.radius), int(self.radius)),\n radius = self.radius,\n width = circle_width)\n", "repo_name": "belarm/boids", "sub_path": "boid.py", "file_name": "boid.py", "file_ext": "py", "file_size_in_byte": 4106, "program_lang": "python", "lang": "en", "doc_type": 
"code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "vehicle.Vehicle", "line_number": 7, "usage_type": "name"}, {"api_name": "pygame.math.Vector2", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 30, "usage_type": "attribute"}, {"api_name": "random.uniform", "line_number": 31, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.math.Vector2", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 33, "usage_type": "attribute"}, {"api_name": "random.uniform", "line_number": 34, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.Vector2", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.Vector2", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.Vector2", "line_number": 66, "usage_type": "call"}, {"api_name": "pygame.Vector2", "line_number": 75, "usage_type": "call"}, {"api_name": "vehicle.mass_to_size_constant", "line_number": 122, "usage_type": "name"}, {"api_name": "pygame.Vector2", "line_number": 124, "usage_type": "call"}, {"api_name": "pygame.Surface", "line_number": 126, "usage_type": "call"}, {"api_name": "pygame.SRCALPHA", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 127, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 127, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 129, "usage_type": "call"}, {"api_name": "vehicle.circle_width", "line_number": 132, "usage_type": "name"}]}
+{"seq_id": "32365358416", "text": "from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', 'trucks.views.home', name='home'),\n url(r'^about/$', 'trucks.views.about', name='about'),\n)\n", "repo_name": "colin2328/ginger", "sub_path": "trucks/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 301, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}]}
+{"seq_id": "17987397045", "text": "#! /usr/bin/env python3\nimport rospy\nimport tf2_ros\nfrom std_msgs.msg import String\nfrom std_srvs.srv import Empty, EmptyResponse, EmptyRequest, Trigger, TriggerRequest\nfrom geometry_msgs.msg import PoseStamped, TwistStamped, Vector3, TransformStamped, Quaternion\nfrom trajectory_msgs.msg import MultiDOFJointTrajectory, MultiDOFJointTrajectoryPoint\nfrom nav_msgs.msg import Path\nfrom mavros_msgs.msg import State, ExtendedState\nfrom vertical_aam.srv import *\nfrom transitions import Machine\nfrom tf.transformations import euler_from_quaternion, quaternion_from_euler\nfrom trajectory import trajectory\n\n#todo: fix velocity ffs\n\nclass printStateMachine(object):\n states = ['Ground', 'Takeoff', 'Scan', 'Print', 'Land', 'Manual', 'Loiter']\n\n transitions = [\n {'trigger': 'startTakeoff', 'source': ['Ground', 'Manual'], 'dest': 'Takeoff', 'before': 'on_startTakeoff'},\n {'trigger': 'startScan', 'source': ['Takeoff', 'Manual'], 'dest': 'Scan', 'before': 'on_startScan'},\n {'trigger': 'startPrint', 'source': 'Loiter', 'dest': 'Print', 'before': 'on_startPrint'},\n {'trigger': 'endPrint', 'source': 'Print', 'dest': 'Scan', 'before': 'on_endPrint'},\n {'trigger': 'startLoiter', 'source': 'Scan', 'dest': 'Loiter', 'before': 'on_startLoiter'},\n {'trigger': 'startLanding', 'source': '*', 'dest': 'Land'},\n {'trigger': 'finishLanding', 'source': ['Land', 'Manual'], 'dest': 'Ground'},\n {'trigger': 'manualTakeover', 'source': '*', 'dest': 'Manual', 'before': 'on_manualTakeover'},\n {'trigger': 'switchToGround', 'source': ['Manual', 'Landing'], 'dest': 'Ground' },\n ]\n \n def __init__(self):\n # get config parameters from parameter server\n self.rate = rospy.get_param('/print_planner/setpoint_rate')\n self.tol_speed = rospy.get_param('/print_planner/tol_speed')\n self.takeoff_hgt = rospy.get_param('/print_planner/tol_height') \n self.scan_hgt = rospy.get_param('/print_planner/scan_height') \n self.pause_before_print = 1.0 \n self.scan_time = 5 \n\n self.yaw = 0.0\n self.tooltip_state = \"RETRACTED\"\n self.tooltip_pose = PoseStamped()\n self.tooltip_pose.header.frame_id = \"map\"\n self.tooltip_twist = TwistStamped()\n self.tooltip_twist.header.frame_id = \"map\"\n \n self.pose = PoseStamped()\n self.pose.header.frame_id = \"map\"\n self.velocity = TwistStamped()\n self.velocity.header.frame_id = \"map\"\n self.acceleration = TwistStamped()\n self.acceleration.header.frame_id = \"map\"\n\n self.trajectory = trajectory()\n self.tooltip_trajectory = trajectory()\n self.operator_confirmed = False\n\n\n # initiate state machine model with states and transitions listed above\n self.machine = Machine(model=self, states=self.states, transitions=self.transitions, initial = 'Ground')#, on_exception='on_exception')\n\n # publisher for on-board position controller\n self.sp_position_pub = rospy.Publisher(\n '/setpoint/pose', PoseStamped, queue_size=1, tcp_nodelay=True)\n self.sp_vel_pub = rospy.Publisher(\n '/setpoint/vel', TwistStamped, queue_size=1, tcp_nodelay=True)\n\n # publish current state for debugging\n self.pub_drone_state = rospy.Publisher(\n '/printer/state', String, queue_size=1, tcp_nodelay=True)\n\n # publishers to manipulator\n self.pub_tooltip_state = rospy.Publisher(\n '/manipulator/state', String, queue_size=1, tcp_nodelay=True)\n self.pub_tooltip_pose = rospy.Publisher(\n '/tooltip_setpoint/pose', PoseStamped, queue_size=1, tcp_nodelay=True)\n self.pub_tooltip_twist = rospy.Publisher(\n '/tooltip_setpoint/velocity', TwistStamped, queue_size=1, 
tcp_nodelay=True) \n\n # vizualisation publishers\n self.traj_viz_pub = rospy.Publisher(\n '/printer/drone_trajectory', Path, queue_size=1) \n self.tip_traj_viz_pub = rospy.Publisher(\n '/printer/tooltip_trajectory', Path, queue_size=1) \n\n # drone state subscriber\n state_sub = rospy.Subscriber(\n '/mavros/state', State, self._state_cb, queue_size=5, tcp_nodelay=True)\n ext_state_sub = rospy.Subscriber(\n '/mavros/extended_state', ExtendedState, self._ext_state_cb, queue_size=5, tcp_nodelay=True)\n local_position_sub = rospy.Subscriber(\n '/mavros/local_position/pose', PoseStamped, self._local_pos_cb, queue_size=1, tcp_nodelay=True)\n local_velocity_sub = rospy.Subscriber(\n '/mavros/local_position/velocity_body', TwistStamped, self._local_vel_cb, queue_size=1, tcp_nodelay=True)\n \n authorisation_service = rospy.Service('start_layer', Empty, self.authorisation_srv)\n\n # wait for drone to come online\n rospy.wait_for_message('/mavros/state', State)\n rospy.wait_for_message('/mavros/extended_state', ExtendedState)\n rospy.wait_for_message('/mavros/local_position/pose', PoseStamped)\n rospy.wait_for_service('generate_layer')\n \n # timer callback to send setpoints at a reasonable rate \n sp_timer = rospy.Timer(rospy.Duration(1.0/self.rate), self._timer_cb, reset=True)\n\n # initiate landing position at location where node is started\n self.pad_pose = PoseStamped()\n self.pad_pose = self.local_pose\n self.pad_pose.pose.position.z = self.takeoff_hgt\n rospy.loginfo(\"Landing site initiated at x=\" + str(self.pad_pose.pose.position.x) +\n \", y=\" + str(self.pad_pose.pose.position.y) + \".\")\n \n self.scan_start = self.pad_pose\n self.scan_start.pose.position.z = self.scan_hgt\n \n self.tfBuffer = tf2_ros.Buffer(rospy.Duration(20.0))\n listener = tf2_ros.TransformListener(self.tfBuffer)\n\n def authorisation_srv(self, req):\n self.operator_confirmed = True\n resp = EmptyResponse()\n return resp\n\n #--------------------------------------------------------------------------------------------------------------\n #callbacks on state transitions\n\n def on_startTakeoff(self):\n rospy.loginfo(\"Takeoff initiated\")\n self.pad_pose = PoseStamped()\n self.pad_pose = self.local_pose\n self.pad_pose.pose.position.z = self.takeoff_hgt\n\n self.scan_start = self.pad_pose\n self.scan_start.pose.position.z = self.scan_hgt\n \n rospy.loginfo(\"Landing site updated at x=\" + str(self.pad_pose.pose.position.x) +\n \", y=\" + str(self.pad_pose.pose.position.y) + \".\")\n\n def on_startScan(self):\n # reset aft_pgo_map here\n self.trajectory.reset()\n self.trajectory.transition(self.pose, self.scan_start)\n self.tooltip_trajectory.pause(self.scan_start, self.scan_time)\n self.trajectory.publish_viz_trajectory(self.traj_viz_pub)\n call_scan_reset_service()\n\n def on_startPrint(self):\n pause_time = self.pause_before_print\n\n #load tooltip trajectory\n self.tooltip_trajectory.reset()\n self.tooltip_trajectory.pause(self.tooltip_trajectory.trajectoryPoint2Pose(self.tooltip_layer.points[0]), pause_time)\n self.tooltip_trajectory.append_traj(self.tooltip_layer)\n self.tooltip_trajectory.publish_viz_trajectory(self.tip_traj_viz_pub)\n\n #load drone trajectory\n self.trajectory.reset()\n self.trajectory.pause(self.trajectory.trajectoryPoint2Pose(self.drone_layer.points[0]), pause_time)\n self.trajectory.append_traj(self.drone_layer)\n self.trajectory.publish_viz_trajectory(self.traj_viz_pub)\n\n #open nozzle\n call_nozzle_open_service()\n\n def on_endPrint(self):\n #close nozzle\n call_nozzle_close_service()\n 
self.on_startScan()\n \n def on_startLoiter(self):\n layer = call_slicing_service()\n\n try:\n tf_map2print = self.tfBuffer.lookup_transform('map', 'print_origin', rospy.Time.now(), timeout=rospy.Duration(5))\n tf_tip2drone = self.tfBuffer.lookup_transform('tooltip_init_r', 'base_link', rospy.Time.now(), timeout=rospy.Duration(5))\n tf_tip2tip = self.tfBuffer.lookup_transform('tooltip_init_r', 'tooltip_init', rospy.Time.now(), timeout=rospy.Duration(5))\n except:\n rospy.logerr(\"Unable to fetch TFs!\")\n\n layer = self.trajectory.transform_trajectory(layer, tf_map2print)\n\n self.drone_layer = self.trajectory.offset_trajectory(layer, tf_tip2drone)\n \n self.tooltip_layer = self.trajectory.rotate_trajectory(layer, tf_tip2tip)\n\n self.trajectory.reset()\n self.trajectory.transition(self.pose, self.trajectory.trajectoryPoint2Pose(self.drone_layer.points[0]))\n self.trajectory.publish_viz_trajectory(self.traj_viz_pub)\n\n def on_manualTakeover(self):\n rospy.loginfo(\"Manual takeover\")\n call_nozzle_close_service()\n\n def on_exception(self):\n rospy.logerr('state machine exception!')\n self.startLanding()\n call_nozzle_close_service()\n\n #---------------------------------------------------------------------------------------------------------------\n # callbacks to occur on timer event - need to be defined for every state that is called\n\n def during_Loiter(self):\n self.tooltip_state = \"STAB_3DOF\"\n if self.operator_confirmed:\n complete, pose, velocity = self.trajectory.follow()\n if not complete:\n self.pose = pose\n self.velocity = velocity\n else:\n self.operator_confirmed = False\n self.startPrint()\n\n def during_Scan(self):\n self.tooltip_state = \"STAB_3DOF\"\n scan_complete, pose, velocity = self.trajectory.follow()\n if not scan_complete:\n self.pose = pose\n self.velocity = velocity\n else:\n self.startLoiter()\n \n def during_Print(self):\n self.tooltip_state = \"STAB_6DOF\"\n print_complete, pose, velocity = self.trajectory.follow()\n tooltip_print_complete, tip_pose, tip_velocity = self.tooltip_trajectory.follow()\n if not print_complete:\n self.pose = pose\n self.velocity = velocity\n self.tooltip_pose = tip_pose\n self.tooltip_twist = tip_velocity\n else:\n self.endPrint()\n \n def during_Takeoff(self):\n self.tooltip_state = \"HOME\"\n self.velocity.twist.angular = Vector3(0,0,0)\n #increase target z to defined loiter height\n if self.pose.pose.position.z < self.takeoff_hgt:\n self.pose.pose.position.z += self.tol_speed / self.rate\n self.velocity.twist.linear = Vector3(0,0,self.tol_speed)\n else: #when target has reached loiter height and drone knows it's flying, move to next state \n self.pose.pose.position.z = self.takeoff_hgt\n self.velocity.twist.linear = Vector3(0,0,0)\n self.startScan()\n\n def during_Land(self):\n self.tooltip_state = \"HOME\"\n self.velocity.twist.angular = Vector3(0,0,0)\n #reduce height of z setpoint until altitude is zero\n if self.pose.pose.position.z > 0 and not (self.mavros_ext_state.landed_state == 1):\n self.pose.pose.position.z += -self.tol_speed / self.rate\n self.velocity.twist.linear = Vector3(0,0,-self.tol_speed)\n else:\n self.switchToGround()\n\n def during_Manual(self):\n # If flying -> goto home position\n self.pose = self.local_pose\n self.velocity = self.local_velocity\n self.tooltip_state = \"STAB_3DOF\"\n if self.mavros_ext_state.landed_state == 1:\n self.switchToGround()\n if self.mavros_state.mode == \"OFFBOARD\":\n self.startScan()\n \n def during_Ground(self):\n # if landed -> takeoff. 
\n self.pose = self.local_pose\n self.velocity = self.local_velocity\n self.tooltip_state = \"HOME\"\n if self.mavros_state.armed:\n self.tooltip_state = \"HOME\"\n if self.mavros_ext_state.landed_state == 2:\n self.manualTakeover()\n if self.mavros_state.mode == \"OFFBOARD\":\n self.tooltip_state = \"HOME\"\n self.startTakeoff()\n \n def during_always(self): #this callback always runs to check if not in offboard mode\n if self.mavros_state.mode != \"OFFBOARD\" and not (self.state == 'Manual' or self.state == 'Ground'):\n self.manualTakeover()\n\n #----------------------------------------------------------------------------------------------\n #ros callbacks\n\n def _timer_cb(self, event): #timer callback runs at specified rate to output setpoints\n self.during_always()\n exec(\"self.during_\" + str(self.state) + \"()\") #execute the function name corresponding to the current state\n # update time stamps and publish current values of drone and manipulator commands\n \n self.pose.header.stamp = rospy.Time.now()\n self.velocity.header.stamp = rospy.Time.now()\n\n self.tooltip_pose.header.stamp = rospy.Time.now()\n self.tooltip_twist.header.stamp = rospy.Time.now()\n # self.tooltip_twist =\n # self.tooltip_pose =\n\n self.sp_position_pub.publish(self.pose)\n self.sp_vel_pub.publish(self.velocity)\n self.pub_drone_state.publish(String(str(self.state)))\n self.pub_tooltip_state.publish(String(self.tooltip_state))\n self.pub_tooltip_pose.publish(self.tooltip_pose)\n self.pub_tooltip_twist.publish(self.tooltip_twist)\n\n def _state_cb(self, state_msg):\n self.mavros_state = state_msg\n\n def _ext_state_cb(self, ext_state_msg):\n #reference for landed_state:\n # uint8 LANDED_STATE_UNDEFINED = 0\n # uint8 LANDED_STATE_ON_GROUND = 1\n # uint8 LANDED_STATE_IN_AIR = 2\n # uint8 LANDED_STATE_TAKEOFF = 3\n # uint8 LANDED_STATE_LANDING = 4\n self.mavros_ext_state = ext_state_msg\n\n def _local_pos_cb(self, local_pose_msg):\n self.local_pose = local_pose_msg\n\n def _local_vel_cb(self, local_vel_msg):\n self.local_velocity = local_vel_msg\n\n #---------------------\n\ndef call_slicing_service():\n slice_print = rospy.ServiceProxy('generate_layer', generateLayer)\n req = generateLayerRequest()\n resp = slice_print(req)\n return resp.trajectory\n\ndef call_nozzle_open_service():\n try:\n open_nozzle = rospy.ServiceProxy('open_nozzle', Trigger)\n req = TriggerRequest()\n resp = open_nozzle(req)\n except:\n rospy.logwarn(\"printing hardware not connected\")\n\ndef call_nozzle_close_service():\n try:\n close_nozzle = rospy.ServiceProxy('close_nozzle', Trigger)\n req = TriggerRequest()\n resp = close_nozzle(req)\n except:\n rospy.logwarn(\"printing hardware not connected\")\n\ndef call_scan_reset_service():\n try:\n restart_mapping = rospy.ServiceProxy('restart_mapping', Empty)\n req = EmptyRequest()\n resp = restart_mapping(req)\n except:\n rospy.logwarn(\"mapping restart unavailable\")\n\nif __name__ == '__main__':\n # initialize node\n rospy.init_node('print_state_machine', anonymous=True)\n pSM = printStateMachine()\n rospy.spin()", "repo_name": "lachie-aerialrobotics/vertical_AAM", "sub_path": "scripts/printing/print_state_machine.py", "file_name": "print_state_machine.py", "file_ext": "py", "file_size_in_byte": 15628, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "rospy.get_param", "line_number": 34, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 35, "usage_type": "call"}, {"api_name": 
"rospy.get_param", "line_number": 36, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 37, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.PoseStamped", "line_number": 43, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.TwistStamped", "line_number": 45, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.PoseStamped", "line_number": 48, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.TwistStamped", "line_number": 50, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.TwistStamped", "line_number": 52, "usage_type": "call"}, {"api_name": "trajectory.trajectory", "line_number": 55, "usage_type": "call"}, {"api_name": "trajectory.trajectory", "line_number": 56, "usage_type": "call"}, {"api_name": "transitions.Machine", "line_number": 61, "usage_type": "call"}, {"api_name": "rospy.Publisher", "line_number": 64, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.PoseStamped", "line_number": 65, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 66, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.TwistStamped", "line_number": 67, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 70, "usage_type": "call"}, {"api_name": "std_msgs.msg.String", "line_number": 71, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 74, "usage_type": "call"}, {"api_name": "std_msgs.msg.String", "line_number": 75, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 76, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.PoseStamped", "line_number": 77, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 78, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.TwistStamped", "line_number": 79, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 82, "usage_type": "call"}, {"api_name": "nav_msgs.msg.Path", "line_number": 83, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 84, "usage_type": "call"}, {"api_name": "nav_msgs.msg.Path", "line_number": 85, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 88, "usage_type": "call"}, {"api_name": "mavros_msgs.msg.State", "line_number": 89, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 90, "usage_type": "call"}, {"api_name": "mavros_msgs.msg.ExtendedState", "line_number": 91, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 92, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.PoseStamped", "line_number": 93, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 94, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.TwistStamped", "line_number": 95, "usage_type": "argument"}, {"api_name": "rospy.Service", "line_number": 97, "usage_type": "call"}, {"api_name": "std_srvs.srv.Empty", "line_number": 97, "usage_type": "argument"}, {"api_name": "rospy.wait_for_message", "line_number": 100, "usage_type": "call"}, {"api_name": "mavros_msgs.msg.State", "line_number": 100, "usage_type": "argument"}, {"api_name": "rospy.wait_for_message", "line_number": 101, "usage_type": "call"}, {"api_name": "mavros_msgs.msg.ExtendedState", "line_number": 101, "usage_type": "argument"}, {"api_name": "rospy.wait_for_message", "line_number": 102, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.PoseStamped", "line_number": 102, "usage_type": "argument"}, {"api_name": "rospy.wait_for_service", "line_number": 103, "usage_type": "call"}, {"api_name": "rospy.Timer", 
"line_number": 106, "usage_type": "call"}, {"api_name": "rospy.Duration", "line_number": 106, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.PoseStamped", "line_number": 109, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 112, "usage_type": "call"}, {"api_name": "tf2_ros.Buffer", "line_number": 118, "usage_type": "call"}, {"api_name": "rospy.Duration", "line_number": 118, "usage_type": "call"}, {"api_name": "tf2_ros.TransformListener", "line_number": 119, "usage_type": "call"}, {"api_name": "std_srvs.srv.EmptyResponse", "line_number": 123, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 130, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.PoseStamped", "line_number": 131, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 138, "usage_type": "call"}, {"api_name": "rospy.Time.now", "line_number": 176, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 176, "usage_type": "attribute"}, {"api_name": "rospy.Duration", "line_number": 176, "usage_type": "call"}, {"api_name": "rospy.Time.now", "line_number": 177, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 177, "usage_type": "attribute"}, {"api_name": "rospy.Duration", "line_number": 177, "usage_type": "call"}, {"api_name": "rospy.Time.now", "line_number": 178, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 178, "usage_type": "attribute"}, {"api_name": "rospy.Duration", "line_number": 178, "usage_type": "call"}, {"api_name": "rospy.logerr", "line_number": 180, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 193, "usage_type": "call"}, {"api_name": "rospy.logerr", "line_number": 197, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Vector3", "line_number": 238, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Vector3", "line_number": 242, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Vector3", "line_number": 245, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Vector3", "line_number": 250, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Vector3", "line_number": 254, "usage_type": "call"}, {"api_name": "rospy.Time.now", "line_number": 293, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 293, "usage_type": "attribute"}, {"api_name": "rospy.Time.now", "line_number": 294, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 294, "usage_type": "attribute"}, {"api_name": "rospy.Time.now", "line_number": 296, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 296, "usage_type": "attribute"}, {"api_name": "rospy.Time.now", "line_number": 297, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 297, "usage_type": "attribute"}, {"api_name": "std_msgs.msg.String", "line_number": 303, "usage_type": "call"}, {"api_name": "std_msgs.msg.String", "line_number": 304, "usage_type": "call"}, {"api_name": "rospy.ServiceProxy", "line_number": 329, "usage_type": "call"}, {"api_name": "rospy.ServiceProxy", "line_number": 336, "usage_type": "call"}, {"api_name": "std_srvs.srv.Trigger", "line_number": 336, "usage_type": "argument"}, {"api_name": "std_srvs.srv.TriggerRequest", "line_number": 337, "usage_type": "call"}, {"api_name": "rospy.logwarn", "line_number": 340, "usage_type": "call"}, {"api_name": "rospy.ServiceProxy", "line_number": 344, "usage_type": "call"}, {"api_name": "std_srvs.srv.Trigger", "line_number": 344, "usage_type": "argument"}, {"api_name": "std_srvs.srv.TriggerRequest", "line_number": 345, "usage_type": "call"}, 
{"api_name": "rospy.logwarn", "line_number": 348, "usage_type": "call"}, {"api_name": "rospy.ServiceProxy", "line_number": 352, "usage_type": "call"}, {"api_name": "std_srvs.srv.Empty", "line_number": 352, "usage_type": "argument"}, {"api_name": "std_srvs.srv.EmptyRequest", "line_number": 353, "usage_type": "call"}, {"api_name": "rospy.logwarn", "line_number": 356, "usage_type": "call"}, {"api_name": "rospy.init_node", "line_number": 360, "usage_type": "call"}, {"api_name": "rospy.spin", "line_number": 362, "usage_type": "call"}]}
+{"seq_id": "20003428378", "text": "from django.core.paginator import Paginator\nfrom django.db.models import Count\nfrom django.shortcuts import render_to_response, redirect\nfrom django.template import RequestContext\nfrom django.db.models import Q\nfrom functions import *\nfrom models import Deck, Suit, Meaning, MinorArcana, MajorArcana\nfrom models import Spread, CardPosition\nfrom random import choice\n\ndef deck_list(request):\n \"\"\" This is a view to show a list of all available decks with a few details \n about each one. We can't use a generic view because we need to cross-reference\n the suits associated with each deck. \"\"\"\n \n decks = Deck.objects.all()\n \n # Pull the suits for each deck and put them in a tuple with each deck\n deck_list = []\n for deck in decks:\n \n suits = Suit.objects.filter(deck=deck.id)\n deck_list += [ (deck, suits) ]\n \n pages = Paginator(deck_list, 10, 3)\n \n # Check if page is an int, if not deliver page 1\n try:\n page = int(request.GET.get('page', '1'))\n except ValueError:\n page = 1 \n \n # Check if page is in range, if not deliver last page\n try:\n result_list = pages.page(page)\n except (EmptyPage, InvalidPage):\n result_list = pages.page( pages.num_pages ) \n \n context = {'result_list': result_list}\n\n return render_to_response('diyTarot/deck_list.html', \n context_instance=RequestContext(request, context)) \n \ndef spread_list(request):\n \"\"\" This is a view to display the list of spreads. Can't use a generic \n view because want to cross-reference with the card positions to \n give information like the number of cards in a spread. \"\"\"\n\n # Set up the structures used to add successive filters and handle the query string\n active_options = request.GET.copy()\n query_list = []\n \n # Set up the query objects to search spreads and filter by size\n apply_spread_search_filter(active_options, query_list)\n apply_spread_size_filter(active_options, query_list)\n \n # Annotate each spread with the number of positions associated with it, for sorting\n # and also for filtering by size\n spreads = Spread.objects.annotate(size=Count('cardposition')).filter(*query_list).order_by('size')\n\n # Paginate the results\n pages = Paginator(spreads, 10, 3)\n current_page = get_current_page(active_options, pages)\n\n # Temporary, limited tag list before tags are added to the model\n tags = ['daily', 'traditional', 'love', 'work', 'advice', 'choice'] \n tag_results = {}\n for tag in tags:\n tag_query = [Q(title__icontains=tag) | Q(description__icontains=tag)]\n tag_results[tag] = Spread.objects.filter(*tag_query).count()\n \n # Check if there is a default deck stored in the current session\n # This determines which deck to point you to in the links on the spread list, \n # by remembering your preference in the session.\n if 'deck' in request.session:\n deck = request.session['deck']\n else:\n # @TODO: make the default deck configurable?\n deck = 1\n \n context = {'result_list': current_page,\n 'deck': deck,\n 'active_options': active_options,\n 'tag_results': tag_results}\n \n return render_to_response('diyTarot/spread_list.html',\n context_instance=RequestContext(request, context))\n \ndef card_list(request):\n \"\"\" This is a view to display all cards in the system, across all decks. Via the \n GET headers it paginates and filters the resulting list if the various arguments\n exist and are valid in the request. 
\"\"\"\n \n # Pull all get parameters from the request into a querydict structure.\n active_options = request.GET.copy()\n \n # Set up the structures used to add successive filters and handle the query string\n filter_args = {} \n query_list = []\n order_args = []\n \n # Try to apply all possible filters one by one. \n # If an option's value is not valid it is removed from display_options.\n apply_card_search_filter(active_options, query_list)\n apply_key_filter(active_options, filter_args, 'deck', 'deck')\n apply_card_filter(active_options, filter_args)\n apply_suit_filter(active_options, filter_args)\n apply_rank_filter(active_options, filter_args)\n \n apply_sorting_order(active_options, order_args)\n \n # To search the meanings as well as the Cards, need to get the list of tarot indexes which\n # link to meanings that match the search\n if 'search' in active_options:\n \n meaning_query_list = []\n apply_keyword_search_filter(active_options, meaning_query_list)\n \n # Get all tarot_indexes which have keyword strings containing the search term.\n meanings = Meaning.objects.filter(*meaning_query_list).values('tarot_index', 'meaning_set').order_by('meaning_set')\n \n # Get the id of each card (or set of cards) which matches each tarot_index / meaning_set pair\n # This assures that only the cards that use the matching meaning set get pulled, instead of all cards\n # with a certain tarot index.\n ids = []\n for meaning in meanings:\n ids += Card.objects.filter(tarot_index=meaning['tarot_index'], \n deck__meaning_set=meaning['meaning_set']).values_list('id') \n \n # Reformat list so it can be read directly by the id__in parameter \n id_list = [card_id[0] for card_id in ids]\n\n # Pull only the cards that have the same tarot index AND whose deck has the same meaning_set\n query_list[0] |= Q(id__in=id_list)\n\n # Some of the options only apply to the MinorArcana schema.\n # The rank and suit filters will set the card option automatically.\n if ('cards' in active_options and\n active_options['cards'] =='minors'):\n cards = MinorArcana.objects.filter(*query_list).filter(**filter_args).order_by(*order_args)\n else:\n cards = Card.objects.filter(*query_list).filter(**filter_args).order_by(*order_args)\n\n # Used by the shared sidebar navigation menu\n base_url = \"/diytarot/cards/\"\n \n # Populate the deck and suit lists used in navigation\n deck_list = Deck.objects.values('name', 'id')\n suit_list = Suit.objects.filter(deck=deck_list[0]['id']).values('suit', 'name')\n \n # Paginate the queryset and fetch the current page from the URL, with validation\n pages = Paginator(cards, 10, 3)\n current_page = get_current_page(active_options, pages)\n \n context = {'result_list': current_page,\n 'base_url': base_url,\n 'active_options': active_options,\n 'deck_list': deck_list,\n 'suit_list': suit_list}\n\n return render_to_response('diyTarot/card_list.html',\n context_instance=RequestContext(request, context)) \n\ndef deck_detail(request, deck_id):\n \"\"\" This is a view to show all the cards associated with a particular \n tarot deck. \"\"\"\n \n # Get the list of cards, return the deck listing page if deck doesn't exist.\n try:\n deck = Deck.objects.get(pk=deck_id)\n except Deck.DoesNotExist:\n return deck_list(request)\n \n # Set up the structures used to add successive filters and handle the query string\n active_options = request.GET.copy()\n filter_args = {'deck': deck_id} \n order_args = []\n \n # Apply the filters one by one. 
If an option is not valid it is removed from display_options.\n apply_card_filter(active_options, filter_args)\n apply_suit_filter(active_options, filter_args)\n apply_rank_filter(active_options, filter_args)\n apply_sorting_order(active_options, order_args)\n \n # Some of the options only apply to the MinorArcana schema.\n # The rank and suit filters will set the card option automatically.\n if ('cards' in active_options and\n active_options['cards'] =='minors'):\n cards = MinorArcana.objects.filter(**filter_args).order_by(*order_args)\n else:\n cards = Card.objects.filter(**filter_args).order_by(*order_args)\n\n # The base url, since there is a different one for deck view and all cards view\n base_url = \"/diytarot/decks/%s/\" % deck_id\n \n # Populate the suit list used in navigation\n suit_list = Suit.objects.filter(deck=deck_id).values('suit', 'name')\n \n # Paginate the queryset and fetch the current page from the URL, with validation\n pages = Paginator(cards, 10, 3)\n current_page = get_current_page(active_options, pages)\n \n context = {'deck': deck,\n 'result_list': current_page,\n 'base_url': base_url,\n 'suit_list': suit_list,\n 'active_options': active_options}\n\n return render_to_response('diyTarot/deck_detail.html',\n context_instance=RequestContext(request, context))\n\ndef random_card(request):\n \"\"\" This is a view which simply chooses a card at random and gives you the \n detail view for it. \"\"\"\n \n # Get a list of all ids, choose one randomly, and get a handle to that Card object.\n card_ids = Card.objects.values('id')\n random_id = choice( card_ids )['id']\n card = Card.objects.get(pk=random_id)\n \n # Display the card detail view for the random card. Don't redirect, because this way you\n # can refresh the page and get another random card.\n return card_detail(request, card.tarot_index, card.deck.id)\n\ndef card_detail(request, tarot_index, deck_id):\n \"\"\" This is a view to show all information about a specific card in a \n specific deck. 
\"\"\" \n \n # If the card isn't in this deck, load the list of all cards in the deck\n try:\n card = Card.objects.get(tarot_index=tarot_index, deck=deck_id)\n except Deck.DoesNotExist:\n return tarot_card_detail(request, tarot_index)\n except Card.DoesNotExist:\n return deck_detail(request, deck_id)\n \n try:\n meaning = Meaning.objects.get(meaning_set=card.deck.meaning_set, tarot_index=tarot_index)\n except Meaning.DoesNotExist:\n meaning = Meaning()\n \n # For next and previous page links\n indices = get_nearest_indices(tarot_index, deck_id)\n \n # For the side navigation\n majors_list = MajorArcana.objects.filter(deck=deck_id).order_by('tarot_index')\n first_major = ''\n if majors_list.count() > 0:\n first_major = majors_list[0].tarot_index\n \n minors_list = MinorArcana.objects.filter(deck=deck_id).order_by('suit', 'tarot_index')\n first_minor = ''\n if minors_list.count() > 0:\n first_minor = minors_list[0].tarot_index\n \n # Gives us the list of suits which have no cards in them, for completion.\n empty_suit_list = []\n suits = Suit.objects.filter(deck=deck_id)\n for suit in suits:\n cards = MinorArcana.objects.filter(suit=suit.id)\n if cards.count() == 0:\n empty_suit_list += [suit]\n \n related_cards = Card.objects.filter(tarot_index=tarot_index).values('deck', 'deck__name')\n\n context = {'card': card,\n 'meaning': meaning,\n 'majors_list': majors_list,\n 'minors_list': minors_list,\n 'empty_suit_list': empty_suit_list,\n 'related_cards': related_cards,\n 'first_major': first_major,\n 'first_minor': first_minor,\n 'next_card_index': indices['next_index'],\n 'previous_card_index': indices['previous_index'], }\n \n return render_to_response('diyTarot/card_detail.html',\n context_instance=RequestContext(request, context)) \n\ndef tarot_card_detail(request, tarot_index):\n \"\"\" This is a view to show all of the tarot cards of a particular index, \n across all decks in the system. So, if you send it 1 (The Magician), it\n will display all Magician cards. 
\"\"\"\n \n # Retrieve the card with the matching tarot_index from the right deck\n cards = Card.objects.filter(tarot_index=tarot_index).order_by('deck')\n meanings = Meaning.objects.filter(tarot_index=tarot_index)\n if meanings.count() > 0:\n meaning = meanings[0]\n else:\n meaning = {'keywords': 'None provided.',\n 'reversed_keywords': 'None provided.'}\n \n if len(cards) == 0:\n # If there is no matching tarot_index in any of the decks, then load\n # the view with all the cards\n return card_list(request)\n \n else:\n active_options = request.GET.copy()\n \n # Paginate the queryset and fetch the current page from the URL, with validation\n pages = Paginator(cards, 10, 3)\n current_page = get_current_page(active_options, pages)\n \n # The base url, since there is a different one for deck view and all cards view\n base_url = \"/diytarot/cards/%s/\" % tarot_index\n \n # For the next and previous links\n indices = get_nearest_indices(tarot_index)\n \n # For the side navigation\n default_deck_id = 1\n majors_list = MajorArcana.objects.filter(deck=default_deck_id).order_by('tarot_index')\n first_major = ''\n if majors_list.count() > 0:\n first_major = majors_list[0].tarot_index\n \n minors_list = MinorArcana.objects.filter(deck=default_deck_id).order_by('suit', 'tarot_index')\n first_minor = ''\n if minors_list.count() > 0:\n first_minor = minors_list[0].tarot_index\n \n # Gives us the list of suits which have no cards in them, for completion.\n empty_suit_list = []\n suits = Suit.objects.filter(deck=default_deck_id)\n for suit in suits:\n cards = MinorArcana.objects.filter(suit=suit.id)\n if cards.count() == 0:\n empty_suit_list += [suit]\n \n related_cards = Card.objects.filter(tarot_index=tarot_index).values('deck', 'deck__name')\n \n context = {'result_list': current_page,\n 'active_options': active_options,\n 'base_url': base_url,\n 'meaning': meaning,\n 'previous_card_index': indices['previous_index'],\n 'next_card_index': indices['next_index'],\n 'majors_list': majors_list,\n 'minors_list': minors_list,\n 'empty_suit_list': empty_suit_list,\n 'related_cards': related_cards,\n 'first_major': first_major,\n 'first_minor': first_minor}\n \n return render_to_response('diyTarot/tarot_card_detail.html',\n context_instance=RequestContext(request, context))\n\ndef reading(request, spread_id, deck_id):\n \"\"\" This is a view for displaying card readings on a given spread and deck. 
\n By default the cards drawn are random, but if a string of saved cards called\n 'cards' is passed in via query string it will try to load those cards, returning\n an error if the string is invalid.\"\"\"\n \n try:\n spread = Spread.objects.get(pk=spread_id)\n except Spread.DoesNotExist:\n return spread_list(request)\n \n try:\n deck = Deck.objects.get(pk=deck_id)\n deck_name = deck.name\n except Deck.DoesNotExist:\n return deck_list(request)\n \n # Get all the positions in the spread and pull out the maximums for layout\n positions = CardPosition.objects.filter(spread=spread.id).order_by('index')\n num_positions = positions.count()\n max_x_coordinate = positions.aggregate(Max('x_coordinate'))['x_coordinate__max'] \n max_y_coordinate = positions.aggregate(Max('y_coordinate'))['y_coordinate__max'] \n \n # Get all of the layout information for the template to use later\n layout = calculate_layout(positions, max_x_coordinate, max_y_coordinate)\n \n # If we have a query string, try to display the saved reading \n if request.method == 'GET' and request.GET.get('cards') is not None:\n \n # Get the query string\n reading_string = request.GET.get('cards')\n try:\n # Try to parse out the saved reading encoding, and catch the exceptions.\n reading = load_saved_reading(reading_string, num_positions, deck.id)\n \n except (IndexError, TypeError, ValueError, Card.DoesNotExist):\n # Exceptions with custom messages are raised in the helper function,\n # then they are caught and their text is passed to the template for display\n return render_to_response('diyTarot/reading.html',\n {'error': 'Problem loading saved reading.',\n 'spread': spread,\n 'deck': deck })\n else : \n # Otherwise, create a random reading that is different every time the page is loaded.\n # Select all cards, filter by the chosen deck, put in random order and then \n # slice off the number of cards that appear in the spread\n random_cards = Card.objects.all().filter(deck=deck_id).order_by('?')[:num_positions]\n reversal_odds = [False, False, False, False, False, False, False, True, True, True]\n \n reading = []\n for card in random_cards:\n reading += [{'card': card,\n 'reversed': choice(reversal_odds)}]\n \n # Put together the card object, position object, layout coordinates for display in the template\n # and generate a save string for the thrown cards.\n card_list = []\n saved_card_list = []\n for (thrown_card, position, coordinates) in zip(reading, positions, layout['coordinates']):\n \n card_list += [(position, thrown_card, coordinates)]\n saved_card_list += [\"%d.%d\" % (thrown_card['card'].tarot_index, \n int(thrown_card['reversed']))]\n \n # Build the string to re-create this reading. 
\n save_string = (',').join(saved_card_list)\n \n # Lists for use in the navigation menu\n deck_list = Deck.objects.values('id', 'name').order_by('name')\n spread_list = Spread.objects.values('id', 'title').order_by('title')\n \n deck_options = {}\n # Check if there is a default deck stored in the current session\n if 'deck' in request.session:\n deck_options['session_deck_id'] = request.session['deck']\n else:\n deck_options['session_deck_id'] = '1'\n \n deck_options['session_deck_name'] = Deck.objects.get(id=deck_options['session_deck_id']).name\n deck_options['display_deck_id'] = deck_id\n deck_options['display_deck_name'] = deck_name\n \n context = {'spread': spread,\n 'card_list': card_list,\n 'save_string': save_string,\n 'layout': layout['sizes'],\n 'deck_options': deck_options,\n 'deck_list': deck_list,\n 'spread_list': spread_list,} \n \n return render_to_response('diyTarot/reading.html',\n context_instance=RequestContext(request, context))\n \ndef update_reading_settings(request, spread_id):\n \"\"\" This is the view that sets the persistent settings for readings: which deck to use\n and whether to enable card reversals. It is invoked when the user updates their reading\n settings. \"\"\"\n \n # Get the form data, see if it's valid, and if needed update\n # the session variables.\n try:\n deck_id = int(request.GET.get('deck', 1))\n except ValueError:\n deck_id = 1\n \n # Check if the deck is a valid deck in the system\n deck = Deck.objects.filter(id=deck_id)\n if deck.count() > 0:\n request.session['deck'] = deck_id\n\n # Then just invoke the reading view\n target = \"/diytarot/reading/%s/%s/\" % (spread_id, deck_id)\n return redirect(target)\n\ndef random_reading(request):\n \"\"\" This is a view to get you directly to a tarot reading without browsing through\n the various spreads. Then, you can change the reading settings via the sidebar. \"\"\"\n \n deck_id_list = Deck.objects.values_list('id')\n deck_id = int(choice(deck_id_list)[0])\n \n spread_id_list = Spread.objects.values_list('id')\n spread_id = int(choice(spread_id_list)[0])\n \n # Then just invoke the reading view\n target = \"/diytarot/reading/%s/%s/\" % (spread_id, deck_id)\n return redirect(target)\n\ndef two_cards_exercise(request):\n \"\"\" This is a view for the game/exercise Two Cards, a Question, and a Sentence, which\n chooses two random cards and a random question and asks the user to make a \n one-sentence interpretation. 
\"\"\"\n \n \n \n context = {'deck_options': None,\n 'deck_list': deck_list,\n 'card_list': card_list,} \n \n return render_to_response('diyTarot/reading.html',\n context_instance=RequestContext(request, context))\n \n\n\n ", "repo_name": "blakecsutton/diyTarot", "sub_path": "views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 21093, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "76", "api": [{"api_name": "models.Deck.objects.all", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Deck.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.Deck", "line_number": 16, "usage_type": "name"}, {"api_name": "models.Suit.objects.filter", "line_number": 22, "usage_type": "call"}, {"api_name": "models.Suit.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "models.Suit", "line_number": 22, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 41, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 42, "usage_type": "call"}, {"api_name": "models.Spread.objects.annotate", "line_number": 59, "usage_type": "call"}, {"api_name": "models.Spread.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "models.Spread", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 59, "usage_type": "call"}, {"api_name": "django.core.paginator.Paginator", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 69, "usage_type": "call"}, {"api_name": "models.Spread.objects.filter", "line_number": 70, "usage_type": "call"}, {"api_name": "models.Spread.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "models.Spread", "line_number": 70, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 86, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 87, "usage_type": "call"}, {"api_name": "models.Meaning.objects.filter", "line_number": 120, "usage_type": "call"}, {"api_name": "models.Meaning.objects", "line_number": 120, "usage_type": "attribute"}, {"api_name": "models.Meaning", "line_number": 120, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 134, "usage_type": "call"}, {"api_name": "models.MinorArcana.objects.filter", "line_number": 140, "usage_type": "call"}, {"api_name": "models.MinorArcana.objects", "line_number": 140, "usage_type": "attribute"}, {"api_name": "models.MinorArcana", "line_number": 140, "usage_type": "name"}, {"api_name": "models.Deck.objects.values", "line_number": 148, "usage_type": "call"}, {"api_name": "models.Deck.objects", "line_number": 148, "usage_type": "attribute"}, {"api_name": "models.Deck", "line_number": 148, "usage_type": "name"}, {"api_name": "models.Suit.objects.filter", "line_number": 149, "usage_type": "call"}, {"api_name": "models.Suit.objects", "line_number": 149, "usage_type": "attribute"}, {"api_name": "models.Suit", "line_number": 149, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 152, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 161, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 162, "usage_type": "call"}, {"api_name": "models.Deck.objects.get", "line_number": 170, 
"usage_type": "call"}, {"api_name": "models.Deck.objects", "line_number": 170, "usage_type": "attribute"}, {"api_name": "models.Deck", "line_number": 170, "usage_type": "name"}, {"api_name": "models.Deck.DoesNotExist", "line_number": 171, "usage_type": "attribute"}, {"api_name": "models.Deck", "line_number": 171, "usage_type": "name"}, {"api_name": "models.MinorArcana.objects.filter", "line_number": 189, "usage_type": "call"}, {"api_name": "models.MinorArcana.objects", "line_number": 189, "usage_type": "attribute"}, {"api_name": "models.MinorArcana", "line_number": 189, "usage_type": "name"}, {"api_name": "models.Suit.objects.filter", "line_number": 197, "usage_type": "call"}, {"api_name": "models.Suit.objects", "line_number": 197, "usage_type": "attribute"}, {"api_name": "models.Suit", "line_number": 197, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 200, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 209, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 210, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 218, "usage_type": "call"}, {"api_name": "models.Deck.DoesNotExist", "line_number": 232, "usage_type": "attribute"}, {"api_name": "models.Deck", "line_number": 232, "usage_type": "name"}, {"api_name": "models.Meaning.objects.get", "line_number": 238, "usage_type": "call"}, {"api_name": "models.Meaning.objects", "line_number": 238, "usage_type": "attribute"}, {"api_name": "models.Meaning", "line_number": 238, "usage_type": "name"}, {"api_name": "models.Meaning.DoesNotExist", "line_number": 239, "usage_type": "attribute"}, {"api_name": "models.Meaning", "line_number": 239, "usage_type": "name"}, {"api_name": "models.Meaning", "line_number": 240, "usage_type": "call"}, {"api_name": "models.MajorArcana.objects.filter", "line_number": 246, "usage_type": "call"}, {"api_name": "models.MajorArcana.objects", "line_number": 246, "usage_type": "attribute"}, {"api_name": "models.MajorArcana", "line_number": 246, "usage_type": "name"}, {"api_name": "models.MinorArcana.objects.filter", "line_number": 251, "usage_type": "call"}, {"api_name": "models.MinorArcana.objects", "line_number": 251, "usage_type": "attribute"}, {"api_name": "models.MinorArcana", "line_number": 251, "usage_type": "name"}, {"api_name": "models.Suit.objects.filter", "line_number": 258, "usage_type": "call"}, {"api_name": "models.Suit.objects", "line_number": 258, "usage_type": "attribute"}, {"api_name": "models.Suit", "line_number": 258, "usage_type": "name"}, {"api_name": "models.MinorArcana.objects.filter", "line_number": 260, "usage_type": "call"}, {"api_name": "models.MinorArcana.objects", "line_number": 260, "usage_type": "attribute"}, {"api_name": "models.MinorArcana", "line_number": 260, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 277, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 278, "usage_type": "call"}, {"api_name": "models.Meaning.objects.filter", "line_number": 287, "usage_type": "call"}, {"api_name": "models.Meaning.objects", "line_number": 287, "usage_type": "attribute"}, {"api_name": "models.Meaning", "line_number": 287, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 303, "usage_type": "call"}, {"api_name": "models.MajorArcana.objects.filter", "line_number": 314, "usage_type": "call"}, {"api_name": "models.MajorArcana.objects", "line_number": 314, 
"usage_type": "attribute"}, {"api_name": "models.MajorArcana", "line_number": 314, "usage_type": "name"}, {"api_name": "models.MinorArcana.objects.filter", "line_number": 319, "usage_type": "call"}, {"api_name": "models.MinorArcana.objects", "line_number": 319, "usage_type": "attribute"}, {"api_name": "models.MinorArcana", "line_number": 319, "usage_type": "name"}, {"api_name": "models.Suit.objects.filter", "line_number": 326, "usage_type": "call"}, {"api_name": "models.Suit.objects", "line_number": 326, "usage_type": "attribute"}, {"api_name": "models.Suit", "line_number": 326, "usage_type": "name"}, {"api_name": "models.MinorArcana.objects.filter", "line_number": 328, "usage_type": "call"}, {"api_name": "models.MinorArcana.objects", "line_number": 328, "usage_type": "attribute"}, {"api_name": "models.MinorArcana", "line_number": 328, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 347, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 348, "usage_type": "call"}, {"api_name": "models.Spread.objects.get", "line_number": 357, "usage_type": "call"}, {"api_name": "models.Spread.objects", "line_number": 357, "usage_type": "attribute"}, {"api_name": "models.Spread", "line_number": 357, "usage_type": "name"}, {"api_name": "models.Spread.DoesNotExist", "line_number": 358, "usage_type": "attribute"}, {"api_name": "models.Spread", "line_number": 358, "usage_type": "name"}, {"api_name": "models.Deck.objects.get", "line_number": 362, "usage_type": "call"}, {"api_name": "models.Deck.objects", "line_number": 362, "usage_type": "attribute"}, {"api_name": "models.Deck", "line_number": 362, "usage_type": "name"}, {"api_name": "models.Deck.DoesNotExist", "line_number": 364, "usage_type": "attribute"}, {"api_name": "models.Deck", "line_number": 364, "usage_type": "name"}, {"api_name": "models.CardPosition.objects.filter", "line_number": 368, "usage_type": "call"}, {"api_name": "models.CardPosition.objects", "line_number": 368, "usage_type": "attribute"}, {"api_name": "models.CardPosition", "line_number": 368, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 388, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 402, "usage_type": "call"}, {"api_name": "models.Deck.objects.values", "line_number": 418, "usage_type": "call"}, {"api_name": "models.Deck.objects", "line_number": 418, "usage_type": "attribute"}, {"api_name": "models.Deck", "line_number": 418, "usage_type": "name"}, {"api_name": "models.Spread.objects.values", "line_number": 419, "usage_type": "call"}, {"api_name": "models.Spread.objects", "line_number": 419, "usage_type": "attribute"}, {"api_name": "models.Spread", "line_number": 419, "usage_type": "name"}, {"api_name": "models.Deck.objects.get", "line_number": 428, "usage_type": "call"}, {"api_name": "models.Deck.objects", "line_number": 428, "usage_type": "attribute"}, {"api_name": "models.Deck", "line_number": 428, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 440, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 441, "usage_type": "call"}, {"api_name": "models.Deck.objects.filter", "line_number": 456, "usage_type": "call"}, {"api_name": "models.Deck.objects", "line_number": 456, "usage_type": "attribute"}, {"api_name": "models.Deck", "line_number": 456, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 462, "usage_type": "call"}, {"api_name": 
"models.Deck.objects.values_list", "line_number": 468, "usage_type": "call"}, {"api_name": "models.Deck.objects", "line_number": 468, "usage_type": "attribute"}, {"api_name": "models.Deck", "line_number": 468, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 469, "usage_type": "call"}, {"api_name": "models.Spread.objects.values_list", "line_number": 471, "usage_type": "call"}, {"api_name": "models.Spread.objects", "line_number": 471, "usage_type": "attribute"}, {"api_name": "models.Spread", "line_number": 471, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 472, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 476, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 489, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 490, "usage_type": "call"}]}
+{"seq_id": "39862057484", "text": "import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"size_effect_normalization\",\n version=\"1.0\",\n author=\"Mathias Gotsmy\",\n author_email=\"mathias.gotsmy@univie.ac.at\",\n description=\"A python package for size effect normalization in time series metabolome data sets.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Gotsmy/sweat_normalization\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: GNUv3 License\",\n \"Operating System :: OS Independent\",\n ],\n package_dir={\"\": \"src\"},\n packages=setuptools.find_packages(where=\"src\"),\n python_requires=\">=3.7,<3.8\",\n include_package_data=True,\n package_data={'': ['data/*.csv']},\n install_requires=['importlib-metadata==3.7.3',\n 'matplotlib==3.3.4',\n 'numpy==1.20.1',\n 'pandas==1.3.0',\n 'pickleshare==0.7.5',\n 'scipy==1.7.0',\n 'tqdm==4.50.0',]\n)\n", "repo_name": "Gotsmy/sweat_normalization", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1158, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "setuptools.setup", "line_number": 6, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "20077166727", "text": "from cgi import test\nfrom selenium import webdriver\nfrom time import sleep\nclass TestEven:\n def test(self):\n self.driver = webdriver.Chrome()\n url = 'http://www.baidu.com'\n self.driver.get(url)\n self.driver.maximize_window()\n self.driver.implicitly_wait(30)\n input_x = self.driver.find_element_by_id('su')\n input_x.send_keys('测试---软件测试')\n sleep(2)\n even_x = self.driver.find_element_by_id('kw')\n even_x.click()\n sleep(2)\n print(input_x)\n print(even_x)\n self.driver.quit()\n return input_x,even_x\n \n \n# if __name__ == '__main__':\n # test()\n\n# test1 = TestEven()\n", "repo_name": "Elt-wlj/learn", "sub_path": "LogTest/login.py", "file_name": "login.py", "file_ext": "py", "file_size_in_byte": 697, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "76", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 6, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 6, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 13, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "73600944245", "text": "import discord\nimport scraper\nimport re\n\nclass MyClient(discord.Client):\n async def on_ready(self):\n print(\"Rastley is online!\")\n \n # check for rick roll in a message\n async def on_message(self, message):\n # return if message author is a bot\n if message.author.bot: return\n\n # check if there's a link in the message using regex and store all links in urls\n urls = re.findall(\"(?Phttps?://[^\\s]+)\", message.content)\n\n # check each url in urls for a rick roll\n for url in urls:\n if scraper.searchForRick(url):\n await message.channel.send(\"<@\" + str(message.author.id) + \"> https://youtu.be/Ux0YNqhaw0I\")\n\n\n\ntoken = input(\"What is the bot token? \")\nclient = MyClient()\nclient.run(token)", "repo_name": "QuoteNat/rastley.py", "sub_path": "rastley.py", "file_name": "rastley.py", "file_ext": "py", "file_size_in_byte": 776, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "discord.Client", "line_number": 5, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 15, "usage_type": "call"}, {"api_name": "scraper.searchForRick", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "38189022740", "text": "## Imports\nfrom __future__ import print_function\nimport sys\nfrom pyspark import SparkConf, SparkContext\nfrom pyspark.sql import SparkSession\n\nimport math\n \n## Module Constants\nAPP_NAME = \"My Spark Application\"\n \n## Closure Functions\ndef stdDev(sumX, sumSquared, n):\n mean = sumX / n\n stdDeviation = math.sqrt((sumSquared - n*mean*mean) / n)\n return(mean, stdDeviation)\n\n## Main functionalitya\n \ndef main(sc):\n pass\n \nif __name__ == \"__main__\":\n \t# create an instance of a SparkSession as spark\n spark = SparkSession.builder.appName(\"wordcount\").getOrCreate()\n\n # create SparkContext as sc\n sc = spark.sparkContext\n\n data = [(\"A\", 2.), (\"A\", 4.), (\"A\", 9.), (\"B\", 10.), (\"B\", 20.), (\"Z\", 3.), (\"Z\", 5.), (\"Z\", 8.), (\"Z\", 12.)]\n\n rdd = sc.parallelize(data)\n print(rdd.collect())\n print(rdd.count())\n\n # mean and standard deviation\n sumCount = rdd.combineByKey(lambda value: (value, value*value, 1),\n lambda x, value: (x[0] + value, x[1] + value*value, x[2]+1),\n lambda x, y: (x[0] + y[0], x[1] + y[1], x[2]+y[2])\n )\n print(sumCount.collect())\n\n meanAndStdDev = sumCount.mapValues(lambda x: stdDev(x[0], x[1], x[2]))\n print(meanAndStdDev.collect())\n\n # done!\n spark.stop()\n", "repo_name": "laylalaisy/LearningNote_Spark", "sub_path": "9_combine_by_key/combineByKey2.py", "file_name": "combineByKey2.py", "file_ext": "py", "file_size_in_byte": 1306, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "math.sqrt", "line_number": 15, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 25, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 25, "usage_type": "name"}]}
+{"seq_id": "39731821134", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"ipetrash\"\n\n\nfrom pathlib import Path\n\n\nDIR: Path = Path(__file__).resolve().parent\n\nDIR_LOGS: Path = DIR / \"logs\"\nDIR_LOGS.mkdir(parents=True, exist_ok=True)\n\nPORT_WEB: int = 12000\n", "repo_name": "gil9red/get_metal_rates", "sub_path": "app_web_server/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 243, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "pathlib.Path", "line_number": 10, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 12, "usage_type": "name"}]}
+{"seq_id": "28160640799", "text": "from sdPy.functionDefinitions import make2d\nfrom UI.objectsTable import Ui_MainWindow as objectsTableUI\nfrom PySide2.QtWidgets import QGraphicsItem, QMainWindow,QHeaderView, QTableWidgetItem,QComboBox\nfrom numpy import array\nimport re\nfrom sdPy.segmentMethods import segPlotData2\nfrom sdPy.loadMethods import loadPlotData2, loadPlotData4\nfrom sdPy.supportMethods import supportPlotData2\nfrom PySide2.QtGui import QPainterPath, QPen,QColor\nfrom PySide2.QtCore import Qt,QPointF\nsegmentColumns={\n 0:'Name',\n 1:'P1',\n 2:'P3',\n 3:'P2',\n 4:'youngsModulus',\n 5:'shearModulus',\n 6:'area',\n 7:'I',\n 8:'shapeFactor',\n 9:'density',\n 10:'alpha',\n 11:'type'}\nloadColumns={\n 0:'Name',\n 1:'degree',\n 2:'peak',\n 3:'parentSegment',\n 4:'P1',\n 5:'P3',\n 6:'normal',\n }\nsupportColumns={\n 0:'Name',\n 1:'type',\n 2:'location',\n 3:'normal',\n 4:'settlement'\n }\ncolumnTypes={0:segmentColumns,1:loadColumns,2:supportColumns}\ntableClass={0:'segment',1:'load',2:'support'}\neditableitems=['P1','P2','P3','peak','normal','location','degree','type','parentSegment']\n\ndef editObjectsTable(self):\n pass\n\ndef addDataToTable(self,tableIndex,name,values):\n table=self.objectTables[tableIndex]\n rows = table.rowCount()\n columns = table.columnCount()\n table.insertRow(rows)\n table.setItem(rows,0,QTableWidgetItem(name))\n columnHeaders=columnTypes[tableIndex]\n for i in range(1,columns):\n table.setItem(rows,i,QTableWidgetItem(str(values[columnHeaders[i]])))\n if tableIndex==1:\n combo=QComboBox()\n combo.addItems(reversed(list(self.df[(self.df['Class']=='segment')&(self.df['Flag']==True)]['Name'])))\n combo.currentIndexChanged.connect(lambda:editObjectsTable(self))\n combo.setCurrentText(self.df[(self.df['Graphitem']==self.parent)]['Name'].iloc[0])\n table.setCellWidget(rows,3,combo) \n\n\ndef editMembers(self,tab): \n try:\n currentItem=self.objectTables[tab].currentItem()\n self.graphicsView.setFocus()\n if currentItem:\n row=currentItem.row()\n column=currentItem.column()\n currentText=currentItem.text()\n columnName=columnTypes[tab][column]\n data=self.df[(self.df['Class']==tableClass[tab]) & (self.df['Flag']==True)].iloc[row,2]\n index= self.df[(self.df['Class']==tableClass[tab]) & (self.df['Flag']==True)].index[0]\n\n if columnName not in['Name','parentSegment']:\n if columnName in ['P1','P2','P3','normal','location','settlement']: \n currentText=re.sub('[\\[\\]]','',currentText)\n currentText=re.split(',| ',currentText)\n while '' in currentText:currentText.remove('')\n if len(currentText)==2:\n currentText=array(currentText,dtype=float) \n else: \n if columnName=='normal':\n currentText=array([0]+[currentText[0]],dtype=float)\n else:\n currentText=array([currentText[0]]+[0],dtype=float) \n\n elif columnName=='type':\n currentText=currentText.capitalize()\n else:\n currentText=float(currentText)\n else:\n if currentText not in list(self.df['Name']):\n self.df.iat[index,0]=currentText\n else:\n raise NameError \n if columnName=='parentSegment':\n combo=self.ee.loadsTable.cellWidget(row,column) \n currentText=combo.currentText()\n data[columnName]=self.df[self.df['Name']==currentText].iloc[0]['Robject']\n data=make2d(list(data.values()))\n self.ee.loadsTable.setItem(row,4,QTableWidgetItem(data['P1']))\n self.ee.loadsTable.setItem(row,5,QTableWidgetItem(data['P3']))\n self.ee.loadsTable.setItem(row,6,QTableWidgetItem(data['normal']))\n self.ee.loadsTable.setItem(row,2,QTableWidgetItem(data['peak']))\n else:\n # prevData=data[columnName]\n 
data[columnName]=currentText\n data=make2d(list(data.values()))\n self.df.iat[index,2]=data\n\n print(row,column)\n if str(self.prevData)==str(data[columnName]):\n return\n self.prevData=data[columnName]\n \n self.objectTables[tab].setItem(\n row,column,QTableWidgetItem(str(currentText))\n )\n if columnName in editableitems:\n if tab==0:\n Rsegment = data \n data=segPlotData2(Rsegment['type'],self.rrts(Rsegment['P1']),\n self.rrts(Rsegment['P3']),scale=1,P2=self.rrts(Rsegment['P2']),no=self.NoOfPointsInCurvedSegments)\n pen=self.pen\n\n elif tab==2:\n Rsupport=data\n data=supportPlotData2(Rsupport['type'] , self.rrts(Rsupport['location']),1/(self.scale*50),Rsupport['normal'])\n color=self.supportColors[Rsupport['type']]\n self.supportPen=QPen(QColor(*color),1.5)\n pen=self.supportPen\n\n else:\n Rload=data\n ps=data['parentSegment']\n pen=self.loadPen\n\n if Rload['degree']> -3:\n data=loadPlotData2(self.rrts(Rload['P1']),self.rrts(Rload['P3']),self.rrts(ps['P1']),self.rrts(ps['P3']),\n self.rrts(ps['P2']), Rload['degree'],self.rrts(Rload['peak']),Rload['normal'],ps['type'],self.scale,self.loadLogScale)\n else:\n data=loadPlotData4(ps,Rload['degree'],self.rrts(Rload['peak']))\n\n\n\n rect = QPainterPath(QPointF(data[0][0],data[0][1]))\n for i in range(1,len(data),1):\n rect.lineTo(QPointF(data[i][0],data[i][1]))\n rect=self.scene.addPath(rect,pen) \n rect.setFlag(QGraphicsItem.ItemIsSelectable)\n\n self.df.iloc[index,3].hide()\n self.df.iat[index,3]=self.scene.items()[0] \n\n except NameError:\n self.statusbar.showMessage('Enter a unique name',5000)\n except:\n import traceback\n traceback.print_exc()\n self.statusbar.showMessage('Enter a valid data',5000)\ndef editLoads(self):\n pass\n\ndef editSupports(self):\n pass\n\n\n\ndef objectsTable(self):\n self.eE=QMainWindow(parent=self.MainWindow)\n self.ee=objectsTableUI()\n self.ee.setupUi(self.eE)\n self.prevData=None\n self.objectTables={\n 0:self.ee.segmentsTable,\n 1:self.ee.loadsTable,\n 2:self.ee.supportsTable\n }\n [self.objectTables[i].horizontalHeader().setSectionResizeMode(QHeaderView.Stretch) for i in [0,1,2]]\n self.ee.segmentsTable.itemChanged.connect(lambda:editMembers(self,0))\n self.ee.loadsTable.itemChanged.connect(lambda:editMembers(self,1))\n self.ee.supportsTable.itemChanged.connect(lambda:editMembers(self,2))\n self.eE.show()", "repo_name": "samrachana/Samrachana-Araniko", "sub_path": "src/structure2d/objectsTable.py", "file_name": "objectsTable.py", "file_ext": "py", "file_size_in_byte": 7311, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "PySide2.QtWidgets.QTableWidgetItem", "line_number": 52, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QTableWidgetItem", "line_number": 55, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QComboBox", "line_number": 57, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 78, "usage_type": "call"}, {"api_name": "re.split", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 87, "usage_type": "call"}, {"api_name": "sdPy.functionDefinitions.make2d", "line_number": 102, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QTableWidgetItem", "line_number": 103, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QTableWidgetItem", "line_number": 104, "usage_type": "call"}, {"api_name": 
"PySide2.QtWidgets.QTableWidgetItem", "line_number": 105, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QTableWidgetItem", "line_number": 106, "usage_type": "call"}, {"api_name": "sdPy.functionDefinitions.make2d", "line_number": 110, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QTableWidgetItem", "line_number": 119, "usage_type": "call"}, {"api_name": "sdPy.segmentMethods.segPlotData2", "line_number": 124, "usage_type": "call"}, {"api_name": "sdPy.supportMethods.supportPlotData2", "line_number": 130, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QPen", "line_number": 132, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 132, "usage_type": "call"}, {"api_name": "sdPy.loadMethods.loadPlotData2", "line_number": 141, "usage_type": "call"}, {"api_name": "sdPy.loadMethods.loadPlotData4", "line_number": 144, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QPainterPath", "line_number": 148, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QPointF", "line_number": 148, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QPointF", "line_number": 150, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QGraphicsItem.ItemIsSelectable", "line_number": 152, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QGraphicsItem", "line_number": 152, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 161, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMainWindow", "line_number": 172, "usage_type": "call"}, {"api_name": "UI.objectsTable.Ui_MainWindow", "line_number": 173, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QHeaderView.Stretch", "line_number": 181, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QHeaderView", "line_number": 181, "usage_type": "name"}]}
+{"seq_id": "20140645210", "text": "from datetime import datetime as dt\n\nfrom common.logger import get_logger\nfrom orchestrator.config import ORDER_EXPIRATION_THRESHOLD_IN_MINUTES\nfrom orchestrator.order_status import OrderStatus\n\nlogger = get_logger(__name__)\n\n\nclass TransactionHistoryDAO:\n def __init__(self, repo):\n self.__repo = repo\n\n def insert_transaction_history(self, obj_transaction_history):\n transaction_history = obj_transaction_history.get_transaction_history()\n query_response = self.__repo.execute(\n \"INSERT INTO transaction_history (username, order_id, order_type, status, payment_id, payment_method, \"\n \"raw_payment_data, transaction_hash, row_created, row_updated)\"\n \"VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) \"\n \"ON DUPLICATE KEY UPDATE payment_id = %s, payment_method = %s, raw_payment_data = %s, transaction_hash = %s, row_updated = %s\",\n [\n transaction_history[\"username\"],\n transaction_history[\"order_id\"],\n transaction_history[\"order_type\"],\n transaction_history[\"status\"],\n transaction_history[\"payment_id\"],\n transaction_history[\"payment_method\"],\n transaction_history[\"raw_payment_data\"],\n transaction_history[\"transaction_hash\"],\n dt.utcnow(),\n dt.utcnow(),\n transaction_history[\"payment_id\"],\n transaction_history[\"payment_method\"],\n transaction_history[\"raw_payment_data\"],\n transaction_history[\"transaction_hash\"],\n dt.utcnow()\n ]\n )\n if query_response[0] == 1:\n return True\n return False\n\n def get_order_id_for_expired_transaction(self):\n params = [OrderStatus.PAYMENT_INITIATED.value, OrderStatus.PAYMENT_INITIATION_FAILED.value,\n OrderStatus.PAYMENT_EXECUTION_FAILED.value, ORDER_EXPIRATION_THRESHOLD_IN_MINUTES]\n order_id_raw_data = self.__repo.execute(\n \"SELECT order_id FROM transaction_history WHERE status IN (%s, %s, %s) AND \"\n \"TIMESTAMPDIFF(MINUTE, row_created, NOW()) > %s \",\n [OrderStatus.PAYMENT_INITIATED.value, OrderStatus.PAYMENT_INITIATION_FAILED.value,\n OrderStatus.PAYMENT_EXECUTION_FAILED.value, ORDER_EXPIRATION_THRESHOLD_IN_MINUTES])\n list_of_order_id = [rec[\"order_id\"] for rec in order_id_raw_data]\n return list_of_order_id\n\n def update_transaction_status(self, list_of_order_id, status):\n if len(list_of_order_id) == 0:\n return \"No order id found\"\n temp_holder = (\"%s, \" * len(list_of_order_id))[:-2]\n params = [status] + list_of_order_id + [OrderStatus.PAYMENT_INITIATED.value,\n OrderStatus.PAYMENT_INITIATION_FAILED.value,\n OrderStatus.PAYMENT_EXECUTION_FAILED.value]\n update_transaction_status_response = self.__repo.execute(\n \"UPDATE transaction_history SET status = %s WHERE order_id IN (\" + temp_holder + \") AND status IN (%s, %s, %s)\",\n params)\n logger.info(f\"update_transaction_status: {update_transaction_status_response}\")\n return update_transaction_status_response\n\n def get_transaction_details_for_given_order_id(self, order_id):\n transaction_data = self.__repo.execute(\n \"SELECT username, order_id, order_type, status, payment_id, payment_type, payment_method, raw_payment_data, \"\n \"transaction_hash FROM transaction_history WHERE order_id = %s\", [order_id])\n if len(transaction_data) == 0:\n raise Exception(\"Order Id does not exist.\")\n return transaction_data[0]\n", "repo_name": "singnet/snet-marketplace-service", "sub_path": "orchestrator/dao/transaction_history_dao.py", "file_name": "transaction_history_dao.py", "file_ext": "py", "file_size_in_byte": 3777, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": 
"github-code", "pt": "76", "api": [{"api_name": "common.logger.get_logger", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "name"}, {"api_name": "orchestrator.order_status.OrderStatus.PAYMENT_INITIATED", "line_number": 44, "usage_type": "attribute"}, {"api_name": "orchestrator.order_status.OrderStatus", "line_number": 44, "usage_type": "name"}, {"api_name": "orchestrator.order_status.OrderStatus.PAYMENT_INITIATION_FAILED", "line_number": 44, "usage_type": "attribute"}, {"api_name": "orchestrator.order_status.OrderStatus.PAYMENT_EXECUTION_FAILED", "line_number": 45, "usage_type": "attribute"}, {"api_name": "orchestrator.order_status.OrderStatus", "line_number": 45, "usage_type": "name"}, {"api_name": "orchestrator.config.ORDER_EXPIRATION_THRESHOLD_IN_MINUTES", "line_number": 45, "usage_type": "name"}, {"api_name": "orchestrator.order_status.OrderStatus.PAYMENT_INITIATED", "line_number": 49, "usage_type": "attribute"}, {"api_name": "orchestrator.order_status.OrderStatus", "line_number": 49, "usage_type": "name"}, {"api_name": "orchestrator.order_status.OrderStatus.PAYMENT_INITIATION_FAILED", "line_number": 49, "usage_type": "attribute"}, {"api_name": "orchestrator.order_status.OrderStatus.PAYMENT_EXECUTION_FAILED", "line_number": 50, "usage_type": "attribute"}, {"api_name": "orchestrator.order_status.OrderStatus", "line_number": 50, "usage_type": "name"}, {"api_name": "orchestrator.config.ORDER_EXPIRATION_THRESHOLD_IN_MINUTES", "line_number": 50, "usage_type": "name"}, {"api_name": "orchestrator.order_status.OrderStatus.PAYMENT_INITIATED", "line_number": 58, "usage_type": "attribute"}, {"api_name": "orchestrator.order_status.OrderStatus", "line_number": 58, "usage_type": "name"}, {"api_name": "orchestrator.order_status.OrderStatus.PAYMENT_INITIATION_FAILED", "line_number": 59, "usage_type": "attribute"}, {"api_name": "orchestrator.order_status.OrderStatus", "line_number": 59, "usage_type": "name"}, {"api_name": "orchestrator.order_status.OrderStatus.PAYMENT_EXECUTION_FAILED", "line_number": 60, "usage_type": "attribute"}, {"api_name": "orchestrator.order_status.OrderStatus", "line_number": 60, "usage_type": "name"}]}
+{"seq_id": "34255728875", "text": "import logging\nfrom pathlib import Path\n\nimport sys\nfrom fastapi import FastAPI\nfrom loguru import logger\n\nfrom helpers.path_helpers import LOGS_PATH\n\n\ndef init_logging(app: FastAPI):\n formatter = '{level: <8} {time:YYYY-MM-DD HH:mm:ss.SSS} ' \\\n '- {name}:{function} - {message}'\n logger = CustomizeLogger.make_logger(formatter)\n app.logger = logger\n\n\n# See https://medium.com/1mgofficial/how-to-override-uvicorn-logger-in-fastapi-using-loguru-124133cdcd4e\nclass InterceptHandler(logging.Handler):\n loglevel_mapping = {\n 50: 'CRITICAL',\n 40: 'ERROR',\n 30: 'WARNING',\n 20: 'INFO',\n 10: 'DEBUG',\n 0: 'NOTSET',\n }\n\n def emit(self, record):\n try:\n level = logger.level(record.levelname).name\n except AttributeError:\n level = self.loglevel_mapping[record.levelno]\n\n frame, depth = logging.currentframe(), 2\n while frame.f_code.co_filename == logging.__file__:\n frame = frame.f_back\n depth += 1\n\n log = logger.bind(request_id='app')\n log.opt(\n depth=depth,\n exception=record.exc_info\n ).log(level, record.getMessage())\n\n\nclass CustomizeLogger:\n\n @classmethod\n def make_logger(cls, formatter):\n logger = cls.customize_logging(\n filepath=Path(LOGS_PATH, 'app_log.txt'),\n level='INFO',\n retention='1 months',\n rotation='20 days',\n format=formatter\n )\n return logger\n\n @classmethod\n def customize_logging(cls,\n filepath: Path,\n level: str,\n rotation: str,\n retention: str,\n format: str\n ):\n logger.remove()\n logger.add(\n sys.stdout,\n enqueue=True,\n backtrace=True,\n level=level,\n format=format\n )\n logger.add(\n str(filepath),\n rotation=rotation,\n retention=retention,\n enqueue=True,\n backtrace=True,\n level=level,\n format=format\n )\n logging.basicConfig(handlers=[InterceptHandler()], level=0)\n logging.getLogger(\"uvicorn.access\").handlers = [InterceptHandler()]\n for _log in ['uvicorn',\n 'uvicorn.error',\n 'fastapi'\n ]:\n _logger = logging.getLogger(_log)\n _logger.handlers = [InterceptHandler()]\n\n return logger.bind(request_id=None, method=None)\n", "repo_name": "NobisIndustries/GitInsight", "sub_path": "backend/helpers/logging_helpers.py", "file_name": "logging_helpers.py", "file_ext": "py", "file_size_in_byte": 2713, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "76", "api": [{"api_name": "fastapi.FastAPI", "line_number": 11, "usage_type": "name"}, {"api_name": "loguru.logger", "line_number": 14, "usage_type": "name"}, {"api_name": "loguru.logger", "line_number": 15, "usage_type": "name"}, {"api_name": "logging.Handler", "line_number": 19, "usage_type": "attribute"}, {"api_name": "loguru.logger.level", "line_number": 31, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 31, "usage_type": "name"}, {"api_name": "logging.currentframe", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.__file__", "line_number": 36, "usage_type": "attribute"}, {"api_name": "loguru.logger.bind", "line_number": 40, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 40, "usage_type": "name"}, {"api_name": "loguru.logger", "line_number": 51, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 52, "usage_type": "call"}, {"api_name": "helpers.path_helpers.LOGS_PATH", "line_number": 52, "usage_type": "argument"}, {"api_name": "loguru.logger", "line_number": 58, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 62, "usage_type": "name"}, {"api_name": "loguru.logger.remove", "line_number": 68, 
"usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 68, "usage_type": "name"}, {"api_name": "loguru.logger.add", "line_number": 69, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 69, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 70, "usage_type": "attribute"}, {"api_name": "loguru.logger.add", "line_number": 76, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 76, "usage_type": "name"}, {"api_name": "logging.basicConfig", "line_number": 85, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 86, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 91, "usage_type": "call"}, {"api_name": "loguru.logger.bind", "line_number": 94, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 94, "usage_type": "name"}]}
+{"seq_id": "6242539770", "text": "from datetime import datetime\nimport traceback\n\nfrom django.db.models.query import QuerySet\nfrom django.shortcuts import render, redirect\nfrom django_currentuser.middleware import get_current_authenticated_user\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\n\nfrom petApp.models import PetModel\nfrom petApp.forms import PetForm\nfrom petCommentApp.forms import PetCommentForm\n\n\n@login_required\ndef index(request):\n list = PetModel.objects.all()\n list = list.order_by(\"created_at\").reverse()\n return render(request, \"pet/index.html\", {\"list\": list})\n\n\n@login_required\ndef new(request):\n if request.method == \"POST\":\n form = PetForm(request.POST, request.FILES)\n if not form.is_valid():\n # clean_post_cord()でバリデーションにかかった場合\n if \"post_cord\" in form.errors:\n messages.error(request, form.errors[\"post_cord\"][0])\n else:\n messages.error(request, \"ご入力の際にエラーが発生しました。管理者にご確認ください\")\n return redirect(\"/pet/new/\")\n\n pet = form.save(commit=False)\n\n now = datetime.now()\n today = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n pet.created_at = today\n pet.updated_at = today\n pet.owner = get_current_authenticated_user()\n\n pet.save()\n messages.success(request, \"ペットの登録が完了しました\")\n return redirect(\"/pet/index\")\n\n form = PetForm()\n return render(request, \"pet/new.html\", context={\"form\": form})\n\n\n@login_required\ndef show(request, id):\n pet = get_one_pet(id)\n if pet is None:\n return render(\n request,\n \"pet/index.html\",\n {\"error_message\": \"予期せぬエラーが発生しました\\n管理者にご確認ください\"},\n )\n\n pet_comment_list = pet.petcommentmodel_set.all() # type: ignore\n pet_comment_list = pet_comment_list.order_by(\"created_at\").reverse()\n form = PetCommentForm()\n return render(\n request,\n \"pet/show.html\",\n context={\"pet\": pet, \"pet_comment_list\": pet_comment_list, \"form\": form},\n )\n\n\n@login_required\ndef edit(request, id):\n pet = get_one_pet(id)\n if pet is None:\n return render(\n request,\n \"pet/index.html\",\n {\"error_message\": \"予期せぬエラーが発生しました\\n管理者にご確認ください\"},\n )\n\n if request.method == \"POST\":\n form = PetForm(request.POST, request.FILES)\n if not form.is_valid():\n # clean_post_cord()でバリデーションにかかった場合\n if \"post_cord\" in form.errors:\n messages.error(request, form.errors[\"post_cord\"][0])\n else:\n messages.error(request, \"ご入力の際にエラーが発生しました。管理者にご確認ください\")\n return redirect(\"edit\", id=id)\n\n edit_pet = form.save(commit=False)\n\n # 写真が未選択の場合、更新前の写真を設定\n if len(request.FILES) == 0:\n edit_pet.image = pet.image\n\n now = datetime.now()\n today = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n edit_pet.created_at = pet.created_at\n edit_pet.updated_at = today\n edit_pet.owner = get_current_authenticated_user()\n\n edit_pet.save()\n pet.delete() # save()完了後、更新前のデータは削除\n messages.success(request, \"ペットの更新が完了しました\")\n return redirect(\"/pet/index\")\n\n form = PetForm(instance=pet)\n pet_id = pet.id # type: ignore\n # nameから画像添付の有無を確認\n if pet.image.name:\n pet_image_url = pet.image.url\n else:\n pet_image_url = \"/static/image/no-image.png\"\n return render(\n request,\n \"pet/edit.html\",\n context={\"form\": form, \"pet_id\": pet_id, \"pet_image_url\": pet_image_url},\n )\n\n\n@login_required\ndef delete(request, id):\n pet = get_one_pet(id)\n if pet is None:\n return render(\n request,\n \"pet/index.html\",\n {\"error_message\": \"予期せぬエラーが発生しました\\n管理者にご確認ください\"},\n )\n\n try:\n pet.delete()\n return redirect(\"/pet/index\")\n except Exception as e:\n 
print(traceback.format_exc())\n return render(\n request,\n \"pet/index.html\",\n {\"error_message\": \"予期せぬエラーが発生しました\\n管理者にご確認ください\"},\n )\n\n\n@login_required\ndef search(request):\n if request.method == \"POST\":\n name = request.POST.get(\"name\")\n age = request.POST.get(\"age\")\n sex = request.POST.get(\"sex\")\n charm_point = request.POST.get(\"charm_point\")\n post_cord = request.POST.get(\"post_cord\")\n address = request.POST.get(\"address\")\n owner = request.POST.get(\"owner\")\n\n # 検索実行\n list = PetModel.objects\n if name:\n list = list.filter(name__icontains=name)\n if age:\n list = list.filter(age=age)\n if sex:\n if sex == \"true\":\n list = list.filter(sex=True)\n elif sex == \"false\":\n list = list.filter(sex=False)\n if charm_point:\n list = list.filter(charm_point__icontains=charm_point)\n if post_cord:\n list = list.filter(post_cord__icontains=post_cord)\n if address:\n list = list.filter(address__icontains=address)\n if owner:\n list = list.filter(owner__username__icontains=owner)\n\n # 未入力で検索ボタン押下\n if hasattr(list, \"name\") and list.name == \"objects\":\n return render(request, \"pet/index.html\", {\"search_message\": \"検索結果は0件でした\"})\n\n if list.exists():\n return render(request, \"pet/index.html\", {\"list\": list})\n else:\n return render(request, \"pet/index.html\", {\"search_message\": \"検索結果は0件でした\"})\n\n return render(\n request, \"pet/index.html\", {\"search_message\": \"予期せぬエラーが発生しました\\n管理者にご確認ください\"}\n )\n\n\ndef get_one_pet(id):\n try:\n return PetModel.objects.get(pk=id)\n except PetModel.DoesNotExist:\n print(traceback.format_exc())\n return None\n\n\ndef get_media_or_empty(request, name):\n if (name, \"\") in request.POST.items():\n return \"\"\n else:\n return request.FILES[name]\n", "repo_name": "otinu/Pertch-Django", "sub_path": "petApp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6570, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "petApp.models.PetModel.objects.all", "line_number": 17, "usage_type": "call"}, {"api_name": "petApp.models.PetModel.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "petApp.models.PetModel", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 15, "usage_type": "name"}, {"api_name": "petApp.forms.PetForm", "line_number": 25, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 29, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 31, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 31, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "name"}, {"api_name": "django_currentuser.middleware.get_current_authenticated_user", "line_number": 41, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 44, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 44, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 45, "usage_type": "call"}, {"api_name": "petApp.forms.PetForm", "line_number": 
47, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 48, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 22, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}, {"api_name": "petCommentApp.forms.PetCommentForm", "line_number": 63, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 64, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 51, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 75, "usage_type": "call"}, {"api_name": "petApp.forms.PetForm", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 86, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 86, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 88, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 88, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 89, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 97, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 97, "usage_type": "name"}, {"api_name": "django_currentuser.middleware.get_current_authenticated_user", "line_number": 102, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 106, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 106, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 107, "usage_type": "call"}, {"api_name": "petApp.forms.PetForm", "line_number": 109, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 116, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 71, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 127, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 135, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 137, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 138, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 123, "usage_type": "name"}, {"api_name": "petApp.models.PetModel.objects", "line_number": 157, "usage_type": "attribute"}, {"api_name": "petApp.models.PetModel", "line_number": 157, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 178, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 181, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 183, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 185, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 145, "usage_type": "name"}, {"api_name": "petApp.models.PetModel.objects.get", "line_number": 192, "usage_type": "call"}, {"api_name": "petApp.models.PetModel.objects", "line_number": 192, "usage_type": "attribute"}, {"api_name": "petApp.models.PetModel", "line_number": 192, "usage_type": "name"}, {"api_name": "petApp.models.PetModel.DoesNotExist", "line_number": 193, "usage_type": "attribute"}, {"api_name": "petApp.models.PetModel", "line_number": 193, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 194, "usage_type": "call"}]}
+{"seq_id": "71399401846", "text": "from nio.modules.settings import Settings\nfrom nio.testing import NIOTestCase\n\n\nclass TestSetSettings(NIOTestCase):\n\n \"\"\" Tests that set_settings is called in the right spot (after initializing\n settings and before initializing any other module, and settings are\n available within tests . \"\"\"\n\n def set_settings(self):\n\n Settings.set(\"test_section\", \"test_option\", \"test_value\")\n\n # assert only Settings module has been initialized\n self.assertEqual(len(self._module_initializer._initialized_modules), 1)\n settings_module = self.get_module(\"settings\")\n self.assertEqual(\n self._module_initializer._initialized_modules[0].__class__,\n settings_module.__class__)\n\n def test_set_settings(self):\n \"\"\" Makes sure settings are available within test \"\"\"\n\n self.assertEqual(Settings.get(\"test_section\", \"test_option\"),\n \"test_value\")\n\n # assert that at this point all modules have been initialized\n self.assertEqual(len(self._module_initializer._initialized_modules),\n len(self.get_test_modules()))\n", "repo_name": "niolabs/nio", "sub_path": "nio/testing/tests/test_set_settings.py", "file_name": "test_set_settings.py", "file_ext": "py", "file_size_in_byte": 1138, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "76", "api": [{"api_name": "nio.testing.NIOTestCase", "line_number": 5, "usage_type": "name"}, {"api_name": "nio.modules.settings.Settings.set", "line_number": 13, "usage_type": "call"}, {"api_name": "nio.modules.settings.Settings", "line_number": 13, "usage_type": "name"}, {"api_name": "nio.modules.settings.Settings.get", "line_number": 25, "usage_type": "call"}, {"api_name": "nio.modules.settings.Settings", "line_number": 25, "usage_type": "name"}]}
+{"seq_id": "14207869115", "text": "from tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import filedialog\n\nglobal root\nglobal scrnwparam\nglobal scrnhparam\nscrnwparam = 185\nscrnhparam = 150\n\nimport os, sys, re, shutil\nfrom pathlib import Path\nfrom threading import Thread\nimport time\nimport datetime as datetime2\n\nfrom pdfminer.layout import LAParams, LTTextBox, LTText, LTChar, LTAnno\nfrom pdfminer.pdfpage import PDFPage\nfrom pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\nfrom pdfminer.converter import PDFPageAggregator\n\n\nglobal root\n\n\nglobal PlaceInputDir\nglobal PlaceFilesArray\nglobal PlaceInputDirSel\nglobal PlaceOutputDirSel\nglobal PlaceWorking\nglobal PlaceStartTime\n\n\n\n\n\n\ndef main():\n global root\n\n root = Tk()\n root.resizable(False, False)\n \n scrnw = (root.winfo_screenwidth()//2) - scrnwparam\n scrnh = (root.winfo_screenheight()//2) - scrnhparam\n root.geometry('375x250+{}+{}'.format(scrnw, scrnh))\n \n app = GUI(root)\n root.mainloop()\n\n\n\n\nclass GUI(Frame):\n def __init__(self, parent):\n Frame.__init__(self, parent, background=\"white\") \n self.parent = parent\n self.parent.title(\"\")\n self.pack(fill=BOTH, expand=1)\n self.initUI()\n \n def initUI(self):\n global PlaceInputDirLbl\n PlaceInputDirLbl = Label(text=\"Выберите файлы на сортировку:\", background=\"white\", font=(\"Arial\", 10))\n \n global PlaceInputDirEntry\n PlaceInputDirEntry = Entry(fg=\"black\", bg=\"white\", width=20)\n PlaceInputDirEntry.configure(state = DISABLED)\n \n global PlaceInputDirBtn\n PlaceInputDirBtn = Button(text='Выбор', command=PlaceInputDirChoose)\n \n global PlaceInputFilesCountLbl\n PlaceInputFilesCountLbl = Label(text=\"\", background=\"white\")\n \n \n global PlaceOutputDirLbl\n PlaceOutputDirLbl = Label(text=\"Выберите папку для отсортированных:\", background=\"white\", font=(\"Arial\", 10))\n \n global PlaceOutputDirEntry\n PlaceOutputDirEntry = Entry(fg=\"black\", bg=\"white\", width=20)\n PlaceOutputDirEntry.configure(state = DISABLED)\n \n global PlaceOutputDirBtn\n PlaceOutputDirBtn = Button(text='Выбор', command=PlaceOutputDirChoose)\n \n \n global PlaceStartBtn\n PlaceStartBtn = Button(text='Запуск', command=PlaceStart)\n PlaceStartBtn.configure(state = DISABLED)\n \n global PlaceStatusLbl\n PlaceStatusLbl = Label(text=\"\", background=\"white\")\n \n global PlaceTimeLbl\n PlaceTimeLbl = Label(text=\"\", background=\"white\")\n\n\n PlaceInputDirLbl.place (x=16, y=7+40)\n PlaceInputDirEntry.place (x=20, y=30+40, width=275)\n PlaceInputDirBtn.place (x=305, y=29+40, height=20)\n PlaceInputFilesCountLbl.place (x=16, y=49+40)\n PlaceOutputDirLbl.place (x=17, y=87+35)\n PlaceOutputDirEntry.place (x=75, y=111+35, width=275)\n PlaceOutputDirBtn.place (x=20, y=110+35, height=20)\n PlaceStartBtn.place (x=20, y=190, width=85)\n PlaceStatusLbl.place (x=120, y=183)\n PlaceTimeLbl.place (x=120, y=200)\n \n \n global PlaceInputDir\n global PlaceFilesArray\n global PlaceInputDirSel\n global PlaceOutputDirSel\n global PlaceWorking\n global PlaceStartTime\n \n PlaceInputDir = \"\"\n PlaceFilesArray = []\n \n PlaceInputDirSel = False\n PlaceOutputDirSel = False\n \n PlaceWorking = False\n PlaceStartTime = \"\"\n\n\n\n\n\ndef PlaceInputDirChoose():\n global PlaceInputDir\n\n PlaceInputDir = filedialog.askdirectory(title=\"Выберите папку на сортировку\")\n \n if PlaceInputDir:\n PlaceInputDirEntry.configure(state = NORMAL)\n PlaceInputDirEntry.delete(0,END)\n PlaceInputDirEntry.insert(0,str(Path(PlaceInputDir).name))\n 
PlaceInputDirEntry.configure(state = DISABLED)\n        \n        print('PlaceInputDir:', PlaceInputDir)\n        \n        PlaceInputDirCheck()\n    else:\n        print('PlaceInputDir is NOT defined')\n    \n\ndef PlaceInputDirCheck():\n    global PlaceInputDir\n    global PlaceFilesArray\n    global PlaceInputDirSel\n    global PlaceOutputDirSel\n\n    PlaceFilesArray.clear()\n    for file in os.listdir(PlaceInputDir):\n        if file.endswith(\".pdf\"):\n            PlaceFilesArray.append(os.path.join(PlaceInputDir, file))\n    \n    if len(PlaceFilesArray) == 0:\n        PlaceInputFilesCountLbl.config(text = 'Нет файлов PDF !')\n        print('no pdf in folder !')\n        PlaceInputDirSel = False\n    else:\n        PlaceInputFilesCountLbl.config(text = 'Количество файлов PDF: ' + str(len(PlaceFilesArray)))\n        print('number of valid files -', str(len(PlaceFilesArray)))\n        PlaceInputDirSel = True\n\n    if PlaceInputDirSel and PlaceOutputDirSel:\n        PlaceStartBtn.configure(state = NORMAL)\n    else:\n        PlaceStartBtn.configure(state = DISABLED)\n\n\ndef PlaceOutputDirChoose():\n    global PlaceOutputDir\n    global PlaceInputDirSel\n    global PlaceOutputDirSel\n    \n    PlaceOutputDir = filedialog.askdirectory(title=\"Выберите папку для отсортированных\")\n    if PlaceOutputDir:\n        PlaceOutputDirEntry.configure(state = NORMAL)\n        PlaceOutputDirEntry.delete(0,END)\n        PlaceOutputDirEntry.insert(0,str(Path(PlaceOutputDir).name))\n        PlaceOutputDirEntry.configure(state = DISABLED)\n        \n        PlaceOutputDirSel = True\n        print('PlaceOutputDir:', PlaceOutputDir)\n    else:\n        print('PlaceOutputDir is NOT defined')\n    \n    \n    if PlaceInputDirSel and PlaceOutputDirSel:\n        PlaceStartBtn.configure(state = NORMAL)\n    else:\n        PlaceStartBtn.configure(state = DISABLED)\n\n\ndef PlaceStart():\n\n    PlaceMainThreadthread = Thread(target=PlaceMainThread)\n    PlaceMainThreadthread.start()\n    timethread = Thread(target=PlaceTimeUpdater)\n    timethread.start()\n\n\ndef PlaceMainThread():\n    global PlaceWorking\n    global PlaceStartTime\n\n    global PlaceInputDir\n    global PlaceOutputDir\n    global PlaceFilesArray\n    \n    PlaceStartTime = time.time()\n    PlaceWorking = True\n    PlaceBlockGUI(True)\n    \n    for f in range(len(PlaceFilesArray)):\n        print(\"_________________________\")\n        print('Документ №{0}: {1}'.format(f+1, Path(PlaceFilesArray[f]).name))\n        \n        invoicedata = PlaceFileTextSearch(PlaceFilesArray[f])\n        if isinstance(invoicedata, list):\n            statustext = \"Обработка {0} из {1}\".format(f+1, len(PlaceFilesArray))\n            PlaceStatusLbl.config(text = str(statustext))\n            print('ИНН, КПП документа: {0}'.format(invoicedata))\n            \n            fileoutputdir = Path(PlaceOutputDir, invoicedata[1])\n            fileoutputdirexist = os.path.exists(fileoutputdir)\n            print('fileoutputdir: {0}, exists: {1}'.format(fileoutputdir, fileoutputdirexist))\n            \n            if not fileoutputdirexist:\n                os.makedirs(fileoutputdir)\n            \n            fileoutputpath = Path(fileoutputdir, Path(PlaceFilesArray[f]).name).as_posix()\n            shutil.move(PlaceFilesArray[f], fileoutputpath)\n            \n            \n        else:\n            print('Не найдено ИНН, КПП !')\n            msgbxlbl = ['В документе не найдено ИНН, КПП !', '{0}'.format(PlaceFilesArray[f])]\n            messagebox.showerror(\"\", \"\\n\".join(msgbxlbl))\n    \n    \n    PlaceStatusLbl.config(text = \"Обработка завершена !\")\n    PlaceWorking = False\n    PlaceBlockGUI(False)\n    PlaceInputDirCheck()\n\n\ndef PlaceFileTextSearch(file):\n\n    with open(file, 'rb') as pdftomine:\n        manager = PDFResourceManager()\n        laparams = LAParams()\n        dev = PDFPageAggregator(manager, laparams=laparams)\n        interpreter = PDFPageInterpreter(manager, dev)\n        pages = PDFPage.get_pages(pdftomine)\n\n        for pagenumber, page in enumerate(pages):\n            if pagenumber == 0:\n                interpreter.process_page(page)\n                layout 
= dev.get_result()\n \n for textbox in layout:\n if isinstance(textbox, LTText):\n for line in textbox:\n text = line.get_text().replace('\\n', '')\n if len(text) == 22 or len(text) == 20:\n invoiceinn = re.sub(\"[^0-9]\", \"\", (text.partition(\"/\")[0]))\n invoicekpp = re.sub(\"[^0-9]\", \"\", (text.partition(\"/\")[2]))\n if invoiceinn.isnumeric() and invoicekpp.isnumeric():\n if len(invoiceinn)==10 and len(invoicekpp)==9:\n #print(\"_________________________\")\n #print('ИНН документа: {0}'.format(invoiceinn))\n #print('КПП документа: {0}'.format(invoicekpp))\n invoicedata = [invoiceinn, invoicekpp]\n return invoicedata\n break\n return \"NONE\"\n\n\ndef PlaceBlockGUI(yes):\n if yes:\n PlaceInputDirBtn.configure(state = DISABLED)\n PlaceOutputDirBtn.configure(state = DISABLED)\n PlaceStartBtn.configure(state = DISABLED)\n else:\n PlaceInputDirBtn.configure(state = NORMAL)\n PlaceOutputDirBtn.configure(state = NORMAL)\n PlaceStartBtn.configure(state = NORMAL)\n\n\ndef PlaceTimeUpdater():\n global PlaceWorking\n global PlaceStartTime\n\n time.sleep(0.01)\n while PlaceWorking:\n CreateDocTime = time.time()\n result = CreateDocTime - PlaceStartTime\n result = datetime2.timedelta(seconds=round(result))\n PlaceTimeLbl.config(text = str(result))\n time.sleep(0.01)\n\n\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "albertkovach/Python", "sub_path": "templates/pdf/storesort.py", "file_name": "storesort.py", "file_ext": "py", "file_size_in_byte": 10149, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "tkinter.filedialog.askdirectory", "line_number": 134, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 134, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 139, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "tkinter.filedialog.askdirectory", "line_number": 180, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 180, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 184, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 201, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 203, "usage_type": "call"}, {"api_name": "time.time", "line_number": 215, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 221, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 230, "usage_type": "call"}, {"api_name": "os.path", "line_number": 230, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 234, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 236, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 237, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 243, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 243, "usage_type": "name"}, {"api_name": "pdfminer.pdfinterp.PDFResourceManager", "line_number": 255, "usage_type": "call"}, {"api_name": "pdfminer.layout.LAParams", "line_number": 256, "usage_type": "call"}, {"api_name": "pdfminer.converter.PDFPageAggregator", "line_number": 257, "usage_type": "call"}, {"api_name": "pdfminer.pdfinterp.PDFPageInterpreter", "line_number": 258, "usage_type": 
"call"}, {"api_name": "pdfminer.pdfpage.PDFPage.get_pages", "line_number": 259, "usage_type": "call"}, {"api_name": "pdfminer.pdfpage.PDFPage", "line_number": 259, "usage_type": "name"}, {"api_name": "pdfminer.layout.LTText", "line_number": 267, "usage_type": "argument"}, {"api_name": "re.sub", "line_number": 271, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 272, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 299, "usage_type": "call"}, {"api_name": "time.time", "line_number": 301, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 303, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 305, "usage_type": "call"}]}
+{"seq_id": "72839197686", "text": "from uuid import UUID\nfrom fastapi import APIRouter, Depends, Body\nfrom sqlalchemy.orm.attributes import flag_modified\nfrom ert_storage.database import Session, get_db\nfrom ert_storage import database_schema as ds, json_schema as js\nfrom ert_storage.json_schema.prior import (\n PriorConst,\n PriorTrig,\n PriorNormal,\n PriorLogNormal,\n PriorErtTruncNormal,\n PriorStdNormal,\n PriorUniform,\n PriorErtDUniform,\n PriorLogUniform,\n PriorErtErf,\n PriorErtDErf,\n)\nfrom typing import Any, Mapping, List, Type\n\n\nrouter = APIRouter(tags=[\"experiment\"])\n\n\n@router.get(\"/experiments\", response_model=List[js.ExperimentOut])\ndef get_experiments(\n *,\n db: Session = Depends(get_db),\n) -> List[js.ExperimentOut]:\n experiments = db.query(ds.Experiment).all()\n return [_experiment_from_db(exp) for exp in experiments]\n\n\n@router.get(\"/experiments/{experiment_id}\", response_model=js.ExperimentOut)\ndef get_experiment_by_id(\n *, db: Session = Depends(get_db), experiment_id: UUID\n) -> js.ExperimentOut:\n experiment = db.query(ds.Experiment).filter_by(id=experiment_id).one()\n return _experiment_from_db(experiment)\n\n\n@router.post(\"/experiments\", response_model=js.ExperimentOut)\ndef post_experiments(\n *,\n db: Session = Depends(get_db),\n ens_in: js.ExperimentIn,\n) -> js.ExperimentOut:\n experiment = ds.Experiment(name=ens_in.name)\n\n if ens_in.priors:\n db.add_all(\n ds.Prior(\n function=ds.PriorFunction.__members__[prior.function],\n experiment=experiment,\n name=name,\n argument_names=[x[0] for x in prior if isinstance(x[1], (float, int))],\n argument_values=[x[1] for x in prior if isinstance(x[1], (float, int))],\n )\n for name, prior in ens_in.priors.items()\n )\n\n db.add(experiment)\n db.commit()\n return _experiment_from_db(experiment)\n\n\n@router.get(\n \"/experiments/{experiment_id}/ensembles\", response_model=List[js.EnsembleOut]\n)\ndef get_experiment_ensembles(\n *, db: Session = Depends(get_db), experiment_id: UUID\n) -> List[ds.Ensemble]:\n return db.query(ds.Ensemble).join(ds.Experiment).filter_by(id=experiment_id).all()\n\n\n@router.put(\"/experiments/{experiment_id}/userdata\")\nasync def replace_experiment_userdata(\n *,\n db: Session = Depends(get_db),\n experiment_id: UUID,\n body: Any = Body(...),\n) -> None:\n \"\"\"\n Assign new userdata json\n \"\"\"\n experiment = db.query(ds.Experiment).filter_by(id=experiment_id).one()\n experiment.userdata = body\n db.commit()\n\n\n@router.patch(\"/experiments/{experiment_id}/userdata\")\nasync def patch_experiment_userdata(\n *,\n db: Session = Depends(get_db),\n experiment_id: UUID,\n body: Any = Body(...),\n) -> None:\n \"\"\"\n Update userdata json\n \"\"\"\n experiment = db.query(ds.Experiment).filter_by(id=experiment_id).one()\n experiment.userdata.update(body)\n flag_modified(experiment, \"userdata\")\n db.commit()\n\n\n@router.get(\"/experiments/{experiment_id}/userdata\", response_model=Mapping[str, Any])\nasync def get_experiment_userdata(\n *,\n db: Session = Depends(get_db),\n experiment_id: UUID,\n) -> Mapping[str, Any]:\n \"\"\"\n Get userdata json\n \"\"\"\n experiment = db.query(ds.Experiment).filter_by(id=experiment_id).one()\n return experiment.userdata\n\n\n@router.delete(\"/experiments/{experiment_id}\")\ndef delete_experiment(*, db: Session = Depends(get_db), experiment_id: UUID) -> None:\n experiment = db.query(ds.Experiment).filter_by(id=experiment_id).one()\n db.delete(experiment)\n db.commit()\n\n\nPRIOR_FUNCTION_TO_PYDANTIC: Mapping[ds.PriorFunction, 
Type[js.Prior]] = {\n ds.PriorFunction.const: PriorConst,\n ds.PriorFunction.trig: PriorTrig,\n ds.PriorFunction.normal: PriorNormal,\n ds.PriorFunction.lognormal: PriorLogNormal,\n ds.PriorFunction.ert_truncnormal: PriorErtTruncNormal,\n ds.PriorFunction.stdnormal: PriorStdNormal,\n ds.PriorFunction.uniform: PriorUniform,\n ds.PriorFunction.ert_duniform: PriorErtDUniform,\n ds.PriorFunction.loguniform: PriorLogUniform,\n ds.PriorFunction.ert_erf: PriorErtErf,\n ds.PriorFunction.ert_derf: PriorErtDErf,\n}\n\n\ndef prior_to_dict(prior: ds.Prior) -> dict:\n return (\n PRIOR_FUNCTION_TO_PYDANTIC[prior.function]\n .parse_obj(\n {key: val for key, val in zip(prior.argument_names, prior.argument_values)}\n )\n .dict()\n )\n\n\ndef experiment_priors_to_dict(experiment: ds.Experiment) -> Mapping[str, dict]:\n return {p.name: prior_to_dict(p) for p in experiment.priors}\n\n\ndef _experiment_from_db(exp: ds.Experiment) -> js.ExperimentOut:\n return js.ExperimentOut(\n id=exp.id,\n name=exp.name,\n ensemble_ids=exp.ensemble_ids,\n priors=experiment_priors_to_dict(exp),\n userdata=exp.userdata,\n )\n", "repo_name": "equinor/ert-storage", "sub_path": "src/ert_storage/endpoints/experiments.py", "file_name": "experiments.py", "file_ext": "py", "file_size_in_byte": 4847, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "76", "api": [{"api_name": "fastapi.APIRouter", "line_number": 22, "usage_type": "call"}, {"api_name": "ert_storage.database.Session", "line_number": 28, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 28, "usage_type": "call"}, {"api_name": "ert_storage.database.get_db", "line_number": 28, "usage_type": "argument"}, {"api_name": "ert_storage.database_schema.Experiment", "line_number": 30, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 25, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.ExperimentOut", "line_number": 25, "usage_type": "attribute"}, {"api_name": "ert_storage.json_schema", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 29, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.ExperimentOut", "line_number": 29, "usage_type": "attribute"}, {"api_name": "ert_storage.json_schema", "line_number": 29, "usage_type": "name"}, {"api_name": "ert_storage.database.Session", "line_number": 36, "usage_type": "name"}, {"api_name": "uuid.UUID", "line_number": 36, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 36, "usage_type": "call"}, {"api_name": "ert_storage.database.get_db", "line_number": 36, "usage_type": "argument"}, {"api_name": "ert_storage.database_schema.Experiment", "line_number": 38, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 38, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.ExperimentOut", "line_number": 34, "usage_type": "attribute"}, {"api_name": "ert_storage.json_schema", "line_number": 34, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.ExperimentOut", "line_number": 37, "usage_type": "attribute"}, {"api_name": "ert_storage.json_schema", "line_number": 37, "usage_type": "name"}, {"api_name": "ert_storage.database.Session", "line_number": 45, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.ExperimentIn", "line_number": 46, "usage_type": "attribute"}, {"api_name": "ert_storage.json_schema", "line_number": 46, "usage_type": 
"name"}, {"api_name": "fastapi.Depends", "line_number": 45, "usage_type": "call"}, {"api_name": "ert_storage.database.get_db", "line_number": 45, "usage_type": "argument"}, {"api_name": "ert_storage.database_schema.Experiment", "line_number": 48, "usage_type": "call"}, {"api_name": "ert_storage.database_schema", "line_number": 48, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.Prior", "line_number": 52, "usage_type": "call"}, {"api_name": "ert_storage.database_schema", "line_number": 52, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.PriorFunction", "line_number": 53, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 53, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.ExperimentOut", "line_number": 42, "usage_type": "attribute"}, {"api_name": "ert_storage.json_schema", "line_number": 42, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.ExperimentOut", "line_number": 47, "usage_type": "attribute"}, {"api_name": "ert_storage.json_schema", "line_number": 47, "usage_type": "name"}, {"api_name": "ert_storage.database.Session", "line_number": 71, "usage_type": "name"}, {"api_name": "uuid.UUID", "line_number": 71, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 71, "usage_type": "call"}, {"api_name": "ert_storage.database.get_db", "line_number": 71, "usage_type": "argument"}, {"api_name": "ert_storage.database_schema.Ensemble", "line_number": 73, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 73, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.Experiment", "line_number": 73, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 68, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.EnsembleOut", "line_number": 68, "usage_type": "attribute"}, {"api_name": "ert_storage.json_schema", "line_number": 68, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 72, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.Ensemble", "line_number": 72, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 72, "usage_type": "name"}, {"api_name": "ert_storage.database.Session", "line_number": 79, "usage_type": "name"}, {"api_name": "uuid.UUID", "line_number": 80, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 81, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 79, "usage_type": "call"}, {"api_name": "ert_storage.database.get_db", "line_number": 79, "usage_type": "argument"}, {"api_name": "fastapi.Body", "line_number": 81, "usage_type": "call"}, {"api_name": "ert_storage.database_schema.Experiment", "line_number": 86, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 86, "usage_type": "name"}, {"api_name": "ert_storage.database.Session", "line_number": 94, "usage_type": "name"}, {"api_name": "uuid.UUID", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 96, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 94, "usage_type": "call"}, {"api_name": "ert_storage.database.get_db", "line_number": 94, "usage_type": "argument"}, {"api_name": "fastapi.Body", "line_number": 96, "usage_type": "call"}, {"api_name": "ert_storage.database_schema.Experiment", "line_number": 101, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 101, "usage_type": "name"}, {"api_name": 
"sqlalchemy.orm.attributes.flag_modified", "line_number": 103, "usage_type": "call"}, {"api_name": "ert_storage.database.Session", "line_number": 110, "usage_type": "name"}, {"api_name": "uuid.UUID", "line_number": 111, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 110, "usage_type": "call"}, {"api_name": "ert_storage.database.get_db", "line_number": 110, "usage_type": "argument"}, {"api_name": "ert_storage.database_schema.Experiment", "line_number": 116, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 116, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 107, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 107, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 112, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 112, "usage_type": "name"}, {"api_name": "ert_storage.database.Session", "line_number": 121, "usage_type": "name"}, {"api_name": "uuid.UUID", "line_number": 121, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 121, "usage_type": "call"}, {"api_name": "ert_storage.database.get_db", "line_number": 121, "usage_type": "argument"}, {"api_name": "ert_storage.database_schema.Experiment", "line_number": 122, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 122, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 127, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.PriorFunction", "line_number": 127, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 127, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 127, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.Prior", "line_number": 127, "usage_type": "attribute"}, {"api_name": "ert_storage.json_schema", "line_number": 127, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.PriorFunction", "line_number": 128, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 128, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.PriorFunction", "line_number": 129, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 129, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.PriorFunction", "line_number": 130, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 130, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.PriorFunction", "line_number": 131, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 131, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.PriorFunction", "line_number": 132, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 132, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.PriorFunction", "line_number": 133, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 133, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.PriorFunction", "line_number": 134, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 134, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.PriorFunction", "line_number": 135, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 135, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.PriorFunction", "line_number": 136, "usage_type": 
"attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 136, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.PriorFunction", "line_number": 137, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 137, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.PriorFunction", "line_number": 138, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 138, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.prior.PriorConst", "line_number": 128, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.prior.PriorTrig", "line_number": 129, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.prior.PriorNormal", "line_number": 130, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.prior.PriorLogNormal", "line_number": 131, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.prior.PriorErtTruncNormal", "line_number": 132, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.prior.PriorStdNormal", "line_number": 133, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.prior.PriorUniform", "line_number": 134, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.prior.PriorErtDUniform", "line_number": 135, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.prior.PriorLogUniform", "line_number": 136, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.prior.PriorErtErf", "line_number": 137, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.prior.PriorErtDErf", "line_number": 138, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.Prior", "line_number": 142, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 142, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.Experiment", "line_number": 152, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 152, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 152, "usage_type": "name"}, {"api_name": "ert_storage.database_schema.Experiment", "line_number": 156, "usage_type": "attribute"}, {"api_name": "ert_storage.database_schema", "line_number": 156, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.ExperimentOut", "line_number": 157, "usage_type": "call"}, {"api_name": "ert_storage.json_schema", "line_number": 157, "usage_type": "name"}, {"api_name": "ert_storage.json_schema.ExperimentOut", "line_number": 156, "usage_type": "attribute"}, {"api_name": "ert_storage.json_schema", "line_number": 156, "usage_type": "name"}]}
+{"seq_id": "23857415515", "text": "## getting some errors\nimport cv2\nprint(cv2.__version__)\n\ngoFlag = 0\n\ndef mouse_click(event, x, y, flags, params):\n global x1, y1, x2, y2\n global goFlag \n if event == cv2.EVENT_LBUTTONDOWN:\n x1 = x\n y1 = y\n goFlag = 0\n if event == cv2.EVENT_LBUTTONUP:\n x2 = x\n y2 = y\n goFlag = 1\n\ncv2.namedWindow('piCam')\ncv2.setMouseCallback('piCam', mouse_click)\n\n## want to keep this aspect ratio\n## display width/height\ndispW=1280\ndispH=960\n\n## if not 4 camera will be upside down, or horizontally flipped \nflip = 4\n\n## laucnhes g streamer nvarguscamerasrc\n## Don't want to run at full full fps as camera can't handle it\n## BGR is blue green red\ncamSet='nvarguscamerasrc ! video/x-raw(memory:NVMM), width=3264, height=2464, format=NV12, framerate=21/1 ! nvvidconv flip-method='+str(flip)+' ! video/x-raw, width='+str(dispW)+', height='+str(dispH)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink'\n\n## camera is now ready to run\ncam = cv2.VideoCapture(camSet)\n\nwhile True:\n ## ret allows creating the var\n # #frame will get the last picture from the camera\n ret, frame=cam.read()\n ## Grabbing a frame and then showing the frame\n cv2.imshow('piCam', frame)\n\n if goFlag == 1:\n frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (255,0,0), 3)\n roi = frame[y1:y2, x1:x2]\n cv2.imshow('ros', roi)\n cv2.moveWindow('ros', 1400, 0)\n\n cv2.moveWindow('nanoCam', 0, 0)\n ## checks every ms to see if key is pressed\n if cv2.waitKey(1) ==ord('q'):\n break\n## need to release camera otherwise will still run\ncam.release()\ncv2.destroyAllWindows()", "repo_name": "hakbar0/py-projects", "sub_path": "openCV/openCV11-ROI_mouse.py", "file_name": "openCV11-ROI_mouse.py", "file_ext": "py", "file_size_in_byte": 1647, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "cv2.__version__", "line_number": 3, "usage_type": "attribute"}, {"api_name": "cv2.EVENT_LBUTTONDOWN", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.EVENT_LBUTTONUP", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.namedWindow", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.setMouseCallback", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.moveWindow", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.moveWindow", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 57, "usage_type": "call"}]}
+{"seq_id": "30330289891", "text": "import itertools\nimport numpy as np\nfrom GRABIM import LadderCircuitAnalysis\n\n# REFERENCES\n# [1] Thomas R. Cuthbert. Broadband Direct-Coupled and Matching RF Networks. TRCPEP, 1999.\n# [2] Wideband Circuit Design. Herbert J Carlin\n\n# Generating matrix for the grid search as specified in [1], Eq. 6.3.5\ndef generating_matrix(dim):\n lst = list(itertools.product([0, 1], repeat=dim))\n arr = np.array(lst)\n\n ncols = arr.shape[0]\n for i in range(0, ncols):\n s = arr[i, :];\n s = [-1 if x==0 else x for x in s]\n arr[i, :] = s;\n\n return arr;\n\n# Scale the values of the matching network previously normalized to fmax and 1 Ohm\n# Reference: [2] Eq. 6.3.3 \ndef scaleValues_wrt_R_f(x, code, R, f):\n \n for i in range(len(x)):\n if ((code[i] == 'CS') or (code[i] == 'CP')):\n x[i] = x[i]/(R*f)\n elif ((code[i] == 'LS') or (code[i] == 'LP')):\n x[i] = R*x[i]/(f)\n \n return x\n\n\n# GRID SEARCH ALGORITHM\n# Reference [1]: Table 5.5.3. The GRABIM Grid Search Algorithm Without Details.\n# INPUTS:\n# ZS: Source impedance (without normalization)\n# ZL: Load impedance (withour normalization)\n# freq: Range of frequencies where the optimization is desired\n# m: Number of frequency points where optimization must be done\n# code: Candidate topology\n#\n# OUTPUT:\n# v_scaled: Values of the network components (scaled)\ndef GridSearch(ZS, ZL, freq, m, code, verbose=0, delta_X=10):\n ####### Impedance and frequency normalization #######\n # Normalize impedance to 1 Ohm\n max_ZS = np.max(np.abs(ZS))\n max_ZL = np.max(np.abs(ZL))\n max_Z = np.max([max_ZS, max_ZL])\n\n ZS_norm = ZS/max_Z;\n ZL_norm = ZL/max_Z;\n\n # Normalize frequency to 1 rad/s\n max_f = freq[-1]\n f_norm = freq/(max_f*2*np.pi);\n #####################################################\n\n # Log file\n if (verbose):\n f = open(\"GridSearch.log\", \"w\")\n \n dim = len(code);\n\n # Grid building\n base_point = np.ones(dim); # Base point\n C = generating_matrix(dim);\n X = delta_X*base_point*C; # Space for data search\n\n rho_max = 1; # Maximum reflection coefficient\n x_best = base_point; # Best point\n\n n_searches = X.shape[0]; # Number of combinations\n\n while delta_X >= 0.025:\n found_better_x = 0;\n for k in range(0, n_searches):\n sk = delta_X*C[k];\n xk = base_point + sk;\n\n vk = np.exp(xk); # Convert the grid vector into a search space vector\n\n # Calculate the reflection coefficient over the whole frequency span\n rho_k = LadderCircuitAnalysis.get_Input_Reflection_Coeff(ZS_norm, ZL_norm, code, vk, f_norm);\n max_rho_k = np.max(np.abs(rho_k));# Get the maximum\n\n if (verbose):\n print(\"Testing: x=(\", xk, '), v=(', vk, ') -> rho = ', max_rho_k, file = f )\n\n\n if (max_rho_k < rho_max): # A better combination was found\n rho_max = max_rho_k;\n x_best = xk;\n if (verbose):\n print(\"A better point was found x=(\", xk, \"), v=\", vk, ') -> rho =', rho_max, file = f)\n found_better_x = 1; # Then, recenter the grid and examine the search space again (same refinement factor)\n\n base_point = x_best;\n if (found_better_x == 0):\n # After examining the whole search space, shrink the search space around the best point by 1/4\n if (verbose):\n print(\"Shrinking grid (delta_x = deltax/4)\", file = f)\n delta_X *= 0.25;\n else:\n if (verbose):\n print(\"Centering grid around: x=(\", x_best, \"), v=(\", np.exp(x_best), ')', file = f)\n\n # Get reflection coefficient and VSWR of the best point \n RL = LadderCircuitAnalysis.get_ReturnLoss_from_ReflectionCoefficient(rho_max);\n VSWR = 
LadderCircuitAnalysis.get_VSWR_from_ReflectionCoefficient(rho_max);\n if (verbose):\n print(\"Best point found:\", x_best, file = f)\n print(\"Best rho:\", rho_max, \" RL = \", RL, \" VSWR = \", VSWR, file = f)\n\n\n # Transform the grid point into the search space point\n v_best = np.exp(x_best);\n # Scale the result according to the previous normalization\n v_scaled = scaleValues_wrt_R_f(v_best, code, max_Z, max_f*2*np.pi);\n \n rho_max = LadderCircuitAnalysis.get_Input_Reflection_Coeff(ZS, ZL, code, v_scaled, freq);\n rho_max = np.max(np.abs(rho_max))\n RL = LadderCircuitAnalysis.get_ReturnLoss_from_ReflectionCoefficient(rho_max);\n\n if (verbose):\n print(\"Result (scaled)\", v_scaled, file = f)\n f.close()\n return v_scaled, RL\n\n\ndef RemoveIrrelevantComponents(code, v_best, freq, ZS, ZL):\n \n max_ZS = np.max(np.abs(ZS))\n max_ZL = np.max(np.abs(ZL))\n max_Z = np.max([max_ZS, max_ZL])\n \n min_ZS = np.min(np.abs(ZS))\n min_ZL = np.min(np.abs(ZL))\n min_Z = np.min([min_ZS, min_ZL])\n \n k = 0\n index_to_remove = [];\n for comp in code:\n if (comp == 'LS'):\n w = 2*np.pi*freq[-1]; # Impedance at the highest frequency\n X = w*v_best[k];\n print('X(LS) = ', X*min_Z)\n if (X < 0.5*min_Z):\n # Remove component\n index_to_remove = np.append(index_to_remove, k)\n elif (comp == 'LP'):\n w = 2*np.pi*freq[0]; # Impedance at the lowest frequency\n X = w*v_best[k];\n print('X(LP) = ', X*max_Z)\n if (X > 5*max_Z):\n # Remove component\n index_to_remove = np.append(index_to_remove, k)\n elif (comp == 'CS'):\n w = 2*np.pi*freq[0]; # Impedance at the lowest frequency\n X = 1/(w*v_best[k]);\n print('X(CS) = ', X*min_Z)\n if (X < 0.33*max_Z):\n # Remove component\n index_to_remove = np.append(index_to_remove, k)\n elif (comp == 'CP'):\n w = 2*np.pi*freq[-1]; # Impedance at the highest frequency\n X = 1/(w*v_best[k]);\n print('X(CP) = ', X*max_Z)\n if (X > 5*max_Z):\n # Remove component\n index_to_remove = np.append(index_to_remove, k)\n k += 1\n \n # Remove irrelevant components\n print('To remove: ', index_to_remove)\n code = np.delete(code, index_to_remove)\n v_best = np.delete(v_best, index_to_remove)\n \n return [code, v_best]", "repo_name": "andresmmera/GRABIM", "sub_path": "GRABIM/GridSearch.py", "file_name": "GridSearch.py", "file_ext": "py", "file_size_in_byte": 6375, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "itertools.product", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 83, "usage_type": "call"}, {"api_name": "GRABIM.LadderCircuitAnalysis.get_Input_Reflection_Coeff", "line_number": 86, "usage_type": "call"}, {"api_name": "GRABIM.LadderCircuitAnalysis", "line_number": 86, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 108, "usage_type": "call"}, {"api_name": 
"GRABIM.LadderCircuitAnalysis.get_ReturnLoss_from_ReflectionCoefficient", "line_number": 111, "usage_type": "call"}, {"api_name": "GRABIM.LadderCircuitAnalysis", "line_number": 111, "usage_type": "name"}, {"api_name": "GRABIM.LadderCircuitAnalysis.get_VSWR_from_ReflectionCoefficient", "line_number": 112, "usage_type": "call"}, {"api_name": "GRABIM.LadderCircuitAnalysis", "line_number": 112, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 121, "usage_type": "attribute"}, {"api_name": "GRABIM.LadderCircuitAnalysis.get_Input_Reflection_Coeff", "line_number": 123, "usage_type": "call"}, {"api_name": "GRABIM.LadderCircuitAnalysis", "line_number": 123, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 124, "usage_type": "call"}, {"api_name": "GRABIM.LadderCircuitAnalysis.get_ReturnLoss_from_ReflectionCoefficient", "line_number": 125, "usage_type": "call"}, {"api_name": "GRABIM.LadderCircuitAnalysis", "line_number": 125, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 147, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 154, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 161, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 168, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 179, "usage_type": "call"}]}
+{"seq_id": "35967018184", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport setuptools\n\nREQUIRED_PACKAGES = [\"absl-py\", \"numpy\", \"scipy\", \"jax\", \"jaxlib\", \"tensorflow\",\n \"flax\"]\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"jax-influence\",\n version=\"0.1\",\n description=\"Jax Influence.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/google-research/jax-influence\",\n author=\"Google Inc.\",\n packages=setuptools.find_packages(),\n license=\"Apache 2.0\",\n install_requires=REQUIRED_PACKAGES,\n)\n", "repo_name": "google-research/jax-influence", "sub_path": "pip_package/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 689, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 48, "dataset": "github-code", "pt": "76", "api": [{"api_name": "setuptools.setup", "line_number": 13, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "29689096246", "text": "from django.urls import path\r\nfrom . import views\r\n\r\n\r\nurlpatterns = [\r\n\r\n path('', views.myhtml, name='myhtml'),\r\n\r\n path('', views.welcome, name='welcome'),\r\n path('', views.home, name='home'),\r\n path('', views.topcats, name='topcats'),\r\n path('', views.inputshow, name='inputshow'),\r\n\r\n path('', views.login_user, name='login'),\r\n path('', views.logout_user, name='logout'),\r\n path('', views.register_user, name='register'),\r\n\r\n path('/', views.detail, name='detail'),\r\n path('/results/', views.results, name='results'),\r\n path('/vote/', views.vote, name='vote'),\r\n]", "repo_name": "Kaewkamphon62/Cats10TH", "sub_path": "myweb/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 653, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}]}
+{"seq_id": "18136794272", "text": "from game.players import BasePokerPlayer\nfrom agents.model import DQN, ConvNet, DDPG\nfrom .utils import *\nimport numpy as np\nimport torch\nimport random as rand\nimport copy\n\nround_map = {\"preflop\": 0, \"flop\": 1, \"turn\": 2, \"river\": 3}\nsuits = list(Card.SUIT_MAP.keys())\nranks = list(Card.RANK_MAP.keys())\n\ndef gen_cards_im(cards):\n a = torch.zeros(4, 13)\n for i, card in enumerate(cards):\n s = suits.index(card.suit)\n r = ranks.index(card.rank)\n a[s][r] = 1\n return torch.nn.functional.pad(a, (2, 2, 6, 7))\n\nclass DDPGPlayer(BasePokerPlayer):\n def __init__(self, do_train=True, model_path=\"./DDPG\", batch_size=128, capacity=5000, device=\"cuda:4\"):\n self.do_train = do_train\n self.model_path = model_path\n self.cache = []\n if self.do_train:\n self.model = DDPG(self.model_path, batch_size, capacity, c=device)\n else:\n self.model = torch.load(self.model_path, map_location=torch.device('cpu'))\n self.model.actor = self.model.actor.to(device)\n self.model.act_target = self.model.act_target.to(device)\n self.model.critic = self.model.critic.to(device)\n self.model.cri_target = self.model.cri_target.to(device)\n self.model.c = device\n \n self.watch = ConvNet().to(\"cpu\")\n self.watch.load_state_dict(torch.load(\"./embedding/model-999.pt\"))\n self.step = 0\n self.update_step = 5000\n self.last_image = None\n self.last_features = None\n self.last_action = None\n \n self.CHE = 0\n \n #self.model = torch.load(model_path, map_location=lambda storage, loc: storage)\n\n def declare_action(self, valid_actions, hole_card, round_state):\n #print(self.step)\n community_card = round_state[\"community_card\"]\n hole_card = gen_cards(hole_card)\n community_card = gen_cards(community_card)\n hc = gen_cards_im(hole_card)\n cc = gen_cards_im(community_card)\n un = hc + cc\n img = torch.stack([hc, cc, un])\n with torch.no_grad():\n wr = self.watch(img)\n #wr = estimate_hole_card_win_rate(nb_simulation=5000, nb_player=2, hole_card=hole_card, community_card=community_card)\n #print(wr)\n features = [round_state['pot']['main']['amount'], round_state['small_blind_pos'], round_state['big_blind_pos'], round_state['dealer_btn'], round_state['next_player'], round_state['round_count'], wr]\n features.extend([s['stack'] for s in round_state['seats'] if s['uuid'] == self.uuid])\n features.extend([s['stack'] for s in round_state['seats'] if s['uuid'] != self.uuid])\n features.extend([0 if i != round_map[round_state['street']] else 1 for i in range(4)])\n features.append([s['stack'] for s in round_state['seats'] if s['uuid'] == self.uuid][0] - self.street_stack)\n features.append([s['stack'] for s in round_state['seats'] if s['uuid'] == self.uuid][0] - self.round_stack)\n features.append(round_state['pot']['main']['amount'] - [s['stack'] for s in round_state['seats'] if s['uuid'] == self.uuid][0] + self.round_stack)\n features = torch.Tensor(features)\n \n \n money = float(self.model.choose_action(img, features, valid_actions))\n \n if valid_actions[2][\"amount\"][\"max\"] == -1:\n if money < -10:\n action = valid_actions[0][\"action\"]\n amount = valid_actions[0][\"amount\"]\n else:\n action, amount = valid_actions[1]['action'], valid_actions[1]['amount']\n \n elif money < -10:\n action = valid_actions[0][\"action\"]\n amount = valid_actions[0][\"amount\"]\n elif money <= valid_actions[1][\"amount\"]:\n action = valid_actions[1][\"action\"]\n amount = valid_actions[1][\"amount\"]\n else:\n action = valid_actions[2][\"action\"]\n amount = min(max(money, 
valid_actions[2][\"amount\"][\"min\"]), valid_actions[2][\"amount\"][\"max\"])\n \n if self.do_train:\n if self.last_image != None:\n self.cache.append([self.last_image, self.last_features, self.last_action, img, features])\n self.last_image = copy.deepcopy(img)\n self.last_features = copy.deepcopy(features)\n self.last_action = copy.deepcopy(amount) if money > 0 else copy.deepcopy(money)\n #print(action, amount, money)\n \n if self.step > self.update_step:\n if self.CHE == 0:\n print(\"-----START_LEARNING-------\")\n self.CHE = 1\n self.model.learn()\n \n self.step += 1\n \n return action, amount\n\n def receive_game_start_message(self, game_info):\n self.start_stack = game_info[\"rule\"][\"initial_stack\"]\n\n def receive_round_start_message(self, round_count, hole_card, seats):\n self.round_stack = [s['stack'] for s in seats if s['uuid'] == self.uuid][0]\n\n def receive_street_start_message(self, street, round_state):\n self.street_stack = [s['stack'] for s in round_state['seats'] if s['uuid'] == self.uuid][0]\n\n def receive_game_update_message(self, new_action, round_state):\n pass\n\n def receive_round_result_message(self, winners, hand_info, round_state):\n# print(\"WWW\")\n if self.do_train:\n if self.last_image != None:\n reward = [s['stack'] for s in round_state['seats'] if s['uuid'] == self.uuid][0] - self.round_stack\n# print(reward)\n# print(winners)\n# print(hand_info)\n reward = (2 * (reward >= 0) - 1) * np.log(1 + abs(reward))\n #print(reward)\n for C in self.cache:\n self.model.store_memory(C[0], C[1], C[2], reward, C[3], C[4])\n self.model.store_memory(self.last_image, self.last_features, self.last_action, reward, self.last_image, self.last_features)\n self.cache = []\n self.last_image = None\n self.last_features = None\n self.last_action = None\n\n\ndef setup_ai():\n return DDPGPlayer()\n\n", "repo_name": "yahcreepers/FAI-Final_Project", "sub_path": "agents/DDPG.py", "file_name": "DDPG.py", "file_ext": "py", "file_size_in_byte": 6191, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "torch.zeros", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn.functional.pad", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "attribute"}, {"api_name": "game.players.BasePokerPlayer", "line_number": 21, "usage_type": "name"}, {"api_name": "agents.model.DDPG", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 29, "usage_type": "call"}, {"api_name": "agents.model.ConvNet", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 68, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 93, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 94, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 128, "usage_type": "call"}]}
+{"seq_id": "29647105347", "text": "\"\"\"\nThe keys for the Flood Fill are:\n - Get valid neighbors: [up, down , left, right] -> within the matrix dimensions\n - Color of the source pixel\n - If source pixel color and color are the same: return image\n\n1. Iterate over each of the neighbors\n2. If the color matches the source pixel, update color\n2. Call recursively for the neighbors of the neighbors, pass updated image matrix\n\"\"\"\nfrom typing import List\n\nclass Solution:\n moves = [\n [-1, 0],\n [1, 0],\n [0, 1],\n [0, -1],\n ]\n\n def fill(self, image, sr, sc, color, def_color, m, n):\n for move in self.moves:\n r = sr+move[0]\n c = sc+move[1]\n if 0 <= r < m and 0 <= c < n:\n if image[r][c] == def_color:\n image[r][c] = color\n self.fill(image, r, c, color, def_color, m, n)\n\n return image\n\n\n def floodFill(self, image: List[List[int]], sr: int, sc: int, color: int) -> List[List[int]]:\n m = len(image)\n n = len(image[0])\n def_color = image[sr][sc]\n if color == def_color:\n return image\n image[sr][sc] = color\n result = self.fill(image, sr, sc, color, def_color, m, n)\n\n return result\n\nif __name__ == \"__main__\":\n sol = Solution()\n image1 = [\n [1,1,1],\n [1,1,0],\n [1,0,1],\n ]\n sr, sc = 1, 1\n color = 2\n result1 = sol.floodFill(image1, sr, sc, color)\n print(\"Result 1: \", result1)\n image2 = [\n [0,0,0],\n [0,0,0],\n ]\n sr, sc = 0, 0\n color = 0\n result2 = sol.floodFill(image2, sr, sc, color)\n print(\"Result 2: \", result2)\n", "repo_name": "salasberryfin/leetcode-challenges", "sub_path": "python/733-flood-fill/solution.py", "file_name": "solution.py", "file_ext": "py", "file_size_in_byte": 1689, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "typing.List", "line_number": 33, "usage_type": "name"}]}
+{"seq_id": "35979163597", "text": "from plyer import notification\nimport requests\nfrom datetime import datetime\n# from bs4 import BeautifulSoup\nimport json\nfrom datetime import date\nimport time \n\ndef notifyme(title,message):\n notification.notify(\n title = title,\n message = message,\n app_icon = \"C:\\\\Users\\\\yashk\\\\Videos\\\\work\\\\CoronaVisrus Notification System\\\\icon.ico\",\n timeout = 20\n )\n\ndef getData(url):\n r = requests.get(url)\n return r.json()\n\nif __name__ == \"__main__\":\n while True:\n myJSONdata = getData(\"https://api.covid19india.org/states_daily.json\")\n datetime = datetime.now()\n today = date.today()\n today= str(today)\n print(type(today))\n for i in myJSONdata.get(\"states_daily\")[-3:]:\n # print(i.get('date'))\n # print(\"\\ntoday date is : \",today) \n # print('\\n')\n # print('\\n')\n # if i.get('date')==\"07-Aug-20\":\n date = i.get('date')\n if i.get('status')==\"Recovered\":\n Recovered = i.get('gj')\n if i.get('status')==\"Confirmed\":\n Confirmed = i.get('gj')\n if i.get('status')==\"Deceased\":\n Deceased = i.get('gj')\n final_string = f\"Confirmed : {Confirmed}\\nRecovered : {Recovered}\\nDeceased : {Deceased}\\nDate : {today}\"\n\n notifyme(\"State : Gujrat\",f\"Last Update : {date}\\n\"+final_string)\n time.sleep(3600)", "repo_name": "Yash-Patel01/Coronavirus-Cases-Notification-System", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1430, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "plyer.notification.notify", "line_number": 10, "usage_type": "call"}, {"api_name": "plyer.notification", "line_number": 10, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 25, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 34, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 43, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 44, "usage_type": "call"}]}
+{"seq_id": "7851229645", "text": "from controller import Controller\r\nfrom controller import cols_to_col\r\nimport numpy as np\r\nimport pygame, sys, colorsys\r\n\r\npygame.init()\r\n\r\ncon = Controller()\r\n\r\ndisplay = pygame.display.set_mode((256, 256))\r\n\r\nbasecol = [0, 1, 1]\r\ncol = basecol\r\nupdate = True\r\n\r\nwhile True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n elif event.type == pygame.MOUSEMOTION:\r\n if event.buttons[0] == 1:\r\n pos = event.pos\r\n col[1] = (256-pos[0])/256\r\n col[2] = (256-pos[1])/256\r\n elif event.type == pygame.MOUSEWHEEL:\r\n col[0] += event.y/360\r\n if col[0] > 1: col[0] = 0\r\n elif col[0] < 0: col[0] = 1\r\n update = True\r\n \r\n n_rgb = colorsys.hsv_to_rgb(*col)\r\n rgb = [round(n_rgb[0]*255), round(n_rgb[1]*255), round(n_rgb[2]*255)]\r\n cin = cols_to_col(*rgb)\r\n print(col, rgb)\r\n con.send(cin, wait=(1/60)*1000, log=False)\r\n\r\n if update:\r\n update = False\r\n display.fill((0, 0, 0))\r\n for x in range(256):\r\n for y in range(256):\r\n n_rgb = colorsys.hsv_to_rgb(col[0], (256-x)/256, (256-y)/256)\r\n rgb = [round(n_rgb[0]*255), round(n_rgb[1]*255), round(n_rgb[2]*255)]\r\n display.set_at((x, y), rgb)\r\n pygame.display.flip()", "repo_name": "jazzyocean/rgbtq", "sub_path": "color.py", "file_name": "color.py", "file_ext": "py", "file_size_in_byte": 1370, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "pygame.init", "line_number": 6, "usage_type": "call"}, {"api_name": "controller.Controller", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.MOUSEMOTION", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEWHEEL", "line_number": 25, "usage_type": "attribute"}, {"api_name": "colorsys.hsv_to_rgb", "line_number": 31, "usage_type": "call"}, {"api_name": "controller.cols_to_col", "line_number": 33, "usage_type": "call"}, {"api_name": "colorsys.hsv_to_rgb", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 45, "usage_type": "attribute"}]}
+{"seq_id": "10947809762", "text": "import cv2\nimport torch\nimport numpy as np\nimport torch.nn.functional as F\n\nfrom torch.utils.data import Dataset\nfrom data.preparation import get_df_series\nfrom params import PATIENT_TARGETS, IMG_TARGETS_EXTENDED\n\n\ndef to_one_hot_patient(y):\n \"\"\"\n Convert a patient target tensor to a one-hot encoded representation.\n Each column with index less than or equal to 1 (bowel, extrav) are unchanged.\n Columns with index greater than 1 are one-hot encoded based on their original class values.\n\n Args:\n y (torch.Tensor): The input multi-class tensor of shape (N, C), where N is the number\n of samples and C is the number of classes.\n\n Returns:\n torch.Tensor: A one-hot encoded tensor of shape (N, K), where K is the sum of the number\n of classes in each column of the input tensor.\n \"\"\"\n new_y = []\n for i in range(y.size(1)):\n if i <= 1:\n new_y.append(y[:, i].unsqueeze(-1))\n else:\n y_ = (\n torch.zeros(y.size(0), 3)\n .to(y.device)\n .scatter(1, y[:, i].view(-1, 1).long(), 1)\n )\n new_y.append(y_)\n return torch.cat(new_y, -1)\n\n\ndef get_frames(frame, n_frames, frames_c, stride=1, max_frame=100):\n \"\"\"\n Calculate a sequence of frame indices based on the specified parameters.\n If stride is -1, sample n_frames from 0 to max_frame using linear spacing.\n\n Args:\n frame (int): The central frame index around which the sequence is generated.\n n_frames (int): The number of frames in the sequence.\n frames_c (int): The number of frames to be repeated and offset around each frame.\n stride (int, optional): The step size between frames. Defaults to 1.\n max_frame (int, optional): The maximum frame index allowed. Defaults to 100.\n\n Returns:\n numpy.ndarray: An array of frame indices representing the calculated sequence.\n \"\"\"\n if stride == -1:\n frames = np.linspace(0, max_frame, n_frames + 4, endpoint=True, dtype=int)[\n 2:-2\n ]\n\n else:\n frames = np.arange(n_frames) * stride\n frames = frames - frames[n_frames // 2] + frame\n\n if frames_c:\n offset = np.tile(np.arange(-1, 2) * frames_c, len(frames))\n frames = np.repeat(frames, 3) + offset\n\n if frames.min() < 0:\n frames -= frames.min()\n elif frames.max() > max_frame:\n frames += max_frame - frames.max()\n\n frames = np.clip(frames, 0, max_frame)\n return frames\n\n\nclass AbdominalDataset(Dataset):\n \"\"\"\n Dataset for training 2D classification models.\n\n Attributes:\n df_img (pandas DataFrame): Metadata containing image information.\n df_patient (pandas DataFrame): Metadata containing patient information.\n transforms (albu transforms): Transforms to apply to the images.\n frames_chanel (int): The number of frames to consider for channel stacking.\n n_frames (int): The number of frames to use.\n stride (int): The step size between frames.\n train (bool): Flag indicating whether the dataset is for training.\n classes (list): List of target classes.\n targets (numpy.ndarray): Array of patient targets.\n max_frames (dict): Dictionary of maximum frames per series.\n \"\"\"\n def __init__(\n self,\n df_patient,\n df_img,\n transforms=None,\n frames_chanel=0,\n n_frames=0,\n stride=1,\n train=False,\n ):\n \"\"\"\n Constructor.\n\n Args:\n df_patient (pandas DataFrame): Metadata containing patient information.\n df_img (pandas DataFrame): Metadata containing image information.\n transforms (albu transforms, optional): Transforms to apply to images and masks. Defaults to None.\n frames_chanel (int, optional): Number of frames to consider for channel stacking. 
Defaults to 0.\n n_frames (int, optional): The number of frames to use. Defaults to 0.\n stride (int, optional): The step size between frames. Defaults to 1.\n train (bool, optional): Flag indicating whether the dataset is for training. Defaults to False.\n \"\"\"\n self.df_img = df_img\n self.df_patient = df_patient\n self.transforms = transforms\n self.frames_chanel = frames_chanel\n self.n_frames = n_frames\n self.stride = stride\n self.train = train\n\n self.classes = IMG_TARGETS_EXTENDED\n\n self.targets = df_patient[PATIENT_TARGETS].values\n self.max_frames = dict(\n df_img[[\"series\", \"frame\"]].groupby(\"series\").max()[\"frame\"]\n )\n\n def __len__(self):\n \"\"\"\n Get the length of the dataset.\n\n Returns:\n int: Length of the dataset.\n \"\"\"\n return len(self.df_patient) * len(self.classes)\n\n def __getitem__(self, idx):\n \"\"\"\n Item accessor.\n Frames are sampled the following way:\n - kidney / liver / spleen / negative bowel : Inside the organ.\n - positive bowel / positive extravasation : Using the frame-level labels.\n - Negative extravasation : Anywhere\n\n Args:\n idx (int): Index.\n\n Returns:\n torch.Tensor: Image as a tensor of shape [(N) x C, H, W].\n torch.Tensor: Label as a tensor of shape [9].\n torch.Tensor: Aux label as a tensor of shape [9]. Not used.\n \"\"\"\n tgt_idx = idx % len(self.classes)\n tgt = self.classes[tgt_idx]\n\n idx = idx // len(self.classes)\n patient = self.df_patient[\"patient_id\"].values[idx]\n y_patient = self.targets[idx]\n\n df_img = self.df_img[self.df_img[\"patient_id\"] == patient]\n\n # Restrict to considered class\n if (df_img[self.classes[tgt_idx]] == y_patient[tgt_idx]).max():\n df_img = df_img[df_img[self.classes[tgt_idx]] == y_patient[tgt_idx]]\n else: # Class has no match, use argmax - should not happen\n raise NotImplementedError\n\n # Restrict to segmentation > 0.9 for negatives\n if not y_patient[tgt_idx]:\n seg = df_img[f'pred_{tgt.split(\"_\")[0]}'].values\n seg = seg / (seg.max() + 1e-6)\n df_img = df_img[seg > 0.9]\n\n # Restrict to one series\n series = (\n np.random.choice(df_img[\"series\"].unique())\n if self.train\n else df_img[\"series\"].values[0]\n )\n df_img = df_img[df_img[\"series\"] == series]\n\n # Sort by frame\n df_img = df_img.sort_values(\"frame\").reset_index(drop=True)\n\n # Pick a row\n if self.train:\n ps = np.exp(\n -(\n (\n (np.arange(len(df_img)) - len(df_img) // 2)\n / (0.4 * len(df_img))\n )\n ** 2\n )\n ) # gaussian\n row_idx = np.random.choice(len(df_img), p=ps / ps.sum())\n row = df_img.iloc[row_idx]\n else:\n row = df_img.iloc[len(df_img) // 2] # center\n\n if self.frames_chanel > 0 or self.n_frames > 1:\n frame = row.frame\n\n if self.n_frames <= 1:\n frame = np.clip(\n frame,\n self.frames_chanel,\n self.max_frames[series] - self.frames_chanel,\n )\n frames = [frame - self.frames_chanel, frame, frame + self.frames_chanel]\n else:\n frames = get_frames(\n frame,\n self.n_frames,\n self.frames_chanel,\n stride=self.stride,\n max_frame=self.max_frames[series],\n )\n\n prefix = row.path.rsplit(\"_\", 1)[0]\n paths = [prefix + f\"_{f:04d}.png\" for f in frames]\n image = np.array([cv2.imread(path, 0) for path in paths]).transpose(1, 2, 0)\n\n else:\n frame = row.frame\n image = cv2.imread(row.path)\n\n image = image.astype(np.float32) / 255.0\n\n if self.transforms:\n transformed = self.transforms(image=image)\n image = transformed[\"image\"]\n\n y_patient = torch.tensor(y_patient, dtype=torch.float)\n y_img = torch.tensor(row[self.classes], dtype=torch.float)\n\n if y_img.size(-1) == 
5: # Patient level - TODO : y_patient ?\n y_img = to_one_hot_patient(y_img.unsqueeze(0))[0]\n\n if self.n_frames > 1:\n if self.frames_chanel:\n image = image.view(\n self.n_frames, 3, image.size(1), image.size(2)\n )\n else:\n image = (\n image.view(1, self.n_frames, image.size(1), image.size(2))\n .repeat(3, 1, 1, 1)\n .transpose(0, 1)\n )\n else:\n if not self.frames_chanel:\n image = image.repeat(3, 1, 1)\n\n return image, y_img, y_patient\n\n\nclass AbdominalCropDataset(Dataset):\n \"\"\"\n Dataset for training 2.5D crop classification models.\n\n Attributes:\n df_img (pandas DataFrame): Metadata containing image information.\n df_patient (pandas DataFrame): Metadata containing patient information.\n df_series (pandas DataFrame): Metadata containing information about image series.\n transforms (albu transforms): Transforms to apply to the images.\n frames_chanel (int): The number of frames to consider for channel stacking.\n n_frames (int): The number of frames to use.\n stride (int): The step size between frames.\n train (bool): Flag indicating whether the dataset is for training.\n sigmas (dict): Dictionary containing Gaussian sigmas for various organs.\n \"\"\"\n def __init__(\n self,\n df_patient,\n df_img,\n transforms=None,\n frames_chanel=0,\n n_frames=0,\n stride=1,\n train=False,\n use_soft_target=False,\n df_series=None,\n ):\n \"\"\"\n Constructor for the AbdominalCropDataset class.\n\n Args:\n df_patient (pandas DataFrame): Metadata containing patient information.\n df_img (pandas DataFrame): Metadata containing image information.\n transforms (albu transforms, optional): Transforms to apply to images and masks. Defaults to None.\n frames_chanel (int, optional): Number of frames to consider for channel stacking. Defaults to 0.\n n_frames (int, optional): The number of frames to use. Defaults to 0.\n stride (int, optional): The step size between frames. Defaults to 1.\n train (bool, optional): Flag indicating whether the dataset is for training. Defaults to False.\n use_soft_target (bool, optional): Flag indicating the use of soft targets. Defaults to False.\n df_series (pandas DataFrame, optional): Metadata containing info about series. Defaults to None.\n \"\"\"\n self.df_img = df_img\n self.df_patient = df_patient\n self.df_series = (\n get_df_series(df_patient, df_img) if df_series is None else df_series\n )\n self.targets = self.df_series[\"target\"].values\n\n self.transforms = transforms\n self.frames_chanel = frames_chanel\n self.n_frames = n_frames\n self.stride = stride\n\n self.train = train\n\n self.sigmas = {\"kidney\": 0.15, \"spleen\": 0.2, \"liver\": 0.3}\n\n def __len__(self):\n \"\"\"\n Get the length of the dataset.\n\n Returns:\n int: Length of the dataset.\n \"\"\"\n return len(self.df_series)\n\n def __getitem__(self, idx):\n \"\"\"\n Item accessor. 
Samples a random frame inside the organ.\n\n        Args:\n            idx (int): Index.\n\n        Returns:\n            torch.Tensor: Image as a tensor of shape [(N,) C, H, W].\n            torch.Tensor: Label as a tensor of shape [3].\n            int: Dummy value.\n        \"\"\"\n        img = np.load(self.df_series[\"img_path\"].values[idx])\n\n        organ = self.df_series[\"organ\"].values[idx]\n        if organ == \"kidney\":\n            d = int(img.shape[1] * 3 / 4)\n            img = np.concatenate([img[:, :, :d], img[:, :, -d:]], -1)\n\n        # Pick frame(s)\n        if self.train:\n            ps = np.exp(\n                -(\n                    (\n                        (np.arange(len(img)) - len(img) // 2)\n                        / (self.sigmas[organ] * len(img))\n                    )\n                    ** 2\n                )\n            ) # gaussian\n            m = 5 + self.stride * (self.n_frames - 1) + self.frames_chanel\n            ps[:m] = 0 # Stay in bounds\n            ps[-m:] = 0 # Stay in bounds\n            if ps.max():\n                frame = np.random.choice(len(img), p=ps / ps.sum())\n            else:\n                frame = len(img) // 2 + np.random.choice([-2, -1, 0, 1, 2])\n        else:\n            frame = len(img) // 2 # center\n\n        frames = get_frames(\n            frame,\n            self.n_frames,\n            self.frames_chanel,\n            stride=self.stride,\n            max_frame=len(img) - 1,\n        )\n\n        # Load\n        image = img[np.array(frames)].transpose(1, 2, 0)\n        image = image.astype(np.float32) / 255.0\n\n        # Augment\n        if self.transforms:\n            transformed = self.transforms(image=image)\n            image = transformed[\"image\"]\n\n        y_img = torch.zeros(3, dtype=torch.float)\n        y_img[self.targets[idx]] = 1\n\n        # Reshape\n        if self.n_frames > 1:\n            if self.frames_chanel:\n                image = image.view(\n                    self.n_frames, 3, image.size(1), image.size(2)\n                ) # .transpose(0, 1)\n            else:\n                image = (\n                    image.view(1, self.n_frames, image.size(1), image.size(2))\n                    .repeat(2, 1, 1, 1)\n                    .transpose(0, 1)\n                )\n\n        return image, y_img, 0\n\n\nclass AbdominalInfDataset(Dataset):\n    \"\"\"\n    Dataset for inferring 2D classification models.\n    It is optimized to compute the CNN forward only once when models are 2.5D:\n    The trick is to extract CNN features for all images,\n    and then compute the sequential head by retrieving the indexed features.\n\n    Attributes:\n        df (pandas DataFrame): Metadata containing image information.\n        transforms (albu transforms): Transforms to apply to the images.\n        frames_chanel (int): The number of frames to consider for channel stacking.\n        n_frames (int): The number of frames to use.\n        stride (int): The step size between frames.\n        imgs (dict): Dictionary for storing loaded images.\n        features (list): List of precomputed features.\n        single_frame (bool): Flag indicating if only a single frame is used for each item.\n    \"\"\"\n    def __init__(\n        self,\n        df,\n        transforms=None,\n        frames_chanel=0,\n        n_frames=1,\n        stride=1,\n        imgs={},\n        features=[],\n        single_frame=False,\n    ):\n        \"\"\"\n        Constructor.\n        The single frame flag is used for features precomputation.\n\n        Args:\n            df (pandas DataFrame): Metadata containing image information.\n            transforms (albu transforms, optional): Transforms to apply to images and masks. Defaults to None.\n            frames_chanel (int, optional): Number of frames to consider for channel stacking. Defaults to 0.\n            n_frames (int, optional): The number of frames to use. Defaults to 1.\n            stride (int, optional): The step size between frames. Defaults to 1.\n            imgs (dict, optional): Dictionary for storing loaded images. Defaults to an empty dictionary.\n            features (list, optional): List of precomputed features. Defaults to an empty list.\n            single_frame (bool, optional): Whether a single frame is used for each item. 
Defaults to False.\n        \"\"\"\n        self.df = df\n        self.info = self.df[[\"path\", \"patient_id\", \"series\", \"frame\"]].values\n        self.transforms = transforms\n\n        self.frames_chanel = frames_chanel\n        self.n_frames = n_frames\n        self.stride = stride\n        self.single_frame = single_frame\n\n        self.max_frames = dict(df[[\"series\", \"frame\"]].groupby(\"series\").max()[\"frame\"])\n\n        self.imgs = imgs\n        self.features = features\n\n        if len(features):\n            self.features = dict(zip(self.get_keys(), features))\n\n    def __len__(self):\n        \"\"\"\n        Get the length of the dataset.\n\n        Returns:\n            int: Length of the dataset.\n        \"\"\"\n        return len(self.df)\n\n    def get_keys(self):\n        \"\"\"\n        Get keys for indexing features.\n\n        Returns:\n            list: List of keys.\n        \"\"\"\n        keys = []\n        for idx in range(len(self.df)):\n            path, patient, series, frame = self.info[idx]\n            frames = get_frames(\n                frame,\n                1,\n                self.frames_chanel,\n                stride=1,\n                max_frame=self.max_frames[series],\n            )\n            key = f'{patient}_{series}_{\"-\".join(list(frames.astype(str)))}'\n            keys.append(key)\n        return keys\n\n    def _getitem_feature(self, idx):\n        \"\"\"\n        Item accessor for features.\n\n        Args:\n            idx (int): Index.\n\n        Returns:\n            np.ndarray: Features.\n            int: Dummy value.\n            int: Dummy value.\n        \"\"\"\n        path, patient, series, frame = self.info[idx]\n\n        all_frames = get_frames(\n            frame,\n            self.n_frames,\n            self.frames_chanel,\n            stride=self.stride,\n            max_frame=self.max_frames[series],\n        )\n        all_frames = all_frames.reshape(-1, 3)\n\n        fts = []\n        for frames in all_frames:\n            key = f'{patient}_{series}_{\"-\".join(list(frames.astype(str)))}'\n            fts.append(self.features[key])\n        fts = np.array(fts)\n        return fts, 0, 0\n\n    def __getitem__(self, idx):\n        \"\"\"\n        Item accessor.\n        Refer to _getitem_feature if features are precomputed.\n\n        Args:\n            idx (int): Index.\n\n        Returns:\n            torch.Tensor: Image as a tensor.\n            int: Dummy value.\n            int: Dummy value.\n        \"\"\"\n        if len(self.features):\n            return self._getitem_feature(idx)\n\n        path, patient, series, frame = self.info[idx]\n\n        if self.single_frame:\n            frames = get_frames(\n                frame,\n                1,\n                self.frames_chanel,\n                stride=1,\n                max_frame=self.max_frames[series],\n            )\n        else:\n            frames = get_frames(\n                frame,\n                self.n_frames,\n                self.frames_chanel,\n                stride=self.stride,\n                max_frame=self.max_frames[series],\n            )\n\n        paths = [path.rsplit(\"_\", 1)[0] + f\"_{f:04d}.png\" for f in frames]\n\n        image = []\n        for path, frame in zip(paths, frames):\n            try:\n                img = self.imgs[path]\n            except Exception:\n                img = cv2.imread(path, 0)\n                if not ((idx + 1) % 10000): # clear buffer every 10000 items\n                    self.imgs = {}\n                self.imgs[path] = img\n\n            image.append(img)\n\n        image = np.array(image).transpose(1, 2, 0)\n        image = image.astype(np.float32) / 255.0\n\n        if self.transforms:\n            transformed = self.transforms(image=image)\n            image = transformed[\"image\"]\n\n        if not self.single_frame:\n            if self.n_frames > 1:\n                if self.frames_chanel:\n                    image = image.view(\n                        self.n_frames, 3, image.size(1), image.size(2)\n                    )\n                else:\n                    image = (\n                        image.view(1, self.n_frames, image.size(1), image.size(2))\n                        .repeat(3, 1, 1, 1)\n                        .transpose(0, 1)\n                    )\n            # else:\n            if image.size(0) == 1:\n                image = image.repeat(3, 1, 1)\n\n        return image, 0, 0\n\n\nclass SegDataset(Dataset):\n    \"\"\"\n    Dataset for training segmentation models.\n    Masks are not used in the pipeline here, we only use the classification part.\n\n    Attributes:\n        df (pandas DataFrame): Metadata containing image and mask information.\n        for_classification (bool): Flag indicating whether the dataset is used for classification.\n        use_soft_target (bool): Flag 
indicating whether soft targets are used.\n transforms (albu transforms): Transforms to apply to images and masks.\n\n \"\"\"\n def __init__(\n self,\n df,\n for_classification=True,\n use_soft_target=False,\n transforms=None,\n ):\n \"\"\"\n Constructor for the SegDataset class.\n\n Args:\n df (pandas DataFrame): Metadata containing image and mask information.\n for_classification (bool, optional): Whether the dataset is used for classif. Defaults to True.\n use_soft_target (bool, optional): Whether soft targets are used. Defaults to False.\n transforms (albu transforms, optional): Transforms to apply to images and masks. Defaults to None.\n \"\"\"\n self.df = df\n self.transforms = transforms\n self.for_classification = for_classification\n\n self.img_paths = df[\"img_path\"].values\n self.mask_paths = df[\"mask_path\"].values\n\n targets = [\n \"pixel_count_liver\",\n \"pixel_count_spleen\",\n \"pixel_count_left-kidney\",\n \"pixel_count_right-kidney\",\n \"pixel_count_bowel\",\n ]\n if use_soft_target:\n self.img_targets = df[[c + \"_norm\" for c in targets]].values\n else:\n self.img_targets = df[targets].values > 100\n\n def __len__(self):\n \"\"\"\n Get the length of the dataset.\n\n Returns:\n int: Length of the dataset.\n \"\"\"\n return len(self.img_paths)\n\n def __getitem__(self, idx):\n \"\"\"\n Item accessor.\n\n Args:\n idx (int): Index.\n\n Returns:\n torch.Tensor: Image as a tensor.\n torch.Tensor: Mask as a tensor (if not for classification).\n torch.Tensor: Label as a tensor.\n \"\"\"\n image = cv2.imread(self.img_paths[idx]).astype(np.float32) / 255.0\n\n y = torch.tensor(self.img_targets[idx], dtype=torch.float)\n\n if not self.for_classification:\n mask = cv2.imread(self.mask_paths[idx], 0)\n\n mask = np.where(mask == 4, 3, mask)\n mask = np.where(mask == 5, 4, mask)\n\n transformed = self.transforms(image=image, mask=mask)\n image = transformed[\"image\"]\n mask = transformed[\"mask\"]\n mask = mask.unsqueeze(0).float()\n\n return image, mask, y\n\n image = self.transforms(image=image)[\"image\"]\n return image, y, 0\n\n\nclass Seg3dDataset(Dataset):\n \"\"\"\n Dataset for training 3D segmentation models.\n\n Attributes:\n df (pandas DataFrame): Metadata containing image and mask information.\n train (bool): Flag indicating whether the dataset is used for training.\n test (bool): Flag indicating whether the dataset is used for testing.\n \"\"\"\n def __init__(\n self,\n df,\n train=False,\n test=False,\n ):\n \"\"\"\n Constructor.\n\n Args:\n df (pandas DataFrame): Metadata containing image and mask information.\n train (bool, optional): Whether the dataset is used for training. Defaults to False.\n test (bool, optional): Whether the dataset is used for testing. 
Defaults to False.\n \"\"\"\n self.df = df\n self.train = train\n self.test = test\n\n self.img_paths = df[\"img_path\"].values\n self.mask_paths = df[\"mask_path\"].values\n\n if train:\n import monai.transforms as transforms\n\n # https://docs.monai.io/en/0.3.0/transforms.html\n self.transforms = transforms.Compose(\n [\n transforms.RandAffined(\n translate_range=[256 * 0.1] * 3,\n padding_mode=\"zeros\",\n keys=[\"image\", \"mask\"],\n prob=0.5,\n ),\n transforms.RandRotated(\n range_x=(-0.3, 0.3),\n range_y=(-0.3, 0.3),\n range_z=(-0.3, 0.3),\n mode=\"nearest\",\n keys=[\"image\", \"mask\"],\n prob=0.5,\n ),\n transforms.RandZoomd(\n min_zoom=0.666,\n max_zoom=1.5,\n mode=\"nearest\",\n keys=[\"image\", \"mask\"],\n prob=0.5,\n ),\n ]\n )\n else:\n self.transforms = None\n\n self.imgs = {}\n self.masks = {}\n if not test:\n for idx in range(len(self.img_paths)):\n self.imgs[self.img_paths[idx]] = np.load(self.img_paths[idx])[None]\n self.masks[self.mask_paths[idx]] = np.load(self.mask_paths[idx])[None]\n\n def __len__(self):\n \"\"\"\n Get the length of the dataset.\n\n Returns:\n int: Length of the dataset.\n \"\"\"\n return len(self.img_paths)\n\n def __getitem__(self, idx):\n \"\"\"\n Item accessor.\n\n Args:\n idx (int): Index.\n\n Returns:\n torch.Tensor: Image as a tensor.\n torch.Tensor: Mask as a tensor (if not for testing).\n int: Dummy value.\n \"\"\"\n image = self.imgs.get(\n self.img_paths[idx],\n np.load(self.img_paths[idx])[None],\n )\n\n if not self.test:\n mask = self.masks.get(\n self.mask_paths[idx], np.load(self.mask_paths[idx])[None]\n )\n # Merge both kidneys !\n mask = np.where(mask == 4, 3, mask)\n mask = np.where(mask == 5, 4, mask)\n else:\n mask = 0\n\n if self.transforms is not None:\n res = self.transforms({\"image\": image, \"mask\": mask})\n image = res[\"image\"].as_tensor().float() / 255.0\n mask = res[\"mask\"].as_tensor()\n else:\n image = torch.from_numpy(image).float() / 255.0\n if not self.test:\n mask = torch.from_numpy(mask)\n\n return image, mask, 0\n\n\nclass PatientFeatureDataset(Dataset):\n \"\"\"\n Dataset for training RNN models.\n\n Attributes:\n df_patient (pandas DataFrame): Metadata containing patient information.\n df_img (pandas DataFrame): Metadata containing image information.\n exp_folders (list of tuples): Experiment folders and modes.\n max_len (int, optional): Maximum length for feature sequences. Defaults to None.\n restrict (bool, optional): Flag to restrict feature length. Defaults to False.\n resize (tuple, optional): Tuple specifying the size for resizing features. Defaults to None.\n \"\"\"\n def __init__(\n self,\n df_patient,\n df_img,\n exp_folders,\n max_len=None,\n restrict=False,\n resize=None,\n ):\n \"\"\"\n Constructor.\n\n Args:\n df_patient (pandas DataFrame): Metadata containing patient information.\n df_img (pandas DataFrame): Metadata containing image information.\n exp_folders (list of tuples): Experiment folders and modes.\n max_len (int, optional): Maximum length for feature sequences. Defaults to None.\n restrict (bool, optional): Flag to restrict feature length. Defaults to False.\n resize (tuple, optional): Tuple specifying the size for resizing features. 
Defaults to None.\n \"\"\"\n self.df_patient = df_patient\n self.fts, self.crop_fts = self.retrieve_features(df_img, exp_folders)\n self.ids = list(self.fts.keys())\n self.max_len = max_len\n self.restrict = restrict\n self.resize = resize\n\n def retrieve_features(self, df, exp_folders):\n \"\"\"\n Retrieve and organize features from experiment folders.\n\n Args:\n df (pandas DataFrame): Metadata containing image information.\n exp_folders (list of tuples): Experiment folders and modes.\n\n Returns:\n dict: Features dictionary.\n dict: Crop features dictionary.\n \"\"\"\n features_dict = {}\n crop_features_dict = {}\n for fold in sorted(df[\"fold\"].unique()):\n df_val = df[df[\"fold\"] == fold].reset_index(drop=True)\n\n fts = []\n for exp_folder, mode in exp_folders:\n if mode == \"seg\":\n seg = np.load(exp_folder + f\"pred_val_{fold}.npy\")\n fts.append(seg)\n elif mode == \"crop\":\n continue\n else: # proba\n ft = np.load(exp_folder + f\"pred_val_{fold}.npy\")\n fts.append(ft)\n\n kidney = (\n seg[:, 2:4].max(-1, keepdims=True)\n if seg.shape[-1] == 5\n else seg[:, 2:3]\n )\n fts.append(\n np.concatenate(\n [\n ft[:, :1] * seg[:, -1:], # bowel\n ft[:, 1:2] * seg.max(-1, keepdims=True), # extravasation\n ft[:, 2:5] * kidney, # kidney\n ft[:, 5:8] * seg[:, :1], # liver\n ft[:, 8:] * seg[:, 1:2], # spleen\n ],\n -1,\n )\n )\n try:\n fts = np.concatenate(fts, axis=1)\n except Exception:\n fts = np.zeros(len(df_val))\n\n df_val[\"index\"] = np.arange(len(df_val))\n slice_starts = (\n df_val.groupby([\"patient_id\", \"series\"])[\"index\"].min().to_dict()\n )\n slice_ends = (\n df_val.groupby([\"patient_id\", \"series\"])[\"index\"].max() + 1\n ).to_dict()\n\n for k in slice_starts.keys():\n start = slice_starts[k]\n end = slice_ends[k]\n\n if df_val[\"frame\"][start] < df_val[\"frame\"][end - 1]:\n features_dict[k] = fts[start:end]\n else:\n features_dict[k] = fts[start:end][::-1]\n\n crop_fts = []\n for exp_folder, mode in exp_folders:\n if mode == \"crop\":\n if not len(df_val):\n continue\n\n preds = np.load(exp_folder + f\"pred_val_{fold}.npy\")\n df_series = get_df_series(\n self.df_patient[self.df_patient[\"fold\"] == fold],\n df_val,\n )\n\n for i, c in enumerate([\"pred_healthy\", \"pred_low\", \"pred_high\"]):\n df_series[c] = preds[:, i]\n df_series = (\n df_series.groupby([\"patient_id\", \"series\"])\n .agg(list)\n .reset_index()\n )\n\n i = 2\n crop_scores = np.array(\n [\n np.array(df_series[p].values.tolist())\n for p in [\"pred_healthy\", \"pred_low\", \"pred_high\"]\n ]\n ).transpose(1, 2, 0)\n crop_fts.append(crop_scores)\n\n if len(crop_fts):\n crop_scores = np.concatenate(crop_fts, -1)\n for i, (p, s) in enumerate(df_series[[\"patient_id\", \"series\"]].values):\n try:\n _ = features_dict[(p, s)]\n crop_features_dict[(p, s)] = crop_scores[i] # cls x score\n except KeyError:\n print(p, s)\n\n return features_dict, crop_features_dict\n\n def __len__(self):\n return len(self.fts)\n\n @staticmethod\n def restrict_fts(fts):\n \"\"\"\n Restrict the length of features.\n\n Args:\n fts (numpy.ndarray): Features array.\n\n Returns:\n numpy.ndarray: Restricted features array.\n \"\"\"\n if len(fts) > 400:\n fts = fts[len(fts) // 6:]\n else:\n fts = fts[len(fts) // 8:]\n return fts\n\n @staticmethod\n def resize_fts(fts, size, max_len=None):\n \"\"\"\n Resize features.\n\n Args:\n fts (numpy.ndarray): Features array.\n size (tuple): Size for resizing.\n max_len (int, optional): Maximum length for features. 
Defaults to None.\n\n        Returns:\n            numpy.ndarray: Resized features array.\n        \"\"\"\n        if max_len is not None: # crop too long\n            fts = fts[-max_len:]\n\n        fts = fts[::2].copy()\n\n        fts = F.interpolate(\n            torch.from_numpy(fts.T).float().unsqueeze(0), size=size, mode=\"linear\"\n        )[0].transpose(0, 1)\n        return fts\n\n    def __getitem__(self, idx):\n        \"\"\"\n        Item accessor.\n\n        Args:\n            idx (int): Index.\n\n        Returns:\n            dict: Features and crop features (if available).\n            torch.Tensor: Label as a tensor.\n            int: Dummy value.\n        \"\"\"\n        patient_series = self.ids[idx]\n\n        fts = self.fts[patient_series]\n        crop_fts = self.crop_fts.get(patient_series, None)\n\n        if self.restrict:\n            fts = self.restrict_fts(fts)\n\n        if self.resize:\n            fts = self.resize_fts(fts, self.resize, self.max_len)\n        else:\n            if self.max_len is not None:\n                fts = self.pad(fts)\n            fts = torch.from_numpy(fts).float()\n\n        if crop_fts is not None:\n            crop_fts = torch.from_numpy(crop_fts).float()\n        else:\n            crop_fts = 0\n\n        y = self.df_patient[self.df_patient[\"patient_id\"] == patient_series[0]][\n            PATIENT_TARGETS\n        ].values[0]\n\n        y = torch.from_numpy(y).float() # bowel, extravasation, kidney, liver, spleen\n\n        return {\"x\": fts, \"ft\": crop_fts}, y, 0\n", "repo_name": "TheoViel/kaggle_rsna_abdominal_trauma", "sub_path": "src/data/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 34648, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 22, "dataset": "github-code", "pt": "76", "api": [{"api_name": "torch.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 76, "usage_type": "name"}, {"api_name": "params.IMG_TARGETS_EXTENDED", "line_number": 122, "usage_type": "name"}, {"api_name": "params.PATIENT_TARGETS", "line_number": 124, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 177, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 197, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 223, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 223, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 229, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 235, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 235, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 236, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 236, "usage_type": "attribute"}, {"api_name": "torch.utils.data.Dataset", "line_number": 259, "usage_type": "name"}, {"api_name": "data.preparation.get_df_series", 
"line_number": 303, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 337, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 342, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 346, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 349, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 359, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 359, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 361, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 361, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 374, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 375, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 382, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 382, "usage_type": "attribute"}, {"api_name": "torch.utils.data.Dataset", "line_number": 401, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 517, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 562, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 569, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 570, "usage_type": "attribute"}, {"api_name": "torch.utils.data.Dataset", "line_number": 595, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 663, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 663, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 665, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 665, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 668, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 670, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 671, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 684, "usage_type": "name"}, {"api_name": "monai.transforms.Compose", "line_number": 718, "usage_type": "call"}, {"api_name": "monai.transforms", "line_number": 718, "usage_type": "name"}, {"api_name": "monai.transforms.RandAffined", "line_number": 720, "usage_type": "call"}, {"api_name": "monai.transforms", "line_number": 720, "usage_type": "name"}, {"api_name": "monai.transforms.RandRotated", "line_number": 726, "usage_type": "call"}, {"api_name": "monai.transforms", "line_number": 726, "usage_type": "name"}, {"api_name": "monai.transforms.RandZoomd", "line_number": 734, "usage_type": "call"}, {"api_name": "monai.transforms", "line_number": 734, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 750, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 751, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 776, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 781, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 784, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 785, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 794, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 796, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 801, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 860, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 865, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 874, "usage_type": "call"}, {"api_name": "numpy.concatenate", 
"line_number": 886, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 888, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 890, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 913, "usage_type": "call"}, {"api_name": "data.preparation.get_df_series", "line_number": 914, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 928, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 930, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 937, "usage_type": "call"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 985, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 985, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 986, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 1015, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 1018, "usage_type": "call"}, {"api_name": "params.PATIENT_TARGETS", "line_number": 1023, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 1026, "usage_type": "call"}]}
+{"seq_id": "2624957686", "text": "\"\"\"\nContains views for DawgHouse\n\"\"\"\n\n# pylint: disable=no-member\n# pylint: disable=undefined-variable\n\nimport json\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\nfrom django.db.models import Q\nfrom Levenshtein import distance\nfrom .models import Bark, DawgHouseUser, SniffRequest, Comment\nfrom .forms import LoginForm, CustomUserCreationForm\n\n\ndef home_view(request):\n \"\"\"Shows login/signup if not authenticated otherwise timeline\"\"\"\n if request.user.is_authenticated:\n user = request.user\n friends = user.friends.all()\n barks = Bark.objects.filter(Q(user=user) | Q(user__in=friends)).order_by(\n \"-timestamp\"\n )\n\n context = {\n \"barks\": barks,\n }\n\n return render(request, \"main_page.html\", context)\n\n return render(request, \"homepage.html\")\n\n\ndef login_view(request):\n \"\"\"Handles loging form POST data\"\"\"\n form = LoginForm()\n if request.method == \"POST\":\n form = LoginForm(request.POST)\n if form.is_valid():\n user = authenticate(\n username=form.cleaned_data[\"username\"],\n password=form.cleaned_data[\"password\"],\n )\n if user is not None:\n login(request, user)\n return redirect(\"/main/\")\n \n message = \"Unrecognized Dawgtag or Password\"\n\n return render(request, \"login.html\", {\"form\": form, \"message\": message})\n\n return render(request, \"login.html\", {\"form\": form})\n\n\ndef signup_view(request):\n \"\"\"Handles signup form POST data\"\"\"\n if request.method == \"POST\":\n form = CustomUserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n messages.success(request, \"Account created successfully\")\n login(request, user)\n return redirect(\"/main/\")\n \n for field in form:\n for error in field.errors:\n messages.error(request, f\"{field.label}: {error}\")\n return redirect(\"/signup/\")\n \n\n form = CustomUserCreationForm()\n\n return render(request, \"signup.html\", {\"form\": form})\n\n\ndef logout_view(request):\n \"\"\"Logs user out\"\"\"\n logout(request)\n return redirect(\"/\")\n\n\n@login_required\ndef send_sniff_request(request, user_ID):\n \"\"\"Creates new sniff request in DB\"\"\"\n from_user = request.user\n to_user = DawgHouseUser.objects.get(id=user_ID)\n sniff_request, created = SniffRequest.objects.get_or_create(\n from_user=from_user, to_user=to_user\n )\n if created:\n messages.success(request, \"Sniff request sent successfully.\")\n return redirect(\"profile\", username=to_user.username)\n\n messages.warning(request, \"Sniff request was already sent.\")\n return redirect(\"profile\", username=to_user.username)\n\n\n@login_required\ndef accept_sniff_request(request, request_ID):\n \"\"\"Moves sniff request to friends table and deltes from sniff table\"\"\"\n sniff_request = SniffRequest.objects.get(id=request_ID)\n if sniff_request.to_user == request.user:\n sniff_request.to_user.friends.add(sniff_request.from_user)\n sniff_request.from_user.friends.add(sniff_request.to_user)\n sniff_request.delete()\n return redirect(\"home_view\")\n\n return redirect(\"home_view\")\n\n@login_required\ndef decline_sniff_request(request, request_id):\n \"\"\"Deletes sniff from table\"\"\"\n sniff_request = 
SniffRequest.objects.get(id=request_id)\n    if sniff_request.to_user == request.user:\n        sniff_request.delete()\n        return redirect(\"home_view\")\n    \n    return redirect(\"home_view\")\n\n@login_required\ndef send_example_view(request):\n    \"\"\"Renders example page listing all users and sniff requests\"\"\"\n    allusers = DawgHouseUser.objects.all()\n    all_sniff_requests = SniffRequest.objects.all()\n\n    context = {\n        \"allusers\": allusers,\n        \"all_sniff_requests\": all_sniff_requests,\n    }\n    return render(request, \"sniff_example.html\", context)\n\n\n@login_required\ndef accept_example_view(request):\n    \"\"\"Renders sniff request template\"\"\"\n    all_sniff_requests = SniffRequest.objects.all()\n\n    context = {\n        \"all_sniff_requests\": all_sniff_requests,\n    }\n    return render(request, \"accept_sniffs_example.html\", context)\n\n\ndef profile_view(request, username):\n    \"\"\"Determines which profile template to render and renders it\"\"\"\n    logged_in_user = request.user\n    user = get_object_or_404(DawgHouseUser, username=username)\n    friends_list = user.friends.all()\n    barks = Bark.objects.filter(user=user).order_by(\"-timestamp\")\n    context = {\n        \"user\": user,\n        \"barks\": barks,\n        \"friends_list\": friends_list,\n        \"logged_in_user\": logged_in_user\n    }\n    if user == request.user:\n        return render(request, \"user_profile.html\", context)\n    if request.user in user.friends.all():\n        return render(request, \"friend_view.html\", context)\n\n    return render(request, \"non_friend_view.html\", context)\n\n\n@login_required\ndef post_bark(request):\n    \"\"\"Logic for posting a new bark\"\"\"\n    if request.method == \"POST\":\n        bark_content = request.POST.get(\"bark_content\")\n\n        new_bark = Bark(user=request.user, content=bark_content)\n        new_bark.save()\n\n        return redirect(f\"/profile/{request.user.username}/\")\n\n    return redirect(\"/\")\n\n@login_required\ndef home_post_bark(request):\n    \"\"\"Logic for posting a bark from homepage\"\"\"\n    if request.method == \"POST\":\n        bark_content = request.POST.get(\"bark_content\")\n\n        new_bark = Bark(user=request.user, content=bark_content)\n        new_bark.save()\n\n        return redirect(\"/main/\")\n\n    return redirect(\"/\")\n\n@login_required\ndef delete_bark(request, id):\n    \"\"\"Logic for deleting a bark\"\"\"\n    post = get_object_or_404(Bark, pk=id)\n\n    if request.method == \"DELETE\":\n        # Check if the user has permission to delete the post\n        if request.user == post.user:\n            post.delete()  # Delete the post\n            return JsonResponse({\"success\": True})\n\n        return JsonResponse({\"success\": False, \"error\": \"Permission denied\"})\n    return JsonResponse({\"success\": False, \"error\": \"Invalid request method\"})\n\n\n@csrf_exempt\n@login_required\ndef repost_post(request, bark_id):\n    \"\"\"Logic for reposting\"\"\"\n    if request.method == \"POST\":\n        # Get the original bark\n        original_bark = get_object_or_404(Bark, id=bark_id)\n        existing_repost = Bark.objects.filter(\n            original_bark=original_bark,\n            user=request.user,\n            is_repost=True\n        ).first()\n\n        if existing_repost:\n            existing_repost.delete()\n            original_bark.num_howls -= 1\n            original_bark.save()\n            return JsonResponse({\"success\": True, \"is_repost\": False})\n        # Create a new Bark instance\n        new_bark = Bark(\n            content=original_bark.content,\n            user=request.user,  # Use the currently logged-in user as the author\n            is_repost=True,\n            original_bark=original_bark,\n        )\n        original_bark.num_howls += 1\n        new_bark.save()\n        original_bark.save()\n\n        return JsonResponse({\"success\": True, \"is_repost\": True})\n    return JsonResponse({\"success\": 
False})\n\n\n@csrf_exempt\n@login_required\ndef edit_bark_ajax(request):\n    \"\"\"Ajax logic for editing a post\"\"\"\n    if request.method == \"POST\":\n        data = json.loads(request.body)\n        post_id = data.get(\"post_id\")\n        new_content = data.get(\"new_content\")\n        post = Bark.objects.filter(id=post_id).first()\n\n        if (\n            post and request.user == post.user\n        ):  # Check if the post exists and the user is the owner of the bark\n            post.content = new_content\n            post.save()\n            return JsonResponse({\"success\": True})\n        return JsonResponse({\"success\": False})\n\n\n@login_required\ndef add_comment(request, bark_id):  # include bark_id here\n    \"\"\"Logic for adding a comment to a post\"\"\"\n    if request.method == \"POST\":\n        comment_text = request.POST.get(\"comment_text\")\n        user = request.user\n\n        if comment_text:\n            bark = Bark.objects.get(id=bark_id)  # now bark_id is defined\n            comment = Comment(bark=bark, name=user, body=comment_text)\n            bark.num_yips = Comment.objects.filter(bark=bark).count() + 1\n            comment.save()\n            bark.save()\n\n            return JsonResponse({\"user\": user.username, \"text\": comment_text})\n\n        return JsonResponse({\"error\": \"Comment text is empty\"}, status=400)\n\n    return JsonResponse({}, status=400)\n\n\n@login_required\ndef delete_comment(request, comment_id):\n    \"\"\"Logic for deleting a comment\"\"\"\n    try:\n        comment = Comment.objects.get(id=comment_id)\n\n        # Check if the user is the owner of the comment or the owner of the Bark.\n        if request.user in (comment.name, comment.bark.user):\n            comment.delete()\n\n            bark = comment.bark\n            bark.num_yips = Comment.objects.filter(bark=bark).count()\n            bark.save()\n\n            return JsonResponse({\"success\": True})\n\n        return JsonResponse({\"success\": False, \"error\": \"Permission denied\"})\n    except Comment.DoesNotExist:\n        return JsonResponse({\"success\": False, \"error\": \"Comment not found\"})\n\n\n@login_required\ndef give_treat(request, bark_id, user_which, return_to):\n    \"\"\"Logic for liking a post\"\"\"\n    bark = get_object_or_404(Bark, id=bark_id)\n    user = request.user\n\n    if user in bark.treated_by.all():\n        bark.num_likes -= 1\n        bark.treated_by.remove(user)\n    else:\n        bark.num_likes += 1\n        bark.treated_by.add(user)\n\n    bark.save()\n\n    if return_to == \"main_timeline\":\n        return redirect(\"main_timeline\")\n    if return_to == \"profile\":\n        return redirect(f\"/profile/{user_which}/\")\n    \n    return redirect(f\"/profile/{return_to}/\")\n\n\n@method_decorator(csrf_exempt, name=\"dispatch\")\ndef edit_bio_ajax(request):\n    \"\"\"Ajax logic for editing a bio\"\"\"\n    if request.method == \"POST\":\n        data = json.loads(request.body)\n        new_bio = data.get(\"bio\")\n        request.user.bio = new_bio\n        request.user.save()\n        return JsonResponse({\"success\": True})\n    return JsonResponse({\"success\": False})\n\n\ndef search_users(request):\n    \"\"\"Fuzzy username matching algorithm\"\"\"\n    if request.method == \"POST\":\n        username = request.POST.get(\"username\", None)\n        if username:\n            users = DawgHouseUser.objects.all()\n            similar_users = []\n            for user in users:\n                dist = distance(username, user.username)\n                username_length = len(username)\n                similarity_ratio = (\n                    1 - dist / max(username_length, len(user.username))\n                ) * 100\n                if similarity_ratio >= 60:\n                    similar_users.append(user)\n            return render(\n                request, \"search_results.html\", {\"similar_users\": similar_users}\n            )\n    return render(request, \"search_users.html\")\n\n\n@login_required\ndef main_timeline(request):\n    \"\"\"Retrieves all posts to display and renders homepage template\"\"\"\n    user = request.user\n    
friends = user.friends.all()\n barks = Bark.objects.filter(Q(user=user) | Q(user__in=friends)).order_by(\n \"-timestamp\"\n )\n\n for friend in friends:\n print(friend.username)\n\n context = {\n \"barks\": barks,\n }\n return render(request, \"main_page.html\", context)\n\n@login_required\ndef change_profile_picture(request, picture_path):\n \"\"\"Sets 'profile_picture' in DB to static image path\"\"\"\n request.user.profile_picture = picture_path\n request.user.save()\n \n return redirect(f\"/profile/{request.user.username}\")\n", "repo_name": "Ragnarok9401/intro_software_eng_jf1774", "sub_path": "DawgHouse/homepage/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 11974, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "models.Bark.objects.filter", "line_number": 27, "usage_type": "call"}, {"api_name": "models.Bark.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "models.Bark", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 27, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 35, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 37, "usage_type": "call"}, {"api_name": "forms.LoginForm", "line_number": 42, "usage_type": "call"}, {"api_name": "forms.LoginForm", "line_number": 44, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 46, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 51, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 52, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 56, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 58, "usage_type": "call"}, {"api_name": "forms.CustomUserCreationForm", "line_number": 64, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 67, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 67, "usage_type": "name"}, {"api_name": "django.contrib.auth.login", "line_number": 68, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 69, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 73, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 73, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 74, "usage_type": "call"}, {"api_name": "forms.CustomUserCreationForm", "line_number": 77, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 79, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 84, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 85, "usage_type": "call"}, {"api_name": "models.DawgHouseUser.objects.get", "line_number": 92, "usage_type": "call"}, {"api_name": "models.DawgHouseUser.objects", "line_number": 92, "usage_type": "attribute"}, {"api_name": "models.DawgHouseUser", "line_number": 92, "usage_type": "name"}, {"api_name": "models.SniffRequest.objects.get_or_create", "line_number": 93, "usage_type": "call"}, {"api_name": "models.SniffRequest.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "models.SniffRequest", "line_number": 93, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 97, "usage_type": "call"}, {"api_name": 
"django.contrib.messages", "line_number": 97, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 98, "usage_type": "call"}, {"api_name": "django.contrib.messages.warning", "line_number": 100, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 100, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 101, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 88, "usage_type": "name"}, {"api_name": "models.SniffRequest.objects.get", "line_number": 107, "usage_type": "call"}, {"api_name": "models.SniffRequest.objects", "line_number": 107, "usage_type": "attribute"}, {"api_name": "models.SniffRequest", "line_number": 107, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 112, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 114, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 104, "usage_type": "name"}, {"api_name": "models.SniffRequest.objects.get", "line_number": 119, "usage_type": "call"}, {"api_name": "models.SniffRequest.objects", "line_number": 119, "usage_type": "attribute"}, {"api_name": "models.SniffRequest", "line_number": 119, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 122, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 124, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 116, "usage_type": "name"}, {"api_name": "models.DawgHouseUser.objects.all", "line_number": 129, "usage_type": "call"}, {"api_name": "models.DawgHouseUser.objects", "line_number": 129, "usage_type": "attribute"}, {"api_name": "models.DawgHouseUser", "line_number": 129, "usage_type": "name"}, {"api_name": "models.SniffRequest.objects.all", "line_number": 130, "usage_type": "call"}, {"api_name": "models.SniffRequest.objects", "line_number": 130, "usage_type": "attribute"}, {"api_name": "models.SniffRequest", "line_number": 130, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 136, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 126, "usage_type": "name"}, {"api_name": "models.SniffRequest.objects.all", "line_number": 142, "usage_type": "call"}, {"api_name": "models.SniffRequest.objects", "line_number": 142, "usage_type": "attribute"}, {"api_name": "models.SniffRequest", "line_number": 142, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 147, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 139, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 153, "usage_type": "call"}, {"api_name": "models.DawgHouseUser", "line_number": 153, "usage_type": "argument"}, {"api_name": "models.Bark.objects.filter", "line_number": 155, "usage_type": "call"}, {"api_name": "models.Bark.objects", "line_number": 155, "usage_type": "attribute"}, {"api_name": "models.Bark", "line_number": 155, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 163, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 165, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 167, "usage_type": "call"}, {"api_name": "models.Bark", "line_number": 176, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 
179, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 181, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 170, "usage_type": "name"}, {"api_name": "models.Bark", "line_number": 189, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 192, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 194, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 183, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 199, "usage_type": "call"}, {"api_name": "models.Bark", "line_number": 199, "usage_type": "argument"}, {"api_name": "django.http.JsonResponse", "line_number": 205, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 207, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 208, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 196, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 217, "usage_type": "call"}, {"api_name": "models.Bark", "line_number": 217, "usage_type": "argument"}, {"api_name": "models.Bark.objects.filter", "line_number": 218, "usage_type": "call"}, {"api_name": "models.Bark.objects", "line_number": 218, "usage_type": "attribute"}, {"api_name": "models.Bark", "line_number": 218, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 228, "usage_type": "call"}, {"api_name": "models.Bark", "line_number": 230, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 240, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 241, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 211, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 212, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 249, "usage_type": "call"}, {"api_name": "models.Bark.objects.filter", "line_number": 252, "usage_type": "call"}, {"api_name": "models.Bark.objects", "line_number": 252, "usage_type": "attribute"}, {"api_name": "models.Bark", "line_number": 252, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 259, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 260, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 244, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 245, "usage_type": "name"}, {"api_name": "models.Bark.objects.get", "line_number": 271, "usage_type": "call"}, {"api_name": "models.Bark.objects", "line_number": 271, "usage_type": "attribute"}, {"api_name": "models.Bark", "line_number": 271, "usage_type": "name"}, {"api_name": "models.Comment", "line_number": 272, "usage_type": "call"}, {"api_name": "models.Comment.objects.filter", "line_number": 273, "usage_type": "call"}, {"api_name": "models.Comment.objects", "line_number": 273, "usage_type": "attribute"}, {"api_name": "models.Comment", "line_number": 273, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 277, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 279, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 281, "usage_type": "call"}, {"api_name": 
"django.contrib.auth.decorators.login_required", "line_number": 263, "usage_type": "name"}, {"api_name": "models.Comment.objects.get", "line_number": 288, "usage_type": "call"}, {"api_name": "models.Comment.objects", "line_number": 288, "usage_type": "attribute"}, {"api_name": "models.Comment", "line_number": 288, "usage_type": "name"}, {"api_name": "models.Comment.objects.filter", "line_number": 295, "usage_type": "call"}, {"api_name": "models.Comment.objects", "line_number": 295, "usage_type": "attribute"}, {"api_name": "models.Comment", "line_number": 295, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 298, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 300, "usage_type": "call"}, {"api_name": "models.Comment.DoesNotExist", "line_number": 301, "usage_type": "attribute"}, {"api_name": "models.Comment", "line_number": 301, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 302, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 284, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 308, "usage_type": "call"}, {"api_name": "models.Bark", "line_number": 308, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 321, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 323, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 325, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 305, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 332, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 336, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 337, "usage_type": "call"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 328, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 328, "usage_type": "argument"}, {"api_name": "models.DawgHouseUser.objects.all", "line_number": 345, "usage_type": "call"}, {"api_name": "models.DawgHouseUser.objects", "line_number": 345, "usage_type": "attribute"}, {"api_name": "models.DawgHouseUser", "line_number": 345, "usage_type": "name"}, {"api_name": "Levenshtein.distance", "line_number": 348, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 355, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 358, "usage_type": "call"}, {"api_name": "models.Bark.objects.filter", "line_number": 366, "usage_type": "call"}, {"api_name": "models.Bark.objects", "line_number": 366, "usage_type": "attribute"}, {"api_name": "models.Bark", "line_number": 366, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 366, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 376, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 361, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 384, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 378, "usage_type": "name"}]}
+{"seq_id": "73336514165", "text": "from django.db.models import Q\nfrom django.contrib import admin\nfrom django.contrib.admin.views.main import ChangeList\n\nclass InputFilter(admin.SimpleListFilter):\n template = 'admin/input_filter.html'\n\n def lookups(self, request, model_admin):\n # Dummy, required to show the filter.\n return ((),)\n\n def choices(self, changelist):\n # Grab only the \"all\" option.\n all_choice = next(super().choices(changelist))\n all_choice['query_parts'] = (\n (k, v)\n for k, v in changelist.get_filters_params().items()\n if k != self.parameter_name\n )\n yield all_choice\n\nclass ReferenceFilter(InputFilter):\n parameter_name = 'invoice'\n title = 'invoice number' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(reference__icontains=bit) |\n Q(supplier_invoice__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\nclass POFilter(InputFilter):\n parameter_name = 'po'\n title = 'po' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(po__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\nclass PartFilter(InputFilter):\n parameter_name = 'part'\n title = 'part' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(purchased_parts__part__icontains=bit) |\n Q(purchased_parts__internal__part_number__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\nclass PricePartFilter(InputFilter):\n parameter_name = 'part'\n title = 'brand and part' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(part__part__icontains=bit) |\n Q(part__internal__part_number__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\nclass InvoiceFilter(InputFilter):\n parameter_name = 'invoice'\n title = 'invoice'\n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return\n elif Q(term__icontains='none'):\n any_name = Q(supplier_invoice=None)\n return queryset.filter(any_name).distinct()\n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(supplier_invoice__supplier_invoice__icontains=bit) |\n Q(supplier_invoice__reference__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()\n\nclass PricePOFilter(InputFilter):\n parameter_name = 'po'\n title = 'po' \n def queryset(self, request, queryset):\n term = self.value() \n if term is None:\n return \n any_name = Q()\n for bit in term.split():\n any_name &= (\n Q(supplier_invoice__po__icontains=bit)\n ) \n return queryset.filter(any_name).distinct()", "repo_name": "reinali07/autoshop-manager", "sub_path": "sup_invoices/filters.py", "file_name": "filters.py", "file_ext": "py", "file_size_in_byte": 3590, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.contrib.admin.SimpleListFilter", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 5, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 33, "usage_type": 
"call"}, {"api_name": "django.db.models.Q", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 61, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 73, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 76, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 77, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 88, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 89, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 91, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 94, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 95, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 106, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 109, "usage_type": "call"}]}
+{"seq_id": "37087287577", "text": "from discord.ext import commands\nfrom discord import Member\n\nfrom managers import mongo_manager\nfrom config import SERVER_COL_NAME\nimport config\n\nclass UtilityCog(commands.Cog):\n\n @commands.command(name=\"ping\", description=\"Returns the bot's latency\")\n async def ping(self, ctx:commands.Context):\n await ctx.send(f\"Bot's Latency : **{round(ctx.bot.latency * 1000, 2)} ms**\")\n\n @commands.command(name=\"prefix\", description=\"Sets the prefix of the bot\")\n async def prefix(self, ctx:commands.Context, prefix:str=None):\n\n if prefix is None:\n return await ctx.reply(f\"Current Prefix is {config.PREFIX}\")\n\n try:\n if not ctx.author.guild_permissions.administrator:\n return await ctx.reply(\"You need administrator privilages to update the guild prefix.\")\n\n updated_data = {\"prefix\" : prefix}\n\n mongo_manager.manager.update_all_data(SERVER_COL_NAME, {}, updated_data)\n config.modify_prefix_timer_max(prefix=prefix, timer=None, max=None)\n\n return await ctx.reply(f\"Prefix changed to **{prefix}**\")\n except Exception as e:\n return await ctx.reply(f\"Error occured while changing the prefix. \\n```{e}```\")\n\n @commands.command(name=\"max\", description=\"Sets the max collection size limit\")\n async def max(self, ctx:commands.Context, max:int=None):\n \n if max is None:\n return await ctx.reply(f\"Maximum Collection Size is **{config.MAX}**\")\n\n try:\n if not ctx.author.guild_permissions.administrator:\n return await ctx.reply(\"You need administrator privilages to update collection max.\")\n\n updated_data = {\"max\" : max}\n\n mongo_manager.manager.update_all_data(SERVER_COL_NAME, {}, updated_data)\n config.modify_prefix_timer_max(prefix=None, timer=None, max=int(max))\n\n return await ctx.reply(f\"New Collection Max is set to **{max}**\")\n except Exception as e:\n await ctx.reply(f\"Error occured while trying to update the collection max. 
\\n```{e}```\")\n\ndef setup(bot:commands.Bot):\n bot.add_cog(UtilityCog())", "repo_name": "Devanshu19/PokeCol", "sub_path": "cogs/utility.py", "file_name": "utility.py", "file_ext": "py", "file_size_in_byte": 2146, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "76", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 8, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 8, "usage_type": "name"}, {"api_name": "discord.ext.commands.Context", "line_number": 11, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 11, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 10, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 10, "usage_type": "name"}, {"api_name": "discord.ext.commands.Context", "line_number": 15, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 15, "usage_type": "name"}, {"api_name": "config.PREFIX", "line_number": 18, "usage_type": "attribute"}, {"api_name": "managers.mongo_manager.manager.update_all_data", "line_number": 26, "usage_type": "call"}, {"api_name": "config.SERVER_COL_NAME", "line_number": 26, "usage_type": "argument"}, {"api_name": "managers.mongo_manager.manager", "line_number": 26, "usage_type": "attribute"}, {"api_name": "managers.mongo_manager", "line_number": 26, "usage_type": "name"}, {"api_name": "config.modify_prefix_timer_max", "line_number": 27, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 14, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 14, "usage_type": "name"}, {"api_name": "discord.ext.commands.Context", "line_number": 34, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 34, "usage_type": "name"}, {"api_name": "config.MAX", "line_number": 37, "usage_type": "attribute"}, {"api_name": "managers.mongo_manager.manager.update_all_data", "line_number": 45, "usage_type": "call"}, {"api_name": "config.SERVER_COL_NAME", "line_number": 45, "usage_type": "argument"}, {"api_name": "managers.mongo_manager.manager", "line_number": 45, "usage_type": "attribute"}, {"api_name": "managers.mongo_manager", "line_number": 45, "usage_type": "name"}, {"api_name": "config.modify_prefix_timer_max", "line_number": 46, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 33, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 33, "usage_type": "name"}, {"api_name": "discord.ext.commands.Bot", "line_number": 52, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 52, "usage_type": "name"}]}
+{"seq_id": "33542733691", "text": "import scipy.spatial.kdtree as kd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass _spatial_smoother_2d(object):\n \"\"\"\n Stores domain parameters for all spatially adaptive kernel density estimators.\n\n Arguments:\n\n 1. num_neighbors: The number of neighbors to use for the adaptive smoothing parameter.\n 2. domx_params: A tuple specifying the parameters for the x-axis. Elements containg lower bound; upper bound; number of steps.\n 3. domy_params: A tuple specifying the parameters for the y-axis. Elements containg lower bound; upper bound; number of steps.\n\n \"\"\"\n def __init__(self,\n num_neighbors,\n domx_params,\n domy_params):\n\n self.k = num_neighbors\n self.domx_params = domx_params\n self.domy_params = domy_params\n\n def _construct_domain(self):\n # construct domain over x-axis\n domx = np.linspace( self.domx_params[0],\n self.domx_params[1],\n self.domx_params[2])\n # domain over the y-axis\n domy = np.linspace( self.domy_params[0],\n self.domy_params[1],\n self.domy_params[2])\n\n # return grid coordinates\n return(np.meshgrid(domx, domy))\n\n def plot_kernel(self,\n f,\n x = None,\n title = \"\",\n data_alpha = 1.0,\n filename = None):\n\n \"\"\"\n Plotting tools for spatially adaptive kernel smoothing functions.\n\n Arguments:\n\n 1. f: Matrix containing the kernel smoothed function.\n 2. x: Data used by the kernel smoothing the function. (optional)\n 3. title: Plot title. (optional)\n\n Returns:\n\n 1. Plots kernel smoothed data.\n\n \"\"\"\n domx, domy = self._construct_domain()\n\n plt.figure()\n ctf = plt.contourf(domx, domy, f, cmap = \"Blues\", levels = np.linspace(np.min(f), np.max(f), 31))\n plt.colorbar(ctf, shrink=0.9, format = '%.01e')\n plt.title(title)\n\n # plot the data if provided\n if x is not None:\n plt.scatter(x[:, 0], x[:, 1], facecolor = 'red', edgecolor = \"black\", zorder = 1, alpha = data_alpha)\n\n if filename is not None:\n plt.savefig(filename, dpi = 300)\n else:\n plt.show()\n\n\nclass knn_gaussian_2d(_spatial_smoother_2d):\n\n def __init__(self, \\\n num_neighbors, \\\n domx_params, \\\n domy_params):\n\n \"\"\"\n Parameters and functions for k nearest neighbor adaptive density estimator\n with a Gaussian kernel.\n\n Arguments:\n\n 1. domx_params: A tuple specifying the parameters for the x-axis. Elements containg lower bound; upper bound; number of steps.\n 2. domy_params: A tuple specifying the parameters for the y-axis. Elements containg lower bound; upper bound; number of steps.\n\n Example:\n\n domx = (0, 10, 100) # start at 0; end at 10; 100 steps\n domy = (-10, 10, 100) # start at -10; end at 10; 100 steps\n\n # initialize smoother with using 10 neighbors\n gaussian_smoother = knn_gaussian_2d(num_neighbors = 10, domx_params = domx, domy_params = domy)\n \"\"\"\n\n _spatial_smoother_2d.__init__(self, num_neighbors, domx_params, domy_params)\n\n def smooth(self, x):\n \"\"\"\n Apply K Nearest Neighbors adaptive Gaussian smoother to a provided 2D dataset.\n\n Arguments:\n\n 1. x: 2D dataset to be smoothed. It is assumed that the rows of\n the data matrix are the sample points.\n\n Returns:\n\n 1. 
Smoothed function over the specified domain.\n\n        Example:\n\n            TODO: Write sample code...\n        \"\"\"\n        domx, domy = self._construct_domain()\n        dom = np.vstack((domx.ravel(), domy.ravel())).T\n\n        # construct KD tree on data\n        tree = kd.KDTree(x)\n\n        # get k nearest neighbors\n        dist = tree.query(dom, k = self.k)[0]\n        tp_knn = dist[:, self.k - 1].reshape(-1, 1)\n\n        ### ADAPTIVE KERNEL SMOOTHING\n        # pairwise subtraction between grid points and each data point\n        # reshape from tensor to matrix (K x 2)\n        Fxy = np.subtract(dom[:, np.newaxis, :], x[np.newaxis, :, :]).reshape(-1, 2)\n        Fxy = np.square(np.linalg.norm(Fxy, axis = 1)).reshape(dom.shape[0],-1)\n        Fxy = np.divide(Fxy, -2 * tp_knn ** 2)\n        Fxy = np.divide(np.exp(Fxy), 2 * np.pi * tp_knn ** 2)\n        Fxy = Fxy.mean(axis = 1)\n\n        return(Fxy.reshape(self.domy_params[2], self.domx_params[2]))\n\nclass knn_density_estimator(_spatial_smoother_2d):\n\n    def __init__(self, num_neighbors, domx_params, domy_params):\n\n        \"\"\"\n        Parameters and functions for k-nearest neighbor adaptive density estimator.\n\n        Arguments:\n\n            1. num_neighbors: The number of neighbors (k) used to estimate the density.\n            2. domx_params: A tuple specifying the parameters for the x-axis. Elements containing lower bound; upper bound; number of steps.\n            3. domy_params: A tuple specifying the parameters for the y-axis. Elements containing lower bound; upper bound; number of steps.\n\n        Example:\n\n            k = 10                  # Number of neighbors to consider.\n            domx = (0, 10, 100)     # start at 0; end at 10; 100 steps.\n            domy = (-10, 10, 100)   # start at -10; end at 10; 100 steps.\n\n            # initialize estimator using 10 neighbors\n            knn_estimator = knn_density_estimator(num_neighbors = k, domx_params = domx, domy_params = domy)\n        \"\"\"\n\n        _spatial_smoother_2d.__init__(self, num_neighbors, domx_params, domy_params)\n\n    def smooth(self, x):\n        \"\"\"\n        Apply K Nearest Neighbors density estimator over a grid.\n\n        Arguments:\n\n            1. x: 2D dataset to be smoothed. It is assumed that the rows of\n                  the data matrix are the sample points.\n\n        Returns:\n\n            1. 
Smoothed function over the specified domain.\n\n        Example:\n\n            TODO: Write sample code...\n        \"\"\"\n        domx, domy = self._construct_domain()\n        dom = np.vstack((domx.ravel(), domy.ravel())).T\n\n        # construct KD tree on data\n        tree = kd.KDTree(x)\n\n        # get k^{th} nearest neighbors to each point in the domain\n        dist = tree.query(dom, k = self.k, p = 2)[0]\n        dist_knn = dist[:, self.k - 1].reshape(self.domy_params[2], self.domx_params[2])\n        dist_knn = np.divide(self.k / (x.shape[0] * np.pi), dist_knn ** 2)\n\n        # KNN density estimator\n        return(dist_knn)\n\nclass distance_to_measure(_spatial_smoother_2d):\n    def __init__(self, domx_params, domy_params, num_neighbors = None, tau = None):\n        \"\"\"Distance-to-measure smoother; tau sets the fraction of sample points used as neighbors at each grid point.\"\"\"\n        assert tau is None or 0 < tau < 1, \\\n            \"Parameter tau must be a numerical value in (0, 1).\"\n\n        assert len(domx_params) == 3 and len(domy_params) == 3, \\\n            \"Domain parameter tuples must contain three elements.\"\n\n        assert isinstance(domx_params, tuple) and isinstance(domy_params, tuple), \\\n            \"Domain parameters must be of type 'tuple'.\"\n\n\n        if tau is None:\n            _spatial_smoother_2d.__init__(self,\n                                          num_neighbors = num_neighbors,\n                                          domx_params = domx_params,\n                                          domy_params = domy_params)\n        else:\n            _spatial_smoother_2d.__init__(self,\n                                          num_neighbors = None,\n                                          domx_params = domx_params,\n                                          domy_params = domy_params)\n\n        self.tau = tau\n\n    def smooth(self, x):\n        k = np.ceil(self.tau * x.shape[0]) if self.tau is not None else self.k\n        k = int(k)\n        domx, domy = self._construct_domain()\n        dom = np.vstack((domx.ravel(), domy.ravel())).T\n\n        tree = kd.KDTree(x)\n        knn = tree.query(dom, k = k, p = 2)[1]\n\n        # square root of the mean distance from each grid point to its k nearest sample points\n        Fxy = np.subtract(dom[:, np.newaxis, :], x[knn, :]).reshape(-1, 2)\n        Fxy = np.linalg.norm(Fxy, axis = 1).reshape(-1, k)\n        Fxy = np.sqrt(Fxy.mean(axis = 1))\n\n        return(Fxy.reshape(self.domy_params[2], self.domx_params[2]))\n\n\n\n\n# class knn_uniform_2d(_spatial_smoother_2d):\n#     \"\"\"\n#     Parameters and functions for k nearest neighbor adaptive density estimator\n#     with a uniform kernel.\n#     Arguments:\n#         1. domx_params: A tuple specifying the parameters for the x-axis. Elements containing lower bound; upper bound; number of steps.\n#         2. domy_params: A tuple specifying the parameters for the y-axis. Elements containing lower bound; upper bound; number of steps.\n#     Example:\n#         TODO: Write example codeblock\n#     \"\"\"\n#\n#     def __init__(self, num_neighbors, domx_params, domy_params):\n#         _spatial_smoother_2d.__init__(self, num_neighbors, domx_params, domy_params)\n#\n#     def smooth(self, x):\n#         \"\"\"\n#         Apply K Nearest Neighbors adaptive smoother with a uniform kernel to a\n#         given 2D dataset.\n#         Arguments:\n#             1. x: 2D dataset to be smoothed. It is assumed that the rows of\n#                   the data matrix are the sample points.\n#         Returns:\n#             1. Smoothed function over the specified domain.\n#         Example:\n#             TODO: Write sample code...\n#         \"\"\"\n#\n#         domx, domy = self._construct_domain()\n#         dom = np.vstack((domx.ravel(), domy.ravel())).T\n#\n#         # construct KD tree on data\n#         tree = kd.KDTree(x)\n#\n#         # get k+1 nearest neighbors\n#         #! 
the point itself is always the first nearest neighbor\n# dist = tree.query(x, k = self.k)[0]\n# tp_knn = dist[:, self.k - 1].reshape(-1, 1)\n# tp_knn = np.hstack([tp_knn, tp_knn])\n#\n# # ADAPTIVE KERNEL SMOOTHING\n# # pairwise subtraction between each grid point and each point in the data\n# Fxy = np.subtract(dom[:, np.newaxis, :], x[np.newaxis, :, :])\n\n ## divide each data point by its relative weight\n #tp_knn_big = np.tile(tp_knn, (dom.shape[0], 1)).reshape(dom.shape[0], -1, 2)\n #Fxy = np.divide(Fxy, tp_knn_big)\n\n ## find where values satisfy kernel condition\n #Fxy = (np.abs(Fxy) < 1.0) * 1.0\n\n ## get columns where both x, y coordinates are satisfied\n #Fxy = np.prod(Fxy, axis = 2)\n #Fxy = np.divide(Fxy.T, 4.0 * np.prod(tp_knn, 1).reshape(-1,1)).T\n #Fxy = np.mean(Fxy, axis = 1)\n\n #return(Fxy.reshape(domx.shape[0], domy.shape[0]))\n\nif __name__ == \"__main__\":\n \"\"\"For testing and illustrative purposes only\"\"\"\n import tdaw.examples.annulus_data as ad\n\n x = ad.sample_paired_annuli(R1 = 60,\n r1 = 40,\n R2 = 40,\n r2 = 20,\n center_modifier = 50,\n samples_from_shape = 500)\n\n domx_params = (np.min(x[:, 0]) - 10, np.max(x[:, 0]) + 10, 100)\n domy_params = (np.min(x[:, 1]) - 10, np.max(x[:, 1]) + 10, 100)\n\n dtm = distance_to_measure( tau = 0.10,\n domx_params = domx_params,\n domy_params = domy_params)\n dtm.plot_kernel(f = dtm.smooth(x), x = x)\n\n #gaussian_de = knn_gaussian_2d(num_neighbors = 8,\n # domx_params = ,\n # domy_params = )\n\n #gaussian_de.plot_kernel(f = gaussian_de.smooth(x), x = x, title = \"{} KNN with Gaussian Kernel\".format(gaussian_de.k))\n\n #from mpl_toolkits.mplot3d import Axes3D\n #fig = plt.figure()\n #ax = fig.gca(projection='3d')\n #X, Y = dtm._construct_domain()\n #surf = ax.plot_surface(X, Y, dtm.smooth(x), cmap = \"Blues\", linewidth=0, antialiased=False)\n #fig.colorbar(surf, shrink=0.25, aspect=5)\n #plt.show()\n\n # knn_de = knn_density_2d(num_neighbors = 8,\n # domx_params = (np.min(x[:, 0]) - 10, np.max(x[:, 0]) + 10, 50),\n # domy_params = (np.min(x[:, 1]) - 10, np.max(x[:, 1]) + 10, 100))\n\n #print knn_de.smooth(x).shape\n #knn_de.plot_kernel(f = knn_de.smooth(x), x = x, title = \"{} KNN Density Estimator\".format(knn_de.k))\n\n #adaptive_knn.plot_kernel(f = adaptive_knn.smooth(x), x = x, title = \"{} KNN with Gaussian Kernel\".format(adaptive_knn.k))\n", "repo_name": "patricksmedina/tdatools", "sub_path": "data_smoothing_kernels.py", "file_name": "data_smoothing_kernels.py", "file_ext": "py", "file_size_in_byte": 12605, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "numpy.linspace", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "numpy.vstack", "line_number": 121, "usage_type": "call"}, {"api_name": "scipy.spatial.kdtree.KDTree", "line_number": 124, "usage_type": "call"}, {"api_name": "scipy.spatial.kdtree", "line_number": 124, "usage_type": "name"}, {"api_name": "numpy.subtract", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 133, "usage_type": "attribute"}, {"api_name": "numpy.square", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 134, "usage_type": "attribute"}, {"api_name": "numpy.divide", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 136, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 184, "usage_type": "call"}, {"api_name": "scipy.spatial.kdtree.KDTree", "line_number": 187, "usage_type": "call"}, {"api_name": "scipy.spatial.kdtree", "line_number": 187, "usage_type": "name"}, {"api_name": "numpy.divide", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 192, "usage_type": "attribute"}, {"api_name": "numpy.ceil", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 227, "usage_type": "call"}, {"api_name": "scipy.spatial.kdtree.KDTree", "line_number": 229, "usage_type": "call"}, {"api_name": "scipy.spatial.kdtree", "line_number": 229, "usage_type": "name"}, {"api_name": "numpy.subtract", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 233, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 234, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 235, "usage_type": "call"}, {"api_name": "tdaw.examples.annulus_data.sample_paired_annuli", "line_number": 303, "usage_type": "call"}, {"api_name": "tdaw.examples.annulus_data", "line_number": 303, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 311, "usage_type": "call"}]}
+{"seq_id": "70849105205", "text": "\"\"\"Fridge objects AML components based training pipeline.\"\"\"\nfrom typing import Optional\nimport os\nimport time\nimport logging\nimport argparse\nfrom azure.ai.ml.dsl import pipeline\nfrom azure.ai.ml import load_component\nfrom common.mlops.get_compute import get_compute\nfrom common.mlops.get_environment import get_environment\nfrom common.mlops.get_aml_client import get_aml_client\n\n\ngl_pipeline_components = []\n\n\n@pipeline()\ndef fridge_objects_automl_train(\n subscription_id: str,\n resource_group_name: str,\n workspace_name: str,\n automl_model_name: str,\n automl_experiment_name: str,\n automl_compute_cluster_name: str,\n build_reference_id: str,\n model_name: str,\n model_description: str,\n deploy_environment: str\n) -> None:\n \"\"\"Compose the fridge objects AutoML training pipeline.\n\n Adds steps for data preparation (creating train, val, test MLTables) and then launches\n an AutoML object detection training job.\n\n Args:\n subscription_id (str): AML subscription ID.\n resource_group_name (str): AML resource group name.\n workspace_name (str): AML workspace name.\n automl_model_name (str): the AutoML object detection model variant.\n automl_experiment_name (str): the AutoML experiment name.\n automl_compute_cluster_name (str): the compute cluster name to use\n for the AutoML job.\n build_reference_id (str): the DevOps build reference ID executing the pipeline.\n model_name (str): name of model shown at registration.\n model_description (str): description of model shown at registration.\n deploy_environment (str): the environment to use for the AutoML job.\n\n Returns:\n None\n \"\"\"\n tenant_id = os.getenv(\"AZURE_TENANT_ID\")\n client_id = os.getenv(\"AZURE_CLIENT_ID\")\n client_secret = os.getenv(\"AZURE_CLIENT_SECRET\")\n\n if tenant_id is None or client_id is None or client_secret is None:\n raise ValueError(\"Env variables not set, unable to create client\")\n\n train_mltable_name = \"fride_obj_det_mltable_train_\" + deploy_environment\n val_mltable_name = \"fride_obj_det_mltable_val_\" + deploy_environment\n test_mltable_name = \"fride_obj_det_mltable_test_\" + deploy_environment\n\n prepare_fridge_obj_data = gl_pipeline_components[0](\n client_id=client_id,\n client_secret=client_secret,\n tenant_id=tenant_id,\n subscription_id=subscription_id,\n resource_group_name=resource_group_name,\n workspace_name=workspace_name,\n train_mltable_name=train_mltable_name,\n val_mltable_name=val_mltable_name,\n test_mltable_name=test_mltable_name,\n )\n\n train_automl_model = gl_pipeline_components[1](\n client_id=client_id,\n client_secret=client_secret,\n tenant_id=tenant_id,\n subscription_id=subscription_id,\n resource_group_name=resource_group_name,\n workspace_name=workspace_name,\n training_mltable_path=prepare_fridge_obj_data.outputs.train_mltable,\n validation_mltable_path=prepare_fridge_obj_data.outputs.val_mltable,\n automl_obj_det_model_name=automl_model_name,\n automl_experiment_name=automl_experiment_name,\n automl_compute_cluster_name=automl_compute_cluster_name,\n )\n\n gl_pipeline_components[2](\n fp32_input_dir=train_automl_model.outputs.model_artifacts_dir\n )\n\n score_fp32_model = gl_pipeline_components[3](\n model_folder_path=train_automl_model.outputs.model_artifacts_dir,\n mltable_folder=prepare_fridge_obj_data.outputs.test_mltable\n )\n\n score_fp16_model = gl_pipeline_components[4](\n model_folder_path=train_automl_model.outputs.model_artifacts_dir,\n 
mltable_folder=prepare_fridge_obj_data.outputs.test_mltable\n )\n # TODO: change model input to convert_onnx_model.outputs.fp16_output_dir\n\n compare_map_scores = gl_pipeline_components[5](\n map_before=score_fp32_model.outputs.results_file,\n map_after=score_fp16_model.outputs.results_file\n )\n\n gl_pipeline_components[6](\n client_id=client_id,\n client_secret=client_secret,\n tenant_id=tenant_id,\n subscription_id=subscription_id,\n resource_group_name=resource_group_name,\n workspace_name=workspace_name,\n onnx_model_artifacts_folder=train_automl_model.outputs.model_artifacts_dir,\n registered_model_name=model_name,\n registered_model_description=model_description,\n build_reference_id=build_reference_id,\n metrics_json_file=compare_map_scores.outputs.metrics_json_file\n )\n\n\ndef construct_pipeline(\n subscription_id: str,\n resource_group_name: str,\n workspace_name: str,\n cluster_name: str,\n environment_name: str,\n model_name: str,\n model_description: str,\n display_name: str,\n deploy_environment: str,\n build_reference: str,\n automl_model_name: str,\n automl_experiment_name: str,\n automl_compute_cluster_name: str\n) -> None:\n \"\"\"Construct the AML components based pipeline.\n\n Args:\n subscription_id (str): AML subscription ID.\n resource_group_name (str): AML resource group name.\n workspace_name (str): AML workspace name.\n cluster_name (str): the AML cluster name used to run the pipeline steps.\n environment_name (str): the AML environment name used to run the pipeline steps.\n model_name (str): name of model shown at registration.\n model_description (str): description of model shown at registration.\n display_name (str): the display name of the pipeline run.\n deploy_environment (str): the stage of deployment (eg. dev, prod).\n build_reference (str): the DevOps build reference ID executing the pipeline.\n automl_model_name (str): the AutoML object detection model variant.\n automl_experiment_name (str): the AutoML experiment name.\n automl_compute_cluster_name (str): the AML compute cluster name to use to run the\n AutoML job.\n\n Returns:\n None\n \"\"\"\n parent_dir = os.path.join(\n os.getcwd(), \"fridge_obj_det/mlops/components\"\n )\n\n prepare_data = load_component(source=parent_dir + \"/prep.yml\")\n train_model = load_component(source=parent_dir + \"/train.yml\")\n convert_model = load_component(source=parent_dir + \"/convert.yml\")\n score_fp32 = load_component(source=parent_dir + \"/score.yml\")\n score_fp16 = load_component(source=parent_dir + \"/score.yml\")\n compare_map = load_component(source=parent_dir + \"/compare_map.yml\")\n register_model = load_component(source=parent_dir + \"/register.yml\")\n\n # Set the environment name to custom environment using name and version number\n prepare_data.environment = environment_name\n train_model.environment = environment_name\n convert_model.environment = environment_name\n score_fp32.environment = environment_name\n score_fp16.environment = environment_name\n compare_map.environment = environment_name\n register_model.environment = environment_name\n\n gl_pipeline_components.append(prepare_data)\n gl_pipeline_components.append(train_model)\n gl_pipeline_components.append(convert_model)\n gl_pipeline_components.append(score_fp32)\n gl_pipeline_components.append(score_fp16)\n gl_pipeline_components.append(compare_map)\n gl_pipeline_components.append(register_model)\n\n pipeline_job = fridge_objects_automl_train(\n subscription_id,\n resource_group_name,\n workspace_name,\n automl_model_name,\n 
automl_experiment_name,\n        automl_compute_cluster_name,\n        build_reference,\n        model_name,\n        model_description,\n        deploy_environment\n    )\n    pipeline_job.display_name = display_name\n    pipeline_job.tags = {\n        \"environment\": deploy_environment,\n        \"build_reference\": build_reference,\n    }\n\n    # set pipeline level compute\n    pipeline_job.settings.default_compute = cluster_name\n    pipeline_job.settings.force_rerun = False\n    # set pipeline level datastore\n    pipeline_job.settings.default_datastore = \"workspaceblobstore\"\n\n    return pipeline_job\n\n\ndef execute_pipeline(\n    subscription_id: str,\n    resource_group_name: str,\n    workspace_name: str,\n    experiment_name: str,\n    pipeline_job: pipeline,\n    wait_for_completion: bool,\n    output_file: Optional[str],\n):\n    \"\"\"Execute the AML components based pipeline.\n\n    Args:\n        subscription_id (str): AML subscription ID.\n        resource_group_name (str): AML resource group name.\n        workspace_name (str): AML workspace name.\n        experiment_name (str): AML pipeline experiment name.\n        pipeline_job (pipeline): the AML pipeline to execute.\n        wait_for_completion (bool): True if the function should wait for the\n            pipeline to complete.\n        output_file (Optional[str]): optional path of a file to which the submitted run id is written.\n\n    Raises:\n        Exception: re-raised if pipeline submission fails or the job finishes unsuccessfully.\n    \"\"\"\n    try:\n        tenant_id = os.getenv(\"AZURE_TENANT_ID\")\n        client_id = os.getenv(\"AZURE_CLIENT_ID\")\n        client_secret = os.getenv(\"AZURE_CLIENT_SECRET\")\n\n        if tenant_id is None or client_id is None or client_secret is None:\n            raise ValueError(\"Env variables not set, unable to create client\")\n\n        ml_client = get_aml_client(\n            client_id=client_id,\n            client_secret=client_secret,\n            tenant_id=tenant_id,\n            subscription_id=subscription_id,\n            resource_group_name=resource_group_name,\n            workspace_name=workspace_name,\n        )\n\n        pipeline_job = ml_client.jobs.create_or_update(\n            pipeline_job, experiment_name=experiment_name\n        )\n\n        logging.info(f\"The job {pipeline_job.name} has been submitted!\")\n        if output_file is not None:\n            with open(output_file, \"w\") as out_file:\n                out_file.write(pipeline_job.name)\n\n        if wait_for_completion is True:\n            total_wait_time = 3600\n            current_wait_time = 0\n            job_status = [\n                \"NotStarted\",\n                \"Queued\",\n                \"Starting\",\n                \"Preparing\",\n                \"Running\",\n                \"Finalizing\",\n                \"Provisioning\",\n                \"CancelRequested\",\n                \"Failed\",\n                \"Canceled\",\n                \"NotResponding\",\n            ]\n\n            while pipeline_job.status in job_status:\n                if current_wait_time <= total_wait_time:\n                    time.sleep(20)\n                    pipeline_job = ml_client.jobs.get(pipeline_job.name)\n\n                    current_wait_time = current_wait_time + 15\n\n                    if (\n                        pipeline_job.status == \"Failed\"\n                        or pipeline_job.status == \"NotResponding\"\n                        or pipeline_job.status == \"CancelRequested\"\n                        or pipeline_job.status == \"Canceled\"\n                    ):\n                        break\n                else:\n                    break\n\n            if pipeline_job.status == \"Completed\" or pipeline_job.status == \"Finished\":\n                logging.info(\"job completed\")\n            else:\n                raise Exception(\"Sorry, exiting job with failure..\")\n    except Exception as ex:\n        print(f\"Exception raised in execute_pipeline {ex}\")\n        raise\n\n\ndef prepare_and_execute(\n    subscription_id: str,\n    resource_group_name: str,\n    workspace_name: str,\n    cluster_name: str,\n    cluster_size: str,\n    cluster_region: str,\n    min_instances: int,\n    max_instances: int,\n    idle_time_before_scale_down: int,\n    env_base_image_name: str,\n    conda_path: str,\n    environment_name: str,\n    env_description: str,\n    wait_for_completion: bool,\n    model_name: str,\n    model_description: str,\n    display_name: str,\n    experiment_name: str,\n    deploy_environment: str,\n    
build_reference: str,\n    automl_model_name: str,\n    automl_experiment_name: str,\n    automl_compute_cluster_name: str,\n    automl_cluster_size: str,\n    automl_cluster_region: str,\n    automl_min_instances: int,\n    automl_max_instances: int,\n    automl_idle_time_before_scale_down: int,\n    output_file: Optional[str],\n):\n    \"\"\"Prepare the pipeline and execute it.\n\n    Checks all resource requirements for the pipeline and creates them if they do not exist. Then\n    creates the pipeline and executes it.\n    \"\"\"\n    compute = get_compute(\n        subscription_id,\n        resource_group_name,\n        workspace_name,\n        cluster_name,\n        cluster_size,\n        cluster_region,\n        min_instances,\n        max_instances,\n        idle_time_before_scale_down,\n    )\n\n    automl_compute = get_compute(\n        subscription_id,\n        resource_group_name,\n        workspace_name,\n        automl_compute_cluster_name,\n        automl_cluster_size,\n        automl_cluster_region,\n        automl_min_instances,\n        automl_max_instances,\n        automl_idle_time_before_scale_down,\n    )\n\n    environment = get_environment(\n        subscription_id,\n        resource_group_name,\n        workspace_name,\n        env_base_image_name,\n        conda_path,\n        environment_name,\n        env_description,\n    )\n    print(f\"Environment: {environment.name}, version: {environment.version}\")\n\n    pipeline_job = construct_pipeline(\n        subscription_id,\n        resource_group_name,\n        workspace_name,\n        compute.name,\n        f\"azureml:{environment.name}:{environment.version}\",\n        model_name,\n        model_description,\n        display_name,\n        deploy_environment,\n        build_reference,\n        automl_model_name,\n        automl_experiment_name,\n        automl_compute.name,\n    )\n\n    execute_pipeline(\n        subscription_id,\n        resource_group_name,\n        workspace_name,\n        experiment_name,\n        pipeline_job,\n        wait_for_completion,\n        output_file,\n    )\n\n\ndef main():\n    \"\"\"Parse all args and execute the pipeline.\"\"\"\n    parser = argparse.ArgumentParser(\"build_environment\")\n    parser.add_argument(\"--subscription_id\", type=str, help=\"Azure subscription id\")\n    parser.add_argument(\n        \"--resource_group_name\", type=str, help=\"Azure Machine learning resource group\"\n    )\n    parser.add_argument(\n        \"--workspace_name\", type=str, help=\"Azure Machine learning Workspace name\"\n    )\n    parser.add_argument(\n        \"--cluster_name\", type=str, help=\"Azure Machine learning cluster name\"\n    )\n    parser.add_argument(\n        \"--cluster_size\", type=str, help=\"Azure Machine learning cluster size\"\n    )\n    parser.add_argument(\n        \"--cluster_region\",\n        type=str,\n        help=\"Azure Machine learning cluster region\",\n        default=\"eastus2\",\n    )\n    parser.add_argument(\"--min_instances\", type=int, default=0)\n    parser.add_argument(\"--max_instances\", type=int, default=4)\n    parser.add_argument(\"--idle_time_before_scale_down\", type=int, default=120)\n    parser.add_argument(\n        \"--build_reference\",\n        type=str,\n        help=\"Unique identifier for Azure DevOps pipeline run\",\n    )\n    parser.add_argument(\n        \"--deploy_environment\",\n        type=str,\n        help=\"execution and deployment environment. e.g. 
dev, prod, test\",\n )\n parser.add_argument(\n \"--experiment_name\", type=str, help=\"Job execution experiment name\"\n )\n parser.add_argument(\"--display_name\", type=str, help=\"Job execution run name\")\n parser.add_argument(\n \"--wait_for_completion\",\n type=bool,\n help=\"Set to True to wait for pipeline job completion\",\n )\n parser.add_argument(\n \"--environment_name\",\n type=str,\n help=\"Azure Machine Learning Environment name for job execution\",\n default=\"conda-based-devenv-py38-cpu\",\n )\n parser.add_argument(\n \"--env_base_image_name\",\n type=str,\n help=\"Environment custom base image name\",\n default=\"mcr.microsoft.com/azureml/openmpi4.1.0-ubuntu20.04\",\n )\n parser.add_argument(\n \"--conda_path\",\n type=str,\n help=\"path to conda requirements file\",\n default=\"model_factory/fridge_obj_det/mlops/environment/conda.yml\",\n )\n parser.add_argument(\n \"--env_description\", type=str, default=\"Environment created using Conda.\"\n )\n parser.add_argument(\n \"--model_name\",\n type=str,\n default=\"fridge-objects-automl-onnx\",\n help=\"The name of the registered model.\",\n )\n parser.add_argument(\n \"--model-description\",\n type=str,\n default=\"Best AutoML Object Detection ONNX model for fridge objects dataset.\",\n help=\"The description of the registered model.\",\n )\n parser.add_argument(\n \"--automl_model_name\",\n type=str,\n default=\"fasterrcnn_resnet18_fpn\"\n )\n parser.add_argument(\n \"--automl_experiment_name\",\n type=str,\n default=\"automl-fridge-objects-detection-experiment\"\n )\n parser.add_argument(\n \"--automl_compute_cluster_name\",\n type=str,\n help=\"The AML cluster name for running AutoML training experiments.\",\n default=\"gpu-cluster-v100\"\n )\n parser.add_argument(\n \"--automl_cluster_size\",\n type=str,\n help=\"AML cluster size for AutoML jobs.\",\n default=\"STANDARD_NC6S_V3\"\n )\n parser.add_argument(\n \"--automl_cluster_region\",\n type=str,\n help=\"AML cluster region for AutoML jobs.\",\n default=\"eastus2\",\n )\n parser.add_argument(\"--automl_cluster_min_instances\", type=int, default=0)\n parser.add_argument(\"--automl_cluster_max_instances\", type=int, default=4)\n parser.add_argument(\"--automl_cluster_idle_time_before_scale_down\", type=int, default=120)\n parser.add_argument(\n \"--output_file\", type=str, required=False, help=\"A file to save run id\"\n )\n\n args = parser.parse_args()\n\n prepare_and_execute(\n args.subscription_id,\n args.resource_group_name,\n args.workspace_name,\n args.cluster_name,\n args.cluster_size,\n args.cluster_region,\n args.min_instances,\n args.max_instances,\n args.idle_time_before_scale_down,\n args.env_base_image_name,\n args.conda_path,\n args.environment_name,\n args.env_description,\n args.wait_for_completion,\n args.model_name,\n args.model_description,\n args.display_name,\n args.experiment_name,\n args.deploy_environment,\n args.build_reference,\n args.automl_model_name,\n args.automl_experiment_name,\n args.automl_compute_cluster_name,\n args.automl_cluster_size,\n args.automl_cluster_region,\n args.automl_cluster_min_instances,\n args.automl_cluster_max_instances,\n args.automl_cluster_idle_time_before_scale_down,\n args.output_file,\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "microsoft/mlops-model-factory-accelerator", "sub_path": "telco_case_study_implementation/fridge_object_detection/model_factory/fridge_obj_det/mlops/src/mlops_pipeline.py", "file_name": "mlops_pipeline.py", "file_ext": "py", "file_size_in_byte": 18841, "program_lang": 
"python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "76", "api": [{"api_name": "os.getenv", "line_number": 51, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 52, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 53, "usage_type": "call"}, {"api_name": "azure.ai.ml.dsl.pipeline", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 160, "usage_type": "call"}, {"api_name": "azure.ai.ml.load_component", "line_number": 163, "usage_type": "call"}, {"api_name": "azure.ai.ml.load_component", "line_number": 164, "usage_type": "call"}, {"api_name": "azure.ai.ml.load_component", "line_number": 165, "usage_type": "call"}, {"api_name": "azure.ai.ml.load_component", "line_number": 166, "usage_type": "call"}, {"api_name": "azure.ai.ml.load_component", "line_number": 167, "usage_type": "call"}, {"api_name": "azure.ai.ml.load_component", "line_number": 168, "usage_type": "call"}, {"api_name": "azure.ai.ml.load_component", "line_number": 169, "usage_type": "call"}, {"api_name": "azure.ai.ml.dsl.pipeline", "line_number": 220, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 222, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 240, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 241, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 242, "usage_type": "call"}, {"api_name": "common.mlops.get_aml_client.get_aml_client", "line_number": 247, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 260, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 284, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 300, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 337, "usage_type": "name"}, {"api_name": "common.mlops.get_compute.get_compute", "line_number": 344, "usage_type": "call"}, {"api_name": "common.mlops.get_compute.get_compute", "line_number": 356, "usage_type": "call"}, {"api_name": "common.mlops.get_environment.get_environment", "line_number": 368, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 408, "usage_type": "call"}]}
+{"seq_id": "8031013170", "text": "import requests\nfrom bs4 import BeautifulSoup\n\n\npage = requests.get('http://www.cdiscount.com/search/10/acer+aspire.html#_his_')\n\nsoup = BeautifulSoup(page.text)\n\nprdtBloc = soup.find_all(\"div\", class_='prdtBloc')\n\n\nprdtDATA = {}\nfor prdt in prdtBloc:\n prdt = {}\n prdt['name'] = prdt.find(\"div\", class_='prdtBTit').get_ext()\n prdt['url'] = prdt.find(\"a\").get('href')\n", "repo_name": "rachidalili/MS-BGD2015", "sub_path": "Maxime_Kubryk/lesson3/exo_cc_lesson3.py", "file_name": "exo_cc_lesson3.py", "file_ext": "py", "file_size_in_byte": 376, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "76", "api": [{"api_name": "requests.get", "line_number": 5, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 7, "usage_type": "call"}]}
+{"seq_id": "9951815738", "text": "import os\nimport fnmatch\nimport io\nimport sys\nfrom optparse import OptionParser, Values\nfrom typing import List\n\nfrom littledarwin import JavaParse\nfrom tqdm import tqdm\nfrom chaosmeter import License\nfrom .metrics import *\nfrom .writers import *\n\nchaosMeterVersion = '0.1.7'\n\n\ndef main(mockArgs: list = None):\n \"\"\"\n Main ChaosMeter Function\n \"\"\"\n print(\"\"\"\n\n▄█▄ ▄ █ ██ ████▄ ▄▄▄▄▄ █▀▄▀█ ▄███▄ ▄▄▄▄▀ ▄███▄ █▄▄▄▄\n█▀ ▀▄ █ █ █ █ █ █ █ ▀▄ █ █ █ █▀ ▀ ▀▀▀ █ █▀ ▀ █ ▄▀\n█ ▀ ██▀▀█ █▄▄█ █ █ ▄ ▀▀▀▀▄ █ ▄ █ ██▄▄ █ ██▄▄ █▀▀▌\n█▄ ▄▀ █ █ █ █ ▀████ ▀▄▄▄▄▀ █ █ █▄ ▄▀ █ █▄ ▄▀ █ █\n▀███▀ █ █ █ ▀███▀ ▀ ▀███▀ █\n ▀ █ ▀ ▀\n ▀\n\n ChaosMeter version {} Copyright (C) 2020 Ali Parsai\n\n ChaosMeter comes with ABSOLUTELY NO WARRANTY.\n This is free software, and you are welcome to redistribute it\n under certain conditions; run ChaosMeter --license for details.\n\n\n \"\"\".format(chaosMeterVersion))\n\n optionParser = OptionParser(prog=\"chaosmeter\")\n options = parseCmdArgs(optionParser, mockArgs)\n\n sourcePath = os.path.abspath(options.sourcePath)\n targetPath = os.path.abspath(options.targetPath)\n\n # Find all metrics\n metricList = getAllMetrics()\n if len(metricList) == 0:\n print(\"No metrics found!\")\n sys.exit(3)\n\n for MetricClass in metricList:\n print(\"Found metric: \\\"\" + MetricClass.name + \"\\\"\")\n print(\"Found {} metrics.\\n\".format(len(metricList)))\n\n # We need to instatiate once in single-process mode. To be redesigned for mutli-process mode.\n javaParseInstance = JavaParse()\n metricInstanceList = instantiatePlugins(metricList, javaParseInstance)\n\n # Find all writers\n writerList = getAllWriters()\n if len(writerList) == 0:\n print(\"No writers found!\")\n sys.exit(4)\n\n for WriterClass in writerList:\n print(\"Found writer: \\\"\" + WriterClass.name + \"\\\"\")\n print(\"Found {} writers.\\n\".format(len(writerList)))\n\n writerInstanceList = instantiatePlugins(writerList, targetPath)\n\n fileList = findJavaFiles(sourcePath)\n\n print(os.linesep)\n print(\"Source Path: \", sourcePath)\n print(\"Target Path: \", targetPath)\n print(os.linesep)\n\n # Main loop\n fileCounter = 0\n completeResults = dict()\n completeResultsPath = os.path.join(targetPath, \"FinalReport\")\n wroteSkipMessage = False\n for srcFile in tqdm(fileList, dynamic_ncols=True, unit='files'):\n fileCounter += 1\n\n # Set paths\n fileRelativePath = os.path.relpath(srcFile, sourcePath)\n srcFileRoot, srcFileName = os.path.split(srcFile)\n targetDir = os.path.join(targetPath, os.path.relpath(srcFileRoot, sourcePath))\n targetFilePath = os.path.splitext(os.path.join(targetDir, srcFileName))[0]\n\n if options.isContinue:\n allExists = True\n for writerInstance in writerInstanceList:\n allExists = allExists and os.path.isfile(targetFilePath + writerInstance.extension)\n if allExists:\n if not wroteSkipMessage:\n tqdm.write(\"Skipping existing results...\")\n wroteSkipMessage = True\n continue\n try:\n tqdm.write(\"({:,}/{:,}) {}\".format(fileCounter, len(fileList), fileRelativePath))\n wroteSkipMessage = False\n except UnicodeError as e:\n tqdm.write(str(e) + os.linesep)\n tqdm.write(\"Non-unicode filename detected. 
Not showing in terminal.\")\n\n metricResults = calculateMetrics(srcFile, metricList, javaParseInstance, metricInstanceList)\n if metricResults is None:\n tqdm.write(\"Error in parsing Java code, skipping the file.\")\n continue\n\n metricResultsAggregate, metricLabels = aggregateMetrics(**metricResults)\n\n # Prepare the result file\n completeResults[fileRelativePath] = metricResultsAggregate\n\n if not os.path.exists(targetDir):\n os.makedirs(targetDir)\n\n for writerInstance in writerInstanceList:\n fileContent = writerInstance.createTargetFormat(metricResultsAggregate, metricLabels)\n writerInstance.write(targetFilePath, fileContent)\n\n if not options.isContinue:\n completeResultsLabels = [\"File\"]\n completeResultsLabels.extend(metricLabels)\n completeResultsAggregate = [completeResultsLabels]\n\n for cuName in sorted(completeResults.keys()):\n for methodName in sorted(completeResults[cuName].keys()):\n cellList = [cuName, methodName]\n cellList.extend(completeResults[cuName][methodName])\n completeResultsAggregate.append(cellList)\n\n for writerInstance in writerInstanceList:\n completeFileContent = writerInstance.createFinalReportTargetFormat(completeResultsAggregate)\n writerInstance.write(completeResultsPath, completeFileContent)\n\n print(os.linesep)\n\n return 0\n\n\ndef calculateMetrics(srcFile: str, metricList: List[Metric],\n javaParseExistingInstance: JavaParse = None,\n metricExistingInstanceList: List[Metric] = None):\n javaParseInstance = JavaParse() if javaParseExistingInstance is None else javaParseExistingInstance\n metricInstanceList = instantiatePlugins(metricList, javaParseInstance) \\\n if metricExistingInstanceList is None else metricExistingInstanceList\n\n try:\n # Parse source file\n sourceCode = getFileContent(srcFile)\n tree = javaParseInstance.parse(sourceCode)\n except Exception as e:\n return None\n\n # Calculate metrics\n metricResults = dict()\n for metricInstance in metricInstanceList:\n metricResults[metricInstance.name] = metricInstance.calculate(tree, sourceCode)\n del metricInstance\n del javaParseInstance\n\n return metricResults\n\n\ndef instantiatePlugins(classList, *args):\n instanceList = list()\n for pluginClass in classList:\n instance = pluginClass(*args)\n instanceList.append(instance)\n return instanceList\n\n\ndef findJavaFiles(sourcePath: str) -> List[str]:\n # Get the file list\n if not os.path.isdir(sourcePath):\n print(\"Source path must be a directory.\")\n sys.exit(5)\n\n fileList = list()\n print(\"Searching for Java files... \", end=\"\\r\")\n for root, dirnames, filenames in os.walk(sourcePath):\n for filename in fnmatch.filter(filenames, \"*.java\"):\n fileList.append(os.path.join(root, filename))\n print(\"Searching for Java files... 
{} found.\".format(len(fileList)), end=\"\\r\")\n\n if len(fileList) == 0:\n print(\"No Java files found in provided source path.\")\n sys.exit(6)\n\n return fileList\n\n\ndef getFileContent(filePath: str) -> str:\n with io.open(filePath, mode='r', errors='replace') as contentFile:\n file_data = contentFile.read()\n return str(file_data)\n\n\ndef parseCmdArgs(optionParser: OptionParser, mockArgs: list = None) -> Values:\n \"\"\"\n\n :param mockArgs:\n :type mockArgs:\n :param optionParser:\n :type optionParser:\n :return:\n :rtype:\n \"\"\"\n #\n # numberOfCPUs = os.cpu_count()\n # numberOfCPUs = numberOfCPUs if numberOfCPUs is not None else 1\n\n # parsing input options\n optionParser.add_option(\"-p\", \"--path\", action=\"store\", dest=\"sourcePath\",\n default=None, help=\"Path to Java source files\")\n optionParser.add_option(\"-t\", \"--target\", action=\"store\", dest=\"targetPath\",\n default=os.path.dirname(os.path.realpath(__file__)),\n help=\"Path to store results\")\n # optionParser.add_option(\"--workers\", action=\"store\", type=\"int\", dest=\"workers\",\n # default=numberOfCPUs, help=\"Number of workers to spawn\")\n optionParser.add_option(\"-c\", \"--continue\", action=\"store_true\", dest=\"isContinue\",\n default=False, help=\"Skips previously analyzed files\")\n optionParser.add_option(\"--license\", action=\"store_true\", dest=\"isLicenseActive\",\n default=False, help=\"Outputs the license and exit\")\n\n if mockArgs is None:\n (options, args) = optionParser.parse_args()\n else:\n (options, args) = optionParser.parse_args(args=mockArgs)\n\n if options.isLicenseActive:\n License.outputLicense()\n sys.exit(0)\n\n if options.sourcePath is None:\n optionParser.print_help()\n print(\"\\nYou need to specify at least the path to the source files.\\n\")\n print(\"\\nExample:\\n\\t ChaosMeter -p ./src/main -t ./target \\n\\n\")\n sys.exit(1)\n\n if not os.path.isdir(options.sourcePath):\n print(\"Source path must be a directory.\")\n sys.exit(2)\n\n return options\n", "repo_name": "aliparsai/ChaosMeter", "sub_path": "chaosmeter/ChaosMeter.py", "file_name": "ChaosMeter.py", "file_ext": "py", "file_size_in_byte": 9195, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "optparse.OptionParser", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 50, "usage_type": "call"}, {"api_name": "littledarwin.JavaParse", "line_number": 57, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 64, "usage_type": "call"}, {"api_name": "os.linesep", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.linesep", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path.relpath", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.join", 
"line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm.write", "line_number": 99, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 99, "usage_type": "name"}, {"api_name": "tqdm.tqdm.write", "line_number": 103, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 103, "usage_type": "name"}, {"api_name": "tqdm.tqdm.write", "line_number": 106, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 106, "usage_type": "name"}, {"api_name": "os.linesep", "line_number": 106, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm.write", "line_number": 107, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 107, "usage_type": "name"}, {"api_name": "tqdm.tqdm.write", "line_number": 111, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 111, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 120, "usage_type": "call"}, {"api_name": "os.linesep", "line_number": 141, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 146, "usage_type": "name"}, {"api_name": "littledarwin.JavaParse", "line_number": 147, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 148, "usage_type": "name"}, {"api_name": "littledarwin.JavaParse", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path", "line_number": 180, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 182, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 186, "usage_type": "call"}, {"api_name": "fnmatch.filter", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path", "line_number": 188, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 193, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 178, "usage_type": "name"}, {"api_name": "io.open", "line_number": 199, "usage_type": "call"}, {"api_name": "optparse.OptionParser", "line_number": 204, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path", "line_number": 222, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 222, "usage_type": "call"}, {"api_name": "chaosmeter.License.outputLicense", "line_number": 237, "usage_type": "call"}, {"api_name": "chaosmeter.License", "line_number": 237, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 238, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 244, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path", "line_number": 246, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 248, "usage_type": "call"}, {"api_name": "optparse.Values", "line_number": 204, "usage_type": "name"}]}
+{"seq_id": "31980322435", "text": "from django.contrib.messages import success\nfrom django.core.checks import messages\nfrom django.shortcuts import redirect, render,get_object_or_404\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import CreateView\nfrom .models import Images, Profile, Comment\nfrom .forms import EditProfileForm, ImageForm, CommentForm, ProfileUpdateForm\nfrom django.views import generic\nfrom django.urls import reverse_lazy, reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import UserChangeForm\n# Create your views here.\n\n@login_required(login_url='/accounts/login/')\ndef index(request):\n name = 'instagram app'\n images = Images.objects.all()\n comments = Comment.objects.all()\n \n return render(request, 'instagram/index.html', {\"name\":name, \"images\":images, \"comments\":comments})\n\ndef image_detail(request, image_id):\n try:\n image = Images.objects.get(id = image_id)\n image_likes = image.like.count()\n \n except Images.DoesNotExist:\n raise Http404()\n\n return render(request,\"instagram/image.html\", {\"image\":image, \"image_likes\":image_likes})\n\n\n@login_required(login_url='/accounts/login/')\ndef like_image(request, image_id):\n image = Images.objects.get(id =image_id)\n image.like.add(request.user.profile)\n image.save()\n return HttpResponseRedirect(reverse('image_detail', args=[str(image_id)]))\n\n\n@login_required(login_url='/accounts/login/')\ndef new_image(request):\n current_user =request.user\n if request.method == 'POST':\n form = ImageForm(request.POST,request.FILES)\n if form.is_valid():\n image = form.save(commit = False)\n image.profile = current_user\n image.save()\n return redirect(\"index\")\n\n else:\n form = ImageForm()\n return render (request, 'new_image.html', {\"form\":form})\n\n@login_required(login_url='/accounts/login/')\ndef delete_image(request, image_id):\n item = Images.objects.get(id =image_id)\n if request.method =='POST':\n item.delete()\n return redirect('/')\n return render(request, 'instagram/delete.html', {\"item\":item})\n \n@login_required(login_url='/accounts/login/')\ndef update_image(request, image_id):\n image = Images.objects.get(id=image_id)\n update_form = ImageForm(instance=image)\n context = {\"update_form\": update_form}\n if request.method ==\"POST\":\n update_form = ImageForm(request.POST, instance = image)\n if update_form.is_valid():\n update_form.save()\n return redirect(\"/\")\n\n return render (request, 'instagram/update_image.html', context)\n \n@login_required(login_url='/accounts/login/')\ndef search(request):\n if 'user' in request.GET and request.GET['user']:\n search_term = request.GET.get('user')\n searched_users = Profile.search_profile(search_term)\n return render(request, 'instagram/search.html', {'users':searched_users})\n\n else: \n return render(request, 'instagram/search.html')\n \n\nclass UserEditView(generic.UpdateView):\n form_class = EditProfileForm\n template_name='django_registration/edit_profile.html'\n success_url =reverse_lazy('index')\n\n def get_object(self):\n return self.request.user\n\n@login_required(login_url='/accounts/login/')\ndef add_comment(request, image_id):\n image = get_object_or_404(Images, id=image_id)\n\n if request.method == 'POST':\n comment_form = CommentForm(request.POST, request.FILES, instance=image)\n if comment_form.is_valid():\n comments = comment_form.save(commit=False)\n comments.image = 
image\n comments.user = request.user\n comments.save()\n return redirect('index')\n else:\n comment_form = CommentForm()\n \n return render(request, 'instagram/add_comment.html',{\"comment_form\":comment_form, \"image\":image})\n\n@login_required(login_url='/accounts/login/')\ndef profile(request):\n if request.method == 'POST':\n user_form=EditProfileForm(request.POST, instance =request.user)\n profile_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n return redirect('profile')\n else:\n user_form=EditProfileForm(instance =request.user)\n profile_form = ProfileUpdateForm(instance=request.user.profile)\n messages.success(request, f'Your profile was updated successfully')\n context = {\"user_form\":user_form, \"profile_form\":profile_form}\n return render(request, 'django_registration/user_profile.html', context)\n", "repo_name": "ian-otieno/Django-Instagram", "sub_path": "Instagram/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4735, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "models.Images.objects.all", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Images.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.Images", "line_number": 18, "usage_type": "name"}, {"api_name": "models.Comment.objects.all", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Comment.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "models.Comment", "line_number": 19, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 15, "usage_type": "call"}, {"api_name": "models.Images.objects.get", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Images.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "models.Images", "line_number": 25, "usage_type": "name"}, {"api_name": "models.Images.DoesNotExist", "line_number": 28, "usage_type": "attribute"}, {"api_name": "models.Images", "line_number": 28, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}, {"api_name": "models.Images.objects.get", "line_number": 36, "usage_type": "call"}, {"api_name": "models.Images.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.Images", "line_number": 36, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 39, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 39, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 34, "usage_type": "call"}, {"api_name": "forms.ImageForm", "line_number": 46, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 51, "usage_type": "call"}, {"api_name": "forms.ImageForm", "line_number": 54, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 42, "usage_type": "call"}, {"api_name": "models.Images.objects.get", "line_number": 59, "usage_type": "call"}, {"api_name": "models.Images.objects", "line_number": 59, "usage_type": "attribute"}, 
{"api_name": "models.Images", "line_number": 59, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 62, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 63, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 57, "usage_type": "call"}, {"api_name": "models.Images.objects.get", "line_number": 67, "usage_type": "call"}, {"api_name": "models.Images.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "models.Images", "line_number": 67, "usage_type": "name"}, {"api_name": "forms.ImageForm", "line_number": 68, "usage_type": "call"}, {"api_name": "forms.ImageForm", "line_number": 71, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 74, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 76, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 65, "usage_type": "call"}, {"api_name": "models.Profile.search_profile", "line_number": 82, "usage_type": "call"}, {"api_name": "models.Profile", "line_number": 82, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 83, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 86, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 78, "usage_type": "call"}, {"api_name": "django.views.generic.UpdateView", "line_number": 89, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 89, "usage_type": "name"}, {"api_name": "forms.EditProfileForm", "line_number": 90, "usage_type": "name"}, {"api_name": "django.urls.reverse_lazy", "line_number": 92, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 99, "usage_type": "call"}, {"api_name": "models.Images", "line_number": 99, "usage_type": "argument"}, {"api_name": "forms.CommentForm", "line_number": 102, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 108, "usage_type": "call"}, {"api_name": "forms.CommentForm", "line_number": 110, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 112, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 97, "usage_type": "call"}, {"api_name": "forms.EditProfileForm", "line_number": 117, "usage_type": "call"}, {"api_name": "forms.ProfileUpdateForm", "line_number": 118, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 122, "usage_type": "call"}, {"api_name": "forms.EditProfileForm", "line_number": 124, "usage_type": "call"}, {"api_name": "forms.ProfileUpdateForm", "line_number": 125, "usage_type": "call"}, {"api_name": "django.core.checks.messages.success", "line_number": 126, "usage_type": "call"}, {"api_name": "django.core.checks.messages", "line_number": 126, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 128, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 114, "usage_type": "call"}]}
+{"seq_id": "10255220697", "text": "#!/usr/bin/env python3\n\nimport sys\nimport argparse\nfrom argparse import RawDescriptionHelpFormatter, _HelpAction\nimport xml.etree.ElementTree as ET\nfrom interpret_src.Instruction import Instruction\nfrom interpret_src.Argument import Argument\nimport re\nimport interpret_src.errorTypes as errorTypes\nimport copy\n\nclass Nil:\t\n\t\"\"\"This class just simulate Nil type\"\"\"\n\n\tdef __init__(self):\n\t\tpass\n\tdef __str__(self):\n\t\treturn \"nil\"\n\tdef __repr__(self):\n\t\treturn \"nil\"\n\nNIL = Nil()\n\n\ndef parseArguments():\n\t\"\"\"Parse arguments with slightly modifed argparse\"\"\"\n\n\tclass ModifiedArgumentParser(argparse.ArgumentParser):\n\t\t\"\"\"Just override error exit status code in argparse to be correct\"\"\"\n\t\tdef error(self, message):\n\t\t\texitWithError(errorTypes.wrongParameters, message)\n\n\tclass Modified_HelpAction(_HelpAction):\n\t\t\"\"\"\n\t\tJust override __call__ so if --help argument is combined with others \n\t\tit exits with propper error\n\t\t\"\"\"\n\n\t\tdef __init__(self,\n\t\t\t\toption_strings,\n\t\t\t\tdest='==SUPPRESS==',\n\t\t\t\tdefault='==SUPPRESS==',\n\t\t\t\thelp=None):\n\t\t super(_HelpAction, self).__init__(\n\t\t\t\toption_strings=option_strings,\n\t\t\t\tdest=dest,\n\t\t\t\tdefault=default,\n\t\t\t\tnargs=0,\n\t\t\t\thelp=help)\n\n\t\tdef __call__(self, parser, namespace, values, option_string=None):\n\t\t\ttry:\n\t\t\t\tsourceArg = namespace._get_kwargs()[1][1]\n\t\t\t\tinputArg = namespace._get_kwargs()[2][1]\n\t\t\texcept:\n\t\t\t\tparser.exit(99, '%s: error: %s\\n' % (parser.prog,\"Internal error\"))\n\n\t\t\tif sourceArg or inputArg:\n\t\t\t\tparser.error(\"--help argumnet cannot be combined with others\")\t\t\n\t\t\telse:\n\t\t\t\tparser.print_help()\n\t\t\t\tparser.exit()\n\t \n\targparse._HelpAction = Modified_HelpAction\n\n\tp = ModifiedArgumentParser(formatter_class=RawDescriptionHelpFormatter, description=\"Interpret for IPPcode19. Source code must be in XML representation, for example generated by parse.php. If one of the arguments --input and --source is missing interpret loads particular data from stdin.\", epilog=\"\"\"\t\"\"\", add_help=False)\n\tp.add_argument('--help', action='help', help='show this help message and exit')\n\tp.add_argument(\"--source\", help = \"file with XML representation of source code. If not given, --input is required.\", metavar=('FILE'))\n\tp.add_argument(\"--input\", help = \"input file which will be used at interpretation. 
If not given, --source is required.\", metavar=('FILE'))\n\n\targs = p.parse_args()\n\n\tif not (args.source or args.input):\n\t p.error('Give me at least one of the arguments --input and --source')\n\n\treturn args.source, args.input\n\ndef getLinesFromFile(file):\n\t\"\"\" \n\tGet list of lines from file without line endings \n\t\n\tParameters: \n\tfile (str): file you want lines from\n\t\n\tReturns: \n\tlist of str: lines from file without line endings \n\t\"\"\"\n\n\tif file:\t\t\n\t\ttry:\n\t\t\twith open(file, \"r\") as f:\n\t\t\t\tdata = f.readlines()\n\t\texcept:\n\t\t\texitWithError(errorTypes.cannotOpenSourceFiles, f\"File: {file}\")\n\telse:\n\t\treturn None\n\t\n\treturn [line.strip() for line in data]\n\n\ndef exitWithError(errorType, additionalMessage = None):\n\t\"\"\" \n\tExit with specific error type, propper exit code and friendly text\n\t\n\tParameters: \n\terrorType (Error): Custom error object\n\tadditionalMessage (str): Text that will be printed to stderr\n\t\"\"\"\t\n\n\tprint(errorType, file=sys.stderr)\n\t\n\tif additionalMessage:\n\t\tprint(additionalMessage, file=sys.stderr)\n\n\tsys.exit(errorType.code)\n\ndef customAssert(result, errorType):\n\t\"\"\" \n\tCustom assert that can exit with specified error type\n\t\n\tParameters: \n\tresult (Bool): Result from condition that needs to be true\n\t\"\"\"\t\n\n\tif not result:\n\t\texitWithError(errorType)\n\n\ndef checkXmlHeader(sourceLines):\n\t\"\"\" \n\tCheck if xml header in source lines is correct\n\t\n\tParameters: \n\tsourceLines (list of str): source code that contains xml hedaer at the beggining\n\t\"\"\"\t\n\n\tfor line in sourceLines:\n\t\tif line.strip() != \"\":\n\t\t\tfirstLineOfXml = line\n\t\t\tcustomAssert(firstLineOfXml == r'', errorTypes.xmlNotWellFormated)\n\t\t\tbreak\t\n\ndef getInstructionsFromXml(root):\n\t\"\"\" \n\tGet list of instructions from xml code\n\t\n\tParameters: \n\troot : root of xml code from ElementTree\n\t\n\tReturns: \n\tinstructionsList (list of Instruction): list of instructions indeed\n\t\"\"\"\n\n\tinstructionsList = []\n\n\tfor child in root:\n\t\tcustomAssert(child.tag == \"instruction\", errorTypes.xmlStructureSyntaxLex)\n\n\t\tfor instrAttrib in child.attrib:\n\t\t\tcustomAssert(instrAttrib in (\"order\", \"opcode\"), errorTypes.xmlStructureSyntaxLex)\n\n\t\targuments = []\n\n\t\tfor arg in child:\n\t\t\tfor atrib in arg.attrib:\n\t\t\t\tcustomAssert(atrib == \"type\", errorTypes.xmlStructureSyntaxLex)\n\n\t\t\tresult = re.search(\"^arg([1-3])$\", arg.tag)\n\n\t\t\tcustomAssert(result, errorTypes.xmlStructureSyntaxLex)\n\t\t\t\n\t\t\torder = result.group(1)\n\t\t\targument = Argument(arg.get(\"type\"), arg.text, order)\n\t\t\targuments.append(argument)\n\n\t\targuments.sort(key=lambda argument: argument.order)\n\n\t\t# Assert if in instruction is for example just arg2, but arg1 is missing, also duplicates\n\t\tfor index, argument in enumerate(arguments, start=1):\n\t\t\tcustomAssert(argument.order == index, errorTypes.xmlStructureSyntaxLex)\n\n\t\tinstruction = Instruction(child.get(\"opcode\"), arguments, child.get(\"order\"))\n\t\tinstructionsList.append(instruction)\n \n\tinstructionsList.sort(key=lambda instruction: instruction.order)\n\n\t# Assert if for example there is instruction with order 2 but instruction with order 1 is missing, \n\t# also when there are order duplicates\n\tfor index, instruction in enumerate(instructionsList, start=1):\n\t\tcustomAssert(instruction.order == index, errorTypes.xmlStructureSyntaxLex)\t\n\n\treturn instructionsList\n\n\ndef 
makeInstructionRule(name, arg_1=None, arg_2=None, arg_3=None):\n\t\"\"\" \n\tMake one Instruction from instruction and argument names. E.g. makeInstructionRule(\"MOVE\", \"var\", \"symb\")\n\t\n\tParameters: \n\tname (str) : Name of instruction\n\targ_1 (str) : Name of 1st argument\n\targ_2 (str) : Name of 2nd argument\n\targ_3 (str) : Name of 3rd argument\n\t\n\tReturns: \n\tInstruction : new instruction according to parameters\n\t\"\"\"\n\n\targuments = []\n\n\tif arg_1:\n\t\targuments.append(Argument(arg_1))\n\tif arg_2:\n\t\targuments.append(Argument(arg_2))\n\tif arg_3:\n\t\targuments.append(Argument(arg_3))\n\n\treturn Instruction(name, arguments)\n\ndef getInstructionsRules():\n\t\"\"\" \n\tGet instruction rules, this defines IPPcode19 syntax\n\t\n\tReturns: \n\tinstructionRules (list of Instruction) : filled instruction rules\n\t\"\"\"\t\n\n\tinstructionRules = []\n\n\tinstructionRules.append(makeInstructionRule(\"MOVE\", \"var\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"CREATEFRAME\"))\n\tinstructionRules.append(makeInstructionRule(\"PUSHFRAME\"))\n\tinstructionRules.append(makeInstructionRule(\"POPFRAME\"))\n\tinstructionRules.append(makeInstructionRule(\"DEFVAR\", \"var\"))\n\tinstructionRules.append(makeInstructionRule(\"CALL\", \"label\"))\n\tinstructionRules.append(makeInstructionRule(\"RETURN\"))\n\tinstructionRules.append(makeInstructionRule(\"PUSHS\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"POPS\", \"var\"))\t\n\tinstructionRules.append(makeInstructionRule(\"ADD\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"SUB\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"MUL\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"IDIV\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"LT\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"GT\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"EQ\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"AND\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"OR\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"NOT\", \"var\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"INT2CHAR\", \"var\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"STRI2INT\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"READ\", \"var\", \"type\"))\n\tinstructionRules.append(makeInstructionRule(\"WRITE\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"CONCAT\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"STRLEN\", \"var\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"GETCHAR\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"SETCHAR\", \"var\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"TYPE\", \"var\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"LABEL\", \"label\"))\n\tinstructionRules.append(makeInstructionRule(\"JUMP\", \"label\"))\n\tinstructionRules.append(makeInstructionRule(\"JUMPIFEQ\", \"label\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"JUMPIFNEQ\", \"label\", \"symb\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"EXIT\", \"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"DPRINT\", 
\"symb\"))\n\tinstructionRules.append(makeInstructionRule(\"BREAK\"))\n\n\treturn instructionRules\n\n\ndef checkOperandLexems(instructionsList):\n\t\"\"\" \n\tChceck if lexems in operands are correct\n\t\n\tParameters: \n\tinstructionsList (list of Instruction) : list of instructions from source code\n\t\"\"\"\n\n\tfor instruction in instructionsList:\n\t\tfor argument in instruction.arguments:\n\t\t\tif argument.type == \"var\":\n\t\t\t\tresult = re.search(r'^(GF|LF|TF)@([a-z]|[A-Z]|[\\_\\-\\$\\&\\%\\*\\?\\!])(\\w|[\\_\\-\\$\\&\\%\\*\\?\\!])*$', argument.name)\n\t\t\telif argument.type == \"string\":\n\t\t\t\tresult = re.search(r'^([^\\s\\#\\\\]|\\\\[0-9]{3})*$', argument.name)\t\n\t\t\telif argument.type == \"int\":\n\t\t\t\tresult = re.search(r'^[-\\+]?[0-9]+$', argument.name)\t\n\t\t\telif argument.type == \"bool\":\n\t\t\t\tresult = re.search(r'^(false|true)$', argument.name)\t\n\t\t\telif argument.type == \"nil\":\n\t\t\t\tresult = re.search(r'^nil$', argument.name)\t\t\t\t\n\t\t\telif argument.type == \"label\":\n\t\t\t\tresult = re.search(r'^([a-z]|[A-Z]|[\\_\\-\\$\\&\\%\\*\\?\\!])(\\w|[\\_\\-\\$\\&\\%\\*\\?\\!])*$', argument.name)\n\t\t\telif argument.type == \"type\":\n\t\t\t\tresult = re.search(r'^(string|int|bool)$', argument.name)\t\t\t\t\n\t\t\telse:\n\t\t\t\tresult = False\n\n\t\t\tcustomAssert(result, errorTypes.xmlStructureSyntaxLex)\n\ndef checkSyntax(instructionsList):\n\t\"\"\" \n\tCheck syntax in source coce\n\t\n\tParameters: \n\tinstructionsList (list of Instruction) : list of instructions from source code\n\t\"\"\"\t\n\n\tinstructionRules = getInstrutionsRules();\n\n\tfor instruction in instructionsList:\n\t\tcustomAssert(instruction in instructionRules, errorTypes.xmlStructureSyntaxLex)\n\ndef checkLabelsSematics(instructionsList):\n\t\"\"\" \n\tCheck labels semantics, i.e. if there is redefinition or using not defined label\n\t\n\tParameters: \n\tinstructionsList (list of Instruction) : list of instructions from source code\n\t\"\"\"\n\n\tdefinedLabels = []\n\n\t# Find labels, check if there is no attempt of redefinition\n\tfor instruction in instructionsList:\n\t\tif instruction.name == \"LABEL\":\n\t\t\tlabelName = instruction.arguments[0].name\n\t\t\tcustomAssert(labelName not in definedLabels, errorTypes.semantics)\n\n\t\t\tdefinedLabels.append(labelName)\n\n\t# Check if label not defined\n\tfor instruction in instructionsList:\n\t\tif instruction.name != \"LABEL\":\n\t\t\tfor argument in instruction.arguments:\n\t\t\t\tif argument.type == \"label\":\n\t\t\t\t\tcustomAssert(argument.name in definedLabels, errorTypes.semantics)\n\ndef replaceEscapeSequences(instructionsList):\n\t\"\"\" \n\tReplace escape sequences in string types in source code. E.g. a\\032a => a a\n\t\n\tParameters: \n\tinstructionsList (list of Instruction) : list of instructions from source code\n\t\n\tReturns: \n\tinstructionsList (list of Instruction) : list of instructions with replaced escape sequences\n\t\"\"\"\n\n\tfor instruction in instructionsList:\n\t\tfor argument in instruction.arguments:\n\t\t\tif argument.type == \"string\":\n\t\t\t\tescapedUnicodesList = re.findall(r'(\\\\[0-9]{3})+', argument.name)\n\n\t\t\t\tfor escapedUnicode in escapedUnicodesList:\n\t\t\t\t\tunicodeAsChar = chr(int(escapedUnicode[1:]))\n\t\t\t\t\targument.name = argument.name.replace(escapedUnicode, unicodeAsChar)\n\n\treturn instructionsList\n\ndef readInput(inputLines):\n\t\"\"\" \n\tGeneralizes reading from input. 
\n\tIf there is a file with input text take data from there, otherwise read from stdin\n\t\n\tParameters: \n\tinputLines (list of str) : from file that user added as cli argument\n\t\n\tReturns: \n\tString : text from input\n\t\"\"\"\t\n\n\tif inputLines != None:\n\t\treturn inputLines.pop(0)\n\telse:\n\t\treturn input()\n\n\ndef\tgetLabelsIndexes(instructionsList):\n\t\"\"\" \n\tGet label names from source code with their indexes, so it will be easier to do jumps to labels\n\tE.g.: {\"foo\" : 5, \"boo\" : 9}\n\t\n\tParameters: \n\tinstructionsList (list of Instruction) : list of instructions from source code\n\t\n\tReturns: \n\tlabelsIndexDict (Dictionary) : label names from source code with their indexes \n\t\"\"\"\t\n\n\tlabelsIndexDict = {}\n\n\tfor i, instruction in enumerate(instructionsList):\n\t\tif instruction.name == \"LABEL\":\n\t\t\tlabelName = instruction.arguments[0].name\n\t\t\tlabelsIndexDict[labelName] = i\n\n\treturn labelsIndexDict\n\n\ndef interpretCode(instructionsList, inputLines):\n\t\"\"\" \n\tInterpret code\n\t\n\tParameters: \n\tinstructionsList (list of Instruction) : list of instructions from source code\n\tinputLines (list of str) : from file that user added as cli argument \n\t\"\"\"\t\n\n\t# Position of labels in instructionsList E.g.: {\"foo\" : 5, \"boo\" : 9}\n\t# So it will be easier to jump to these labels\n\tlabelsIndexDict = getLabelsIndexes(instructionsList)\n\n\tGF = {}\n\tLF = []\n\tTF = None\n\n\tstackPushsPops = []\n\tstackReturnToCaller = []\n\n\tdataToDestination = None\n\tdestinationData = None\n\tsourceDataFirst = None\n\tsourceDataSecond = None\n\tlabelToJump = None\n\n\tcurrentInstructionIndex = 0\n\tprocessedInstructionsCount = 0\n\n\twhile currentInstructionIndex < len(instructionsList):\n\n\t\tinstruction = instructionsList[currentInstructionIndex]\n\t\tcurrentInstructionIndex += 1\n\t\tprocessedInstructionsCount += 1\n\n\t\t# Semantics assert and get data from instruction source and destination arguments\n\t\tfor argument in instruction.arguments:\n\n\t\t\t\t# For classic: INSTRUCTION dest [source] [source]\n\t\t\t\tif argument.type == \"var\" and argument.order == 1 and instruction.name not in (\"PUSHS\", \"WRITE\", \"EXIT\", \"DPRINT\"):\n\t\t\t\t\tframe = argument.name.split(\"@\", 1)[0]\n\t\t\t\t\tvarName = argument.name.split(\"@\", 1)[1]\n\n\t\t\t\t\tif frame == \"GF\":\t\t\t\n\t\t\t\t\t\tif instruction.name != \"DEFVAR\":\n\t\t\t\t\t\t\tcustomAssert(varName in GF, errorTypes.variableNotDefined)\n\t\t\t\t\t\t\tdestinationData = GF[varName]\t\t\t\t\n\t\t\t\t\telif frame == \"LF\":\n\t\t\t\t\t\tcustomAssert(len(LF) != 0, errorTypes.frameNotExists)\n\t\t\t\t\t\tif instruction.name != \"DEFVAR\":\n\t\t\t\t\t\t\tcustomAssert(varName in LF[-1], errorTypes.variableNotDefined)\t\t\t\t\t\n\t\t\t\t\t\t\tdestinationData = LF[-1][varName]\t\t\t\t\n\t\t\t\t\telif frame == \"TF\":\n\t\t\t\t\t\tcustomAssert(TF != None, errorTypes.frameNotExists)\n\t\t\t\t\t\tif instruction.name != \"DEFVAR\":\n\t\t\t\t\t\t\tcustomAssert(varName in TF, errorTypes.variableNotDefined)\t\t\t\t\t\n\t\t\t\t\t\t\tdestinationData = TF[varName]\t\t\t\t\n\n\t\t\t\t# For classic: INSTRUCTION dest source [source] E.g.: ADD var symb symb\n\t\t\t\t# Or: INSTRUCTION source E.g.: WRITE symb\n\t\t\t\tif argument.order in (2, 3) or instruction.name in (\"PUSHS\", \"WRITE\", \"EXIT\", \"DPRINT\"):\n\t\t\t\t\tif argument.type == \"var\":\n\t\t\t\t\t\tframe = argument.name.split(\"@\", 1)[0]\n\t\t\t\t\t\tvarName = argument.name.split(\"@\", 1)[1]\n\n\t\t\t\t\t\tif frame == 
\"GF\":\n\t\t\t\t\t\t\tcustomAssert(varName in GF, errorTypes.variableNotDefined)\n\t\t\t\t\t\t\tsourceData = GF[varName]\n\t\t\t\t\t\telif frame == \"LF\":\n\t\t\t\t\t\t\tcustomAssert(len(LF) != 0, errorTypes.frameNotExists)\n\t\t\t\t\t\t\tcustomAssert(varName in LF[-1], errorTypes.variableNotDefined)\n\t\t\t\t\t\t\tsourceData = LF[-1][varName]\n\t\t\t\t\t\telif frame == \"TF\":\n\t\t\t\t\t\t\tcustomAssert(TF != None, errorTypes.frameNotExists)\n\t\t\t\t\t\t\tcustomAssert(varName in TF, errorTypes.variableNotDefined)\n\t\t\t\t\t\t\tsourceData = TF[varName]\n\n\t\t\t\t\t\t# Because second argument in TYPE dont have to be initialized\n\t\t\t\t\t\tcustomAssert(sourceData != None or instruction.name == \"TYPE\", errorTypes.missingValue)\n\n\t\t\t\t\telif argument.type in (\"string\", \"type\"):\n\t\t\t\t\t\tsourceData = argument.name\n\t\t\t\t\telif argument.type == \"int\":\n\t\t\t\t\t\tsourceData = int(argument.name)\n\t\t\t\t\telif argument.type == \"bool\":\n\t\t\t\t\t\tsourceData = argument.name == \"true\"\n\t\t\t\t\telif argument.type == \"nil\":\n\t\t\t\t\t\tsourceData = NIL\t\t\t\n\n\t\t\t\t\tif argument.order in (1, 2):\n\t\t\t\t\t\tsourceDataFirst = sourceData\n\t\t\t\t\telif argument.order == 3:\n\t\t\t\t\t\tsourceDataSecond = sourceData\t\t\t\t\n\n\t\t\t\telif argument.type == \"label\":\n\t\t\t\t\tlabelToJump = argument.name\n\n\n\t\tif instruction.name == \"DEFVAR\":\n\n\t\t\tdataToDestination = None\n\n\t\telif instruction.name == \"MOVE\":\n\n\t\t\tdataToDestination = sourceDataFirst\n\n\t\telif instruction.name == \"CREATEFRAME\":\n\n\t\t\tTF = {}\n\n\t\telif instruction.name == \"PUSHFRAME\":\n\t\t\tcustomAssert(TF != None, errorTypes.frameNotExists)\n\t\t\tLF.append(TF)\t\n\t\t\tTF = None\n\n\t\telif instruction.name == \"POPFRAME\":\n\t\t\tcustomAssert(len(LF) != 0, errorTypes.frameNotExists)\n\t\t\tTF = LF.pop()\n\n\t\telif instruction.name == \"WRITE\":\n\t\t\tif type(sourceDataFirst) != Nil:\n\t\t\t\tif type(sourceDataFirst) == bool:\n\t\t\t\t\tprint(str(sourceDataFirst).lower(), end=\"\")\n\t\t\t\telse:\n\t\t\t\t\tprint(sourceDataFirst, end=\"\")\n\n\t\telif instruction.name == \"READ\":\n\t\t\texpectedType = sourceDataFirst\n\n\t\t\ttry:\n\t\t\t\treadedData = readInput(inputLines)\n\t\t\texcept:\n\t\t\t\treadedData = 0\n\t\t\t\tif expectedType == \"string\":\n\t\t\t\t\treadedData = \"\"\n\n\t\t\tif expectedType == \"string\":\n\t\t\t\treadedData = str(readedData)\n\t\t\telif expectedType == \"int\":\n\t\t\t\ttry:\n\t\t\t\t\treadedData = int(readedData)\n\t\t\t\texcept:\n\t\t\t\t\treadedData = 0\n\t\t\telif expectedType == \"bool\":\t\t\t\t\n\t\t\t\treadedData = str(readedData).lower() == \"true\"\n\n\t\t\tdataToDestination = readedData\n\n\t\telif instruction.name == \"ADD\":\n\t\t\tcustomAssert(type(sourceDataFirst) == int, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == int, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst + sourceDataSecond\n\n\t\telif instruction.name == \"SUB\":\n\t\t\tcustomAssert(type(sourceDataFirst) == int, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == int, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst - sourceDataSecond\n\n\t\telif instruction.name == \"MUL\":\n\t\t\tcustomAssert(type(sourceDataFirst) == int, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == int, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst * sourceDataSecond\n\n\t\telif instruction.name == 
\"IDIV\":\n\t\t\tcustomAssert(type(sourceDataFirst) == int, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == int, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(sourceDataSecond != 0, errorTypes.wrongOperandValue)\n\t\t\tdataToDestination = sourceDataFirst // sourceDataSecond\t\t\t\t\t\t\t\t\t\n\n\t\telif instruction.name == \"LT\":\n\t\t\tcustomAssert(type(sourceDataFirst) == type(sourceDataSecond), errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst < sourceDataSecond\n\t\t\n\t\telif instruction.name == \"GT\":\n\t\t\tcustomAssert(type(sourceDataFirst) == type(sourceDataSecond), errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst > sourceDataSecond\n\n\t\telif instruction.name == \"EQ\":\n\t\t\tcustomAssert(type(sourceDataFirst) == type(sourceDataSecond) \n\t\t\t\t\t\t\tor type(sourceDataFirst) == Nil \n\t\t\t\t\t\t\tor type(sourceDataSecond) == Nil, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst == sourceDataSecond\n\n\t\telif instruction.name == \"AND\":\n\t\t\tcustomAssert(type(sourceDataFirst) == bool, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == bool, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst and sourceDataSecond\n\n\t\telif instruction.name == \"OR\":\n\t\t\tcustomAssert(type(sourceDataFirst) == bool, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == bool, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst or sourceDataSecond\n\n\t\telif instruction.name == \"NOT\":\n\t\t\tcustomAssert(type(sourceDataFirst) == bool, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = not sourceDataFirst\n\n\t\telif instruction.name == \"PUSHS\":\n\n\t\t\tstackPushsPops.append(sourceDataFirst)\n\n\t\telif instruction.name == \"POPS\":\n\t\t\tcustomAssert(len(stackPushsPops) != 0, errorTypes.missingValue)\t\n\t\t\tdataToDestination = stackPushsPops.pop()\n\n\t\telif instruction.name == \"JUMP\":\n\n\t\t\tcurrentInstructionIndex = labelsIndexDict[labelToJump]\n\n\t\telif instruction.name == \"JUMPIFEQ\":\n\t\t\tcustomAssert(type(sourceDataFirst) == type(sourceDataSecond), errorTypes.wrongOperandType)\n\t\t\tif sourceDataFirst == sourceDataSecond:\n\t\t\t\tcurrentInstructionIndex = labelsIndexDict[labelToJump]\n\n\t\telif instruction.name == \"JUMPIFNEQ\":\n\t\t\tcustomAssert(type(sourceDataFirst) == type(sourceDataSecond), errorTypes.wrongOperandType)\n\t\t\tif sourceDataFirst != sourceDataSecond:\n\t\t\t\tcurrentInstructionIndex = labelsIndexDict[labelToJump]\n\n\t\telif instruction.name == \"CALL\":\n\t\t\tstackReturnToCaller.append(currentInstructionIndex)\n\t\t\tcurrentInstructionIndex = labelsIndexDict[labelToJump]\t\t\n\n\t\telif instruction.name == \"RETURN\":\n\t\t\tcustomAssert(len(stackReturnToCaller) != 0, errorTypes.missingValue)\n\t\t\tcurrentInstructionIndex = stackReturnToCaller.pop()\t\t\t\t\t\n\n\t\telif instruction.name == \"INT2CHAR\":\n\t\t\tcustomAssert(type(sourceDataFirst) == int, errorTypes.wrongOperandType)\n\t\t\ttry:\n\t\t\t\tdataToDestination = chr(sourceDataFirst)\n\t\t\texcept:\n\t\t\t\texitWithError(errorTypes.wrongStringManipulation)\n\n\t\telif instruction.name == \"STRI2INT\":\n\t\t\tcustomAssert(type(sourceDataFirst) == str, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == int, errorTypes.wrongOperandType)\n\t\t\ttry:\n\t\t\t\tdataToDestination = 
ord(sourceDataFirst[sourceDataSecond])\n\t\t\texcept:\n\t\t\t\texitWithError(errorTypes.wrongStringManipulation)\n\n\t\telif instruction.name == \"CONCAT\":\n\t\t\tcustomAssert(type(sourceDataFirst) == str, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == str, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = sourceDataFirst + sourceDataSecond\n\n\t\telif instruction.name == \"STRLEN\":\n\t\t\tcustomAssert(type(sourceDataFirst) == str, errorTypes.wrongOperandType)\n\t\t\tdataToDestination = len(sourceDataFirst)\n\n\t\telif instruction.name == \"GETCHAR\":\n\t\t\tcustomAssert(type(sourceDataFirst) == str, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == int, errorTypes.wrongOperandType)\n\t\t\ttry:\n\t\t\t\tdataToDestination = sourceDataFirst[sourceDataSecond]\n\t\t\texcept:\n\t\t\t\texitWithError(errorTypes.wrongStringManipulation)\n\n\t\telif instruction.name == \"SETCHAR\":\n\t\t\tcustomAssert(type(destinationData) == str, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataFirst) == int, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(type(sourceDataSecond) == str, errorTypes.wrongOperandType)\n\n\t\t\tdestinationData = list(destinationData)\n\n\t\t\ttry:\n\t\t\t\tdestinationData[sourceDataFirst] = sourceDataSecond[0]\n\t\t\t\tdataToDestination = \"\".join(destinationData)\n\t\t\texcept:\n\t\t\t\texitWithError(errorTypes.wrongStringManipulation)\t\t\t\t\n\n\t\telif instruction.name == \"TYPE\":\n\t\t\t# Uninitialized variable\n\t\t\tif sourceDataFirst == None:\n\t\t\t\tdataToDestination = \"\"\n\t\t\telif type(sourceDataFirst) == Nil:\n\t\t\t\tdataToDestination = \"nil\"\n\t\t\telif type(sourceDataFirst) == int:\n\t\t\t\tdataToDestination = \"int\"\n\t\t\telif type(sourceDataFirst) == str:\n\t\t\t\tdataToDestination = \"string\"\n\t\t\telif type(sourceDataFirst) == bool:\n\t\t\t\tdataToDestination = \"bool\"\n\n\t\telif instruction.name == \"EXIT\":\n\t\t\tcustomAssert(type(sourceDataFirst) == int, errorTypes.wrongOperandType)\n\t\t\tcustomAssert(0 <= sourceDataFirst <= 49, errorTypes.wrongOperandValue)\n\t\t\tsys.exit(sourceDataFirst)\n\n\t\telif instruction.name == \"DPRINT\":\n\t\t\tif type(sourceDataFirst) != Nil:\n\t\t\t\tif type(sourceDataFirst) == bool:\n\t\t\t\t\tprint(str(sourceDataFirst).lower(), file=sys.stderr, end=\"\")\n\t\t\t\telse:\n\t\t\t\t\tprint(sourceDataFirst, file=sys.stderr, end=\"\")\n\n\t\telif instruction.name == \"BREAK\":\n\t\t\tprint(\"Current instruction index:\", currentInstructionIndex - 1, file=sys.stderr)\n\t\t\tprint(\"Number of processed instructions:\", processedInstructionsCount, \"(including this BREAK)\", file=sys.stderr)\n\t\t\tprint(\"GF:\", GF, file=sys.stderr)\n\t\t\tprint(\"TF:\", TF, file=sys.stderr)\n\t\t\tprint(\"LF:\", LF, file=sys.stderr)\n\n\n\t\t# Write dataToDestination to destination variable in frame\n\t\tif len(instruction.arguments) != 0:\n\t\t\tif instruction.arguments[0].type == \"var\" and instruction.name not in (\"PUSHS\", \"WRITE\", \"EXIT\", \"DPRINT\"):\n\t\t\t\tframe = instruction.arguments[0].name.split(\"@\", 1)[0]\n\t\t\t\tvarName = instruction.arguments[0].name.split(\"@\", 1)[1]\n\n\t\t\t\tif frame == \"GF\":\t\t\t\t\t\n\t\t\t\t\tGF[varName] = dataToDestination\n\t\t\t\telif frame == \"LF\":\n\t\t\t\t\tLF[-1][varName] = dataToDestination\n\t\t\t\telif frame == \"TF\":\n\t\t\t\t\tTF[varName] = dataToDestination\n\n\t\t\ndef main():\n\tsourceFile, inputFile = parseArguments()\n\n\tif sourceFile:\n\t\tsourceLines = 
getLinesFromFile(sourceFile)\n\telse:\n\t\tsourceLines = [line.strip() for line in sys.stdin]\t \n\t\n\tinputLines = getLinesFromFile(inputFile)\n\n\ttry:\n\t\troot = ET.fromstringlist(sourceLines)\n\texcept:\n\t\texitWithError(errorTypes.xmlNotWellFormated)\n\n\tcustomAssert(root.tag == \"program\", errorTypes.xmlStructureSyntaxLex)\n\tcustomAssert(root.get(\"language\") == \"IPPcode19\", errorTypes.xmlStructureSyntaxLex)\n\tfor atrib in root.attrib:\n\t\tcustomAssert(atrib in (\"language\", \"name\" , \"description\"), errorTypes.xmlStructureSyntaxLex)\n\tcheckXmlHeader(sourceLines)\n\n\tinstructionsList = getInstructionsFromXml(root)\n\n\tcheckOperandLexems(instructionsList)\n\tcheckSyntax(instructionsList)\n\n\tcheckLabelsSematics(instructionsList)\n\n\tinstructionsList = replaceEscapeSequences(instructionsList)\n\n\tinterpretCode(instructionsList, inputLines)\n\n\nif __name__== \"__main__\":\n\tmain()\n", "repo_name": "dvagala/VUT-FIT-IPP-Code-interpreter", "sub_path": "interpret.py", "file_name": "interpret.py", "file_ext": "py", "file_size_in_byte": 25149, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 29, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes.wrongParameters", "line_number": 32, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 32, "usage_type": "name"}, {"api_name": "argparse._HelpAction", "line_number": 34, "usage_type": "name"}, {"api_name": "argparse._HelpAction", "line_number": 45, "usage_type": "argument"}, {"api_name": "argparse._HelpAction", "line_number": 65, "usage_type": "attribute"}, {"api_name": "argparse.RawDescriptionHelpFormatter", "line_number": 67, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.cannotOpenSourceFiles", "line_number": 95, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 95, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 111, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 114, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 116, "usage_type": "call"}, {"api_name": "interpret_src.errorTypes.xmlNotWellFormated", "line_number": 141, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 141, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.xmlStructureSyntaxLex", "line_number": 158, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 158, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.xmlStructureSyntaxLex", "line_number": 161, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 161, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.xmlStructureSyntaxLex", "line_number": 167, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 167, "usage_type": "name"}, {"api_name": "re.search", "line_number": 169, "usage_type": "call"}, {"api_name": "interpret_src.errorTypes.xmlStructureSyntaxLex", "line_number": 171, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 171, "usage_type": "name"}, {"api_name": "interpret_src.Argument.Argument", "line_number": 174, "usage_type": "call"}, {"api_name": "interpret_src.errorTypes.xmlStructureSyntaxLex", "line_number": 181, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 181, "usage_type": "name"}, 
{"api_name": "interpret_src.Instruction.Instruction", "line_number": 183, "usage_type": "call"}, {"api_name": "interpret_src.errorTypes.xmlStructureSyntaxLex", "line_number": 191, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 191, "usage_type": "name"}, {"api_name": "interpret_src.Argument.Argument", "line_number": 213, "usage_type": "call"}, {"api_name": "interpret_src.Argument.Argument", "line_number": 215, "usage_type": "call"}, {"api_name": "interpret_src.Argument.Argument", "line_number": 217, "usage_type": "call"}, {"api_name": "interpret_src.Instruction.Instruction", "line_number": 219, "usage_type": "call"}, {"api_name": "re.search", "line_number": 281, "usage_type": "call"}, {"api_name": "re.search", "line_number": 283, "usage_type": "call"}, {"api_name": "re.search", "line_number": 285, "usage_type": "call"}, {"api_name": "re.search", "line_number": 287, "usage_type": "call"}, {"api_name": "re.search", "line_number": 289, "usage_type": "call"}, {"api_name": "re.search", "line_number": 291, "usage_type": "call"}, {"api_name": "re.search", "line_number": 293, "usage_type": "call"}, {"api_name": "interpret_src.errorTypes.xmlStructureSyntaxLex", "line_number": 297, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 297, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.xmlStructureSyntaxLex", "line_number": 310, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 310, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.semantics", "line_number": 326, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 326, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.semantics", "line_number": 335, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 335, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 351, "usage_type": "call"}, {"api_name": "interpret_src.errorTypes.variableNotDefined", "line_number": 444, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 444, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.frameNotExists", "line_number": 447, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 447, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.variableNotDefined", "line_number": 449, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 449, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.frameNotExists", "line_number": 452, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 452, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.variableNotDefined", "line_number": 454, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 454, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.variableNotDefined", "line_number": 465, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 465, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.frameNotExists", "line_number": 468, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 468, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.variableNotDefined", "line_number": 469, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 469, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.frameNotExists", 
"line_number": 472, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 472, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.variableNotDefined", "line_number": 473, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 473, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.missingValue", "line_number": 477, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 477, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.frameNotExists", "line_number": 510, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 510, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.frameNotExists", "line_number": 515, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 515, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 548, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 548, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 549, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 549, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 553, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 553, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 554, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 554, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 558, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 558, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 559, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 559, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 563, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 563, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 564, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 564, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandValue", "line_number": 565, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 565, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 569, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 569, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 573, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 573, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 579, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 579, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 583, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 583, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 584, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 584, "usage_type": "name"}, 
{"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 588, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 588, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 589, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 589, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 593, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 593, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.missingValue", "line_number": 601, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 601, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 609, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 609, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 614, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 614, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.missingValue", "line_number": 623, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 623, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 627, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 627, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongStringManipulation", "line_number": 631, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 631, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 634, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 634, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 635, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 635, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongStringManipulation", "line_number": 639, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 639, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 642, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 642, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 643, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 643, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 647, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 647, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 651, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 651, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 652, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 652, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongStringManipulation", "line_number": 656, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 656, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 659, "usage_type": "attribute"}, {"api_name": 
"interpret_src.errorTypes", "line_number": 659, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 660, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 660, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 661, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 661, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongStringManipulation", "line_number": 669, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 669, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandType", "line_number": 685, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 685, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.wrongOperandValue", "line_number": 686, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 686, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 687, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 692, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 694, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 697, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 698, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 699, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 700, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 701, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 724, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.fromstringlist", "line_number": 729, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 729, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.xmlNotWellFormated", "line_number": 731, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 731, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.xmlStructureSyntaxLex", "line_number": 733, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 733, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.xmlStructureSyntaxLex", "line_number": 734, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 734, "usage_type": "name"}, {"api_name": "interpret_src.errorTypes.xmlStructureSyntaxLex", "line_number": 736, "usage_type": "attribute"}, {"api_name": "interpret_src.errorTypes", "line_number": 736, "usage_type": "name"}]}
+{"seq_id": "72976186484", "text": "import os\nimport shutil\nimport subprocess\nimport csv\nimport bert_score\nimport sentence_transformers\nimport numpy as np\nfrom scipy.spatial.distance import cosine\n\n# TextDiversity pkgs\nfrom transformers import AutoModel, AutoTokenizer\nfrom sentence_transformers import SentenceTransformer\nfrom sklearn.decomposition import PCA\nfrom scipy.spatial import distance\nimport torch\nimport numpy as np\nimport nltk\nfrom nltk import word_tokenize, sent_tokenize\nfrom nltk.corpus import stopwords\nimport os\nimport itertools\nfrom multiprocessing import Pool\nimport spacy\n\n# locals\nimport metric\nfrom utils import *\n\nclass TokenSemanticDiversity(metric.TextDiversity):\n\n default_config = {\n # TextDiversity configs\n 'q': 1,\n 'normalize': False,\n 'distance_fn': distance.chebyshev, \n 'dim_reducer': PCA,\n 'remove_stopwords': False, \n 'scale_dist': \"exp\", \n 'sq_reg': False, \n 'mean_adj': True,\n 'verbose': False,\n # TokenSemanticDiversity configs\n 'MODEL_NAME':\"bert-large-uncased\",\n 'batch_size': 16,\n 'use_gpu': False,\n 'n_components': 'auto' \n }\n\n def __init__(self, config={}):\n config = {**self.default_config, **config} \n super().__init__(config)\n self.model = AutoModel.from_pretrained(config['MODEL_NAME'])\n self.tokenizer = AutoTokenizer.from_pretrained(config['MODEL_NAME'])\n self.undesirable_tokens = [\n self.tokenizer.pad_token_id, \n self.tokenizer.cls_token_id, \n self.tokenizer.sep_token_id\n ]\n self.batch_size = config['batch_size']\n self.device = torch.device('cuda' if config['use_gpu'] and torch.cuda.is_available() else 'cpu')\n self.verbose = config['verbose']\n\n # move model to device\n if isinstance(self.model, torch.nn.Module):\n self.model.to(self.device)\n\n def encode(self, input_ids, attention_mask):\n self.model.eval()\n with torch.no_grad():\n out = self.model(input_ids, attention_mask=attention_mask)\n emb = out[0]\n return emb\n\n def get_embeddings(self, corpus):\n inputs = self.tokenizer(corpus, return_tensors='pt', padding=True, truncation=True)\n batches = zip(chunker(inputs.input_ids, self.batch_size), \n chunker(inputs.attention_mask, self.batch_size))\n if self.verbose:\n print('getting token embeddings...')\n batches = tqdm(batches, total=int(len(inputs.input_ids)/self.batch_size))\n\n outputs = []\n for input_ids, attention_mask in batches:\n emb = self.encode(input_ids.to(self.device), \n attention_mask.to(self.device))\n outputs.append(emb)\n embeddings = torch.cat(outputs)\n\n # remove undesirable tokens\n idx = np.isin(inputs['input_ids'], self.undesirable_tokens, assume_unique=True, invert=True).reshape(-1)\n tok = np.array(self.tokenizer.convert_ids_to_tokens(inputs.input_ids.view(-1)))[idx]\n boe = embeddings.view(-1, embeddings.shape[-1])[idx].detach().cpu()\n\n # remove stopwords\n if self.config['remove_stopwords']:\n idx = np.isin(tok, stopwords.words('english'), invert=True)\n tok = tok[idx]\n boe = boe[idx]\n\n # compress embedding to speed up similarity matrix computation\n if self.config['n_components'] == \"auto\":\n n_components = min(max(2, len(boe) // 10), boe.shape[-1])\n if self.verbose:\n print('Using n_components={}'.format(str(n_components)))\n\n if type(n_components) == int and n_components > 0 and len(boe) > 1:\n boe = self.config['dim_reducer'](n_components=n_components).fit_transform(boe)\n\n if len(np.flatnonzero(np.core.defchararray.find(tok,'##')!=-1)) > 0:\n tok, boe = merge_bpe(tok, boe)\n\n return boe, tok\n\n def __call__(self, response_set): \n return 
super().__call__(response_set)\n\n\nclass SentenceSemanticDiversity(metric.TextDiversity):\n\n default_config = {\n # TextDiversity configs\n 'q': 1,\n 'normalize': False,\n 'distance_fn': distance.chebyshev, \n 'dim_reducer': PCA,\n 'remove_stopwords': False, \n 'scale_dist': \"exp\", \n 'sq_reg': False, \n 'mean_adj': True,\n 'verbose': False,\n # SentenceSemanticDiversity configs\n 'MODEL_NAME':\"stsb-roberta-large\",\n 'use_gpu': False,\n 'n_components': 'auto' \n }\n\n def __init__(self, config={}):\n config = {**self.default_config, **config} \n super().__init__(config)\n self.device = torch.device('cuda' if config['use_gpu'] and torch.cuda.is_available() else 'cpu')\n self.model = SentenceTransformer(config['MODEL_NAME'], device=self.device)\n self.verbose = config['verbose']\n\n def get_embeddings(self, corpus):\n\n boe = np.stack(self.model.encode(corpus))\n \n # compress embedding to speed up similarity matrix computation\n if self.config['n_components'] == \"auto\":\n n_components = min(max(2, len(boe) // 10), boe.shape[-1])\n if self.verbose:\n print('Using n_components={}'.format(str(n_components)))\n\n if type(n_components) == int and n_components > 0 and len(boe) > 1:\n boe = self.config['dim_reducer'](n_components=n_components).fit_transform(boe)\n\n return boe, corpus\n\n def __call__(self, response_set): \n return super().__call__(response_set)\n\nclass SyntacticDiversity(metric.TextDiversity):\n\n default_config = {\n # TextDiversity configs\n 'q': 1,\n 'normalize': False,\n 'dim_reducer': PCA,\n 'remove_stopwords': False, \n 'sq_reg': False, \n 'mean_adj': False,\n 'verbose': False,\n # SentenceSemanticDiversity configs\n 'MODEL_NAME': \"en_core_web_trf\",\n 'distance_fn': distance.hamming, \n 'scale_dist': \"invert\", \n 'part': 'pos_', \n 'part2int': True\n }\n\n def __init__(self, config={}):\n config = {**self.default_config, **config} \n super().__init__(config)\n self.model = spacy.load(config['MODEL_NAME'])\n self.verbose = config['verbose']\n\n\n def get_embeddings(self, corpus):\n\n # convert to spacy docs to get parts\n doc_parts = []\n for doc in corpus:\n for sent in sent_tokenize(doc):\n sent_ = []\n for w in self.model(sent):\n if self.config['remove_stopwords'] and w.text in stopwords.words('english'):\n continue\n part_ = getattr(w, self.config['part'])\n sent_.append(part_)\n doc_parts.append(sent_)\n\n species = doc_parts\n\n # pad to max sentence doc length\n pad_to = find_max_list(doc_parts)\n doc_parts = np.array([s + ['NULL']*(pad_to-len(s)) for s in doc_parts])\n\n # convert doc parts to int\n if self.config['part2int']:\n # build dict of unique doc parts\n part_map = set(itertools.chain(*doc_parts))\n part_map = {tag: i for i, tag in enumerate(part_map)}\n # convert to int for distance comparison\n part2int_fn = np.vectorize(part_map.get)\n doc_parts = part2int_fn(doc_parts)\n\n return doc_parts, species\n\n def __call__(self, response_set): \n return super().__call__(response_set)\n\nif __name__ == '__main__':\n\n def print_metric(metric, resp_set):\n print('{0}: {1:0.3f}'.format(type(metric).__name__, metric(resp_set)))\n\n # TEST\n response_set = ['i am going', 'i am going', 'lets go i i']\n\n config = {'normalize': False}\n print_metric(TokenSemanticDiversity(config), response_set)\n print_metric(SentenceSemanticDiversity(config), response_set)\n print_metric(SyntacticDiversity(config), response_set)\n\n config = {'normalize': True}\n print_metric(TokenSemanticDiversity(config), response_set)\n print_metric(SentenceSemanticDiversity(config), 
response_set)\n print_metric(SyntacticDiversity(config), response_set)", "repo_name": "asakhala921/Sibyl_eval", "sub_path": "text_diversity.py", "file_name": "text_diversity.py", "file_ext": "py", "file_size_in_byte": 8177, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "metric.TextDiversity", "line_number": 29, "usage_type": "attribute"}, {"api_name": "scipy.spatial.distance.chebyshev", "line_number": 35, "usage_type": "attribute"}, {"api_name": "scipy.spatial.distance", "line_number": 35, "usage_type": "name"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 36, "usage_type": "name"}, {"api_name": "transformers.AutoModel.from_pretrained", "line_number": 52, "usage_type": "call"}, {"api_name": "transformers.AutoModel", "line_number": 52, "usage_type": "name"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 53, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 53, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 60, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.isin", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.isin", "line_number": 96, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 96, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 96, "usage_type": "name"}, {"api_name": "numpy.flatnonzero", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.core.defchararray.find", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.core", "line_number": 109, "usage_type": "attribute"}, {"api_name": "metric.TextDiversity", "line_number": 118, "usage_type": "attribute"}, {"api_name": "scipy.spatial.distance.chebyshev", "line_number": 124, "usage_type": "attribute"}, {"api_name": "scipy.spatial.distance", "line_number": 124, "usage_type": "name"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 125, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 140, "usage_type": "attribute"}, {"api_name": "sentence_transformers.SentenceTransformer", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 146, "usage_type": "call"}, {"api_name": "metric.TextDiversity", "line_number": 162, "usage_type": "attribute"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 168, "usage_type": "name"}, {"api_name": "scipy.spatial.distance.hamming", "line_number": 175, "usage_type": "attribute"}, {"api_name": "scipy.spatial.distance", "line_number": 175, "usage_type": "name"}, {"api_name": "spacy.load", "line_number": 184, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 193, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 196, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 196, "usage_type": "name"}, {"api_name": "numpy.array", 
"line_number": 206, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 214, "usage_type": "call"}]}
+{"seq_id": "911484176", "text": "import os\nimport cv2\nimport numpy as np\nimport rclpy\nimport pathlib\nimport math\nfrom sensor_msgs.msg import Image\nfrom ackermann_msgs.msg import AckermannDrive\nfrom rclpy.qos import qos_profile_sensor_data, QoSReliabilityPolicy\nfrom rclpy.node import Node\nfrom rclpy.qos import qos_profile_sensor_data, QoSReliabilityPolicy\nfrom .log_server import set_transmission\nfrom .log_server import set_steering\nfrom nav_msgs.msg import Odometry\nfrom ament_index_python.packages import get_package_share_directory\nfrom launch.substitutions.path_join_substitution import PathJoinSubstitution\nimport traceback\n\n\n\nCONTROL_COEFFICIENT = 0.0007\nCONTROL_COEFFICIENT_DETOUR = 0.3183\nANGLE_GAP = 0.435808714\n\nclass LaneFollower(Node):\n def __init__(self):\n try:\n super().__init__('field_follower')\n\n # ROS interface\n self.__ackermann_publisher = self.create_publisher(AckermannDrive, 'cmd_ackermann', 1)\n qos = qos_profile_sensor_data\n qos.reliability = QoSReliabilityPolicy.RELIABLE\n self.create_subscription(Odometry, '/odom', self.__on_odom, qos)\n\n qos_camera_data = qos_profile_sensor_data\n # In case ROS_DISTRO is not foxy the QoSReliabilityPolicy is strict.\n if 'ROS_DISTRO' in os.environ and os.environ['ROS_DISTRO'] != 'foxy':\n qos_camera_data.reliability = QoSReliabilityPolicy.RELIABLE\n #self.create_subscription(Image, 'vehicle/camera', self.__on_camera_image, qos_camera_data)\n self._logger.info('Field path follower initialized')\n package_dir = get_package_share_directory('webots_ros2_suv')\n points_path = f'{package_dir}/worlds/ulstu_field_points.txt'\n points = open(points_path, 'r')\n self.current_x = 210.23121027069885\n self.current_y = 77.42130129912289\n self.current_angle = 0\n self.x_coordinates = []\n self.y_coordinates = []\n x_flag = 1\n self.index_next_point = 1\n with open(points_path, 'r') as file:\n for line in file:\n if x_flag == 1:\n self.x_coordinates.append(float(line.strip()))\n x_flag = 0\n else:\n self.y_coordinates.append(float(line.strip()))\n x_flag = 1\n\n self._logger.info(points.read(500))\n except Exception as ex:\n self._logger.error(''.join(traceback.TracebackException.from_exception(ex).format()))\n\n #def wheel_control(self):\n #try:\n #infinity = 1\n #while infinity:\n # self._logger.info(str(self.current_angle))\n\n #else:\n # end = True\n #except Exception as ex:\n # self._logger.error(''.join(traceback.TracebackException.from_exception(ex).format()))\n\n\n def euler_from_quaternion(self, x, y, z, w):\n \"\"\"\n Convert a quaternion into euler angles (roll, pitch, yaw)\n roll is rotation around x in radians (counterclockwise)\n pitch is rotation around y in radians (counterclockwise)\n yaw is rotation around z in radians (counterclockwise)\n \"\"\"\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n\n return roll_x, pitch_y, yaw_z # in radians\n\n def __on_odom(self, message):\n try:\n #self._logger.info(f'odom x: {message.pose.pose.position.x} y: {message.pose.pose.position.y} z: {message.pose.pose.position.z}')\n (roll, pitch, yaw) = self.euler_from_quaternion(message.pose.pose.orientation.x,\n message.pose.pose.orientation.y,\n message.pose.pose.orientation.z,\n message.pose.pose.orientation.w)\n # self._logger.info(f'yaw: {yaw}')\n 
self.current_angle = yaw - ANGLE_GAP\n            self.current_x = float(message.pose.pose.position.x)\n            self.current_y = float(message.pose.pose.position.y)\n            if len(self.x_coordinates) != self.index_next_point:\n                delta = self.calculate_distance(self.current_x, self.current_y,\n                                                self.x_coordinates[self.index_next_point],\n                                                self.y_coordinates[self.index_next_point])\n\n                if delta <= 3:\n                    self.index_next_point += 1\n                    self._logger.info(str(self.index_next_point))\n\n            command_message = AckermannDrive()\n            command_message.speed = 2.0\n            command_message.steering_angle = 0.0\n            calc_angle = self.calculate_angle(self.current_x, self.current_y,\n                                              self.x_coordinates[self.index_next_point],\n                                              self.y_coordinates[self.index_next_point])\n            error = self.current_angle - calc_angle\n            # wrap the heading error into (-pi, pi] so we steer the short way around\n            if error > 3.14159:\n                error -= 6.28319\n            elif error < -3.14159:\n                error += 6.28319\n            #self._logger.info(f'yaw: {yaw:.5f} curangle\" {self.current_angle:.5f} calc angle: {calc_angle:.5f}')\n            command_message.steering_angle = error * CONTROL_COEFFICIENT_DETOUR\n            set_transmission(command_message.speed / 25 + 1)\n            set_steering(command_message.steering_angle)\n\n            self.__ackermann_publisher.publish(command_message)\n        except Exception as ex:\n            self._logger.error(''.join(traceback.TracebackException.from_exception(ex).format()))\n\n    def calculate_angle(self, x1, y1, x2, y2):\n        dx = x2 - x1\n        dy = y2 - y1\n        angle = math.atan2(dy, dx) # Calculate the angle in radians\n        #angle_deg = math.degrees(angle) # Convert the angle to degrees\n        return angle\n\n    def calculate_distance(self, x1, y1, x2, y2):\n        distance = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n        return distance\n\n    def __on_camera_image(self, message):\n        try:\n            img = message.data\n            img = np.frombuffer(img, dtype=np.uint8).reshape((message.height, message.width, 4))\n            img = img[120:240, :]\n\n            # Segment the image by color in HSV color space\n            img = cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)\n            #img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n            #mask = cv2.inRange(img, np.array([50, 110, 150]), np.array([120, 255, 255]))\n            mask = cv2.inRange(img, np.array([220, 220, 220]), np.array([255, 255, 255]))\n\n            # Find the largest segmented contour\n            contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n            command_message = AckermannDrive()\n            command_message.speed = 2.0\n            command_message.steering_angle = 0.0\n\n            if contours:\n                largest_contour = max(contours, key=cv2.contourArea)\n                largest_contour_center = cv2.moments(largest_contour)\n\n                cv2.drawContours(img, largest_contour, -1, (0,255,0), 3)\n                cv2.imshow(\"img\", img)\n                if cv2.waitKey(25) & 0xFF == ord('q'):\n                    return\n\n                if largest_contour_center['m00'] != 0:\n                    center_x = int(largest_contour_center['m10'] / largest_contour_center['m00'])\n                    error = 190 - center_x\n                    command_message.steering_angle = error * CONTROL_COEFFICIENT\n\n            set_transmission(command_message.speed / 25 + 1)\n            set_steering(command_message.steering_angle)\n\n            self.__ackermann_publisher.publish(command_message)\n        except Exception as ex:\n            self._logger.error(''.join(traceback.TracebackException.from_exception(ex).format()))\n\n\ndef main(args=None):\n    try:\n        rclpy.init(args=args)\n        follower = LaneFollower()\n        #follower.wheel_control()\n        rclpy.spin(follower)\n        rclpy.shutdown()\n    except KeyboardInterrupt:\n        pass\n    except Exception as err:\n        print('node_gps stopped')\n    finally:\n        rclpy.shutdown()\n\n\nif __name__ == '__main__':\n    main()\n", "repo_name": "ulstu/cad-self-driving", "sub_path": "simulation/webots_ros2_suv/webots_ros2_suv/field_follower.py", "file_name": 
"field_follower.py", "file_ext": "py", "file_size_in_byte": 8608, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "76", "api": [{"api_name": "rclpy.node.Node", "line_number": 25, "usage_type": "name"}, {"api_name": "ackermann_msgs.msg.AckermannDrive", "line_number": 31, "usage_type": "argument"}, {"api_name": "rclpy.qos.qos_profile_sensor_data", "line_number": 32, "usage_type": "name"}, {"api_name": "rclpy.qos.QoSReliabilityPolicy.RELIABLE", "line_number": 33, "usage_type": "attribute"}, {"api_name": "rclpy.qos.QoSReliabilityPolicy", "line_number": 33, "usage_type": "name"}, {"api_name": "nav_msgs.msg.Odometry", "line_number": 34, "usage_type": "argument"}, {"api_name": "rclpy.qos.qos_profile_sensor_data", "line_number": 36, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 38, "usage_type": "attribute"}, {"api_name": "rclpy.qos.QoSReliabilityPolicy.RELIABLE", "line_number": 39, "usage_type": "attribute"}, {"api_name": "rclpy.qos.QoSReliabilityPolicy", "line_number": 39, "usage_type": "name"}, {"api_name": "ament_index_python.packages.get_package_share_directory", "line_number": 42, "usage_type": "call"}, {"api_name": "traceback.TracebackException.from_exception", "line_number": 63, "usage_type": "call"}, {"api_name": "traceback.TracebackException", "line_number": 63, "usage_type": "attribute"}, {"api_name": "math.atan2", "line_number": 86, "usage_type": "call"}, {"api_name": "math.asin", "line_number": 91, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 95, "usage_type": "call"}, {"api_name": "ackermann_msgs.msg.AckermannDrive", "line_number": 120, "usage_type": "call"}, {"api_name": "log_server.set_transmission", "line_number": 133, "usage_type": "call"}, {"api_name": "log_server.set_steering", "line_number": 134, "usage_type": "call"}, {"api_name": "traceback.TracebackException.from_exception", "line_number": 138, "usage_type": "call"}, {"api_name": "traceback.TracebackException", "line_number": 138, "usage_type": "attribute"}, {"api_name": "math.atan2", "line_number": 143, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 154, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 158, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGBA2RGB", "line_number": 158, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 161, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 164, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 164, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_NONE", "line_number": 164, "usage_type": "attribute"}, {"api_name": "ackermann_msgs.msg.AckermannDrive", "line_number": 166, "usage_type": "call"}, {"api_name": "cv2.contourArea", "line_number": 171, "usage_type": "attribute"}, {"api_name": "cv2.moments", "line_number": 172, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 174, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 175, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 176, "usage_type": "call"}, {"api_name": "log_server.set_transmission", "line_number": 184, "usage_type": "call"}, {"api_name": "log_server.set_steering", "line_number": 185, "usage_type": "call"}, {"api_name": 
"traceback.TracebackException.from_exception", "line_number": 189, "usage_type": "call"}, {"api_name": "traceback.TracebackException", "line_number": 189, "usage_type": "attribute"}, {"api_name": "rclpy.init", "line_number": 194, "usage_type": "call"}, {"api_name": "rclpy.spin", "line_number": 197, "usage_type": "call"}, {"api_name": "rclpy.shutdown", "line_number": 198, "usage_type": "call"}, {"api_name": "rclpy.shutdown", "line_number": 204, "usage_type": "call"}]}
+{"seq_id": "3394278862", "text": "#!/usr/bin/python\n\nimport multiprocessing\nimport containerstats\nimport etcd\nimport platform\nimport docker\nimport time\nimport os\nimport requests\n\ndockerconnection = docker.Client(base_url='unix://var/run/docker.sock', timeout=2)\ndockerconnection.close()\n\ndef getstats(obj):\n etcd.CreateDir(DDS_ETCD_URL, platform.node() + '/' + obj.containername, DDS_CONTAINER_TTL)\n etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/cpuusage',\n obj.getcontainercpuusage(dockerconnection)['cpuusage'])\n etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/memusage',\n obj.getcontainermemusage(dockerconnection)['memusage'])\n etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/memusagepercent',\n obj.getcontainermemusage(dockerconnection)['memusagepercent'])\n etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/netrx',\n obj.getcontainernetusage(dockerconnection)['netrx'])\n etcd.SetValue(DDS_ETCD_URL, platform.node() + '/' + obj.containername + '/nettx',\n obj.getcontainernetusage(dockerconnection)['nettx'])\n return True\n\n\nif __name__ == '__main__':\n\n if 'DDS_ETCD_URL' in os.environ:\n DDS_ETCD_URL = os.environ['DDS_ETCD_URL']\n else:\n DDS_ETCD_URL = 'http://127.0.0.1:4001/v2/keys/'\n\n if 'DDS_CONCURRENCY_LEVEL' in os.environ:\n DDS_CONCURRENCY_LEVEL = os.environ['DDS_CONCURRENCY_LEVEL']\n else:\n DDS_CONCURRENCY_LEVEL = 8\n\n # start values\n DDS_HOST_TTL = 120\n DDS_CONTAINER_TTL = 30\n\n\n while True:\n newpool = multiprocessing.Pool(processes=DDS_CONCURRENCY_LEVEL)\n etcd.CreateDir(DDS_ETCD_URL, platform.node(), ttl=DDS_HOST_TTL)\n containerlist = containerstats.getrunningcontainers(dockerconnection)\n objlist = []\n for container in containerlist:\n objlist.append(containerstats.ContainerStats(container))\n gatherstart = time.time()\n # when i.e. 
a container stops during data gathering, requests can raise a ReadTimeoutError\n        try:\n            newpool.map(getstats, objlist)\n        except requests.packages.urllib3.exceptions.ReadTimeoutError:\n            pass\n        newpool.close()\n        gatherstop = time.time()\n        gatherduration = int(gatherstop - gatherstart)\n        DDS_HOST_TTL = gatherduration * 5\n        DDS_CONTAINER_TTL = gatherduration * 3\n        time.sleep(gatherduration)\n", "repo_name": "witalisoft/dds", "sub_path": "app/dds.py", "file_name": "dds.py", "file_ext": "py", "file_size_in_byte": 2470, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "docker.Client", "line_number": 12, "usage_type": "call"}, {"api_name": "etcd.CreateDir", "line_number": 16, "usage_type": "call"}, {"api_name": "platform.node", "line_number": 16, "usage_type": "call"}, {"api_name": "etcd.SetValue", "line_number": 17, "usage_type": "call"}, {"api_name": "platform.node", "line_number": 17, "usage_type": "call"}, {"api_name": "etcd.SetValue", "line_number": 19, "usage_type": "call"}, {"api_name": "platform.node", "line_number": 19, "usage_type": "call"}, {"api_name": "etcd.SetValue", "line_number": 21, "usage_type": "call"}, {"api_name": "platform.node", "line_number": 21, "usage_type": "call"}, {"api_name": "etcd.SetValue", "line_number": 23, "usage_type": "call"}, {"api_name": "platform.node", "line_number": 23, "usage_type": "call"}, {"api_name": "etcd.SetValue", "line_number": 25, "usage_type": "call"}, {"api_name": "platform.node", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 38, "usage_type": "attribute"}, {"api_name": "multiprocessing.Pool", "line_number": 48, "usage_type": "call"}, {"api_name": "etcd.CreateDir", "line_number": 49, "usage_type": "call"}, {"api_name": "platform.node", "line_number": 49, "usage_type": "call"}, {"api_name": "containerstats.getrunningcontainers", "line_number": 50, "usage_type": "call"}, {"api_name": "containerstats.ContainerStats", "line_number": 53, "usage_type": "call"}, {"api_name": "time.time", "line_number": 54, "usage_type": "call"}, {"api_name": "requests.packages", "line_number": 58, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 61, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 65, "usage_type": "call"}]}
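dds.py sizes its etcd TTLs from how long one gathering pass actually took, so entries expire on their own if the gatherer stalls. A sketch of that adaptive-TTL timing with a placeholder work function (fake_getstats stands in for the record's getstats; note the record reads DDS_CONCURRENCY_LEVEL from the environment as a string, while Pool(processes=...) wants an int as below):

import time
from multiprocessing import Pool

def fake_getstats(n):          # assumption: stand-in for per-container stats work
    time.sleep(0.01)
    return n * n

if __name__ == "__main__":
    start = time.time()
    with Pool(processes=4) as pool:
        pool.map(fake_getstats, range(16))
    duration = max(1, int(time.time() - start))   # guard against a sub-second pass
    host_ttl, container_ttl = duration * 5, duration * 3
    print(host_ttl, container_ttl)                # e.g. 5 3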
+{"seq_id": "24882625627", "text": "import collections\n\nclass Trump:\n def __init__(self, mark, num):\n self.mark = mark\n self.num = num\n\nclass Check_flag:\n def is_flush(self, cards):\n return all( i[\"mark\"] == cards[0][\"mark\"] for i in cards)\n\n def is_straight(self, cards):\n values = [i[\"num\"] for i in cards]\n straight = (values == list(range(values[0], values[0]-5, -1) ) or values == [14, 5, 4, 3, 2])\n return straight \n\nclass Porker:\n\n def ans_print(self, card_list, role):\n ans = \"\"\n for i in card_list:\n if 0 is i[\"mark\"]:\n ans = ans + \"S\"\n elif 1 is i[\"mark\"]:\n ans = ans + \"C\"\n elif 2 is i[\"mark\"]:\n ans = ans + \"D\"\n elif 3 is i[\"mark\"]:\n ans = ans + \"H\"\n\n if 14 is i[\"num\"]:\n ans = ans + \"A\"\n elif 13 is i[\"num\"]:\n ans = ans + \"K\"\n elif 12 is i[\"num\"]:\n ans = ans + \"Q\"\n elif 11 is i[\"num\"]:\n ans = ans + \"J\"\n else:\n ans = ans + str(i[\"num\"])\n ans = ans + \" \"\n print(ans)\n print(role)\n exit()\n\n def judgment_role(self, card_list_input):\n card_list = sorted(card_list_input, key = lambda x : x[\"num\"], reverse= True)\n\n straight = Check_flag().is_straight(card_list)\n flush = Check_flag().is_flush(card_list)\n\n if flush and \\\n card_list[0][\"num\"] is 14 and \\\n card_list[1][\"num\"] is 13 and \\\n card_list[2][\"num\"] is 12 and \\\n card_list[3][\"num\"] is 11 and \\\n card_list[4][\"num\"] is 10 :\n self.ans_print(card_list_input, \"ロイヤルストレートフラッシュ\")\n if straight and flush:\n self.ans_print(card_list_input, \"ストレートフラッシュ\")\n\n c = collections.Counter( [i[\"num\"] for i in card_list] )\n \n if c.most_common()[0][1] is 4:\n self.ans_print(card_list_input, \"フォーカード\")\n elif c.most_common()[0][1] is 3 and \\\n c.most_common()[1][1] is 2 : \n self.ans_print(card_list_input, \"フルハウス\")\n\n if flush :\n self.ans_print(card_list_input, \"フラッシュ\")\n\n if straight :\n self.ans_print(card_list_input, \"ストレート\")\n\n if c.most_common()[0][1] is 3 :\n self.ans_print(card_list_input, \"スリーカード\")\n\n if c.most_common()[0][1] is 2 and \\\n c.most_common()[1][1] is 2 : \n self.ans_print(card_list_input, \"ツーペア\")\n\n if c.most_common()[0][1] is 2 :\n self.ans_print(card_list_input, \"ワンペア\")\n\n self.ans_print(card_list_input, \"ハイカード\")\n", "repo_name": "sharknasuhorse/isc-kadai", "sub_path": "kadai2/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2799, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "collections.Counter", "line_number": 62, "usage_type": "call"}]}
+{"seq_id": "10743227187", "text": "from flask import Flask, render_template\nimport requests\nfrom flask_humanize import Humanize\n\n# I set it up this way (along with a plethora of different ways) when I was following some tutorials\n# on how to setup a Flask project so it will deploy to Heroku. I have a Flask project successfully deployed\n# to Heroku, but I couldn't find and difference between this project and the previous one. If you uncomment the code,\n# it would pull the correct info and run it through the string format in the commented out request below.\n# I have done similar things with Flask, Django, and React, but for some reason I'm hitting a wall. I figured this\n# API can't really do any damage, so the key and id are hardcoded so that Heroku will work. Make it work, make it right, make it fast.\n# try:\n# from juicykey import app_id, app_key\n# except:\n # app_id = process.env.key_one\n # app_key = process.env.key_two\n\napp = Flask(__name__)\nhumanize = Humanize(app)\n\n\n@app.route(\"/\")\ndef index():\n result_span = []\n unique_ingredient_list = []\n unique_ingredient_index = {}\n total_of_each_items_cal_per_oz = 0\n lower_search_result = 0\n higher_search_result = 50\n total_hits = 1\n while len(result_span) != total_hits:\n # I don't normally hardcode api keys. Read the above comment if you haven't already\n # This is how I had the call setup prior to Heroku deployment issues\n # nutrionix_data = requests.get(\"https://api.nutritionix.com/v1_1/search/?brand_id=51db37d0176fe9790a899db2&results={}:{}&fields=*&appId={}&appKey={}\".format(lower_search_result, higher_search_result, app_id, app_key))\n nutrionix_data = requests.get(\n \"https://api.nutritionix.com/v1_1/search/?brand_id=51db37d0176fe9790a899db2&results={}:{}&fields=*&appId=f611e1fd&appKey=c63a3b9c90c5586828562a2cb5e93211\".format(\n lower_search_result, higher_search_result))\n nutrionix_data_json = nutrionix_data.json()\n nutrionix_hits = nutrionix_data_json['hits']\n\n lower_search_result += 50\n higher_search_result += 50\n total_hits = nutrionix_data_json['total_hits']\n\n for item in nutrionix_hits:\n fields = item['fields']\n result_span.append(fields)\n\n for result in result_span:\n if result['nf_ingredient_statement'] is None:\n pass\n else:\n\n first_del_pos = result['nf_ingredient_statement'].find(\"(\") # get the position of [\n second_del_pos = result['nf_ingredient_statement'].find(\"),\") # get the position of ]\n string_after_replace = result['nf_ingredient_statement'].replace(result['nf_ingredient_statement']\n [first_del_pos - 1:second_del_pos + 1], \"\").replace(\".\", \"\").title().replace(\"And \", \"\")\n\n ingredient_list = string_after_replace.split(\", \")\n\n for ingredient in ingredient_list:\n if ingredient not in unique_ingredient_list:\n print(\"ingredient, dude\", ingredient)\n unique_ingredient_list.append(ingredient)\n unique_ingredient_index.setdefault(ingredient, []).append(result['item_name'])\n else:\n unique_ingredient_index.setdefault(ingredient, []).append(result['item_name'])\n\n if result['nf_serving_size_unit'] == \"fl oz\":\n divided_up = result['nf_calories'] / 8\n total_of_each_items_cal_per_oz += divided_up\n elif result['nf_serving_size_unit'] == 'box':\n # assuming that all pouches are the 4.23 oz packages due to them being in packages of 8\n divided_up = result['nf_calories'] / 4.23\n total_of_each_items_cal_per_oz += divided_up\n elif result['nf_serving_size_unit'] == 'bottle':\n # Assuming that the bottle is for the 10 oz packages only because on the juicy 
juice nutrition facts,\n #it starts going by fl oz on the 48 oz bottles. https://juicyjuice.com/products/juicy-juice-fruit-juice/apple\n divided_up = result['nf_calories'] / 10\n total_of_each_items_cal_per_oz += divided_up\n else:\n # assuming that all pouches are 6 oz\n divided_up = result['nf_calories'] / 6\n total_of_each_items_cal_per_oz += divided_up\n\n ctx = {\n 'total': nutrionix_data_json['total_hits'],\n 'fields': result_span,\n 'ingredients': unique_ingredient_list,\n 'calories_per_oz': round(total_of_each_items_cal_per_oz / len(result_span)),\n 'index': unique_ingredient_index\n }\n print(\"index\", unique_ingredient_index)\n return render_template('index.html', **ctx)\n\n # ctx = {\n # 'index': {1: [1, 2, 3], 2: [1, 2, 3], 3: [2, 4, 5], 4: ['skdje', 'dfnkw'], 5: [5, 3, 2533], 6: [1, 2, 3], 7: [1, 2, 3], 8: [1, 2, 3], 9: [11, 22, 33, 44], 10: ['afs', 'fjknel', 'fasd;'], 11: ['fad', 'sad', 'dad']}\n # }\n # return render_template('index.html', **ctx)\n\n\nif __name__ == '__main__':\n app.run()\n", "repo_name": "SethCWilliams/juicy-juice-analytics", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 5107, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "flask.Flask", "line_number": 17, "usage_type": "call"}, {"api_name": "flask_humanize.Humanize", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 93, "usage_type": "call"}]}
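The Flask view above combines two techniques worth isolating: windowed pagination (keep requesting results lower:higher until total_hits items have arrived) and an inverted ingredient-to-item-names index built with dict.setdefault. A sketch with the HTTP call faked out so it runs without the Nutritionix credentials (fetch_page is a hypothetical stand-in for requests.get):

def fetch_page(lower, upper):      # assumption: fakes the paginated API response
    data = [("Apple Juice", ["Water", "Apple Juice Concentrate"]),
            ("Grape Juice", ["Water", "Grape Juice Concentrate"])]
    return {"total_hits": len(data), "hits": data[lower:upper]}

results, lower, upper, total = [], 0, 50, 1
while len(results) != total:       # same stopping rule as the record
    page = fetch_page(lower, upper)
    total = page["total_hits"]
    results.extend(page["hits"])
    lower, upper = lower + 50, upper + 50

index = {}                         # ingredient -> list of item names
for name, ingredients in results:
    for ingredient in ingredients:
        index.setdefault(ingredient, []).append(name)
print(index["Water"])              # ['Apple Juice', 'Grape Juice']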
+{"seq_id": "7723430435", "text": "from src.pipeline.modes.OCRBase import OCRBase\nfrom src.pipeline.dl_homograhpy.homographyDL import HomographyDL\nimport numpy as np\nimport cv2\nimport os\nfrom src.pipeline.dl_homograhpy.dl_homography_utils import *\n\nclass OCRMode1(OCRBase):\n \"\"\" OCR Mode 1: DL Homography + tesseract\"\"\"\n\n def __init__(self, ocr, intput, smartDoc, homography_model, grayscale):\n OCRBase.__init__(self, ocr, intput, smartDoc)\n self.homography_model = homography_model\n self.grayscale = grayscale\n\n # network spatial input shape\n input_shape = (384, 256)\n\n # create empty instance\n homography_dl = HomographyDL(input=None, output=None, architecture=None, model_fn=None, grayscale=None)\n\n def run(self, imgs):\n for img_nm in sorted(imgs):\n print('Processing image {} ...'.format(img_nm))\n\n # load image\n input_img = self.input + img_nm\n if self.grayscale:\n img = cv2.imread(input_img, 0)\n else:\n img = cv2.imread(input_img)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n # manually rotate (should be automated)\n if self.smartDoc:\n img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)\n\n # save original size to compute scaling factor\n if self.grayscale:\n org_y, org_x = img.shape\n else:\n org_y, org_x, _ = img.shape\n\n fac_y, fac_x = org_y/self.input_shape[0], org_x/self.input_shape[1]\n\n # resize (just for recovering homography)\n img_homography = cv2.resize(img, (self.input_shape[1], self.input_shape[0]))\n\n # adjust dimension for network\n if self.grayscale:\n img_homography_net = np.reshape(img_homography, (1, self.input_shape[0], self.input_shape[1], 1))\n else:\n img_homography_net = np.reshape(img_homography, (1, self.input_shape[0], self.input_shape[1], 3))\n\n # normalize\n img_homography_norm = img_homography_net/255.0\n\n # estimate corner positions\n corners = self.homography_dl.predict_corners(self.homography_model, img_homography_norm)\n\n # unwarp imgage (original size)\n pts_src = np.reshape(corners, (4, 2))\n pts_src = self.scale_estim_corners(pts_src, fac_x, fac_y)\n pts_dst = np.array([[0, 0], [org_x, 0], [org_x, org_y], [0, org_y]], dtype = 'float32')\n\n dewarped_image = warp_image(img, pts_src, pts_dst, self.grayscale)\n\n # tesseract\n self.ocr.run_image_to_text_save(dewarped_image, os.path.splitext(img_nm)[0])\n\n def scale_estim_corners(self, corners, scale_x, scale_y):\n \"\"\"\n scale estimated corners to original image size\n\n :param corners:\n :param scale_x:\n :param scale_y:\n :return:\n \"\"\"\n erg = np.zeros((4,2))\n\n for idx, corner_tuple in enumerate(corners):\n erg[idx] = corner_tuple[0]*scale_x,corner_tuple[1]*scale_y\n\n return erg", "repo_name": "Nikolai10/mobile-ocr", "sub_path": "src/pipeline/modes/OCRMode1.py", "file_name": "OCRMode1.py", "file_ext": "py", "file_size_in_byte": 3072, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 24, "dataset": "github-code", "pt": "76", "api": [{"api_name": "src.pipeline.modes.OCRBase.OCRBase", "line_number": 8, "usage_type": "name"}, {"api_name": "src.pipeline.modes.OCRBase.OCRBase.__init__", "line_number": 12, "usage_type": "call"}, {"api_name": "src.pipeline.modes.OCRBase.OCRBase", "line_number": 12, "usage_type": "name"}, {"api_name": "src.pipeline.dl_homograhpy.homographyDL.HomographyDL", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 32, "usage_type": "call"}, {"api_name": 
"cv2.COLOR_BGR2RGB", "line_number": 32, "usage_type": "attribute"}, {"api_name": "cv2.rotate", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.ROTATE_90_CLOCKWISE", "line_number": 36, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 80, "usage_type": "call"}]}
+{"seq_id": "2614426484", "text": "import datetime\nstart_time = datetime.datetime.now()\n\nimport sys\nimport hashlib\n\n\ndef cycle_through_numbers(puzzle_input, starting_i, num_zeroes):\n i =starting_i\n while True:\n new_input = puzzle_input+str(i)\n md5 = hashlib.md5(new_input).hexdigest()\n zeroes = len(md5) - len(md5.lstrip('0'))\n if zeroes == num_zeroes:\n break\n else:\n i +=1\n return i\n\n\ndef main(puzzle_input): \n part_a = cycle_through_numbers(puzzle_input,0, 5)\n print('Answer to part a is {}'.format(part_a))\n processing_time = (datetime.datetime.now() - start_time).total_seconds() * 1000\n print(\"Time taken to get answer: {:.3f} ms\".format(processing_time))\n \n \n part_b = cycle_through_numbers(puzzle_input,part_a, 6)\n print('\\nAnswer to part b is {}'.format(part_b))\n processing_time = (datetime.datetime.now() - start_time).total_seconds() * 1000\n print(\"Time taken to get answer: {:.3f} ms\".format(processing_time))\n\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"USAGE: python [script.py] [input]\")\n else:\n main(sys.argv[1])\n", "repo_name": "lwalsh8/Advent_of_Code", "sub_path": "2015/day_04.py", "file_name": "day_04.py", "file_ext": "py", "file_size_in_byte": 1127, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "datetime.datetime.now", "line_number": 2, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 2, "usage_type": "attribute"}, {"api_name": "hashlib.md5", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 36, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 39, "usage_type": "attribute"}]}
+{"seq_id": "22700406307", "text": "import numpy as np\n# import structlog\nfrom eliot import start_action, Message, to_file\nfrom pathlib import Path\nfrom pyqtgraph import ViewBox\nfrom PySide2.QtWidgets import QMainWindow, QFileDialog, QMessageBox\nfrom PySide2.QtCore import QObject, QThread, Signal, Slot, QMutex\nfrom . import common\nfrom .common import PlotData, UiSettings\nfrom .comp_worker import ComputationWorker\nfrom .exp_worker import ExperimentWorker\nfrom .generated_ui import Ui_MainWindow\n\n\n# logger = structlog.get_logger()\nto_file(open(\"log.txt\", \"w\"))\n\n\nclass MainWindowSignals(QObject):\n measure = Signal()\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self.signals = MainWindowSignals()\n self.save_data_dir = None\n self.current_measurement = 0\n self.max_measurements = 0\n self.collecting = False\n self.time_axis = None\n self.mutex = QMutex()\n self.comp_thread = QThread()\n self.exp_thread = QThread()\n self._connect_components()\n self._set_initial_widget_states()\n self._store_line_objects()\n self._set_plot_mouse_mode()\n\n def _set_initial_widget_states(self):\n with start_action(action_type=\"_set_initial_widget_states\"):\n self.update_max_measurements(self.ui.measurements.value())\n self.ui.stop_btn.setDisabled(True)\n self.ui.reset_avg_btn.setDisabled(True)\n self.ui.save_loc.setDisabled(True)\n self.ui.save_loc_browse_btn.setDisabled(True)\n\n def _connect_components(self):\n \"\"\"Connect widgets and events to make the UI respond to user interaction.\n \"\"\"\n # Start/Stop Buttons\n self.ui.start_btn.clicked.connect(self.start_collecting)\n self.ui.stop_btn.clicked.connect(self.stop_collecting)\n # Measurement Counter\n self.ui.measurements.valueChanged.connect(self.update_max_measurements)\n # Save data controls\n self.ui.save_data_checkbox.stateChanged.connect(self.save_loc_set_state)\n self.ui.save_loc_browse_btn.clicked.connect(self.get_save_location)\n # Start/Stop point\n self.ui.stop_pt_checkbox.stateChanged.connect(self.stop_pt_set_state)\n # Dark current\n self.ui.dark_curr_checkbox.stateChanged.connect(self.dark_curr_set_state)\n\n def closeEvent(self, event):\n \"\"\"Clean up worker threads if the window is closed while collecting data.\n\n Notes\n -----\n This overrides the default closeEvent method of QMainWindow.\n \"\"\"\n with start_action(action_type=\"close\"):\n if self.collecting:\n with start_action(action_type=\"quit_threads\"):\n self.comp_thread.quit()\n self.exp_thread.quit()\n with start_action(action_type=\"wait_comp_thread\"):\n self.comp_thread.wait()\n with start_action(action_type=\"wait_exp_thread\"):\n self.exp_thread.wait()\n event.accept()\n\n def _store_line_objects(self):\n \"\"\"Store references to the lines so the data can be updated later.\n \"\"\"\n starting_data = (np.arange(100), np.zeros(100))\n self.live_par_line = self.ui.live_par_graph.plot(*starting_data)\n self.live_perp_line = self.ui.live_perp_graph.plot(*starting_data)\n self.live_ref_line = self.ui.live_ref_graph.plot(*starting_data)\n self.live_da_par_line = self.ui.live_da_par_graph.plot(*starting_data)\n self.live_da_perp_line = self.ui.live_da_perp_graph.plot(*starting_data)\n self.live_da_cd_line = self.ui.live_da_cd_graph.plot(*starting_data)\n self.avg_da_par_line = self.ui.avg_da_par_graph.plot(*starting_data)\n self.avg_da_perp_line = self.ui.avg_da_perp_graph.plot(*starting_data)\n self.avg_da_cd_line = 
self.ui.avg_da_cd_graph.plot(*starting_data)\n\n def _set_plot_mouse_mode(self):\n self.ui.live_par_graph.getPlotItem().getViewBox().setMouseMode(ViewBox.RectMode)\n self.ui.live_perp_graph.getPlotItem().getViewBox().setMouseMode(\n ViewBox.RectMode\n )\n self.ui.live_ref_graph.getPlotItem().getViewBox().setMouseMode(ViewBox.RectMode)\n self.ui.live_da_par_graph.getPlotItem().getViewBox().setMouseMode(\n ViewBox.RectMode\n )\n self.ui.live_da_perp_graph.getPlotItem().getViewBox().setMouseMode(\n ViewBox.RectMode\n )\n self.ui.live_da_cd_graph.getPlotItem().getViewBox().setMouseMode(\n ViewBox.RectMode\n )\n self.ui.avg_da_par_graph.getPlotItem().getViewBox().setMouseMode(\n ViewBox.RectMode\n )\n self.ui.avg_da_perp_graph.getPlotItem().getViewBox().setMouseMode(\n ViewBox.RectMode\n )\n self.ui.avg_da_cd_graph.getPlotItem().getViewBox().setMouseMode(\n ViewBox.RectMode\n )\n\n @Slot(np.ndarray)\n def set_time_axis(self, values):\n with start_action(action_type=\"set_time_axis\"):\n self.time_axis = values * 1e6\n\n @Slot(int)\n def update_max_measurements(self, x):\n with start_action(action_type=\"update_max_measurements\", new_max=x):\n self.max_measurements = x\n self.ui.measurement_counter_label.setText(\n f\"{self.current_measurement}/{self.max_measurements}\"\n )\n\n @Slot(int)\n def update_current_measurement(self, x):\n with start_action(action_type=\"update_current_measurement\", new_meas=x):\n self.current_measurement = x\n self.ui.measurement_counter_label.setText(\n f\"{self.current_measurement}/{self.max_measurements}\"\n )\n\n @Slot()\n def start_collecting(self):\n \"\"\"Begins collecting data when the \"Start\" button is pressed.\n \"\"\"\n with start_action(action_type=\"start_collecting\"):\n settings, should_quit = self._collect_settings()\n if should_quit:\n Message.log(should_quit=should_quit)\n return\n with start_action(action_type=\"create_workers\"):\n self.comp_worker = ComputationWorker(self.mutex, settings)\n self.exp_worker = ExperimentWorker(self.mutex, settings)\n self._connect_worker_signals()\n self.comp_worker.moveToThread(self.comp_thread)\n self.exp_worker.moveToThread(self.exp_thread)\n with start_action(action_type=\"start_threads\"):\n self.comp_thread.start()\n self.exp_thread.start()\n self.signals.measure.emit()\n Message.log(signal=\"measure\")\n self._disable_acq_controls()\n\n def _collect_settings(self):\n \"\"\"Collect all the settings from the UI.\n \"\"\"\n with start_action(action_type=\"collect_settings\"):\n settings = UiSettings()\n settings, should_quit = self._collect_meas_settings(settings)\n if should_quit:\n return settings, should_quit\n settings, should_quit = self._collect_instr_settings(settings)\n if should_quit:\n return settings, should_quit\n settings, should_quit = self._collect_save_settings(settings)\n if should_quit:\n return settings, should_quit\n settings, should_quit = self._collect_start_stop_settings(settings)\n if should_quit:\n return settings, should_quit\n settings, should_quit = self._collect_dark_curr_settings(settings)\n return settings, should_quit\n\n def _collect_dark_curr_settings(self, settings):\n with start_action(action_type=\"dark_current_settings\"):\n quit = False\n use_dark_curr = self.ui.dark_curr_checkbox.isChecked()\n Message.log(checked=use_dark_curr)\n if not use_dark_curr:\n Message.log(quit=quit)\n return settings, quit\n try:\n dark_curr_par = float(self.ui.dark_curr_par.text())\n dark_curr_perp = float(self.ui.dark_curr_perp.text())\n dark_curr_ref = float(self.ui.dark_curr_ref.text())\n 
settings.dark_curr_par = dark_curr_par\n settings.dark_curr_perp = dark_curr_perp\n settings.dark_curr_ref = dark_curr_ref\n except ValueError:\n quit = True\n Message.log(quit=quit)\n return settings, quit\n\n def _collect_meas_settings(self, settings):\n \"\"\"Collect the number of measurements from the UI.\n \"\"\"\n with start_action(action_type=\"measurement_settings\"):\n quit = False\n settings.num_measurements = self.ui.measurements.value()\n Message.log(quit=quit)\n return settings, quit\n\n def _collect_instr_settings(self, settings):\n \"\"\"Collect the settings from the UI related to the instrument.\n \"\"\"\n with start_action(action_type=\"instrument_settings\"):\n quit = False\n instr_name = self.ui.instr_name.text()\n Message.log(instrument_name=instr_name)\n if instr_name == \"\":\n Message.log(quit=quit)\n quit = True\n settings.instr_name = instr_name\n Message.log(quit=quit)\n return settings, quit\n\n def _collect_save_settings(self, settings):\n \"\"\"Collect the settings from the UI related to saving data.\n \"\"\"\n with start_action(action_type=\"save_data_settings\"):\n quit = False\n should_save = self.ui.save_data_checkbox.isChecked()\n Message.log(checked=should_save)\n if should_save and not self._saving_should_proceed():\n Message.log(quit=quit)\n quit = True\n settings.save = should_save\n settings.save_loc = self.ui.save_loc.text()\n Message.log(dir=settings.save_loc)\n Message.log(quit=quit)\n return settings, quit\n\n def _collect_start_stop_settings(self, settings):\n \"\"\"Collect the settings from the UI related to the Start/Stop points.\n \"\"\"\n with start_action(action_type=\"start_stop_settings\"):\n quit = False\n start_pt = self.ui.start_pt.value()\n settings.start_pt = start_pt\n Message.log(start=start_pt)\n if not self.ui.stop_pt_checkbox.isChecked():\n stop_pt = self.ui.stop_pt.value()\n settings.stop_pt = stop_pt\n if start_pt >= stop_pt:\n self._tell_start_greater_than_stop()\n quit = True\n Message.log(stop=settings.stop_pt)\n Message.log(quit=quit)\n return settings, quit\n\n def _connect_worker_signals(self):\n \"\"\"Connect signals for communication between workers and the main window.\n \"\"\"\n # Produced by the experiment worker\n self.exp_worker.signals.preamble.connect(self.comp_worker.store_preamble)\n self.exp_worker.signals.new_data.connect(self.comp_worker.compute_signals)\n self.exp_worker.signals.done.connect(self.cleanup_when_done)\n # Produced by the computation worker\n self.comp_worker.signals.time_axis.connect(self.set_time_axis)\n self.comp_worker.signals.new_data.connect(self.update_plots)\n self.comp_worker.signals.meas_num.connect(self.update_current_measurement)\n # Produced by the main window\n self.signals.measure.connect(self.exp_worker.measure)\n self.ui.reset_avg_btn.clicked.connect(self.comp_worker.reset_averages)\n\n def _disable_acq_controls(self):\n \"\"\"Disable certain controls while collecting data.\n \"\"\"\n # Disabled\n self.ui.start_btn.setDisabled(True)\n self.ui.instr_name.setDisabled(True)\n self.ui.measurements.setDisabled(True)\n self.ui.save_data_checkbox.setDisabled(True)\n self.ui.save_loc.setDisabled(True)\n self.ui.save_loc_browse_btn.setDisabled(True)\n self.ui.start_pt.setDisabled(True)\n self.ui.stop_pt.setDisabled(True)\n self.ui.stop_pt_checkbox.setDisabled(True)\n # Enabled\n self.ui.stop_btn.setEnabled(True)\n self.ui.reset_avg_btn.setEnabled(True)\n\n def _enable_acq_controls(self):\n \"\"\"Enable certain controls after data collection is complete.\n \"\"\"\n # Enabled\n 
self.ui.start_btn.setEnabled(True)\n self.ui.instr_name.setEnabled(True)\n self.ui.measurements.setEnabled(True)\n self.ui.save_data_checkbox.setEnabled(True)\n if self.ui.save_data_checkbox.isChecked():\n self.ui.save_loc.setEnabled(True)\n self.ui.save_loc_browse_btn.setEnabled(True)\n self.ui.start_pt.setEnabled(True)\n self.ui.stop_pt_checkbox.setEnabled(True)\n if not self.ui.stop_pt_checkbox.isChecked():\n self.ui.stop_pt.setEnabled(True)\n # Disabled\n self.ui.stop_btn.setDisabled(True)\n self.ui.reset_avg_btn.setDisabled(True)\n\n @Slot()\n def stop_collecting(self):\n \"\"\"Stops collecting data when the \"Stop\" button is pressed.\n \"\"\"\n with start_action(action_type=\"stop_collecting\"):\n with start_action(action_type=\"mutex\"):\n self.mutex.lock()\n common.SHOULD_STOP = True\n self.mutex.unlock()\n with start_action(action_type=\"quit_threads\"):\n self.comp_thread.quit()\n self.exp_thread.quit()\n with start_action(action_type=\"wait_comp_thread\"):\n self.comp_thread.wait()\n with start_action(action_type=\"wait_exp_thread\"):\n self.exp_thread.wait()\n self._enable_acq_controls()\n self.current_measurement = 0\n\n @Slot()\n def cleanup_when_done(self):\n \"\"\"Clean up workers and threads after data collection is complete.\n \"\"\"\n with start_action(action_type=\"done_collecting\"):\n with start_action(action_type=\"quit_threads\"):\n self.comp_thread.quit()\n self.exp_thread.quit()\n with start_action(action_type=\"wait_comp_thread\"):\n self.comp_thread.wait()\n with start_action(action_type=\"wait_exp_thread\"):\n self.exp_thread.wait()\n with start_action(action_type=\"mutex\"):\n self.mutex.lock()\n common.SHOULD_STOP = False\n self.mutex.unlock()\n self._enable_acq_controls()\n self.current_measurement = 0\n with start_action(action_type=\"dialog\"):\n QMessageBox.information(\n self, \"Done\", \"The experiment has finished.\", QMessageBox.StandardButton.Ok\n )\n\n @Slot(PlotData)\n def update_plots(self, data):\n \"\"\"Update the plots in the Live/Average tabs when new data is available.\n\n Parameters\n ----------\n data : PlotData\n Three live data channels and the signals computed from them.\n \"\"\"\n with start_action(action_type=\"update_plots\"):\n self.live_par_line.setData(self.time_axis, data.par)\n self.live_perp_line.setData(self.time_axis, data.perp)\n self.live_ref_line.setData(self.time_axis, data.ref)\n if data.da_par is not None:\n with start_action(action_type=\"update_da_plots\"):\n self.live_da_par_line.setData(self.time_axis, data.da_par)\n self.live_da_perp_line.setData(self.time_axis, data.da_perp)\n self.live_da_cd_line.setData(self.time_axis, data.da_cd)\n self.avg_da_par_line.setData(self.time_axis, data.avg_da_par)\n self.avg_da_perp_line.setData(self.time_axis, data.avg_da_perp)\n self.avg_da_cd_line.setData(self.time_axis, data.avg_da_cd)\n\n @Slot(int)\n def save_loc_set_state(self, state):\n \"\"\"Enable or disable the save location controls in response to the checkbox.\n\n Parameters\n ----------\n state : int\n An integer representing the state of the checkbox.\n\n Notes\n -----\n 0 - unchecked\n 2 - checked\n \"\"\"\n with start_action(action_type=\"save_loc_state\", state=state):\n if state == 0:\n self.ui.save_loc.setDisabled(True)\n self.ui.save_loc_browse_btn.setDisabled(True)\n Message.log(save=\"disabled\")\n elif state == 2:\n self.ui.save_loc.setEnabled(True)\n self.ui.save_loc_browse_btn.setEnabled(True)\n Message.log(save=\"enabled\")\n\n @Slot()\n def get_save_location(self):\n \"\"\"Get an existing directory in 
which to store the collected data.\n        \"\"\"\n        with start_action(action_type=\"get_save_location\"):\n            self.save_data_dir = QFileDialog.getExistingDirectory()\n            self.ui.save_loc.setText(self.save_data_dir)\n            Message.log(dir=self.save_data_dir)\n\n    def _save_loc_still_valid(self):\n        \"\"\"Ensure that the path to the directory still exists before saving data to it.\n        \"\"\"\n        save_dir = Path(self.save_data_dir)\n        return save_dir.exists()\n\n    def _tell_save_loc_is_invalid(self):\n        \"\"\"Tell the user that the current save location isn't valid or doesn't exist.\n        \"\"\"\n        with start_action(action_type=\"dialog\"):\n            QMessageBox.critical(\n                self,\n                \"Invalid Save Location\",\n                \"The current save data location is invalid or doesn't exist. Please choose a new location.\",\n                QMessageBox.StandardButton.Ok,\n            )\n\n    def _save_would_overwrite(self):\n        \"\"\"Returns True if the save directory contains *anything*.\n        \"\"\"\n        for item in Path(self.save_data_dir).iterdir():\n            return True\n        return False\n\n    def _should_overwrite(self):\n        \"\"\"Asks the user whether data in the save directory should be overwritten.\n        \"\"\"\n        with start_action(action_type=\"dialog\") as action:\n            reply = QMessageBox.warning(\n                self,\n                \"Overwrite?\",\n                \"The current directory contents will be erased. Continue?\",\n                QMessageBox.StandardButton.Ok | QMessageBox.StandardButton.Cancel,\n            )\n            should_overwrite = reply == QMessageBox.StandardButton.Ok\n            action.add_success_fields(overwrite=should_overwrite)\n            return should_overwrite\n\n    def _saving_should_proceed(self):\n        \"\"\"Determine whether valid settings have been entered for saving data.\n        \"\"\"\n        with start_action(action_type=\"saving_should_proceed\"):\n            try:\n                loc_is_valid = self._save_loc_still_valid()\n            except TypeError:\n                loc_is_valid = False\n            if not loc_is_valid:\n                self._tell_save_loc_is_invalid()\n                return False\n            would_overwrite = self._save_would_overwrite()\n            if would_overwrite and (not self._should_overwrite()):\n                return False\n            return True\n\n    @Slot(int)\n    def stop_pt_set_state(self, state):\n        \"\"\"Enable or disable the \"Stop Point\" controls in response to the checkbox.\n\n        Parameters\n        ----------\n        state : int\n            An integer representing the state of the checkbox.\n\n        Notes\n        -----\n        0 - unchecked\n        2 - checked\n        \"\"\"\n        with start_action(action_type=\"stop_pt_state\", state=state):\n            if state == 2:\n                self.ui.stop_pt.setDisabled(True)\n                Message.log(stop_pt=\"disabled\")\n            elif state == 0:\n                self.ui.stop_pt.setEnabled(True)\n                Message.log(stop_pt=\"enabled\")\n\n    def _tell_start_greater_than_stop(self):\n        \"\"\"Tell the user that the Start point must be less than the Stop point.\n        \"\"\"\n        QMessageBox.critical(\n            self,\n            \"Invalid Start/Stop Points\",\n            \"The Start point must be less than the Stop point.\",\n            QMessageBox.StandardButton.Ok,\n        )\n\n    @Slot(int)\n    def dark_curr_set_state(self, state):\n        \"\"\"Enable or disable the dark current controls in response to the checkbox.\n\n        Parameters\n        ----------\n        state : int\n            An integer representing the state of the checkbox.\n\n        Notes\n        -----\n        0 - unchecked\n        2 - checked\n        \"\"\"\n        with start_action(action_type=\"dark_curr_state\", state=state):\n            if state == 0:\n                self.ui.dark_curr_par.setDisabled(True)\n                self.ui.dark_curr_perp.setDisabled(True)\n                self.ui.dark_curr_ref.setDisabled(True)\n                Message.log(dark_curr=\"disabled\")\n            elif state == 2:\n                self.ui.dark_curr_par.setEnabled(True)\n                self.ui.dark_curr_perp.setEnabled(True)\n                self.ui.dark_curr_ref.setEnabled(True)\n                Message.log(dark_curr=\"enabled\")\n", 
"repo_name": "zmitchell/ns_trcd", "sub_path": "ns_trcd/ui.py", "file_name": "ui.py", "file_ext": "py", "file_size_in_byte": 21263, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "eliot.to_file", "line_number": 16, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QObject", "line_number": 19, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 20, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMainWindow", "line_number": 23, "usage_type": "name"}, {"api_name": "generated_ui.Ui_MainWindow", "line_number": 26, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QMutex", "line_number": 34, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QThread", "line_number": 35, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QThread", "line_number": 36, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 43, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 73, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 75, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 78, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 87, "usage_type": "call"}, {"api_name": "pyqtgraph.ViewBox.RectMode", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pyqtgraph.ViewBox", "line_number": 99, "usage_type": "name"}, {"api_name": "pyqtgraph.ViewBox.RectMode", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pyqtgraph.ViewBox", "line_number": 101, "usage_type": "name"}, {"api_name": "pyqtgraph.ViewBox.RectMode", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pyqtgraph.ViewBox", "line_number": 103, "usage_type": "name"}, {"api_name": "pyqtgraph.ViewBox.RectMode", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pyqtgraph.ViewBox", "line_number": 105, "usage_type": "name"}, {"api_name": "pyqtgraph.ViewBox.RectMode", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pyqtgraph.ViewBox", "line_number": 108, "usage_type": "name"}, {"api_name": "pyqtgraph.ViewBox.RectMode", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pyqtgraph.ViewBox", "line_number": 111, "usage_type": "name"}, {"api_name": "pyqtgraph.ViewBox.RectMode", "line_number": 114, "usage_type": "attribute"}, {"api_name": "pyqtgraph.ViewBox", "line_number": 114, "usage_type": "name"}, {"api_name": "pyqtgraph.ViewBox.RectMode", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pyqtgraph.ViewBox", "line_number": 117, "usage_type": "name"}, {"api_name": "pyqtgraph.ViewBox.RectMode", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pyqtgraph.ViewBox", "line_number": 120, "usage_type": "name"}, {"api_name": "eliot.start_action", "line_number": 125, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 123, "usage_type": "attribute"}, {"api_name": "eliot.start_action", "line_number": 130, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 128, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 138, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 136, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 148, "usage_type": 
"call"}, {"api_name": "eliot.Message.log", "line_number": 151, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 151, "usage_type": "name"}, {"api_name": "eliot.start_action", "line_number": 153, "usage_type": "call"}, {"api_name": "comp_worker.ComputationWorker", "line_number": 154, "usage_type": "call"}, {"api_name": "exp_worker.ExperimentWorker", "line_number": 155, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 159, "usage_type": "call"}, {"api_name": "eliot.Message.log", "line_number": 163, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 163, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 144, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 169, "usage_type": "call"}, {"api_name": "common.UiSettings", "line_number": 170, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 187, "usage_type": "call"}, {"api_name": "eliot.Message.log", "line_number": 190, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 190, "usage_type": "name"}, {"api_name": "eliot.Message.log", "line_number": 192, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 192, "usage_type": "name"}, {"api_name": "eliot.Message.log", "line_number": 203, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 203, "usage_type": "name"}, {"api_name": "eliot.start_action", "line_number": 209, "usage_type": "call"}, {"api_name": "eliot.Message.log", "line_number": 212, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 212, "usage_type": "name"}, {"api_name": "eliot.start_action", "line_number": 218, "usage_type": "call"}, {"api_name": "eliot.Message.log", "line_number": 221, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 221, "usage_type": "name"}, {"api_name": "eliot.Message.log", "line_number": 223, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 223, "usage_type": "name"}, {"api_name": "eliot.Message.log", "line_number": 226, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 226, "usage_type": "name"}, {"api_name": "eliot.start_action", "line_number": 232, "usage_type": "call"}, {"api_name": "eliot.Message.log", "line_number": 235, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 235, "usage_type": "name"}, {"api_name": "eliot.Message.log", "line_number": 237, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 237, "usage_type": "name"}, {"api_name": "eliot.Message.log", "line_number": 241, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 241, "usage_type": "name"}, {"api_name": "eliot.Message.log", "line_number": 242, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 242, "usage_type": "name"}, {"api_name": "eliot.start_action", "line_number": 248, "usage_type": "call"}, {"api_name": "eliot.Message.log", "line_number": 252, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 252, "usage_type": "name"}, {"api_name": "eliot.Message.log", "line_number": 259, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 259, "usage_type": "name"}, {"api_name": "eliot.Message.log", "line_number": 260, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 260, "usage_type": "name"}, {"api_name": "eliot.start_action", "line_number": 318, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 319, "usage_type": "call"}, {"api_name": 
"common.SHOULD_STOP", "line_number": 321, "usage_type": "attribute"}, {"api_name": "eliot.start_action", "line_number": 323, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 326, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 328, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 314, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 337, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 338, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 341, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 343, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 345, "usage_type": "call"}, {"api_name": "common.SHOULD_STOP", "line_number": 347, "usage_type": "attribute"}, {"api_name": "eliot.start_action", "line_number": 351, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMessageBox.information", "line_number": 352, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMessageBox", "line_number": 352, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QMessageBox.StandardButton", "line_number": 353, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QMessageBox", "line_number": 353, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 333, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 365, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 370, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 356, "usage_type": "call"}, {"api_name": "common.PlotData", "line_number": 356, "usage_type": "argument"}, {"api_name": "eliot.start_action", "line_number": 392, "usage_type": "call"}, {"api_name": "eliot.Message.log", "line_number": 396, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 396, "usage_type": "name"}, {"api_name": "eliot.Message.log", "line_number": 400, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 400, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 378, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 406, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QFileDialog.getExistingDirectory", "line_number": 407, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QFileDialog", "line_number": 407, "usage_type": "name"}, {"api_name": "eliot.Message.log", "line_number": 409, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 409, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 402, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 414, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 420, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMessageBox.critical", "line_number": 421, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMessageBox", "line_number": 421, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QMessageBox.StandardButton", "line_number": 425, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QMessageBox", "line_number": 425, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 431, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 438, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMessageBox.warning", "line_number": 439, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMessageBox", "line_number": 439, "usage_type": "name"}, 
{"api_name": "PySide2.QtWidgets.QMessageBox.StandardButton", "line_number": 443, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QMessageBox", "line_number": 443, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QMessageBox.StandardButton", "line_number": 445, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QMessageBox", "line_number": 445, "usage_type": "name"}, {"api_name": "eliot.start_action", "line_number": 452, "usage_type": "call"}, {"api_name": "eliot.start_action", "line_number": 479, "usage_type": "call"}, {"api_name": "eliot.Message.log", "line_number": 482, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 482, "usage_type": "name"}, {"api_name": "eliot.Message.log", "line_number": 485, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 485, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 465, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMessageBox.critical", "line_number": 490, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMessageBox", "line_number": 490, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QMessageBox.StandardButton", "line_number": 494, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QMessageBox", "line_number": 494, "usage_type": "name"}, {"api_name": "eliot.start_action", "line_number": 511, "usage_type": "call"}, {"api_name": "eliot.Message.log", "line_number": 516, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 516, "usage_type": "name"}, {"api_name": "eliot.Message.log", "line_number": 521, "usage_type": "call"}, {"api_name": "eliot.Message", "line_number": 521, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Slot", "line_number": 497, "usage_type": "call"}]}
+{"seq_id": "41054781890", "text": "import base64\nimport json\nimport sys\nimport time\n\nimport serial\n\n\nclass SerialConnection(object):\n def __init__(\n self,\n in_port,\n in_baud,\n in_parity,\n in_data_bits,\n in_stop_bits,\n in_hw_ctrl,\n in_sw_ctrl,\n in_timeout=60,\n termination=\"\\r\\n\",\n ):\n super().__init__()\n self.port = in_port\n self.baud = in_baud\n self.parity = get_parity_value(in_parity)\n self.data_bits = in_data_bits\n self.stop_bits = in_stop_bits\n self.timeout = in_timeout\n self.rtscts = in_hw_ctrl\n self.xonxoff = in_sw_ctrl\n self.termination = termination\n self.cmd_read_wait = 0.1\n self.cmd_timeout = 1\n self.child = None\n self.logs = []\n\n def connect(self):\n result_dict = {\"error\": 0, \"message\": \"\"}\n try:\n self.child = serial.Serial(\n port=self.port,\n baudrate=self.baud,\n bytesize=self.data_bits,\n parity=self.parity,\n stopbits=self.stop_bits,\n timeout=self.timeout,\n xonxoff=self.xonxoff,\n rtscts=self.rtscts,\n )\n if self.child:\n result_dict[\"child\"] = self\n return_value = self.child.isOpen()\n if return_value:\n self.child.flushInput()\n self.child.flushOutput()\n\n else:\n result_dict[\"error\"] = 1\n result_dict[\"message\"] = \"child is None\"\n self.logs.append(\"Cannot open the serial connection to the device\")\n except Exception as e:\n result_dict[\"error\"] = 1\n result_dict[\"message\"] = e\n self.logs.append(str(e))\n return result_dict\n\n def disconnect(self):\n if self.child:\n self.child.close()\n\n def send_command_device(self, cmd):\n result_dict = {\"error\": 0}\n try:\n if self.child and self.child.isOpen():\n cmd_write = cmd.encode(\"ascii\") + self.termination.encode(\"ascii\")\n self.child.write(cmd_write)\n # Read the output and send it back\n msg = self.read()\n else:\n result_dict[\"error\"] = 1\n msg = \"The port is not open\"\n self.logs.append(\"Failed to open the port\")\n except Exception as e:\n result_dict[\"error\"] = 1\n msg = \"An Exception occurred while writing to port \" + self.port\n print(str(e))\n self.logs.append(str(e))\n\n result_dict[\"message\"] = msg\n\n return result_dict\n\n def send_command(self, cmd, timeout=30):\n self.cmd_timeout = timeout\n result_dict = self.connect()\n if result_dict[\"error\"] == 0:\n result_dict = self.send_command_device(cmd)\n self.disconnect()\n\n return result_dict\n\n def read(self):\n \"\"\"\n read data from serial port\n :return: read data\n \"\"\"\n time.sleep(self.cmd_read_wait) # Need to wait before reading\n output = []\n self.child.timeout = 1\n length = 1\n # Timeout\n time_spent_so_far = 0.0\n start_time = time.time()\n while length != 0 and time_spent_so_far <= self.cmd_timeout:\n msg = self.child.readline().decode(\"ascii\").strip()\n output.append(msg)\n length = len(msg)\n delta = time.time() - start_time\n time_spent_so_far = delta\n return output\n\n\n# Static methods\ndef get_parity_value(in_parity):\n parity = None\n if in_parity == 0:\n parity = serial.PARITY_NONE\n elif in_parity == 1:\n parity = serial.PARITY_ODD\n elif in_parity == 2:\n parity = serial.PARITY_EVEN\n return parity\n\n\nif __name__ == \"__main__\":\n arg_jsn_string = sys.argv[1]\n base64_bytes = arg_jsn_string.encode(\"ascii\")\n message_bytes = base64.b64decode(base64_bytes)\n message = message_bytes.decode(\"ascii\")\n arg_dict = json.loads(message)\n\n hex_string_termination = arg_dict[\"termination\"]\n in_pck_termination = \"\"\n i = 0\n while (len(hex_string_termination)) > i:\n hex_data = hex_string_termination[i : i + 2]\n hex_data = int(hex_data, 16)\n 
hex_data = chr(hex_data)\n in_pck_termination += hex_data\n i += 2\n # cmd\n conn = SerialConnection(\n in_port=arg_dict[\"port\"],\n in_baud=arg_dict[\"baud\"],\n in_parity=arg_dict[\"parity\"],\n in_data_bits=arg_dict[\"data_bits\"],\n in_stop_bits=arg_dict[\"stop_bits\"],\n in_hw_ctrl=arg_dict[\"rtscts\"],\n in_sw_ctrl=arg_dict[\"xonxoff\"],\n termination=in_pck_termination,\n )\n result = conn.send_command(arg_dict[\"cmd\"], arg_dict[\"timeout\"])\n print(result[\"message\"])\n", "repo_name": "terragraph/terragraph-ctf", "sub_path": "ctf/common/connections/serial_jumphost_api/serial_api_v1/SerialConnection_api.py", "file_name": "SerialConnection_api.py", "file_ext": "py", "file_size_in_byte": 4890, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "serial.Serial", "line_number": 40, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 107, "usage_type": "call"}, {"api_name": "time.time", "line_number": 113, "usage_type": "call"}, {"api_name": "time.time", "line_number": 118, "usage_type": "call"}, {"api_name": "serial.PARITY_NONE", "line_number": 127, "usage_type": "attribute"}, {"api_name": "serial.PARITY_ODD", "line_number": 129, "usage_type": "attribute"}, {"api_name": "serial.PARITY_EVEN", "line_number": 131, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 136, "usage_type": "attribute"}, {"api_name": "base64.b64decode", "line_number": 138, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 140, "usage_type": "call"}]}
+{"seq_id": "13344333382", "text": "from kivy.lang import Builder\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivy.uix.camera import Camera\r\nfrom kivymd.app import MDApp\r\nfrom kivymd.uix.label import MDLabel\r\nimport cv2\r\nimport numpy as np\r\nfrom keras.applications import ResNet50\r\nfrom keras.applications.resnet50 import preprocess_input\r\n\r\n# Charger un modèle pré-entraîné\r\nmodel = ResNet50(weights='imagenet')\r\n\r\n# Fonction pour prédire si l'image contient un chien\r\ndef dog_detector(img):\r\n img = cv2.resize(img, (224, 224)) # Redimensionner l'image\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # Convertir l'image en RGB\r\n img = np.expand_dims(img, axis=0)\r\n img = preprocess_input(img)\r\n prediction = model.predict(img)\r\n return (np.argmax(prediction) <= 268) and (np.argmax(prediction) >= 151)\r\n\r\n# Fonction pour la détection en temps réel\r\ndef live_dog_detection(camera, label):\r\n # Access the camera texture and convert it to a numpy array\r\n frame = np.frombuffer(camera.texture.pixels, dtype='uint8')\r\n frame = frame.reshape((camera.texture.height, camera.texture.width, 4))\r\n\r\n # Détection de chien en temps réel\r\n if dog_detector(frame):\r\n text = \"Chien détecté\"\r\n else:\r\n text = \"Pas de chien détecté\"\r\n\r\n label.text = text\r\n\r\nKV = '''\r\nBoxLayout:\r\n orientation: 'vertical'\r\n\r\n Camera:\r\n id: camera\r\n resolution: (640, 480)\r\n play: True\r\n\r\n MDLabel:\r\n id: detection_label\r\n text: \"Attente de détection...\"\r\n halign: 'center'\r\n'''\r\n\r\nclass MonApplication(MDApp):\r\n\r\n def build(self):\r\n return Builder.load_string(KV)\r\n\r\n def on_start(self):\r\n camera = self.root.ids.camera\r\n label = self.root.ids.detection_label\r\n # Use the 'on_texture' event to trigger the live_dog_detection function\r\n camera.bind(on_texture=lambda instance: self.live_dog_detection(instance, label))\r\n\r\nif __name__ == \"__main__\":\r\n MonApplication().run()\r\n", "repo_name": "marilyneyapo/First-IA_PROJECT", "sub_path": "kivy_apk.py", "file_name": "kivy_apk.py", "file_ext": "py", "file_size_in_byte": 1971, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "keras.applications.ResNet50", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.applications.resnet50.preprocess_input", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 26, "usage_type": "call"}, {"api_name": "kivymd.app.MDApp", "line_number": 52, "usage_type": "name"}, {"api_name": "kivy.lang.Builder.load_string", "line_number": 55, "usage_type": "call"}, {"api_name": "kivy.lang.Builder", "line_number": 55, "usage_type": "name"}]}
+{"seq_id": "22396029976", "text": "# Tests numba.analysis functions\nfrom __future__ import print_function, absolute_import, division\n\nimport numpy as np\nfrom numba.compiler import compile_isolated, run_frontend\nfrom numba import types, rewrites, ir, jit, ir_utils\nfrom .support import TestCase, MemoryLeakMixin\n\n\nfrom numba.analysis import dead_branch_prune\n\n\ndef compile_to_ir(func):\n func_ir = run_frontend(func)\n\n class MockPipeline(object):\n def __init__(self, func_ir):\n self.typingctx = None\n self.targetctx = None\n self.args = None\n self.func_ir = func_ir\n self.typemap = None\n self.return_type = None\n self.calltypes = None\n # call this to get print etc rewrites\n rewrites.rewrite_registry.apply('before-inference', MockPipeline(func_ir),\n func_ir)\n return func_ir\n\n\nclass TestBranchPrune(MemoryLeakMixin, TestCase):\n \"\"\"\n Tests branch pruning\n \"\"\"\n _DEBUG = False\n\n # find *all* branches\n def find_branches(self, the_ir):\n branches = []\n for blk in the_ir.blocks.values():\n tmp = [_ for _ in blk.find_insts(cls=ir.Branch)]\n branches.extend(tmp)\n return branches\n\n def assert_prune(self, func, args_tys, prune, *args):\n # This checks that the expected pruned branches have indeed been pruned.\n # func is a python function to assess\n # args_tys is the numba types arguments tuple\n # prune arg is a list, one entry per branch. The value in the entry is\n # encoded as follows:\n # True: using constant inference only, the True branch will be pruned\n # False: using constant inference only, the False branch will be pruned\n # None: under no circumstances should this branch be pruned\n # *args: the argument instances to pass to the function to check\n # execution is still valid post transform\n\n func_ir = compile_to_ir(func)\n before = func_ir.copy()\n if self._DEBUG:\n print(\"=\" * 80)\n print(\"before prune\")\n func_ir.dump()\n\n dead_branch_prune(func_ir, args_tys)\n\n after = func_ir\n if self._DEBUG:\n print(\"after prune\")\n func_ir.dump()\n\n before_branches = self.find_branches(before)\n self.assertEqual(len(before_branches), len(prune))\n\n # what is expected to be pruned\n expect_removed = []\n for idx, prune in enumerate(prune):\n branch = before_branches[idx]\n if prune is True:\n expect_removed.append(branch.truebr)\n elif prune is False:\n expect_removed.append(branch.falsebr)\n elif prune is None:\n pass # nothing should be removed!\n else:\n assert 0, \"unreachable\"\n\n # compare labels\n original_labels = set([_ for _ in before.blocks.keys()])\n new_labels = set([_ for _ in after.blocks.keys()])\n # assert that the new labels are precisely the original less the\n # expected pruned labels\n try:\n self.assertEqual(new_labels, original_labels - set(expect_removed))\n except AssertionError as e:\n print(\"new_labels\", new_labels)\n print(\"original_labels\", original_labels)\n print(\"expect_removed\", expect_removed)\n raise e\n\n cres = compile_isolated(func, args_tys)\n res = cres.entry_point(*args)\n expected = func(*args)\n self.assertEqual(res, expected)\n\n def test_single_if(self):\n\n def impl(x):\n if 1 == 0:\n return 3.14159\n\n self.assert_prune(impl, (types.NoneType('none'),), [True], None)\n\n def impl(x):\n if 1 == 1:\n return 3.14159\n\n self.assert_prune(impl, (types.NoneType('none'),), [False], None)\n\n def impl(x):\n if x is None:\n return 3.14159\n\n self.assert_prune(impl, (types.NoneType('none'),), [False], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [True], 10)\n\n def impl(x):\n if x == 10:\n 
return 3.14159\n\n self.assert_prune(impl, (types.NoneType('none'),), [True], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10)\n\n def impl(x):\n if x == 10:\n z = 3.14159 # noqa: F841 # no effect\n\n self.assert_prune(impl, (types.NoneType('none'),), [True], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10)\n\n # TODO: cannot handle this without const prop\n # def impl(x):\n # z = None\n # y = z\n # if x == y:\n # print(\"x is 10\")\n\n # self.assert_prune(impl, (types.NoneType('none'),), [None], None)\n # self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10)\n\n def test_single_if_else(self):\n\n def impl(x):\n if x is None:\n return 3.14159\n else:\n return 1.61803\n\n self.assert_prune(impl, (types.NoneType('none'),), [False], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [True], 10)\n\n def test_single_if_const_val(self):\n\n def impl(x):\n if x == 100:\n return 3.14159\n\n self.assert_prune(impl, (types.NoneType('none'),), [True], None)\n self.assert_prune(impl, (types.IntegerLiteral(100),), [None], 100)\n\n def impl(x):\n # switch the condition order\n if 100 == x:\n return 3.14159\n\n self.assert_prune(impl, (types.NoneType('none'),), [True], None)\n self.assert_prune(impl, (types.IntegerLiteral(100),), [None], 100)\n\n def test_single_if_else_two_const_val(self):\n\n def impl(x, y):\n if x == y:\n return 3.14159\n else:\n return 1.61803\n\n self.assert_prune(impl, (types.IntegerLiteral(100),) * 2, [None], 100,\n 100)\n self.assert_prune(impl, (types.NoneType('none'),) * 2, [False], None,\n None)\n self.assert_prune(impl, (types.IntegerLiteral(100),\n types.NoneType('none'),), [True], 100, None)\n self.assert_prune(impl, (types.IntegerLiteral(100),\n types.IntegerLiteral(1000)), [None], 100, 1000)\n\n def test_single_if_else_w_following_undetermined(self):\n\n def impl(x):\n x_is_none_work = False\n if x is None:\n x_is_none_work = True\n else:\n dead = 7 # noqa: F841 # no effect\n\n if x_is_none_work:\n y = 10\n else:\n y = -3\n return y\n\n self.assert_prune(impl, (types.NoneType('none'),), [False, None], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [True, None], 10)\n\n def impl(x):\n x_is_none_work = False\n if x is None:\n x_is_none_work = True\n else:\n pass # force the True branch exit to be on backbone\n\n if x_is_none_work:\n y = 10\n else:\n y = -3\n return y\n\n self.assert_prune(impl, (types.NoneType('none'),), [None, None], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [True, None], 10)\n\n def test_double_if_else_rt_const(self):\n\n def impl(x):\n one_hundred = 100\n x_is_none_work = 4\n if x is None:\n x_is_none_work = 100\n else:\n dead = 7 # noqa: F841 # no effect\n\n if x_is_none_work == one_hundred:\n y = 10\n else:\n y = -3\n\n return y, x_is_none_work\n\n self.assert_prune(impl, (types.NoneType('none'),), [False, None], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [True, None], 10)\n\n def test_double_if_else_non_literal_const(self):\n\n def impl(x):\n one_hundred = 100\n if x == one_hundred:\n y = 3.14159\n else:\n y = 1.61803\n return y\n\n # no prune as compilation specialization on literal value not permitted\n self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10)\n self.assert_prune(impl, (types.IntegerLiteral(100),), [None], 100)\n\n def test_single_two_branches_same_cond(self):\n\n def impl(x):\n if x is None:\n y = 10\n else:\n y = 40\n\n if x is not None:\n z = 100\n else:\n z = 400\n\n return z, y\n\n 
self.assert_prune(impl, (types.NoneType('none'),), [False, True], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [True, False], 10)\n\n def test_cond_is_kwarg_none(self):\n\n def impl(x=None):\n if x is None:\n y = 10\n else:\n y = 40\n\n if x is not None:\n z = 100\n else:\n z = 400\n\n return z, y\n\n self.assert_prune(impl, (types.Omitted(None),),\n [False, True], None)\n self.assert_prune(impl, (types.NoneType('none'),), [False, True], None)\n self.assert_prune(impl, (types.IntegerLiteral(10),), [True, False], 10)\n\n def test_cond_is_kwarg_value(self):\n\n def impl(x=1000):\n if x == 1000:\n y = 10\n else:\n y = 40\n\n if x != 1000:\n z = 100\n else:\n z = 400\n\n return z, y\n\n self.assert_prune(impl, (types.Omitted(1000),), [None, None], 1000)\n self.assert_prune(impl, (types.IntegerLiteral(1000),), [None, None],\n 1000)\n self.assert_prune(impl, (types.IntegerLiteral(0),), [None, None], 0)\n self.assert_prune(impl, (types.NoneType('none'),), [True, False], None)\n\n def test_cond_rewrite_is_correct(self):\n # this checks that when a condition is replaced, it is replace by a\n # true/false bit that correctly represents the evaluated condition\n def fn(x):\n if x is None:\n return 10\n return 12\n\n def check(func, arg_tys, bit_val):\n func_ir = compile_to_ir(func)\n\n # check there is 1 branch\n before_branches = self.find_branches(func_ir)\n self.assertEqual(len(before_branches), 1)\n\n # check the condition in the branch is a binop\n condition_var = before_branches[0].cond\n condition_defn = ir_utils.get_definition(func_ir, condition_var)\n self.assertEqual(condition_defn.op, 'binop')\n\n # do the prune, this should kill the dead branch and rewrite the\n #'condition to a true/false const bit\n if self._DEBUG:\n print(\"=\" * 80)\n print(\"before prune\")\n func_ir.dump()\n dead_branch_prune(func_ir, arg_tys)\n if self._DEBUG:\n print(\"=\" * 80)\n print(\"after prune\")\n func_ir.dump()\n\n # after mutation, the condition should be a const value `bit_val`\n new_condition_defn = ir_utils.get_definition(func_ir, condition_var)\n self.assertTrue(isinstance(new_condition_defn, ir.Const))\n self.assertEqual(new_condition_defn.value, bit_val)\n\n check(fn, (types.NoneType('none'),), 1)\n check(fn, (types.IntegerLiteral(10),), 0)\n\n def test_obj_mode_fallback(self):\n # see issue #3879, this checks that object mode fall back doesn't suffer\n # from the IR mutation\n\n @jit\n def bug(a,b):\n if a.ndim == 1:\n if b is None:\n return 10\n return 12\n return []\n\n self.assertEqual(bug(np.arange(10), 4), 12)\n self.assertEqual(bug(np.arange(10), None), 10)\n self.assertEqual(bug(np.arange(10).reshape((2, 5)), 10), [])\n self.assertEqual(bug(np.arange(10).reshape((2, 5)), None), [])\n self.assertFalse(bug.nopython_signatures)\n", "repo_name": "nesliiiimmm/Web-Scraper-Python", "sub_path": "scraping/Lib/site-packages/numba/tests/test_analysis.py", "file_name": "test_analysis.py", "file_ext": "py", "file_size_in_byte": 12326, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "76", "api": [{"api_name": "numba.compiler.run_frontend", "line_number": 14, "usage_type": "call"}, {"api_name": "numba.rewrites.rewrite_registry.apply", "line_number": 26, "usage_type": "call"}, {"api_name": "numba.rewrites.rewrite_registry", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numba.rewrites", "line_number": 26, "usage_type": "name"}, {"api_name": "support.MemoryLeakMixin", "line_number": 31, "usage_type": "name"}, {"api_name": 
"support.TestCase", "line_number": 31, "usage_type": "name"}, {"api_name": "numba.ir.Branch", "line_number": 41, "usage_type": "attribute"}, {"api_name": "numba.ir", "line_number": 41, "usage_type": "name"}, {"api_name": "numba.analysis.dead_branch_prune", "line_number": 64, "usage_type": "call"}, {"api_name": "numba.compiler.compile_isolated", "line_number": 100, "usage_type": "call"}, {"api_name": "numba.types.NoneType", "line_number": 111, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 111, "usage_type": "name"}, {"api_name": "numba.types.NoneType", "line_number": 117, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 117, "usage_type": "name"}, {"api_name": "numba.types.NoneType", "line_number": 123, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 123, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 124, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 124, "usage_type": "name"}, {"api_name": "numba.types.NoneType", "line_number": 130, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 130, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 131, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 131, "usage_type": "name"}, {"api_name": "numba.types.NoneType", "line_number": 137, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 137, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 138, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 138, "usage_type": "name"}, {"api_name": "numba.types.NoneType", "line_number": 158, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 158, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 159, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 159, "usage_type": "name"}, {"api_name": "numba.types.NoneType", "line_number": 167, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 167, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 168, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 168, "usage_type": "name"}, {"api_name": "numba.types.NoneType", "line_number": 175, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 175, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 176, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 176, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 186, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 186, "usage_type": "name"}, {"api_name": "numba.types.NoneType", "line_number": 188, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 188, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 190, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 190, "usage_type": "name"}, {"api_name": "numba.types.NoneType", "line_number": 191, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 191, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 192, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 192, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 193, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 193, "usage_type": "name"}, {"api_name": "numba.types.NoneType", "line_number": 210, "usage_type": "call"}, 
{"api_name": "numba.types", "line_number": 210, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 211, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 211, "usage_type": "name"}, {"api_name": "numba.types.NoneType", "line_number": 226, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 226, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 227, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 227, "usage_type": "name"}, {"api_name": "numba.types.NoneType", "line_number": 246, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 246, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 247, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 247, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 260, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 260, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 261, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 261, "usage_type": "name"}, {"api_name": "numba.types.NoneType", "line_number": 278, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 278, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 279, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 279, "usage_type": "name"}, {"api_name": "numba.types.Omitted", "line_number": 296, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 296, "usage_type": "name"}, {"api_name": "numba.types.NoneType", "line_number": 298, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 298, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 299, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 299, "usage_type": "name"}, {"api_name": "numba.types.Omitted", "line_number": 316, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 316, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 317, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 317, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 319, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 319, "usage_type": "name"}, {"api_name": "numba.types.NoneType", "line_number": 320, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 320, "usage_type": "name"}, {"api_name": "numba.ir_utils.get_definition", "line_number": 339, "usage_type": "call"}, {"api_name": "numba.ir_utils", "line_number": 339, "usage_type": "name"}, {"api_name": "numba.analysis.dead_branch_prune", "line_number": 348, "usage_type": "call"}, {"api_name": "numba.ir_utils.get_definition", "line_number": 355, "usage_type": "call"}, {"api_name": "numba.ir_utils", "line_number": 355, "usage_type": "name"}, {"api_name": "numba.ir.Const", "line_number": 356, "usage_type": "attribute"}, {"api_name": "numba.ir", "line_number": 356, "usage_type": "name"}, {"api_name": "numba.types.NoneType", "line_number": 359, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 359, "usage_type": "name"}, {"api_name": "numba.types.IntegerLiteral", "line_number": 360, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 360, "usage_type": "name"}, {"api_name": "numba.jit", "line_number": 366, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 374, "usage_type": "call"}, 
{"api_name": "numpy.arange", "line_number": 375, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 376, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 377, "usage_type": "call"}]}
+{"seq_id": "3788646314", "text": "from skimage import io\nimport numpy as np\nimport os, sys\nfrom matplotlib import pyplot as plt\nfrom scipy.linalg import norm\nfrom scipy.ndimage import rotate\n\n\ndef load_processed_images(base_path):\n ic = io.imread_collection(base_path+'*.gif')\n return ic\ndef extract_grey(image):\n return image == 127\ndef extract_white(image):\n return image == 191\ndef extract_csf(image):\n return image == 63\n\ndef process(base_path):\n ic = load_processed_images(base_path)\n features = []\n for i, img in enumerate(ic):\n g_img = extract_grey(img)\n w_img = extract_white(img)\n c_img = extract_csf(img)\n g_mean = mean(g_img)\n w_mean = mean(w_img)\n c_mean = mean(c_img)\n g_s_ud = symmetry_upTdown(g_img)\n g_s_lt = symmetry_lefTright(g_img)\n w_s_ud = symmetry_upTdown(w_img)\n w_s_lt = symmetry_lefTright(w_img)\n c_s_ud = symmetry_upTdown(c_img)\n c_s_lt = symmetry_lefTright(c_img)\n features.append([g_mean, w_mean, c_mean, g_s_ud, g_s_lt, w_s_lt, w_s_ud, c_s_lt, c_s_ud])\n\n return features\n\ndef mean(image):\n return np.mean(image)\n\ndef plot_preprocessed_img(image):\n f, axes = plt.subplots(nrows=2, ncols=3, figsize=(10, 5))\n grey_img = extract_grey(image)\n white_img = extract_white(image)\n csf_img = extract_csf(image)\n axes[0,0].imshow(grey_img, cmap=\"gray\")\n axes[0,0].axis('off')\n axes[0,1].imshow(white_img, cmap=\"gray\")\n axes[0,1].axis('off')\n axes[0,2].imshow(csf_img, cmap=\"gray\")\n axes[0,2].axis('off')\n plt.show()\n\ndef symmetry_upTdown(img):\n r_img = rotate(img, 90, reshape=False)\n fliplrimg = np.fliplr(r_img)\n arry = img - fliplrimg\n #flatten the array & sum the values\n return norm(arry.ravel(), 0)\n\ndef symmetry_lefTright(image):\n fliplrimg = np.fliplr(image)\n arry = image - fliplrimg\n return norm(arry.ravel(), 0)\n\n#dementia\ndementia_base_path = sys.path[0]+ os.sep + 'dementia'+os.sep +'subj' + os.sep\nnon_dementia_base_path = sys.path[0]+ os.sep + 'Non_dementia'+os.sep +'subj' + os.sep\nprint(process(dementia_base_path))\nprint(process(non_dementia_base_path))\n", "repo_name": "Niteshsuresh/MRI", "sub_path": "data/raw/extract.py", "file_name": "extract.py", "file_ext": "py", "file_size_in_byte": 2135, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "skimage.io.imread_collection", "line_number": 10, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 10, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "scipy.ndimage.rotate", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 57, "usage_type": "call"}, {"api_name": "scipy.linalg.norm", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 63, "usage_type": "call"}, {"api_name": "scipy.linalg.norm", "line_number": 65, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 68, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 69, "usage_type": "attribute"}]}
+{"seq_id": "33156178002", "text": "from skimage import io\nimport numpy as np\n\ndef read_3d_points(rgbpath, depthpath, Rtilt, K):\n \"\"\"\n a python implementation of SUNRGBDTOOL read3dPoints.m\n Rtilt: (3, 3)\n K: (3, 3)\n \"\"\"\n depth_vis = io.imread(depthpath)\n valid = (depth_vis != 0).ravel()\n\n depth = (depth_vis >> 3) | (depth_vis << 13)\n depth = depth.astype(np.float32) / 1000\n depth[depthpath > 8] = 8\n height = depth.shape[0]\n width = depth.shape[1]\n\n cx, cy = K[0, 2], K[1, 2]\n fx, fy = K[0, 0], K[1, 1]\n\n x, y = np.meshgrid(np.arange(width), np.arange(height))\n x3 = (x - cx) * depth / fx\n y3 = (y - cy) * depth / fy\n z3 = depth\n\n points = np.stack([x3.ravel(), z3.ravel(), -y3.ravel()], 1)\n points = points[valid]\n\n rgb = io.imread(rgbpath)\n rgb = rgb.astype(np.float32).reshape(-1, 3)[valid] / 255\n points = np.matmul(Rtilt, points.T).T\n\n points_rgb = np.concatenate([points, rgb], 1)\n\n return points_rgb\n", "repo_name": "Gorilla-Lab-SCUT/frustum-convnet", "sub_path": "sunrgbd/read_3d_points.py", "file_name": "read_3d_points.py", "file_ext": "py", "file_size_in_byte": 949, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 236, "dataset": "github-code", "pt": "76", "api": [{"api_name": "skimage.io.imread", "line_number": 10, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 10, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.meshgrid", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 27, "usage_type": "call"}, {"api_name": "skimage.io.imread", "line_number": 30, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.matmul", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 34, "usage_type": "call"}]}
+{"seq_id": "15259895836", "text": "# -*- coding: utf-8 -*-\n\"\"\"Calculate the annual energy production\n\"\"\"\nimport re\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom _loads_utils import load_stats\n\n\nstat_dir = 'C:/Users/Mathieu Pellé/Documents/GitHub/LAC_RotorDesign/Loads/res_turb/' # results directory with statistics files !!! END WITH SLASH !!!\nv_ref = 37.5 # reference wind speed based on wind class (I=50, 2=42.5, 3=37.5)\ni_wind = 15 # channel number with the wind speed\ni_pow = 100 # channel number for electrical power\n\n# dictionary to map .sel index to ylabel for the plot\nylabels = {4: 'Pitch angle [deg]',\n 10: 'Rotor speed [rad/s]',\n 13: 'Thrust [kN]',\n 15: 'Wind speed [m/s]',\n 17: 'Tower-base FA [kNm]',\n 18: 'Tower-base SS [kNm]',\n 20: 'Yaw-bearing pitch [kNm]',\n 22: 'Yaw-bearing roll [kNm]',\n 25: 'Shaft torsion [kNm]',\n 26: 'OoP BRM [kNm]',\n 27: 'IP BRM [kNm]',\n 70: 'Generator torque [Nm]',\n 100: 'Electrical power [W]',\n 108: 'Tower clearance [m]'}\n\n# load the mean statistics for wind speed and power\nstat_file = stat_dir + 'stats_mean.txt'\nfiles, idxs, data = load_stats(stat_file)\nwind = data[:, idxs == i_wind].squeeze()\npower = data[:, idxs == i_pow].squeeze()\n\n# extract the set wind speed value from the filename using regex tricks\nwsps = [float(re.findall('[0-9]{1,2}[.][0-9]', f)[0]) for f in files]\n\n# calculate the average power in a wind speed bin\nwsp_unique = np.unique(wsps)\ndelta_v = wsp_unique[1] - wsp_unique[0]\npows = np.empty(wsp_unique.size) # mean power at each wind speed\nfor j, vj in enumerate(wsp_unique):\n # isolate the dels from each simulation\n wsp_pows = power[np.isclose(wsps, vj)] # powers for that wind speed\n p = 1/wsp_pows.size # probability of each simulation in the wsp bin is equal 1/nsim\n pows[j] = sum(p * wsp_pows) # this is actually just a mean, really\n\n\n# calculate the annual energy production\nv_ave = 0.2*v_ref # v_ave=0.2*vref\nhrs_per_year = 365 * 24 # hours per year\ndvj = wsp_unique[1] - wsp_unique[0] # assuming even bins!\nprobs = (np.exp(-np.pi*((wsp_unique - dvj/2) / (2*v_ave))**2)\n - np.exp(-np.pi*((wsp_unique + dvj/2) / (2*v_ave))**2)) # prob of wind in each bin\naep = hrs_per_year * sum(probs * pows) # sum weighted power and convert to AEP (Wh)\nprint(f'The AEP is: {aep/(1e6):.1f} MWh')\n\n# make the plot\nfig, ax1 = plt.subplots(1, 1, num=1, figsize=(7, 3), clear=True)\nplt.plot(wind, power, 'o', zorder=10) # 10-min means\nplt.plot(wsp_unique, pows, 'or', mec='0.2', ms=7, alpha=0.9, zorder=11) # bin-average\nplt.grid('on')\nplt.xlabel('Wind speed [m/s]')\nplt.ylabel(ylabels[i_pow])\n# bar plot with probabilities\nax2 = ax1.twinx() # new axis with shared x\nax2.bar(wsp_unique, probs, facecolor='0.8', edgecolor='0.4', alpha=0.7, zorder=-2)\nax2.set_yticks([])\nax1.set_zorder(1) # magic to put bars under power\nax1.patch.set_visible(False) # prevent ax1 from hiding ax2\nplt.tight_layout()\n", "repo_name": "GonMazzini/LAC_RotorDesign", "sub_path": "Loads/calculate_aep.py", "file_name": "calculate_aep.py", "file_ext": "py", "file_size_in_byte": 3000, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "_loads_utils.load_stats", "line_number": 33, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.isclose", 
"line_number": 46, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 56, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}]}
+{"seq_id": "27147366220", "text": "import logging\nimport json\nimport sys\nimport os\nimport time , hmac, hashlib, base64\nfrom websocket import create_connection\nfrom requests.auth import AuthBase\nimport requests\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\nfrom core.libraries.pub_sub import Publisher, Subscriber\nfrom core.libraries.websocket_thread import ConnectThread\nfrom core.libraries.channels import channels as ch\nfrom core.libraries.gdax_auth import Authentication\n\n\nclass GDAXWebSocketClient():\n\n def __init__(self,data,channels=[]):\n self.url = \"wss://ws-feed.gdax.com\"\n\n self.params = json.dumps(data)\n self.pub = Publisher(channels)\n self.stop = False\n\n for c in channels:\n channel = Subscriber(c)\n channel.pub = Publisher(events=['incoming_data'])\n self.pub.register(c, channel)\n\n def on_message(self, message):\n if message['type'] == \"match\":\n self.pub.dispatch(message['product_id'], message)\n\n def on_open(self):\n print(\"--Subscribed--\")\n\n def on_error(self, err):\n\n self.stop = True\n print('{}'.format(err))\n\n def connect(self):\n self.on_open()\n self.ws = create_connection(self.url)\n self.ws.send(self.params)\n self.listen()\n\n def listen(self):\n while not self.stop:\n try:\n if int(time.time()%30) == 0:\n self.ws.ping(\"alive\")\n\n msg = json.loads(self.ws.recv())\n self.on_message(msg)\n except ValueError as e:\n self.on_error(e)\n except Exception as e:\n self.on_error(e)\n\ndef main():\n pairs=[\"ETH-USD\",\"BTC-USD\"]\n\n API_KEY = \"\"\n API_SECRET = \"\"\n API_PASS = \"\"\n\n auth=Authentication(API_KEY, API_SECRET, API_PASS)\n request = {\"type\": \"subscribe\",\n \"channels\": [{\"name\": \"full\", \"product_ids\": pairs }]}\n res = requests.get('https://api.gdax.com/'+ 'accounts', auth=auth)\n #test page example\n print(res.json())\n\n\n ws=GDAXWebSocketClient(request,pairs)\n ws.connect()\nif __name__==\"__main__\":\n main()\n", "repo_name": "pauljherrera/cryptotrader", "sub_path": "core/GDAX_data_feeder.py", "file_name": "GDAX_data_feeder.py", "file_ext": "py", "file_size_in_byte": 2151, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "76", "api": [{"api_name": "sys.path.insert", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 9, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 21, "usage_type": "call"}, {"api_name": "core.libraries.pub_sub.Publisher", "line_number": 22, "usage_type": "call"}, {"api_name": "core.libraries.pub_sub.Subscriber", "line_number": 26, "usage_type": "call"}, {"api_name": "core.libraries.pub_sub.Publisher", "line_number": 27, "usage_type": "call"}, {"api_name": "websocket.create_connection", "line_number": 44, "usage_type": "call"}, {"api_name": "time.time", "line_number": 51, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 54, "usage_type": "call"}, {"api_name": "core.libraries.gdax_auth.Authentication", "line_number": 68, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 71, "usage_type": "call"}]}
+{"seq_id": "127324968", "text": "from datetime import datetime, timezone\nfrom typing import Any, Dict, Set\n\nimport pytest\nfrom boto3.dynamodb.types import TypeDeserializer, TypeSerializer\nfrom fideslang.models import Dataset\n\nfrom fides.api.graph.config import (\n CollectionAddress,\n FieldAddress,\n FieldPath,\n ObjectField,\n ScalarField,\n)\nfrom fides.api.graph.graph import DatasetGraph, Edge\nfrom fides.api.graph.traversal import Traversal, TraversalNode\nfrom fides.api.models.datasetconfig import convert_dataset_to_graph\nfrom fides.api.models.privacy_request import PrivacyRequest\nfrom fides.api.schemas.masking.masking_configuration import HashMaskingConfiguration\nfrom fides.api.schemas.masking.masking_secrets import MaskingSecretCache, SecretType\nfrom fides.api.service.connectors.query_config import (\n DynamoDBQueryConfig,\n MongoQueryConfig,\n SQLQueryConfig,\n)\nfrom fides.api.service.masking.strategy.masking_strategy_hash import HashMaskingStrategy\nfrom fides.api.util.data_category import DataCategory\n\nfrom ...task.traversal_data import combined_mongo_postgresql_graph, integration_db_graph\nfrom ...test_helpers.cache_secrets_helper import cache_secret, clear_cache_secrets\n\n# customers -> address, order\n# orders -> address, payment card\n# payment card -> address\n# address\n\n# identities: customer.email\n\ngraph: DatasetGraph = integration_db_graph(\"postgres_example\")\ntraversal = Traversal(graph, {\"email\": \"X\"})\ntraversal_nodes: Dict[CollectionAddress, TraversalNode] = traversal.traversal_node_dict\npayment_card_node = traversal_nodes[\n CollectionAddress(\"postgres_example\", \"payment_card\")\n]\nuser_node = traversal_nodes[CollectionAddress(\"postgres_example\", \"payment_card\")]\nprivacy_request = PrivacyRequest(id=\"234544\")\n\n\nclass TestSQLQueryConfig:\n def test_extract_query_components(self):\n def found_query_keys(node: TraversalNode, values: Dict[str, Any]) -> Set[str]:\n return set(node.typed_filtered_values(values).keys())\n\n config = SQLQueryConfig(payment_card_node)\n assert config.field_map().keys() == {\n FieldPath(s)\n for s in [\n \"id\",\n \"name\",\n \"ccn\",\n \"customer_id\",\n \"billing_address_id\",\n ]\n }\n assert payment_card_node.query_field_paths == {\n FieldPath(\"id\"),\n FieldPath(\"customer_id\"),\n }\n\n # values exist for all query keys\n assert found_query_keys(\n payment_card_node,\n {\n \"id\": [\"A\"],\n \"customer_id\": [\"V\"],\n \"ignore_me\": [\"X\"],\n },\n ) == {\"id\", \"customer_id\"}\n # with no values OR an empty set, these are omitted\n assert found_query_keys(\n payment_card_node,\n {\n \"id\": [\"A\"],\n \"customer_id\": [],\n \"ignore_me\": [\"X\"],\n },\n ) == {\"id\"}\n assert found_query_keys(\n payment_card_node, {\"id\": [\"A\"], \"ignore_me\": [\"X\"]}\n ) == {\"id\"}\n assert found_query_keys(payment_card_node, {\"ignore_me\": [\"X\"]}) == set()\n assert found_query_keys(payment_card_node, {}) == set()\n\n def test_typed_filtered_values(self):\n assert payment_card_node.typed_filtered_values(\n {\n \"id\": [\"A\"],\n \"customer_id\": [\"V\"],\n \"ignore_me\": [\"X\"],\n }\n ) == {\"id\": [\"A\"], \"customer_id\": [\"V\"]}\n\n assert payment_card_node.typed_filtered_values(\n {\n \"id\": [\"A\"],\n \"customer_id\": [],\n \"ignore_me\": [\"X\"],\n }\n ) == {\"id\": [\"A\"]}\n\n assert payment_card_node.typed_filtered_values(\n {\"id\": [\"A\"], \"ignore_me\": [\"X\"]}\n ) == {\"id\": [\"A\"]}\n\n assert payment_card_node.typed_filtered_values(\n {\"id\": [], \"customer_id\": [\"V\"]}\n ) == 
{\"customer_id\": [\"V\"]}\n # test for type casting: id has type \"string\":\n assert payment_card_node.typed_filtered_values({\"id\": [1]}) == {\"id\": [\"1\"]}\n assert payment_card_node.typed_filtered_values({\"id\": [1, 2]}) == {\n \"id\": [\"1\", \"2\"]\n }\n\n def test_generated_sql_query(self):\n \"\"\"Test that the generated query depends on the input set\"\"\"\n assert (\n str(\n SQLQueryConfig(payment_card_node).generate_query(\n {\n \"id\": [\"A\"],\n \"customer_id\": [\"V\"],\n \"ignore_me\": [\"X\"],\n }\n )\n )\n == \"SELECT id,name,ccn,customer_id,billing_address_id FROM payment_card WHERE id = :id OR customer_id = :customer_id\"\n )\n\n assert (\n str(\n SQLQueryConfig(payment_card_node).generate_query(\n {\n \"id\": [\"A\"],\n \"customer_id\": [],\n \"ignore_me\": [\"X\"],\n }\n )\n )\n == \"SELECT id,name,ccn,customer_id,billing_address_id FROM payment_card WHERE id = :id\"\n )\n\n assert (\n str(\n SQLQueryConfig(payment_card_node).generate_query(\n {\"id\": [\"A\"], \"ignore_me\": [\"X\"]}\n )\n )\n == \"SELECT id,name,ccn,customer_id,billing_address_id FROM payment_card WHERE id = :id\"\n )\n\n assert (\n str(\n SQLQueryConfig(payment_card_node).generate_query(\n {\"id\": [], \"customer_id\": [\"V\"]}\n )\n )\n == \"SELECT id,name,ccn,customer_id,billing_address_id FROM payment_card WHERE customer_id = :customer_id\"\n )\n\n def test_update_rule_target_fields(\n self, erasure_policy, example_datasets, connection_config\n ):\n dataset = Dataset(**example_datasets[0])\n graph = convert_dataset_to_graph(dataset, connection_config.key)\n dataset_graph = DatasetGraph(*[graph])\n traversal = Traversal(dataset_graph, {\"email\": \"customer-1@example.com\"})\n\n customer_node = traversal.traversal_node_dict[\n CollectionAddress(\"postgres_example_test_dataset\", \"customer\")\n ]\n\n rule = erasure_policy.rules[0]\n config = SQLQueryConfig(customer_node)\n assert config.build_rule_target_field_paths(erasure_policy) == {\n rule: [FieldPath(\"name\")]\n }\n\n # Make target more broad\n target = rule.targets[0]\n target.data_category = DataCategory(\"user\").value\n assert config.build_rule_target_field_paths(erasure_policy) == {\n rule: [FieldPath(\"email\"), FieldPath(\"id\"), FieldPath(\"name\")]\n }\n\n # Check different collection\n address_node = traversal.traversal_node_dict[\n CollectionAddress(\"postgres_example_test_dataset\", \"address\")\n ]\n config = SQLQueryConfig(address_node)\n assert config.build_rule_target_field_paths(erasure_policy) == {\n rule: [FieldPath(x) for x in [\"city\", \"house\", \"street\", \"state\", \"zip\"]]\n }\n\n def test_generate_update_stmt_one_field(\n self, erasure_policy, example_datasets, connection_config\n ):\n dataset = Dataset(**example_datasets[0])\n graph = convert_dataset_to_graph(dataset, connection_config.key)\n dataset_graph = DatasetGraph(*[graph])\n traversal = Traversal(dataset_graph, {\"email\": \"customer-1@example.com\"})\n\n customer_node = traversal.traversal_node_dict[\n CollectionAddress(\"postgres_example_test_dataset\", \"customer\")\n ]\n\n config = SQLQueryConfig(customer_node)\n row = {\n \"email\": \"customer-1@example.com\",\n \"name\": \"John Customer\",\n \"address_id\": 1,\n \"id\": 1,\n }\n text_clause = config.generate_update_stmt(row, erasure_policy, privacy_request)\n assert text_clause.text == \"\"\"UPDATE customer SET name = :name WHERE id = :id\"\"\"\n assert text_clause._bindparams[\"name\"].key == \"name\"\n assert text_clause._bindparams[\"name\"].value is None # Null masking strategy\n\n def 
test_generate_update_stmt_length_truncation(\n self,\n erasure_policy_string_rewrite_long,\n example_datasets,\n connection_config,\n ):\n dataset = Dataset(**example_datasets[0])\n graph = convert_dataset_to_graph(dataset, connection_config.key)\n dataset_graph = DatasetGraph(*[graph])\n traversal = Traversal(dataset_graph, {\"email\": \"customer-1@example.com\"})\n\n customer_node = traversal.traversal_node_dict[\n CollectionAddress(\"postgres_example_test_dataset\", \"customer\")\n ]\n\n config = SQLQueryConfig(customer_node)\n row = {\n \"email\": \"customer-1@example.com\",\n \"name\": \"John Customer\",\n \"address_id\": 1,\n \"id\": 1,\n }\n\n text_clause = config.generate_update_stmt(\n row, erasure_policy_string_rewrite_long, privacy_request\n )\n assert text_clause.text == \"\"\"UPDATE customer SET name = :name WHERE id = :id\"\"\"\n assert text_clause._bindparams[\"name\"].key == \"name\"\n # length truncation on name field\n assert (\n text_clause._bindparams[\"name\"].value\n == \"some rewrite value that is very long and\"\n )\n\n def test_generate_update_stmt_multiple_fields_same_rule(\n self, erasure_policy, example_datasets, connection_config\n ):\n dataset = Dataset(**example_datasets[0])\n graph = convert_dataset_to_graph(dataset, connection_config.key)\n dataset_graph = DatasetGraph(*[graph])\n traversal = Traversal(dataset_graph, {\"email\": \"customer-1@example.com\"})\n\n customer_node = traversal.traversal_node_dict[\n CollectionAddress(\"postgres_example_test_dataset\", \"customer\")\n ]\n\n config = SQLQueryConfig(customer_node)\n row = {\n \"email\": \"customer-1@example.com\",\n \"name\": \"John Customer\",\n \"address_id\": 1,\n \"id\": 1,\n }\n\n # Make target more broad\n rule = erasure_policy.rules[0]\n target = rule.targets[0]\n target.data_category = DataCategory(\"user\").value\n\n # Update rule masking strategy\n rule.masking_strategy = {\n \"strategy\": \"hash\",\n \"configuration\": {\"algorithm\": \"SHA-512\"},\n }\n # cache secrets for hash strategy\n secret = MaskingSecretCache[str](\n secret=\"adobo\",\n masking_strategy=HashMaskingStrategy.name,\n secret_type=SecretType.salt,\n )\n cache_secret(secret, privacy_request.id)\n\n text_clause = config.generate_update_stmt(row, erasure_policy, privacy_request)\n assert (\n text_clause.text\n == \"UPDATE customer SET email = :email,name = :name WHERE id = :id\"\n )\n assert text_clause._bindparams[\"name\"].key == \"name\"\n # since length is set to 40 in dataset.yml, we expect only first 40 chars of masked val\n assert (\n text_clause._bindparams[\"name\"].value\n == HashMaskingStrategy(HashMaskingConfiguration(algorithm=\"SHA-512\")).mask(\n [\"John Customer\"], request_id=privacy_request.id\n )[0][0:40]\n )\n assert (\n text_clause._bindparams[\"email\"].value\n == HashMaskingStrategy(HashMaskingConfiguration(algorithm=\"SHA-512\")).mask(\n [\"customer-1@example.com\"], request_id=privacy_request.id\n )[0]\n )\n clear_cache_secrets(privacy_request.id)\n\n def test_generate_update_stmts_from_multiple_rules(\n self, erasure_policy_two_rules, example_datasets, connection_config\n ):\n dataset = Dataset(**example_datasets[0])\n graph = convert_dataset_to_graph(dataset, connection_config.key)\n dataset_graph = DatasetGraph(*[graph])\n traversal = Traversal(dataset_graph, {\"email\": \"customer-1@example.com\"})\n row = {\n \"email\": \"customer-1@example.com\",\n \"name\": \"John Customer\",\n \"address_id\": 1,\n \"id\": 1,\n }\n\n customer_node = traversal.traversal_node_dict[\n 
CollectionAddress(\"postgres_example_test_dataset\", \"customer\")\n ]\n\n config = SQLQueryConfig(customer_node)\n\n text_clause = config.generate_update_stmt(\n row, erasure_policy_two_rules, privacy_request\n )\n\n assert (\n text_clause.text\n == \"UPDATE customer SET email = :email,name = :name WHERE id = :id\"\n )\n # Two different masking strategies used for name and email\n assert text_clause._bindparams[\"name\"].value is None # Null masking strategy\n assert (\n text_clause._bindparams[\"email\"].value == \"*****\"\n ) # String rewrite masking strategy\n\n\nclass TestMongoQueryConfig:\n @pytest.fixture(scope=\"function\")\n def combined_traversal(self, connection_config, integration_mongodb_config):\n mongo_dataset, postgres_dataset = combined_mongo_postgresql_graph(\n connection_config, integration_mongodb_config\n )\n combined_dataset_graph = DatasetGraph(mongo_dataset, postgres_dataset)\n combined_traversal = Traversal(\n combined_dataset_graph,\n {\"email\": \"customer-1@examplecom\"},\n )\n return combined_traversal\n\n @pytest.fixture(scope=\"function\")\n def customer_details_node(self, combined_traversal):\n return combined_traversal.traversal_node_dict[\n CollectionAddress(\"mongo_test\", \"customer_details\")\n ]\n\n @pytest.fixture(scope=\"function\")\n def customer_feedback_node(self, combined_traversal):\n return combined_traversal.traversal_node_dict[\n CollectionAddress(\"mongo_test\", \"customer_feedback\")\n ]\n\n def test_field_map_nested(self, customer_details_node):\n config = MongoQueryConfig(customer_details_node)\n\n field_map = config.field_map()\n assert isinstance(field_map[FieldPath(\"workplace_info\")], ObjectField)\n assert isinstance(\n field_map[FieldPath(\"workplace_info\", \"employer\")], ScalarField\n )\n\n def test_primary_key_field_paths(self, customer_details_node):\n config = MongoQueryConfig(customer_details_node)\n assert list(config.primary_key_field_paths.keys()) == [FieldPath(\"_id\")]\n assert isinstance(config.primary_key_field_paths[FieldPath(\"_id\")], ScalarField)\n\n def test_nested_query_field_paths(\n self, customer_details_node, customer_feedback_node\n ):\n assert customer_details_node.query_field_paths == {\n FieldPath(\"customer_id\"),\n }\n\n assert customer_feedback_node.query_field_paths == {\n FieldPath(\"customer_information\", \"email\")\n }\n\n def test_nested_typed_filtered_values(self, customer_feedback_node):\n \"\"\"Identity data is located on a nested object\"\"\"\n input_data = {\n \"customer_information.email\": [\"test@example.com\"],\n \"ignore\": [\"abcde\"],\n }\n assert customer_feedback_node.typed_filtered_values(input_data) == {\n \"customer_information.email\": [\"test@example.com\"]\n }\n\n def test_generate_query(\n self,\n policy,\n example_datasets,\n integration_mongodb_config,\n connection_config,\n ):\n dataset_postgres = Dataset(**example_datasets[0])\n graph = convert_dataset_to_graph(dataset_postgres, connection_config.key)\n dataset_mongo = Dataset(**example_datasets[1])\n mongo_graph = convert_dataset_to_graph(\n dataset_mongo, integration_mongodb_config.key\n )\n dataset_graph = DatasetGraph(*[graph, mongo_graph])\n traversal = Traversal(dataset_graph, {\"email\": \"customer-1@example.com\"})\n # Edge created from Root to nested customer_information.email field\n assert (\n Edge(\n FieldAddress(\"__ROOT__\", \"__ROOT__\", \"email\"),\n FieldAddress(\n \"mongo_test\", \"customer_feedback\", \"customer_information\", \"email\"\n ),\n )\n in traversal.edges\n )\n\n # Test query on nested 
field\n customer_feedback = traversal.traversal_node_dict[\n CollectionAddress(\"mongo_test\", \"customer_feedback\")\n ]\n config = MongoQueryConfig(customer_feedback)\n input_data = {\"customer_information.email\": [\"customer-1@example.com\"]}\n # Tuple of query, projection - Searching for documents with nested\n # customer_information.email = customer-1@example.com\n assert config.generate_query(input_data, policy) == (\n {\"customer_information.email\": \"customer-1@example.com\"},\n {\"_id\": 1, \"customer_information\": 1, \"date\": 1, \"message\": 1, \"rating\": 1},\n )\n\n # Test query nested data\n customer_details = traversal.traversal_node_dict[\n CollectionAddress(\"mongo_test\", \"customer_details\")\n ]\n config = MongoQueryConfig(customer_details)\n input_data = {\"customer_id\": [1]}\n # Tuple of query, projection - Projection is specifying fields at the top-level. Nested data will\n # be filtered later.\n assert config.generate_query(input_data, policy) == (\n {\"customer_id\": 1},\n {\n \"_id\": 1,\n \"birthday\": 1,\n \"comments\": 1,\n \"customer_id\": 1,\n \"emergency_contacts\": 1,\n \"children\": 1,\n \"gender\": 1,\n \"travel_identifiers\": 1,\n \"workplace_info\": 1,\n },\n )\n\n def test_generate_update_stmt_multiple_fields(\n self,\n erasure_policy,\n example_datasets,\n integration_mongodb_config,\n connection_config,\n ):\n dataset_postgres = Dataset(**example_datasets[0])\n graph = convert_dataset_to_graph(dataset_postgres, connection_config.key)\n dataset_mongo = Dataset(**example_datasets[1])\n mongo_graph = convert_dataset_to_graph(\n dataset_mongo, integration_mongodb_config.key\n )\n dataset_graph = DatasetGraph(*[graph, mongo_graph])\n\n traversal = Traversal(dataset_graph, {\"email\": \"customer-1@example.com\"})\n customer_details = traversal.traversal_node_dict[\n CollectionAddress(\"mongo_test\", \"customer_details\")\n ]\n config = MongoQueryConfig(customer_details)\n row = {\n \"birthday\": \"1988-01-10\",\n \"gender\": \"male\",\n \"customer_id\": 1,\n \"_id\": 1,\n \"workplace_info\": {\n \"position\": \"Chief Strategist\",\n \"direct_reports\": [\"Robbie Margo\", \"Sully Hunter\"],\n },\n \"emergency_contacts\": [{\"name\": \"June Customer\", \"phone\": \"444-444-4444\"}],\n \"children\": [\"Christopher Customer\", \"Courtney Customer\"],\n }\n\n # Make target more broad\n rule = erasure_policy.rules[0]\n target = rule.targets[0]\n target.data_category = DataCategory(\"user\").value\n\n mongo_statement = config.generate_update_stmt(\n row, erasure_policy, privacy_request\n )\n\n expected_result_0 = {\"_id\": 1}\n expected_result_1 = {\n \"$set\": {\n \"birthday\": None,\n \"children.0\": None,\n \"children.1\": None,\n \"customer_id\": None,\n \"emergency_contacts.0.name\": None,\n \"workplace_info.direct_reports.0\": None, # Both direct reports are masked.\n \"workplace_info.direct_reports.1\": None,\n \"emergency_contacts.0.phone\": None,\n \"gender\": None,\n \"workplace_info.position\": None,\n }\n }\n\n print(mongo_statement[1])\n print(expected_result_1)\n assert mongo_statement[0] == expected_result_0\n assert mongo_statement[1] == expected_result_1\n\n def test_generate_update_stmt_multiple_rules(\n self,\n erasure_policy_two_rules,\n example_datasets,\n integration_mongodb_config,\n connection_config,\n ):\n dataset_postgres = Dataset(**example_datasets[0])\n graph = convert_dataset_to_graph(dataset_postgres, connection_config.key)\n dataset_mongo = Dataset(**example_datasets[1])\n mongo_graph = convert_dataset_to_graph(\n 
dataset_mongo, integration_mongodb_config.key\n )\n dataset_graph = DatasetGraph(*[graph, mongo_graph])\n\n traversal = Traversal(dataset_graph, {\"email\": \"customer-1@example.com\"})\n\n customer_details = traversal.traversal_node_dict[\n CollectionAddress(\"mongo_test\", \"customer_details\")\n ]\n\n config = MongoQueryConfig(customer_details)\n row = {\n \"birthday\": \"1988-01-10\",\n \"gender\": \"male\",\n \"customer_id\": 1,\n \"_id\": 1,\n \"workplace_info\": {\n \"position\": \"Chief Strategist\",\n \"direct_reports\": [\"Robbie Margo\", \"Sully Hunter\"],\n },\n \"emergency_contacts\": [{\"name\": \"June Customer\", \"phone\": \"444-444-4444\"}],\n \"children\": [\"Christopher Customer\", \"Courtney Customer\"],\n }\n\n rule = erasure_policy_two_rules.rules[0]\n rule.masking_strategy = {\n \"strategy\": \"hash\",\n \"configuration\": {\"algorithm\": \"SHA-512\"},\n }\n target = rule.targets[0]\n target.data_category = DataCategory(\"user.demographic.date_of_birth\").value\n\n rule_two = erasure_policy_two_rules.rules[1]\n rule_two.masking_strategy = {\n \"strategy\": \"random_string_rewrite\",\n \"configuration\": {\"length\": 30},\n }\n target = rule_two.targets[0]\n target.data_category = DataCategory(\"user.demographic.gender\").value\n # cache secrets for hash strategy\n secret = MaskingSecretCache[str](\n secret=\"adobo\",\n masking_strategy=HashMaskingStrategy.name,\n secret_type=SecretType.salt,\n )\n cache_secret(secret, privacy_request.id)\n\n mongo_statement = config.generate_update_stmt(\n row, erasure_policy_two_rules, privacy_request\n )\n assert mongo_statement[0] == {\"_id\": 1}\n assert len(mongo_statement[1][\"$set\"][\"gender\"]) == 30\n assert (\n mongo_statement[1][\"$set\"][\"birthday\"]\n == HashMaskingStrategy(HashMaskingConfiguration(algorithm=\"SHA-512\")).mask(\n [\"1988-01-10\"], request_id=privacy_request.id\n )[0]\n )\n\n\nclass TestDynamoDBQueryConfig:\n @pytest.fixture(scope=\"function\")\n def identity(self):\n identity = {\"email\": \"customer-test_uuid@example.com\"}\n return identity\n\n @pytest.fixture(scope=\"function\")\n def dataset_graph(self, integration_dynamodb_config, example_datasets):\n dataset = Dataset(**example_datasets[11])\n dataset_graph = convert_dataset_to_graph(\n dataset, integration_dynamodb_config.key\n )\n\n return DatasetGraph(*[dataset_graph])\n\n @pytest.fixture(scope=\"function\")\n def traversal(self, identity, dataset_graph):\n dynamo_traversal = Traversal(dataset_graph, identity)\n return dynamo_traversal\n\n @pytest.fixture(scope=\"function\")\n def customer_node(self, traversal):\n return traversal.traversal_node_dict[\n CollectionAddress(\"dynamodb_example_test_dataset\", \"customer\")\n ]\n\n @pytest.fixture(scope=\"function\")\n def customer_identifier_node(self, traversal):\n return traversal.traversal_node_dict[\n CollectionAddress(\"dynamodb_example_test_dataset\", \"customer_identifier\")\n ]\n\n @pytest.fixture(scope=\"function\")\n def customer_row(self):\n row = {\n \"customer_email\": {\"S\": \"customer-1@example.com\"},\n \"name\": {\"S\": \"John Customer\"},\n \"address_id\": {\"L\": [{\"S\": \"1\"}, {\"S\": \"2\"}]},\n \"personal_info\": {\"M\": {\"gender\": {\"S\": \"male\"}, \"age\": {\"S\": \"99\"}}},\n \"id\": {\"S\": \"1\"},\n }\n return row\n\n @pytest.fixture(scope=\"function\")\n def deserialized_customer_row(self, customer_row):\n deserialized_customer_row = {}\n deserializer = TypeDeserializer()\n for key, value in customer_row.items():\n deserialized_customer_row[key] = 
deserializer.deserialize(value)\n return deserialized_customer_row\n\n @pytest.fixture(scope=\"function\")\n def customer_identifier_row(self):\n row = {\n \"customer_id\": {\"S\": \"customer-1@example.com\"},\n \"email\": {\"S\": \"customer-1@example.com\"},\n \"name\": {\"S\": \"Customer 1\"},\n \"created\": {\"S\": datetime.now(timezone.utc).isoformat()},\n }\n return row\n\n @pytest.fixture(scope=\"function\")\n def deserialized_customer_identifier_row(self, customer_identifier_row):\n deserialized_customer_identifier_row = {}\n deserializer = TypeDeserializer()\n for key, value in customer_identifier_row.items():\n deserialized_customer_identifier_row[key] = deserializer.deserialize(value)\n return deserialized_customer_identifier_row\n\n def test_get_query_param_formatting_single_key(\n self,\n resources_dict,\n customer_node,\n ) -> None:\n input_data = {\n \"fidesops_grouped_inputs\": [],\n \"email\": [\"customer-test_uuid@example.com\"],\n }\n attribute_definitions = [{\"AttributeName\": \"email\", \"AttributeType\": \"S\"}]\n query_config = DynamoDBQueryConfig(customer_node, attribute_definitions)\n item = query_config.generate_query(\n input_data=input_data, policy=resources_dict[\"policy\"]\n )\n assert item[\"ExpressionAttributeValues\"] == {\n \":value\": {\"S\": \"customer-test_uuid@example.com\"}\n }\n assert item[\"KeyConditionExpression\"] == \"email = :value\"\n\n def test_put_query_param_formatting_single_key(\n self,\n erasure_policy,\n customer_node,\n deserialized_customer_row,\n ) -> None:\n input_data = {\n \"fidesops_grouped_inputs\": [],\n \"email\": [\"customer-test_uuid@example.com\"],\n }\n attribute_definitions = [{\"AttributeName\": \"email\", \"AttributeType\": \"S\"}]\n query_config = DynamoDBQueryConfig(customer_node, attribute_definitions)\n update_item = query_config.generate_update_stmt(\n deserialized_customer_row, erasure_policy, privacy_request\n )\n\n assert update_item == {\n \"customer_email\": {\"S\": \"customer-1@example.com\"},\n \"name\": {\"NULL\": True},\n \"address_id\": {\"S\": \"1\"},\n \"address_id\": {\"L\": [{\"S\": \"1\"}, {\"S\": \"2\"}]},\n \"personal_info\": {\"M\": {\"gender\": {\"S\": \"male\"}, \"age\": {\"S\": \"99\"}}},\n \"id\": {\"S\": \"1\"},\n }\n", "repo_name": "ethyca/fides", "sub_path": "tests/ops/service/connectors/test_queryconfig.py", "file_name": "test_queryconfig.py", "file_ext": "py", "file_size_in_byte": 27186, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 302, "dataset": "github-code", "pt": "76", "api": [{"api_name": "fides.api.graph.graph.DatasetGraph", "line_number": 39, "usage_type": "name"}, {"api_name": "task.traversal_data.integration_db_graph", "line_number": 39, "usage_type": "call"}, {"api_name": "fides.api.graph.traversal.Traversal", "line_number": 40, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 41, "usage_type": "name"}, {"api_name": "fides.api.graph.config.CollectionAddress", "line_number": 41, "usage_type": "name"}, {"api_name": "fides.api.graph.traversal.TraversalNode", "line_number": 41, "usage_type": "name"}, {"api_name": "fides.api.graph.config.CollectionAddress", "line_number": 43, "usage_type": "call"}, {"api_name": "fides.api.graph.config.CollectionAddress", "line_number": 45, "usage_type": "call"}, {"api_name": "fides.api.models.privacy_request.PrivacyRequest", "line_number": 46, "usage_type": "call"}, {"api_name": "fides.api.graph.traversal.TraversalNode", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 51, 
"usage_type": "name"}, {"api_name": "typing.Any", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 51, "usage_type": "name"}, {"api_name": "fides.api.service.connectors.query_config.SQLQueryConfig", "line_number": 54, "usage_type": "call"}, {"api_name": "fides.api.graph.config.FieldPath", "line_number": 56, "usage_type": "call"}, {"api_name": "fides.api.graph.config.FieldPath", "line_number": 66, "usage_type": "call"}, {"api_name": "fides.api.graph.config.FieldPath", "line_number": 67, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.SQLQueryConfig", "line_number": 128, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.SQLQueryConfig", "line_number": 141, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.SQLQueryConfig", "line_number": 154, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.SQLQueryConfig", "line_number": 163, "usage_type": "call"}, {"api_name": "fideslang.models.Dataset", "line_number": 173, "usage_type": "call"}, {"api_name": "fides.api.models.datasetconfig.convert_dataset_to_graph", "line_number": 174, "usage_type": "call"}, {"api_name": "fides.api.graph.graph.DatasetGraph", "line_number": 175, "usage_type": "call"}, {"api_name": "fides.api.graph.traversal.Traversal", "line_number": 176, "usage_type": "call"}, {"api_name": "fides.api.graph.config.CollectionAddress", "line_number": 179, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.SQLQueryConfig", "line_number": 183, "usage_type": "call"}, {"api_name": "fides.api.graph.config.FieldPath", "line_number": 185, "usage_type": "call"}, {"api_name": "fides.api.util.data_category.DataCategory", "line_number": 190, "usage_type": "call"}, {"api_name": "fides.api.graph.config.FieldPath", "line_number": 192, "usage_type": "call"}, {"api_name": "fides.api.graph.config.CollectionAddress", "line_number": 197, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.SQLQueryConfig", "line_number": 199, "usage_type": "call"}, {"api_name": "fides.api.graph.config.FieldPath", "line_number": 201, "usage_type": "call"}, {"api_name": "fideslang.models.Dataset", "line_number": 207, "usage_type": "call"}, {"api_name": "fides.api.models.datasetconfig.convert_dataset_to_graph", "line_number": 208, "usage_type": "call"}, {"api_name": "fides.api.graph.graph.DatasetGraph", "line_number": 209, "usage_type": "call"}, {"api_name": "fides.api.graph.traversal.Traversal", "line_number": 210, "usage_type": "call"}, {"api_name": "fides.api.graph.config.CollectionAddress", "line_number": 213, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.SQLQueryConfig", "line_number": 216, "usage_type": "call"}, {"api_name": "fideslang.models.Dataset", "line_number": 234, "usage_type": "call"}, {"api_name": "fides.api.models.datasetconfig.convert_dataset_to_graph", "line_number": 235, "usage_type": "call"}, {"api_name": "fides.api.graph.graph.DatasetGraph", "line_number": 236, "usage_type": "call"}, {"api_name": "fides.api.graph.traversal.Traversal", "line_number": 237, "usage_type": "call"}, {"api_name": "fides.api.graph.config.CollectionAddress", "line_number": 240, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.SQLQueryConfig", "line_number": 243, "usage_type": "call"}, {"api_name": "fideslang.models.Dataset", "line_number": 265, "usage_type": "call"}, {"api_name": 
"fides.api.models.datasetconfig.convert_dataset_to_graph", "line_number": 266, "usage_type": "call"}, {"api_name": "fides.api.graph.graph.DatasetGraph", "line_number": 267, "usage_type": "call"}, {"api_name": "fides.api.graph.traversal.Traversal", "line_number": 268, "usage_type": "call"}, {"api_name": "fides.api.graph.config.CollectionAddress", "line_number": 271, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.SQLQueryConfig", "line_number": 274, "usage_type": "call"}, {"api_name": "fides.api.util.data_category.DataCategory", "line_number": 285, "usage_type": "call"}, {"api_name": "fides.api.schemas.masking.masking_secrets.MaskingSecretCache", "line_number": 293, "usage_type": "name"}, {"api_name": "fides.api.service.masking.strategy.masking_strategy_hash.HashMaskingStrategy.name", "line_number": 295, "usage_type": "attribute"}, {"api_name": "fides.api.service.masking.strategy.masking_strategy_hash.HashMaskingStrategy", "line_number": 295, "usage_type": "name"}, {"api_name": "fides.api.schemas.masking.masking_secrets.SecretType.salt", "line_number": 296, "usage_type": "attribute"}, {"api_name": "fides.api.schemas.masking.masking_secrets.SecretType", "line_number": 296, "usage_type": "name"}, {"api_name": "test_helpers.cache_secrets_helper.cache_secret", "line_number": 298, "usage_type": "call"}, {"api_name": "fides.api.service.masking.strategy.masking_strategy_hash.HashMaskingStrategy", "line_number": 309, "usage_type": "call"}, {"api_name": "fides.api.schemas.masking.masking_configuration.HashMaskingConfiguration", "line_number": 309, "usage_type": "call"}, {"api_name": "fides.api.service.masking.strategy.masking_strategy_hash.HashMaskingStrategy", "line_number": 315, "usage_type": "call"}, {"api_name": "fides.api.schemas.masking.masking_configuration.HashMaskingConfiguration", "line_number": 315, "usage_type": "call"}, {"api_name": "test_helpers.cache_secrets_helper.clear_cache_secrets", "line_number": 319, "usage_type": "call"}, {"api_name": "fideslang.models.Dataset", "line_number": 324, "usage_type": "call"}, {"api_name": "fides.api.models.datasetconfig.convert_dataset_to_graph", "line_number": 325, "usage_type": "call"}, {"api_name": "fides.api.graph.graph.DatasetGraph", "line_number": 326, "usage_type": "call"}, {"api_name": "fides.api.graph.traversal.Traversal", "line_number": 327, "usage_type": "call"}, {"api_name": "fides.api.graph.config.CollectionAddress", "line_number": 336, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.SQLQueryConfig", "line_number": 339, "usage_type": "call"}, {"api_name": "task.traversal_data.combined_mongo_postgresql_graph", "line_number": 359, "usage_type": "call"}, {"api_name": "fides.api.graph.graph.DatasetGraph", "line_number": 362, "usage_type": "call"}, {"api_name": "fides.api.graph.traversal.Traversal", "line_number": 363, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 357, "usage_type": "call"}, {"api_name": "fides.api.graph.config.CollectionAddress", "line_number": 372, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 369, "usage_type": "call"}, {"api_name": "fides.api.graph.config.CollectionAddress", "line_number": 378, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 375, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.MongoQueryConfig", "line_number": 382, "usage_type": "call"}, {"api_name": "fides.api.graph.config.ObjectField", "line_number": 385, "usage_type": "argument"}, {"api_name": 
"fides.api.graph.config.FieldPath", "line_number": 385, "usage_type": "call"}, {"api_name": "fides.api.graph.config.ScalarField", "line_number": 387, "usage_type": "argument"}, {"api_name": "fides.api.graph.config.FieldPath", "line_number": 387, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.MongoQueryConfig", "line_number": 391, "usage_type": "call"}, {"api_name": "fides.api.graph.config.FieldPath", "line_number": 392, "usage_type": "call"}, {"api_name": "fides.api.graph.config.ScalarField", "line_number": 393, "usage_type": "argument"}, {"api_name": "fides.api.graph.config.FieldPath", "line_number": 393, "usage_type": "call"}, {"api_name": "fides.api.graph.config.FieldPath", "line_number": 399, "usage_type": "call"}, {"api_name": "fides.api.graph.config.FieldPath", "line_number": 403, "usage_type": "call"}, {"api_name": "fideslang.models.Dataset", "line_number": 423, "usage_type": "call"}, {"api_name": "fides.api.models.datasetconfig.convert_dataset_to_graph", "line_number": 424, "usage_type": "call"}, {"api_name": "fideslang.models.Dataset", "line_number": 425, "usage_type": "call"}, {"api_name": "fides.api.models.datasetconfig.convert_dataset_to_graph", "line_number": 426, "usage_type": "call"}, {"api_name": "fides.api.graph.graph.DatasetGraph", "line_number": 429, "usage_type": "call"}, {"api_name": "fides.api.graph.traversal.Traversal", "line_number": 430, "usage_type": "call"}, {"api_name": "fides.api.graph.graph.Edge", "line_number": 433, "usage_type": "call"}, {"api_name": "fides.api.graph.config.FieldAddress", "line_number": 434, "usage_type": "call"}, {"api_name": "fides.api.graph.config.FieldAddress", "line_number": 435, "usage_type": "call"}, {"api_name": "fides.api.graph.config.CollectionAddress", "line_number": 444, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.MongoQueryConfig", "line_number": 446, "usage_type": "call"}, {"api_name": "fides.api.graph.config.CollectionAddress", "line_number": 457, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.MongoQueryConfig", "line_number": 459, "usage_type": "call"}, {"api_name": "fideslang.models.Dataset", "line_number": 485, "usage_type": "call"}, {"api_name": "fides.api.models.datasetconfig.convert_dataset_to_graph", "line_number": 486, "usage_type": "call"}, {"api_name": "fideslang.models.Dataset", "line_number": 487, "usage_type": "call"}, {"api_name": "fides.api.models.datasetconfig.convert_dataset_to_graph", "line_number": 488, "usage_type": "call"}, {"api_name": "fides.api.graph.graph.DatasetGraph", "line_number": 491, "usage_type": "call"}, {"api_name": "fides.api.graph.traversal.Traversal", "line_number": 493, "usage_type": "call"}, {"api_name": "fides.api.graph.config.CollectionAddress", "line_number": 495, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.MongoQueryConfig", "line_number": 497, "usage_type": "call"}, {"api_name": "fides.api.util.data_category.DataCategory", "line_number": 514, "usage_type": "call"}, {"api_name": "fideslang.models.Dataset", "line_number": 548, "usage_type": "call"}, {"api_name": "fides.api.models.datasetconfig.convert_dataset_to_graph", "line_number": 549, "usage_type": "call"}, {"api_name": "fideslang.models.Dataset", "line_number": 550, "usage_type": "call"}, {"api_name": "fides.api.models.datasetconfig.convert_dataset_to_graph", "line_number": 551, "usage_type": "call"}, {"api_name": "fides.api.graph.graph.DatasetGraph", "line_number": 554, "usage_type": "call"}, 
{"api_name": "fides.api.graph.traversal.Traversal", "line_number": 556, "usage_type": "call"}, {"api_name": "fides.api.graph.config.CollectionAddress", "line_number": 559, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.MongoQueryConfig", "line_number": 562, "usage_type": "call"}, {"api_name": "fides.api.util.data_category.DataCategory", "line_number": 582, "usage_type": "call"}, {"api_name": "fides.api.util.data_category.DataCategory", "line_number": 590, "usage_type": "call"}, {"api_name": "fides.api.schemas.masking.masking_secrets.MaskingSecretCache", "line_number": 592, "usage_type": "name"}, {"api_name": "fides.api.service.masking.strategy.masking_strategy_hash.HashMaskingStrategy.name", "line_number": 594, "usage_type": "attribute"}, {"api_name": "fides.api.service.masking.strategy.masking_strategy_hash.HashMaskingStrategy", "line_number": 594, "usage_type": "name"}, {"api_name": "fides.api.schemas.masking.masking_secrets.SecretType.salt", "line_number": 595, "usage_type": "attribute"}, {"api_name": "fides.api.schemas.masking.masking_secrets.SecretType", "line_number": 595, "usage_type": "name"}, {"api_name": "test_helpers.cache_secrets_helper.cache_secret", "line_number": 597, "usage_type": "call"}, {"api_name": "fides.api.service.masking.strategy.masking_strategy_hash.HashMaskingStrategy", "line_number": 606, "usage_type": "call"}, {"api_name": "fides.api.schemas.masking.masking_configuration.HashMaskingConfiguration", "line_number": 606, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 613, "usage_type": "call"}, {"api_name": "fideslang.models.Dataset", "line_number": 620, "usage_type": "call"}, {"api_name": "fides.api.models.datasetconfig.convert_dataset_to_graph", "line_number": 621, "usage_type": "call"}, {"api_name": "fides.api.graph.graph.DatasetGraph", "line_number": 625, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 618, "usage_type": "call"}, {"api_name": "fides.api.graph.traversal.Traversal", "line_number": 629, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 627, "usage_type": "call"}, {"api_name": "fides.api.graph.config.CollectionAddress", "line_number": 635, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 632, "usage_type": "call"}, {"api_name": "fides.api.graph.config.CollectionAddress", "line_number": 641, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 638, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 644, "usage_type": "call"}, {"api_name": "boto3.dynamodb.types.TypeDeserializer", "line_number": 658, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 655, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 669, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 669, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 669, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 669, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 663, "usage_type": "call"}, {"api_name": "boto3.dynamodb.types.TypeDeserializer", "line_number": 676, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 673, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.DynamoDBQueryConfig", "line_number": 691, "usage_type": "call"}, {"api_name": "fides.api.service.connectors.query_config.DynamoDBQueryConfig", "line_number": 711, "usage_type": "call"}]}
+{"seq_id": "18151051342", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 20 16:31:48 2018\r\n\r\n@author: James\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport itertools as it\r\nimport copy\r\n\r\nrfile = np.loadtxt(\"data3.data\")\r\n#rfile = np.loadtxt(\"data_mate.txt\")\r\n#print(rfile.shape)\r\n\r\n#rfile = [[1,1,1],[1,1,2],[1,1,3],[2,2,2],[2,2,4],[3,3,2],[3,3,5],[4,4,1],[4,4,2],[4,4,4],[5,5,1],[5,5,5],[6,6,2],[6,6,5],[7,7,1],[7,7,5],[8,8,1],[8,8,2],[8,8,3],[8,8,5],[9,9,1],[9,9,2],[9,9,5]]\r\n#rfile = np.array(rfile, dtype=float)\r\n#%%\r\nclass FPTree(object):\r\n def __init__(self):\r\n self.data = None\r\n self.parent = None\r\n self.children = []\r\n self.prefix = []\r\n self.count = 1\r\n\r\nclass TableNode(object):\r\n def __init__(self):\r\n self.key = None\r\n self.value = None\r\n self.nodes = []\r\n \r\nn_rfile = rfile.shape[0]\r\nn_trans = 1000\r\nn_item = 1000\r\nminsup = 80\r\nminconf = 0.5\r\nlevel = []\r\n\r\n#data preprocessing\r\nstart =1\r\ntrans_list = []\r\ntrans_tmp = []\r\ncount_arr = np.zeros(n_item+1)\r\n\r\nfor i in range(n_rfile):\r\n if(start == rfile[i,0]):\r\n trans_tmp.append(rfile[i,2])\r\n else:\r\n if trans_tmp:\r\n trans_list.append(list(trans_tmp))\r\n trans_tmp = []\r\n start = start+1\r\n trans_tmp.append(rfile[i,2])\r\n count_arr[int(rfile[i,2])] = count_arr[int(rfile[i,2])] +1\r\n\r\ntrans_list.append(list(trans_tmp))\r\n\r\n\r\nfp_list = copy.deepcopy(trans_list)\r\n\r\n#check support, if lower, remove\r\n### considered to be function\r\ndef powerset(iterable, item):\r\n \"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)\"\r\n s = list(iterable)\r\n return it.chain.from_iterable(it.combinations(s, r) for r in range(1, item+1))\r\n\r\n\r\n#%%\r\ndict_l = []\r\ndict_r = []\r\nfor i in range(n_item+1):\r\n if count_arr[i] >= minsup:\r\n dict_l.append(i)\r\n dict_r.append(count_arr[i])\r\n\r\ntable = dict(zip(dict_l, dict_r))\r\ntable = sorted(table.items(), key=lambda x: x[1], reverse=True)\r\n\r\n#%%\r\n\r\nmap_table = copy.deepcopy(table)\r\n#to negative, in order to rid of non frequent set\r\nindexneg = 0\r\nfor i in range(len(table)):\r\n indexneg = indexneg-1\r\n a = []\r\n c = []\r\n a.append(indexneg)\r\n c.append(map_table[i][0])\r\n map_table[i] = tuple(c) + tuple(a)\r\n\r\nfor i in range(n_trans):\r\n for j in range(len(fp_list[i])):\r\n for check in range(len(table)):\r\n if(fp_list[i][j] == map_table[check][0]):\r\n fp_list[i][j] = map_table[check][1]\r\n break\r\n#%%\r\nfor i in range(n_trans):\r\n fp_list[i] = [item for item in fp_list[i] if item < 0]\r\n fp_list[i].sort(reverse=True)\r\n \r\nfor i in range(n_trans):\r\n for j in range(len(fp_list[i])):\r\n for check in range(len(table)):\r\n if(fp_list[i][j] == map_table[check][1]):\r\n fp_list[i][j] = map_table[check][0]\r\n break\r\n\r\n\r\n#%%\r\n#BUILD TREE LA\r\n\r\n#bulid table\r\nroot_table = TableNode()\r\nfor i in range(len(table)):\r\n newnode = TableNode()\r\n newnode.key = table[i][0]\r\n newnode.value = table[i][1]\r\n root_table.nodes.append(newnode)\r\n\r\n#%%\r\n#build fp tree\r\n#aaa = root_table.nodes\r\ndebuglist = []\r\n\r\nroot_fp = FPTree()\r\nfor i in range(len(fp_list)):\r\n nodept = root_fp\r\n debuglist.append(\"G\")\r\n for items in fp_list[i]:\r\n found = False\r\n for nodes_inchild in nodept.children:\r\n if nodes_inchild.data == items:\r\n nodes_inchild.count = nodes_inchild.count + 1\r\n found = True\r\n nodept = nodes_inchild\r\n debuglist.append(nodept.data)\r\n break\r\n if found == False:\r\n 
newnode = FPTree()\r\n newnode.data = items\r\n newnode.parent = nodept\r\n list1 = nodept.prefix\r\n list2 = [nodept.data]\r\n newnode.prefix = list(list1+list2)\r\n nodept.children.append(newnode)\r\n debuglist.append(newnode.data)\r\n for tablept in root_table.nodes:\r\n if newnode.data == tablept.key:\r\n tablept.nodes.append(newnode)\r\n break\r\n nodept = newnode\r\n \r\nprint(\"BUILD FINISH\") \r\n#%%\r\naaa = root_fp.children #debug use\r\nttt = root_table.nodes\r\n#%%\r\ndef recursive_find(firstind, tablept, level, levelnum, realprefix):\r\n tmpset = []\r\n pdset = []\r\n pdcount = []\r\n weight = []\r\n addset = []\r\n reverseprefix = []\r\n runnext = False\r\n\r\n for leaffunc in tablept.nodes:\r\n single_count = leaffunc.count\r\n runnext = False\r\n reverseprefix = list(reversed(realprefix))\r\n reverseprefix = reverseprefix[1:]\r\n for findind in reverseprefix:\r\n while leaffunc.data != findind:\r\n leaffunc = leaffunc.parent\r\n if leaffunc.data == None:\r\n runnext = True\r\n break\r\n if runnext == True:\r\n break\r\n if runnext == True:\r\n continue\r\n \r\n prefix = leaffunc.prefix\r\n prefix = list(filter(None.__ne__, prefix))\r\n tmpset.append(prefix)\r\n weight.append(single_count)\r\n \r\n pdset = np.zeros((len(tmpset), n_item+1))\r\n for i in range(len(tmpset)):\r\n pdset[i, tmpset[i]] = pdset[i, tmpset[i]] + weight[i]\r\n pdset = pd.DataFrame(pdset)\r\n pdcount = pdset.sum()\r\n pdcount = pdcount[pdcount >= minsup]\r\n\r\n if pdcount.empty:\r\n return level\r\n \r\n #recursive starts here\r\n for ind in pdcount.index:\r\n addset = list(realprefix)\r\n level[levelnum].append(list(list([ind])+addset))\r\n level = recursive_find(ind, tablept, level, levelnum+1, list(list([ind])+addset))\r\n \r\n return level\r\n\r\n#%%\r\n#generate frequent dataset\r\nroot_table.nodes = list(reversed(root_table.nodes))\r\nttt = root_table.nodes #debug\r\n\r\n\r\ntmpset = []\r\npdset = []\r\npdcount = []\r\nweight = []\r\nfor i in range(15):\r\n level.append([])\r\n\r\n\r\nfor x in table:\r\n level[0].append(x[0])\r\n \r\n \r\nfor tablept in root_table.nodes:\r\n tmpset = []\r\n pdcount = []\r\n weight = []\r\n for leaf in tablept.nodes:\r\n single_count = leaf.count\r\n prefix = leaf.prefix\r\n prefix = list(filter(None.__ne__, prefix))\r\n tmpset.append(prefix)\r\n weight.append(single_count)\r\n \r\n pdset = np.zeros((len(tmpset), n_item+1))\r\n for i in range(len(tmpset)):\r\n pdset[i, tmpset[i]] = pdset[i, tmpset[i]] + weight[i]\r\n pdset = pd.DataFrame(pdset)\r\n pdcount = pdset.sum()\r\n\r\n pdcount = pdcount[pdcount >= minsup]\r\n #recursive starts here?\r\n for ind in pdcount.index:\r\n addset = [tablept.key]\r\n level[1].append(list(list([ind])+addset))\r\n level = recursive_find(ind, tablept, level, 2, list(list([ind])+addset))\r\n \r\n \r\n\r\n#%%\r\nimport csv\r\n\r\n\r\nwith open('sup80_0.csv','w') as out:\r\n csv_out=csv.writer(out)\r\n csv_out.writerow(level[0])\r\nwith open('sup80_1.csv','w') as out:\r\n csv_out=csv.writer(out)\r\n for row in level[1]:\r\n csv_out.writerow(row)\r\nwith open('sup80_2.csv','w') as out:\r\n csv_out=csv.writer(out)\r\n for row in level[2]:\r\n csv_out.writerow(row) \r\n#%%\r\nlastlevel = 0\r\ndef search_conf(left, landr):\r\n countl = 0\r\n countall = 0\r\n for j in range(len(trans_list)):\r\n if set(left).issubset(trans_list[j]):\r\n countl = countl +1\r\n if set(landr).issubset(trans_list[j]):\r\n countall = countall +1\r\n\r\n confi = countall/countl\r\n return confi\r\n \r\nresult = []\r\n\r\nfor i in range(15):\r\n if 
len(level[i])==0:\r\n lastlevel = i\r\n break\r\n \r\n#calculate confidence\r\n\r\nfor i in reversed(range(1,lastlevel)):\r\n for items in level[i]:\r\n newlist = list(set(items))\r\n combin_items = list(it.chain.from_iterable(it.combinations(newlist, r) for r in range(1,i+1)))\r\n for left in combin_items:\r\n confi = search_conf(left, newlist)\r\n print(left, newlist, confi)\r\n if confi >= minconf:\r\n right = tuple(set(newlist) - set(left))\r\n result.append([left, \"->\", right, confi])\r\n \r\nwith open('result800.5.csv','w') as out:\r\n csv_out=csv.writer(out)\r\n for row in result:\r\n csv_out.writerow(row)\r\n", "repo_name": "C14036227/DM_project1", "sub_path": "fpgrowth.py", "file_name": "fpgrowth.py", "file_ext": "py", "file_size_in_byte": 8325, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "numpy.loadtxt", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 45, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 61, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 68, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 68, "usage_type": "attribute"}, {"api_name": "itertools.combinations", "line_number": 68, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 193, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 240, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 243, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 260, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 263, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 267, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 296, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 296, "usage_type": "attribute"}, {"api_name": "itertools.combinations", "line_number": 296, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 305, "usage_type": "call"}]}
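search_conf() in the FP-growth script scans the raw transaction list twice, once for the antecedent and once for the whole rule, then divides the counts. The same arithmetic on a five-transaction toy set, using Python's subset operator (note the original would divide by zero if the antecedent never occurred):

transactions = [{1, 2, 3}, {1, 2}, {2, 5}, {1, 2, 5}, {1, 5}]

def support(itemset):
    return sum(itemset <= t for t in transactions)  # transactions containing itemset

left, rule = {1}, {1, 2}
conf = support(rule) / support(left)
print(support(left), support(rule), conf)  # 4 3 0.75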
+{"seq_id": "70790141687", "text": "from flask import Flask, render_template, request, redirect, url_for, flash, Response, session, abort\nfrom flask_mysqldb import MySQL\nfrom flask_wtf.csrf import CSRFProtect # Para el token de protección\nfrom flask_login import LoginManager, login_user, logout_user, login_required, current_user\nfrom flask_wtf import FlaskForm\nfrom wtforms import *\nfrom wtforms.validators import *\n#libreria para crear random en StringAleatorio\nfrom random import sample\nimport openpyxl\nfrom werkzeug.utils import secure_filename\nimport cv2\nimport datetime, time\nimport os\nimport numpy as np\nfrom threading import Thread\nimport mediapipe as mp\nimport pandas as pd\nimport pickle\n\nfrom base64 import b64encode\n\n# Modelos\nfrom models.ModelUser import ModelUser\n\n# Entidades\nfrom models.entities.User import User\n\nglobal capture,rec_frame, grey, switch, neg, face, rec, out \ncapture=0\ngrey=0\nneg=0\nface=0\nswitch=1\nrec=0\n\nmp_drawing = mp.solutions.drawing_utils\nmp_hands = mp.solutions.hands\nmp_drawing_styles = mp.solutions.drawing_styles\n\napp = Flask(__name__)\n\napp.secret_key = 'B!1w8NAt1T^%kvhUI*S^'\ncsrf = CSRFProtect(app)\n\ndb = MySQL(app)\nlogin_manager_app = LoginManager(app)\n\napp.config['DEBUG'] = True\napp.config['MYSQL_HOST'] = 'lenguajeparatodoos.mariadb.database.azure.com'\napp.config['MYSQL_USER'] = 'administrador@lenguajeparatodoos'\napp.config['MYSQL_PASSWORD'] = 'Lenguaje123'\napp.config['MYSQL_DB'] = 'lenguajeparatodos'\napp.config['MYSQL_PORT'] = 3306\n\n# app.config['DEBUG'] = True\n# app.config['MYSQL_HOST'] = 'localhost'\n# app.config['MYSQL_USER'] = 'root'\n# app.config['MYSQL_PASSWORD'] = 'clave'\n# app.config['MYSQL_DB'] = 'lenguajeparatodos'\n# app.config['MYSQL_PORT'] = 3306\n\n \n\ndef image_processed(hand_img):\n #BGR to RGB\n img_rgb = cv2.cvtColor(hand_img, cv2.COLOR_BGR2RGB)\n\n img_flip = cv2.flip(img_rgb, 1)\n\n\n hands = mp_hands.Hands(static_image_mode=True,\n max_num_hands=1,\n min_detection_confidence=0.7) \n\n output = hands.process(img_flip)\n\n hands.close()\n\n try:\n data = output.multi_hand_landmarks[0]\n data = str(data)\n\n data = data.strip().split('\\n')\n\n garbage = ['landmark {', ' visibility: 0.0', ' presence: 0.0', '}']\n\n without_garbage = []\n\n for i in data:\n if i not in garbage:\n without_garbage.append(i)\n clean = []\n\n for i in without_garbage:\n i = i.strip()\n clean.append(i[2:])\n\n for i in range(0, len(clean)):\n clean[i] = float(clean[i])\n return (clean)\n except:\n return(np.zeros([1,63], dtype=int)[0])\n\ndef gen_frames(): # generate frame by frame from camera\n camera = cv2.VideoCapture(0)\n with open('model.pkl', 'rb') as f:\n svm = pickle.load(f)\n global out, capture,rec_frame\n while True:\n success, frame = camera.read() \n data = image_processed(frame)\n data = np.array(data)\n y_pred = svm.predict(data.reshape(-1, 63))\n print(y_pred)\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n org = (50, 100)\n\n fontScale = 3\n\n color = (255, 0, 0)\n\n thickness = 5\n\n frame = cv2.putText(frame, str(y_pred[0]),\n org, font, fontScale, color, thickness, cv2.LINE_AA)\n\n if success:\n if(capture):\n capture=0\n now = datetime.datetime.now()\n p = os.path.sep.join(['shots', \"shot{}.png\".format(str(now).replace(\":\",''))])\n cv2.imwrite(p, frame)\n try:\n ret, buffer = cv2.imencode('.jpg', frame)\n frame = buffer.tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n except Exception as e:\n pass\n else:\n pass\n\n\nclass 
LoginForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(), Length(min=1, max=30)])\n password = PasswordField('password', validators=[InputRequired(), Length(min=1, max=30)])\n\nclass LoginRegisterForm(FlaskForm):\n rut = StringField('rut', validators=[InputRequired()])\n username = StringField('username', validators=[InputRequired()])\n password = PasswordField('password', validators=[InputRequired()])\n comuna = StringField('comuna', validators=[InputRequired()])\n nombre = StringField('nombre', validators=[InputRequired()])\n apellido = StringField('apellidos', validators=[InputRequired()])\n tipoUsuario = StringField('tipoUsuario', validators=[InputRequired()])\n telefono = StringField('telefono', validators=[InputRequired()])\n direccion = StringField('direccion', validators=[InputRequired()])\n correo = StringField('correo', validators=[InputRequired()])\n tiposexo = StringField('tiposexo', validators=[InputRequired()])\n###\nclass UserForm(FlaskForm):\n nombre = StringField('Nombre', validators=[InputRequired(), Length(min=3, max=25)])\n apellidos = StringField('Apellido', validators=[InputRequired(), Length(min=3, max=25)])\n username = StringField('Username', validators=[InputRequired(), Length(min=3, max=25)])\n password = PasswordField('Contraseña', validators=[InputRequired(),])\n correo = EmailField('Correo', validators=[InputRequired()])\n imagen = FileField('Sube tu foto de perfil', validators=[InputRequired()])\n\n\n#Página tareas\n@app.route('/tareas', methods=['POST', 'GET'])\n@login_required\ndef tareas():\n if 'tipoUsuario' in session:\n tipoUsuario = session['tipoUsuario']\n if tipoUsuario == 1:\n abort(401)\n elif tipoUsuario == 2:\n if request.method == 'POST':\n contenido = request.form['content']\n creado_por = current_user.id\n print(contenido, creado_por)\n try:\n cur = db.connection.cursor()\n cur.execute(\"INSERT INTO tabla_tareas (contenido, creado_por) VALUES (%s, %s)\", [contenido, creado_por])\n db.connection.commit()\n return redirect('/tareas')\n except:\n return 'No se ha podido agregar la tarea'\n else:\n cur = db.connection.cursor()\n cur.execute('SELECT * FROM tabla_tareas WHERE creado_por = %s', (current_user.id,))\n # SELECT * FROM tabla_tareas WHERE creado_por = 19;\n # WHERE id = {}'.format(id)\n tasks = cur.fetchall()\n return render_template('tarea.html', tasks=tasks)\n elif tipoUsuario == 3:\n abort(401)\n \n\n#Eliminar Tareas\n@app.route('/delete/<int:id>', methods=['POST', 'GET'])\n@login_required\ndef eliminar_tarea(id):\n try:\n cur = db.connection.cursor()\n cur.execute(\"CALL EliminarTarea(%s)\", (id,))\n db.connection.commit()\n return redirect('/tareas')\n except:\n return 'No se ha podido eliminar la tarea'\n \n#Redirigir a página perfil\n@app.route('/perfil', methods=['GET'])\n@login_required\ndef perfil():\n return render_template('perfil.html')\n\n#Método para crear nombre aleatorio de la imagen\ndef stringAleatorio():\n #Generando string aleatorio\n string_aleatorio = \"0123456789abcdefghijklmnopqrstuvwxyz_\"\n longitud = 20\n secuencia = string_aleatorio.upper()\n resultado_aleatorio = sample(secuencia, longitud)\n string_aleatorio = \"\".join(resultado_aleatorio)\n return string_aleatorio\n\n#Actualizar Perfil unitario\n@app.route('/perfil/update/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef updatePerfil(id):\n cur = db.connection.cursor()\n form = UserForm()\n cur.execute('SELECT * FROM usuario WHERE id = %s', (id,))\n form_update = cur.fetchall()\n if request.method == 'POST' and form.validate_on_submit():\n nombre = form.nombre.data\n apellidos = form.apellidos.data\n username = form.username.data\n password = form.password.data\n correo = form.correo.data\n comuna = request.form['comuna']\n file = form.imagen.data\n basepath = os.path.dirname (__file__) #La ruta donde se encuentra el archivo actual\n filename = secure_filename(file.filename) #Nombre original del archivo\n #capturando extensión del archivo ejemplo: (.png, .jpg, .pdf ...etc)\n extension = os.path.splitext(filename)[1]\n nuevoNombreFile = stringAleatorio() + extension\n\n #Guardar Archivo en la carpeta img_perfiles que se encuentra en static\n upload_path = os.path.join (basepath, 'static/img_perfiles', nuevoNombreFile) \n file.save(upload_path)\n print('Registro: ' + nombre, apellidos, username, password, correo, comuna, nuevoNombreFile)\n try:\n cur.execute(\"\"\"\n UPDATE usuario\n SET nombre = %s,\n apellidos = %s,\n username = %s,\n password = %s,\n correo = %s,\n comuna = %s,\n imagen = %s\n WHERE id = %s\n \"\"\", (nombre, apellidos, username, password, correo, comuna, nuevoNombreFile, id))\n db.connection.commit()\n flash(\"Info actualizada correctamente\")\n return redirect(url_for('perfil'))\n except:\n flash(\"Error, datos no han podido ser modificados\")\n return render_template(\"perfil.html\", form=form, )\n else:\n return render_template('update.html', form=form, form_update=form_update[0], )\n\n@login_manager_app.user_loader\ndef load_user(id):\n return ModelUser.get_by_id(db, id)\n\n@app.route('/')\ndef pindex():\n return render_template('index.html')\n\n@app.route('/index')\ndef index():\n return render_template('index.html')\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if request.method == 'POST':\n user = User(0, 1, form.username.data,\n form.password.data, 4, 5, 6, 7, 8, 9, 10, 11, 12)\n logged_user = ModelUser.login(db, user)\n if logged_user != None:\n if logged_user.password:\n session['tipoUsuario'] = logged_user.tipoUsuario\n login_user(logged_user)\n\n if session['tipoUsuario'] == 1:\n return redirect(url_for('menuAdministrador'))\n elif session['tipoUsuario'] == 2:\n return redirect(url_for('menuDocente'))\n elif session['tipoUsuario'] == 3:\n return redirect(url_for('menuEstudiante'))\n else:\n flash(\"Clave Incorrecta...\")\n return render_template('auth/login.html', form=form)\n else:\n #print(\"Usuario no encontrado\")\n flash(\"Usuario no encontrado...\")\n return render_template('auth/login.html', form=form)\n else:\n return render_template('auth/login.html', form=form) \n\n@app.route('/aprende')\n@login_required\ndef aprende():\n if 'tipoUsuario' in session:\n tipoUsuario = session['tipoUsuario']\n if tipoUsuario == 1:\n return \" No tiene acceso a este modulo
\"\n elif tipoUsuario == 2:\n return render_template('aprende.html')\n elif tipoUsuario == 3:\n return render_template('aprende.html')\n # return render_template('error401', 400)\n\n#Base de la cámara\n@app.route('/video_feed')\ndef video_feed():\n return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')\n\n#Prender y Apagar cámara\n@app.route('/requests', methods=['POST','GET'])\ndef tasks():\n global switch, camera\n if request.method == 'POST':\n if request.form.get('stop') == 'Stop/Start':\n if(switch==1):\n switch=0\n camera.release()\n cv2.destroyAllWindows()\n #flash(\"Cámara Apagada...\")\n else:\n camera = cv2.VideoCapture(0)\n switch=1\n elif request.method == 'GET':\n return redirect(url_for('aprende'))\n return redirect(url_for('aprende'))\n\n#Página visualizar usuarios\n@app.route('/edit/')\n@login_required\ndef Edit():\n if 'tipoUsuario' in session:\n tipoUsuario = session['tipoUsuario']\n if tipoUsuario == 1:\n cur = db.connection.cursor()\n cur.execute(\n 'SELECT U.id, U.rut, U.nombre, U.apellidos, C.nombre, t.tipoUsuario FROM usuario U INNER JOIN comunas C ON U.comuna = C.codCom INNER JOIN tipousuario t ON U.tipousuario = t.codtipoUsuario;')\n data = cur.fetchall()\n print(type(data))\n return render_template('edit.html', usuarios=data)\n elif tipoUsuario == 2:\n return redirect(url_for('menuDocente'))\n elif tipoUsuario == 3:\n return redirect(url_for('menuEstudiante'))\n \n\n \n \n#Agregar Usuarios\n@app.route('/agregarUsuario', methods=['GET', 'POST'])\n@login_required\ndef agregarUsuario():\n\n if 'tipoUsuario' in session:\n tipoUsuario = session['tipoUsuario']\n if tipoUsuario == 1:\n cur = db.connection.cursor()\n cur.execute(\"SELECT * FROM tipousuario\")\n tipoUsuario = cur.fetchall()\n\n cur = db.connection.cursor()\n cur.execute(\"SELECT * FROM comunas\")\n comunas = cur.fetchall()\n\n cur = db.connection.cursor()\n cur.execute(\"SELECT * FROM tiposexo\")\n tipoSexo = cur.fetchall()\n\n if request.method == 'POST':\n rut = request.form['rut']\n username = request.form['username']\n password = request.form['password']\n comuna = request.form['comuna']\n nombre = request.form['nombre']\n apellidos = request.form['apellido']\n tipoUsuario = request.form['tipoUsuario']\n telefono = request.form['telefono']\n direccion = request.form['direccion']\n correo = request.form['correo']\n tipoDeSexo = request.form['tipoSexo']\n imagen = \"imagen.png\"\n print('Registro' + rut, username,\n password, comuna, nombre, apellidos, tipoUsuario, telefono, direccion, correo, tipoDeSexo, imagen)\n try:\n cur = db.connection.cursor()\n cur.execute(\"CALL AgregarUsuarioI(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\", (rut, username,\n password, comuna, nombre, apellidos, tipoUsuario, telefono, direccion, correo, tipoDeSexo, imagen))\n db.connection.commit()\n flash('Usuario agregado')\n return redirect('/agregarUsuario')\n except:\n return 'No se ha podido agregar el usuario'\n else: \n return render_template('agregarUsuario.html', tipoUsuario= tipoUsuario, comunas = comunas, tipoSexo = tipoSexo)\n elif tipoUsuario == 2:\n return redirect(url_for('menuDocente'))\n elif tipoUsuario == 3:\n return redirect(url_for('menuEstudiante'))\n\n#Agregar Usuario simple (Excel)\n@app.route('/agregarUsuarioFacil', methods=['GET', 'POST'])\n@login_required\ndef agregarUsuarioFacil():\n print('Registro: ')\n\n if request.method == 'POST':\n tipoDeUsuario = request.form['tipoUsuario']\n # Script para archivo\n file = request.files['archivo']\n # La ruta donde se encuentra el archivo 
actual\n basepath = os.path.dirname(__file__)\n # Nombre original del archivo\n filename = secure_filename(file.filename)\n\n # capturando extensión del archivo ejemplo: (.png, .jpg, .pdf ...etc)\n extension = os.path.splitext(filename)[1]\n print(extension)\n nuevoNombreFile = stringAleatorio() + extension\n print(nuevoNombreFile)\n\n upload_path = os.path.join(\n basepath, 'static/archivos', nuevoNombreFile)\n file.save(upload_path)\n\n df = pd.read_excel(upload_path)\n\n for row, datos in df.iterrows():\n rut = str(datos['Rut'])\n nombre = str(datos['Nombre'])\n apellidos = str(datos['Apellido'])\n username = str(datos['Username'])\n password = str(datos['Password'])\n comuna = int(datos['Comuna'])\n tipoUsuario = tipoDeUsuario\n telefono = str(datos['Telefono'])\n direccion = str(datos['Direccion'])\n correo = str(datos['Correo'])\n tipoSexo = int(datos['Sexo'])\n imagen = str(datos['Imagen'])\n cur = db.connection.cursor()\n cur.execute(\"CALL AgregarUsuarioI(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\", (rut, username,\n password, comuna, nombre, apellidos, tipoUsuario, telefono, direccion, correo, tipoSexo, imagen))\n db.connection.commit()\n print(\"Usuario agregado\")\n #os.remove(\"static/archivos/nuevoNombreFile\")\n flash(\"Usuario Agregado\")\n return redirect(url_for('agregarUsuario'),)\n\n#Borrar usuario\n@app.route('/delete/<int:id>')\n@login_required\ndef delete_user(id):\n # flash(id)\n cur = db.connection.cursor()\n cur.execute(\"SELECT tipoUsuario FROM usuario where id=(%s)\", (id,))\n data = cur.fetchall()\n tipoUsuario = data[0]\n\n if (tipoUsuario == (1,)):\n print(\"Administrador\")\n flash(\"Se Eliminó un administrador\")\n cur.execute(\"CALL EliminarUsuarioA_U(%s)\", (id,))\n db.connection.commit()\n elif (tipoUsuario == (2,)):\n flash(\"Se Eliminó un profesor\")\n cur.execute(\"CALL EliminarUsuarioP_U(%s)\", (id,))\n db.connection.commit()\n else:\n flash(\"Otro Usuario\")\n cur.execute(\"SELECT tipoUsuario FROM usuario WHERE id=(%s)\", (id,))\n db.connection.commit()\n return redirect(url_for('Edit'))\n\n\n@app.route('/menuAdministrador')\n@login_required\ndef menuAdministrador():\n if 'tipoUsuario' in session:\n tipoUsuario = session['tipoUsuario']\n if tipoUsuario == 1:\n return render_template('menuAdministrador.html')\n elif tipoUsuario == 2:\n return render_template('menuDocente.html')\n elif tipoUsuario == 3:\n return render_template('menuEstudiante.html')\n \n\n@app.route('/menuDocente')\n@login_required\ndef menuDocente():\n if 'tipoUsuario' in session:\n tipoUsuario = session['tipoUsuario']\n if tipoUsuario == 1:\n return render_template('menuAdministrador.html')\n elif tipoUsuario == 2:\n return render_template('menuDocente.html')\n elif tipoUsuario == 3:\n return render_template('menuEstudiante.html')\n\n@app.route('/menuEstudiante')\n@login_required\ndef menuEstudiante():\n if 'tipoUsuario' in session:\n tipoUsuario = session['tipoUsuario']\n if tipoUsuario == 1:\n return render_template('menuAdministrador.html')\n elif tipoUsuario == 2:\n return render_template('menuDocente.html')\n elif tipoUsuario == 3:\n return render_template('menuEstudiante.html')\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return render_template('index.html')\n\n@app.route('/faq')\ndef faq():\n return render_template('preguntas.html') \n\n@app.route('/nosotros')\ndef nosotros():\n return render_template('nosotros.html')\n\n\n#######\n#Registro básico (que esta en inicio)\n@app.route('/registro', methods = ['GET', 'POST'])\ndef registro():\n form=LoginRegisterForm()\n\n cur = db.connection.cursor()\n cur.execute(\"SELECT * FROM tipousuario WHERE codTipoUsuario > 1 AND codTipoUsuario <= 3\")\n tipoUsuario = cur.fetchall()\n\n cur = db.connection.cursor()\n cur.execute(\"SELECT * FROM comunas\")\n comunas = cur.fetchall()\n\n cur = db.connection.cursor()\n cur.execute(\"SELECT * FROM tiposexo\")\n tipoSexo = cur.fetchall()\n \n # if request.method == 'POST':\n if request.method == 'POST':\n rut = form.rut.data\n username = form.username.data\n password = form.password.data\n comuna = request.form['comuna']\n nombre = form.nombre.data\n apellidos = form.apellido.data\n tipoUsuario = request.form['tipoUsuario']\n telefono = form.telefono.data\n direccion = form.direccion.data\n correo = form.correo.data\n tipoSexo = request.form['tipoSexo']\n\n try:\n cur = db.connection.cursor()\n cur.execute(\"CALL AgregarUsuarioI(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\", (rut, username, password, comuna, nombre, apellidos, tipoUsuario, telefono, direccion, correo, tipoSexo, \"imagen.png\"))\n db.connection.commit()\n print(\"Usuario agregado\")\n flash(\"Usuario Agregado\")\n return redirect(url_for('registro'),)\n except:\n return ' no se ha podido agregar el usuario '\n return render_template('registro.html', form=form, tipoUsuario=tipoUsuario, comunas=comunas, tipoSexo=tipoSexo)\n\n#Envia Id de el usuario a actualizar\n@app.route('/edit/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef edit_select(id):\n cur = db.connection.cursor()\n cur.execute(\"SELECT * FROM tipousuario\")\n tipoUsuario = cur.fetchall()\n\n cur = db.connection.cursor()\n cur.execute(\"SELECT * FROM comunas\")\n comunas = cur.fetchall()\n\n cur = db.connection.cursor()\n cur.execute(\"SELECT * FROM tiposexo\")\n tipoSexo = cur.fetchall()\n\n cur = db.connection.cursor()\n cur.execute('SELECT * FROM usuario WHERE id = %s', (id,))\n data = cur.fetchall()\n \n if request.method == 'POST':\n # rut = request.form['rut']\n username = request.form['username']\n password = request.form['password']\n comuna = request.form['comuna']\n nombre = request.form['nombre']\n apellidos = request.form['apellido']\n tipoUsuario = request.form['tipoUsuario']\n telefono = request.form['telefono']\n direccion = request.form['direccion']\n correo = request.form['correo']\n tipoSexo = request.form['tipoSexo']\n print('Registro'+ username, password, comuna, nombre, apellidos, tipoUsuario,\n telefono, direccion, correo, tipoSexo)\n try:\n cur = db.connection.cursor()\n cur.execute(\"\"\"\n UPDATE usuario\n SET username = %s,\n password = %s,\n comuna = %s,\n nombre = %s,\n apellidos = %s,\n tipoUsuario = %s,\n telefono = %s,\n direccion = %s,\n correo = %s,\n tipoSexo = %s\n WHERE id = %s\n \"\"\", (username, password, comuna, nombre, apellidos, tipoUsuario, telefono, direccion, correo, tipoSexo, id))\n db.connection.commit()\n flash('Usuario actualizado con exito')\n return redirect(url_for('Edit'))\n except:\n flash('El usuario no se ha podido actualizar')\n return redirect(url_for('Edit'),)\n return render_template('edit-contact.html', usuarios = data[0], tipoUsuario=tipoUsuario, comunas=comunas, tipoSexo=tipoSexo)\n\n#Página de Actualizar Perfil\n# @app.route('/update/<int:id>', methods=['POST'])\n# @login_required\n# def update(id):\n# if request.method == 'POST':\n# username = request.form['username']\n# password = request.form['password']\n# comuna = request.form['comuna']\n# cur = db.connection.cursor()\n# cur.execute(\"\"\"\n# UPDATE usuario\n# SET username = %s,\n# password = %s,\n# comuna = %s\n# WHERE id = %s\n# \"\"\", (username, password, comuna, id))\n# db.connection.commit()\n# return redirect(url_for('Edit'))\n\n#Ruta Diccionario\n@app.route('/diccionario')\n@login_required\ndef diccionario():\n if 'tipoUsuario' in session:\n tipoUsuario = session['tipoUsuario']\n if tipoUsuario == 1:\n return render_template('menuAdministrador.html')\n elif tipoUsuario == 2:\n cur = db.connection.cursor()\n cur.execute(\n 'SELECT * FROM diccionario;')\n data = cur.fetchall()\n print(type(data))\n return render_template('diccionario.html', usuarios=data)\n elif tipoUsuario == 3:\n return render_template('menuEstudiante.html')\n \n\n#Ruta Agregar Diccionario\n@app.route('/agregarDiccionario', methods=['POST'])\n@login_required\ndef agregarDiccionario():\n if request.method == 'POST':\n gesto = request.form['gesto']\n definicion = request.form['definicion']\n fuente = request.form['fuente']\n frase = request.form['frase']\n file = request.files['imagen']\n # # La ruta donde se encuentra el archivo actual\n basepath = os.path.dirname(__file__)\n #Nombre original del archivo\n filename = secure_filename(file.filename)\n # capturando extensión del archivo ejemplo: (.png, .jpg, .pdf ...etc)\n extension = os.path.splitext(filename)[1]\n nuevoNombreFile = stringAleatorio() + extension\n\n # Guardar Archivo en la carpeta img_perfiles que se encuentra en static\n upload_path = os.path.join(basepath, 'static/img_diccionario', nuevoNombreFile)\n file.save(upload_path)\n usuario = request.form['submit']\n print(gesto,definicion,fuente, frase,nuevoNombreFile, usuario)\n cur = db.connection.cursor()\n cur.execute(\"CALL AgregarGestoI(%s,%s,%s,%s,%s,%s)\", (gesto,nuevoNombreFile,definicion,frase,fuente,usuario))\n db.connection.commit()\n #flash\n return redirect(url_for('diccionario'))\n else:\n #flash\n return redirect(url_for('diccionario'))\n\n#Visualizar Contenido\n@app.route('/diccionario/<int:id>', methods=['POST', 'GET'])\n@login_required\ndef show_content(id):\n cur = db.connection.cursor()\n cur.execute('SELECT d.idDiccionario,d.palabra,d.imagen,d.descripcion,d.frase,t.fuente,d.creadoPor FROM diccionario d JOIN tipoFuente t ON d.tipoFuente = t.idFuente WHERE idDiccionario= %s',(id,))\n data = cur.fetchall()\n return render_template('show_content.html', palabras=data[0])\n\n#Errores\n#Error 404, página no existente\n@app.errorhandler(404)\ndef page_not_found(err):\n return render_template(\"page_not_found.html\"), 404\n\n#Error 401, Unauthorized\n@app.errorhandler(401)\ndef unauthorized(err):\n return render_template(\"unauthorized.html\"), 401\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "repo_name": "SebastianOrtegaCL/lenguajeparatodospy", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 26424, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "mediapipe.solutions", "line_number": 37, "usage_type": "attribute"}, {"api_name": "mediapipe.solutions", "line_number": 38, "usage_type": "attribute"}, {"api_name": "mediapipe.solutions", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 41, "usage_type": "call"}, {"api_name": "flask_wtf.csrf.CSRFProtect", "line_number": 44, "usage_type": "call"}, {"api_name": "flask_mysqldb.MySQL", "line_number": 46, "usage_type": "call"}, {"api_name": "flask_login.LoginManager", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 
67, "usage_type": "attribute"}, {"api_name": "cv2.flip", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 103, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 106, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 113, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 116, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 126, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 127, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.path.sep.join", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 134, "usage_type": "call"}, {"api_name": "cv2.imencode", "line_number": 136, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 146, "usage_type": "name"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 150, "usage_type": "name"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 163, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 176, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 177, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 179, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 181, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 181, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 182, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 182, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 183, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 183, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 189, "usage_type": "call"}, {"api_name": "flask_login.current_user.id", "line_number": 194, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 194, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 198, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 200, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 174, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 211, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 205, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 219, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 217, "usage_type": "name"}, {"api_name": "random.sample", "line_number": 227, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 239, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 239, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 245, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 245, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 247, "usage_type": "call"}, {"api_name": "os.path", "line_number": 247, "usage_type": "attribute"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 248, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 250, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 250, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 254, "usage_type": "call"}, {"api_name": "os.path", "line_number": 254, "usage_type": "attribute"}, {"api_name": "flask.flash", "line_number": 270, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 271, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 271, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 273, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 274, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 276, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 233, "usage_type": "name"}, {"api_name": "models.ModelUser.ModelUser.get_by_id", "line_number": 280, "usage_type": "call"}, {"api_name": "models.ModelUser.ModelUser", "line_number": 280, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 284, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 288, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 293, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 293, "usage_type": "name"}, {"api_name": "models.entities.User.User", "line_number": 294, "usage_type": "call"}, {"api_name": "models.ModelUser.ModelUser.login", "line_number": 296, "usage_type": "call"}, {"api_name": "models.ModelUser.ModelUser", "line_number": 296, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 299, "usage_type": "name"}, {"api_name": "flask_login.login_user", "line_number": 300, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 302, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 303, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 303, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 304, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 305, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 305, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 306, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 307, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 307, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 309, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 310, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 313, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 314, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 316, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 321, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 322, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 326, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 328, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 319, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 334, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 340, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 340, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 341, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 341, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 341, 
"usage_type": "name"}, {"api_name": "cv2.destroyAllWindows", "line_number": 345, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 348, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 350, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 350, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 351, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 351, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 352, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 352, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 358, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 359, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 366, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 368, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 368, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 370, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 370, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 356, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 380, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 381, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 395, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 395, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 396, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 396, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 397, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 397, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 398, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 398, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 399, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 399, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 400, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 400, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 401, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 401, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 402, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 402, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 403, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 403, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 404, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 404, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 405, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 405, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 406, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 406, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 415, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 416, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 420, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 422, "usage_type": 
"call"}, {"api_name": "flask.url_for", "line_number": 422, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 424, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 424, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 377, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 432, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 432, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 433, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 433, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 435, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 435, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 437, "usage_type": "call"}, {"api_name": "os.path", "line_number": 437, "usage_type": "attribute"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 439, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 442, "usage_type": "call"}, {"api_name": "os.path", "line_number": 442, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 447, "usage_type": "call"}, {"api_name": "os.path", "line_number": 447, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 451, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 472, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 473, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 473, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 428, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 487, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 491, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 495, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 498, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 498, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 477, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 504, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 505, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 507, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 509, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 511, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 502, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 517, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 518, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 520, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 522, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 524, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 515, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 529, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 530, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 532, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 534, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 536, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 527, 
"usage_type": "name"}, {"api_name": "flask_login.logout_user", "line_number": 540, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 541, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 545, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 549, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 571, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 571, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 575, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 575, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 578, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 578, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 582, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 582, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 589, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 590, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 590, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 593, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 615, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 615, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 617, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 617, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 618, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 618, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 619, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 619, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 620, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 620, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 621, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 621, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 622, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 622, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 623, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 623, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 624, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 624, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 625, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 625, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 626, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 626, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 646, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 647, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 647, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 649, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 650, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 650, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 651, "usage_type": "call"}, {"api_name": "flask_login.login_required", 
"line_number": 597, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 676, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 677, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 679, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 686, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 688, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 674, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 695, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 695, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 696, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 696, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 697, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 697, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 698, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 698, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 699, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 699, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 700, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 700, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 702, "usage_type": "call"}, {"api_name": "os.path", "line_number": 702, "usage_type": "attribute"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 704, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 706, "usage_type": "call"}, {"api_name": "os.path", "line_number": 706, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 710, "usage_type": "call"}, {"api_name": "os.path", "line_number": 710, "usage_type": "attribute"}, {"api_name": "flask.request.form", "line_number": 712, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 712, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 718, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 718, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 721, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 721, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 693, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 730, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 725, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 736, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 741, "usage_type": "call"}]}