diff --git "a/1280.jsonl" "b/1280.jsonl" new file mode 100644--- /dev/null +++ "b/1280.jsonl" @@ -0,0 +1,744 @@ +{"seq_id": "8293019957", "text": "import seaborn as sns\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA, FastICA\nimport matplotlib as plt\n\ndef scatter_PCA(X, Y, components, alpha):\n \"\"\"\n Description: Creates PCA scatter plot where X is a numpy array of samples and Y contains the corresponding labels. \n\n Args:\n X -- numpy array (Numpy array of data to be plotted)\n Y -- numpy array (Numpy array with labels for data in X)\n components -- int (Number of features of data in X) \n alpha -- double (From [0.0 - 1.0], level of opacity for the dots on the plot)\n\n \"\"\"\n pca = PCA(n_components=components)\n pca_result = pca.fit_transform(X)\n scatter_plot(pca_result, Y, alpha)\n\n\ndef scatter_ICA(X, Y, components, alpha):\n \"\"\"\n Description: Creates ICA scatter plot where X is a numpy array of samples and Y contains the corresponding labels. \n\n Args:\n X -- numpy array (Numpy array of data to be plotted)\n Y -- numpy array (Numpy array with labels for data in X)\n components -- int (Number of features of data in X) \n alpha -- double (From [0.0 - 1.0], level of opacity for the dots on the plot)\n\n \"\"\"\n ica = FastICA(n_components=components)\n ica_result = ica.fit_transform(X)\n scatter_plot(ica_result, Y, alpha)\n\n\ndef scatter_TSNE(X, Y, components, alpha):\n \"\"\"\n Description: Creates t-SNE scatter plot where X is a numpy array of samples and Y contains the corresponding labels. \n\n Args:\n X -- numpy array (Numpy array of data to be plotted)\n Y -- numpy array (Numpy array with labels for data in X)\n components -- int (Number of features of data in X) \n alpha -- double (From [0.0 - 1.0], level of opacity for the dots on the plot)\n\n \"\"\"\n RS = 20150101\n TSNE_proj = TSNE(random_state=RS, n_components=components).fit_transform(X)\n scatter_plot(TSNE_proj, Y, alpha)\n\n\ndef scatter_plot(result, Y, alpha):\n \"\"\"\n Description: Creates scatter plot from output of PCA, ICA, t-SNE functions \n\n Args:\n result -- numpy array (nshape = (n_samples, n_components) Embedding of the training data in low-dimensional space)\n Y -- numpy array (Numpy array with labels for data in X)\n alpha -- double (From [0.0 - 1.0], level of opacity for the dots on the plot)\n\n \"\"\"\n sns.set_style('darkgrid')\n sns.set_palette('muted')\n sns.set_context(\"notebook\", font_scale=1.5,\n rc={\"lines.linewidth\": 1.25})\n df_subset = {\"1\": [], \"2\": []}\n df_subset['1'] = result[:, 0]\n df_subset['2'] = result[:, 1]\n df_subset['y'] = Y\n plt.pyplot.figure(figsize = (10,10))\n sns.scatterplot(\n x=\"2\", y=\"1\",\n hue=\"y\",\n palette=sns.color_palette(\"hls\", 2),\n data=df_subset,\n legend=\"full\",\n alpha=alpha\n )\n", "repo_name": "googleinterns/sensor-tools", "sub_path": "plotting_util.py", "file_name": "plotting_util.py", "file_ext": "py", "file_size_in_byte": 2889, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "12", "api": [{"api_name": "sklearn.decomposition.PCA", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.decomposition.FastICA", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.manifold.TSNE", "line_number": 50, "usage_type": "call"}, {"api_name": "seaborn.set_style", "line_number": 64, "usage_type": "call"}, {"api_name": "seaborn.set_palette", "line_number": 65, "usage_type": "call"}, {"api_name": "seaborn.set_context", "line_number": 66, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "attribute"}, {"api_name": "seaborn.scatterplot", "line_number": 73, "usage_type": "call"}, {"api_name": "seaborn.color_palette", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "72513542101", "text": "import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"multianndata\", # Replace with your own username\n version=\"0.0.4\",\n author=\"Yakir Reshef, Laurie Rumker\",\n author_email=\"yreshef@broadinstitute.org\",\n description=\"Multi-sample version of AnnData\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/yakirr/multianndata\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n install_requires=[\n 'anndata',\n 'numpy',\n ],\n)\n", "repo_name": "yakirr/multianndata", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 793, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "12", "api": [{"api_name": "setuptools.setup", "line_number": 6, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "26366476200", "text": "from sqlalchemy import Column, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\n\nBase = declarative_base()\n\n\nclass User(Base):\n \"\"\"User contains metadata for a user\"\"\"\n __tablename__ = 'users'\n id = Column(Integer, primary_key=True)\n login = Column(String)\n\n def __init__(self, login):\n self.login = login\n\n def __repr__(self):\n return \"\" % (self.login)\n\n\ndef test():\n\n user1 = User(\"Raph\")\n\n print(user1)\n\n\nif __name__ == '__main__':\n test()\n", "repo_name": "tourfl/Apprendre", "sub_path": "removed/API/User.py", "file_name": "User.py", "file_ext": "py", "file_size_in_byte": 525, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 4, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 10, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 10, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 11, "usage_type": "argument"}]} +{"seq_id": "69816355220", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 10 03:51:41 2021\r\n\r\n@author: ADMIN\r\n\"\"\"\r\n\r\nimport streamlit as st \r\nimport numpy as np \r\nimport pandas as pd \r\nimport plotly.express as px \r\nimport matplotlib.pyplot as plt \r\nimport seaborn as sns \r\nfrom sklearn.linear_model import LinearRegression,Lasso,Ridge \r\nfrom sklearn.model_selection import train_test_split \r\nst.title('Interest rate subsidy') \r\nst.text('Check your eligiblity for availing interest rate subsidy') \r\ndf=pd.read_csv('Training Data.csv') \r\n# The following lines create boxes in which user can enter data required to make prediction\r\nage=st.selectbox (\"Age\",range(21,80,1)) \r\nsex = st.radio(\"Select Gender: \", ('male', 'female')) 
\r\nincome=st.slider(\"Income\",min_value=0,max_value=10000000,step=10000) \r\nworkex = st.selectbox('Work Experience',range(0,20,1)) \r\nmarital=st.radio('Marital Status',('Yes','No')) \r\nown=st.selectbox('Ownership status',(\"not rented/not owned\",\"rented\",\"owned\")) \r\ndefault = st.radio(\"Have you ever defaulted in past: \", ('yes', 'no')) \r\n# User input \r\nw=0\r\no=0\r\na=0\r\ninc=0\r\nd=0\r\nif 0<= workex <=2:\r\n w = 0 \r\nelif 2 8:\r\n st.text('You are eligible for loans at subsidized interest rates')\r\n elif 6<=user_input<=8:\r\n st.text('You are eligible for loans at normal interest rates') \r\n else:\r\n st.text('You are eligible for loans at high interest rates')\r\n \r\n", "repo_name": "swatighiya/Loan", "sub_path": "Loan_App.py", "file_name": "Loan_App.py", "file_ext": "py", "file_size_in_byte": 1965, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "streamlit.title", "line_number": 16, "usage_type": "call"}, {"api_name": "streamlit.text", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 20, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 21, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 22, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 23, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 24, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 25, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 26, "usage_type": "call"}, {"api_name": "streamlit.text", "line_number": 68, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 69, "usage_type": "call"}, {"api_name": "streamlit.text", "line_number": 71, "usage_type": "call"}, {"api_name": "streamlit.text", "line_number": 73, "usage_type": "call"}, {"api_name": "streamlit.text", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "34319458687", "text": "from django.conf.urls import url\nfrom . 
import views\n\n# gDefine which app this URL will find it's patterns\napp_name = 'ims'\nurlpatterns = [\n # URL for a user to input their own qty into a specific store\n url(r'^stores/(?P[0-9]+)/count', views.storerecount, \\\n name='storerecount'),\n \n # Details for a particular store, including inventory totals\n url(r'^stores/(?P[0-9]+)/', views.storedetail, name='storedetail'),\n \n url(r'^stores/$', views.StoreView.as_view(), name='stores'),\n \n # A Debug URL that shows every StoreItem row with a form\n #DEBUG\n url(r'^storeitemsform/$', views.storeitemsform, name='storeitemsform'),\n \n \n url(r'^managers/update/(?P[0-9]+)/', views.edit_manager, name='account_update'),\n \n # URL for a list of all managers\n url(r'^managers/$', views.ManagerView.as_view(), name='managers'),\n \n \n # Details for a particular Item\n url(r'^items/(?P[0-9]+)/', views.itemdetail, name='itemdetail'),\n \n # URL for a list of all items\n url(r'^items/$', views.ItemView.as_view(), name='items'),\n \n # URL for a list of all storeitems\n url(r'^storeitems/$', views.StoreItemView.as_view(), name='storeitems'),\n \n # Front page of IMS, Shows a login page or the user's Store view as default\n url(r'$', views.StoreView.as_view(), name='home_page'),\n]", "repo_name": "obl1v1us/MySite", "sub_path": "ims/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1444, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "23580335597", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Import industry returns.\n\nNotes\n-----\nNever use 'grouped.mean()' in pandas! 
It leaks memory big time!\n\n\"\"\"\nfrom __future__ import print_function, division\n\nimport os\nimport zipfile\n\nimport pandas as pd\nimport datetime as dt\nimport numpy as np\n\npath = os.getenv(\"HOME\") + '/Dropbox/Research/data/CRSP/data/'\n# __location__ = os.path.realpath(os.path.join(os.getcwd(),\n# os.path.dirname(__file__)))\n# path = os.path.join(__location__, path + 'CRSP/data/')\n\n\ndef convert_dates(string):\n \"\"\"Convert dates from string to Python date format.\n\n \"\"\"\n return dt.datetime.strptime(string, '%d-%m-%Y')\n\n\ndef cum_returns(ret):\n \"\"\"Accumulate returns over time.\n\n \"\"\"\n return np.exp(np.log(1 + ret).sum()) - 1\n\n\ndef import_returns():\n \"\"\"Import raw data.\n\n The file is called industry_returns.zip\n\n Columns:\n DATE : str\n Date in the format 'dd-mm-yyy'\n HSICCD : int\n SIC industry codes\n CUSIP : str\n Firm ID\n PRC : float\n Price\n SHROUT : int\n Shares outstanding\n RETX : float\n Dividend adjusted monthly returns\n\n Typical output:\n Before resampling:\n Date SIC CUSIP Price Shares Return\n 0 1983-01-31 133 06022110 20.250 7074 0.094595\n 1 1983-01-31 174 68417710 2.250 20546 -0.142857\n 2 1983-01-31 179 25660510 9.125 27996 0.028169\n 3 1983-01-31 251 86666510 5.750 614 0.022222\n 4 1983-01-31 752 87831510 9.250 8400 0.088235\n\n After resampling:\n return value\n SIC CUSIP year\n 100 45292410 1995 -23.529461 15334.250\n 1996 -56.982108 15703.750\n 1997 -79.020959 8129.000\n 1998 38.372241 1755.125\n 115 24487820 1988 21.428654 87612.375\n\n \"\"\"\n # Import raw data\n zfile = zipfile.ZipFile(path + 'firm_returns.zip', 'r')\n data = zfile.open(zfile.namelist()[0])\n converters = {'DATE': convert_dates}\n returns = pd.read_csv(data, converters=converters, engine='c')\n # Rename columns\n columns = {'DATE': 'date', 'HSICCD': 'SIC',\n 'PRC': 'price', 'SHROUT': 'shares',\n 'RETX': 'return'}\n returns.rename(columns=columns, inplace=True)\n # Remove incorrect observations\n cond1 = returns['return'] != 'C'\n cond2 = returns['price'] > 0\n cond3 = returns['shares'] > 0\n returns = returns[cond1 & cond2 & cond3]\n # Convert to floats\n returns.loc[:, 'return'] = returns['return'].astype(float)\n\n print(returns.head())\n\n # Resample monthly returns to annual frequency\n returns = resample_returns(returns)\n\n returns.to_hdf(path + 'firm_returns.h5', 'returns')\n\n print(returns.head())\n\n\ndef resample_returns(returns):\n \"\"\"Resample monthly returns to annual frequency.\n\n Typical output:\n return value\n SIC CUSIP year\n 100 45292410 1995 -23.529461 15334.250\n 1996 -56.982108 15703.750\n 1997 -79.020959 8129.000\n 1998 38.372241 1755.125\n 115 24487820 1988 21.428654 87612.375\n\n \"\"\"\n returns.eval('value = shares * price')\n returns.loc[:, 'year'] = returns['date'].apply(lambda x: x.year)\n index = ['SIC', 'CUSIP', 'year']\n returns.set_index(index, inplace=True)\n returns = returns.loc[:, ['return', 'value']]\n returns.sort_index(inplace=True)\n\n grouped = returns.groupby(level=index)\n returns = grouped[['return']].apply(cum_returns)\n returns.loc[:, 'value'] = grouped['value'].first()\n returns.loc[:, 'return'] *= 100\n\n return returns\n\n\ndef load_returns():\n \"\"\"Load data from the disk.\n\n \"\"\"\n return pd.read_hdf(path + 'firm_returns.h5', 'returns')\n\n\nif __name__ == '__main__':\n\n import_returns()\n\n returns = load_returns()\n", "repo_name": "khrapovs/datastorage", "sub_path": "datastorage/crsp.py", "file_name": "crsp.py", "file_ext": "py", "file_size_in_byte": 3880, "program_lang": "python", 
"lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "os.getenv", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 36, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 81, "usage_type": "call"}, {"api_name": "pandas.read_hdf", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "12635981920", "text": "#\n# Freesound is (c) MUSIC TECHNOLOGY GROUP, UNIVERSITAT POMPEU FABRA\n#\n# Freesound is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# Freesound is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n# Authors:\n# See AUTHORS file.\n#\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.http import HttpResponse\nfrom ratings.models import Rating\nfrom utils.cache import invalidate_template_cache\n\n@login_required\ndef add(request, content_type_id, object_id, rating):\n rating = int(rating)\n if rating in range(1,6):\n # in order to keep the ratings compatible with freesound 1, we multiply by two...\n rating = rating*2\n content_type = ContentType.objects.get(id=content_type_id)\n try:\n rating_object = Rating.objects.get(user=request.user, object_id=object_id, content_type=content_type)\n rating_object.rating = rating;\n rating_object.save()\n except Rating.DoesNotExist: #@UndefinedVariable\n rating_object = Rating.objects.create(user=request.user, object_id=object_id, content_type=content_type, rating=rating)\n # make sure the rating is seen on the next page load by invalidating the cache for it.\n ct = ContentType.objects.get(id=content_type_id)\n if ct.name == 'sound':\n # invalidate for logged in/not logged in, only for 'OK' sounds\n invalidate_template_cache(\"sound_header\", object_id, True)\n invalidate_template_cache(\"sound_header\", object_id, False)\n invalidate_template_cache(\"display_sound\", object_id, True, 'OK')\n invalidate_template_cache(\"display_sound\", object_id, False, 'OK')\n # if you want to invalidate some other caches for other content types add them here\n return HttpResponse(Rating.objects.filter(object_id=object_id, content_type=content_type).count())\n", "repo_name": "djzikario/freesound", "sub_path": "ratings/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2455, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "12", "api": [{"api_name": "django.contrib.contenttypes.models.ContentType.objects.get", "line_number": 33, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": 
"django.contrib.contenttypes.models.ContentType", "line_number": 33, "usage_type": "name"}, {"api_name": "ratings.models.Rating.objects.get", "line_number": 35, "usage_type": "call"}, {"api_name": "ratings.models.Rating.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "ratings.models.Rating", "line_number": 35, "usage_type": "name"}, {"api_name": "ratings.models.Rating.DoesNotExist", "line_number": 38, "usage_type": "attribute"}, {"api_name": "ratings.models.Rating", "line_number": 38, "usage_type": "name"}, {"api_name": "ratings.models.Rating.objects.create", "line_number": 39, "usage_type": "call"}, {"api_name": "ratings.models.Rating.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "ratings.models.Rating", "line_number": 39, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects.get", "line_number": 41, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 41, "usage_type": "name"}, {"api_name": "utils.cache.invalidate_template_cache", "line_number": 44, "usage_type": "call"}, {"api_name": "utils.cache.invalidate_template_cache", "line_number": 45, "usage_type": "call"}, {"api_name": "utils.cache.invalidate_template_cache", "line_number": 46, "usage_type": "call"}, {"api_name": "utils.cache.invalidate_template_cache", "line_number": 47, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 49, "usage_type": "call"}, {"api_name": "ratings.models.Rating.objects.filter", "line_number": 49, "usage_type": "call"}, {"api_name": "ratings.models.Rating.objects", "line_number": 49, "usage_type": "attribute"}, {"api_name": "ratings.models.Rating", "line_number": 49, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "5966533025", "text": "import bench\nimport argparse\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n\ndef main():\n from sklearn.manifold import TSNE\n\n # Load and convert data\n X, _, _, _ = bench.load_data(params)\n\n # Create our TSNE model\n tsne = TSNE(n_components=params.n_components, early_exaggeration=params.early_exaggeration,\n learning_rate=params.learning_rate, angle=params.angle,\n min_grad_norm=params.min_grad_norm, random_state=params.random_state)\n\n fit_time, _ = bench.measure_function_time(tsne.fit, X, params=params)\n divergence = tsne.kl_divergence_\n\n bench.print_output(\n library='sklearn',\n algorithm='TSNE',\n stages=['training'],\n params=params,\n functions=['TSNE.fit'],\n times=[fit_time],\n metric_type='divergence',\n metrics=[divergence],\n data=[X],\n alg_instance=tsne,\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='scikit-learn tsne '\n 'regression benchmark')\n\n parser.add_argument('--n-components', type=int, default=2,\n help='The dimension of the embedded space.')\n parser.add_argument('--early-exaggeration', type=float, default=12.0,\n help='This factor increases the attractive forces between points '\n 'and allows points to move around more freely, '\n 'finding their nearest neighbors more easily.')\n parser.add_argument('--learning-rate', type=float, default=200.0,\n help='The learning rate for t-SNE is usually in the range [10.0, 1000.0].')\n parser.add_argument('--angle', type=float, default=0.5,\n help='Angular 
size. This is the trade-off between speed and accuracy.')\n parser.add_argument('--min-grad-norm', type=float, default=1e-7,\n help='If the gradient norm is below this threshold,'\n 'the optimization is stopped.')\n parser.add_argument('--random-state', type=int, default=1234)\n\n params = bench.parse_args(parser)\n bench.run_with_context(params, main)\n", "repo_name": "IntelPython/scikit-learn_bench", "sub_path": "sklearn_bench/tsne.py", "file_name": "tsne.py", "file_ext": "py", "file_size_in_byte": 2205, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 102, "dataset": "github-code", "pt": "12", "api": [{"api_name": "warnings.simplefilter", "line_number": 4, "usage_type": "call"}, {"api_name": "bench.load_data", "line_number": 11, "usage_type": "call"}, {"api_name": "sklearn.manifold.TSNE", "line_number": 14, "usage_type": "call"}, {"api_name": "bench.measure_function_time", "line_number": 18, "usage_type": "call"}, {"api_name": "bench.print_output", "line_number": 21, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 36, "usage_type": "call"}, {"api_name": "bench.parse_args", "line_number": 54, "usage_type": "call"}, {"api_name": "bench.run_with_context", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "28015481345", "text": "from rest_framework import serializers\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.relations import StringRelatedField\n\nfrom building.serializers import BuildingSerializer\nfrom building_post.models import BuildingPost, BuildingPostHistory\n\n\nclass BuildingPostSerializer(serializers.ModelSerializer):\n creator = StringRelatedField()\n\n class Meta:\n model = BuildingPost\n fields = (\n 'building',\n 'creator',\n 'title',\n 'content',\n )\n\n def update(self, instance, validated_data):\n if self.context['request'].user != instance.creator:\n raise PermissionDenied()\n new_instance = super(BuildingPostSerializer, self).update(instance, validated_data)\n BuildingPostHistory.objects.create(\n building_post=instance,\n building=instance.building,\n creator=instance.creator,\n title=instance.title,\n content=instance.content\n )\n return new_instance\n\n\nclass BuildingPostReadSerializer(BuildingPostSerializer):\n building = BuildingSerializer(many=False)\n\n class Meta(BuildingPostSerializer.Meta):\n fields = (\n 'id',\n 'building',\n 'creator',\n 'title',\n 'content',\n 'is_enabled',\n 'created',\n 'updated',\n )\n", "repo_name": "trowa88/commstr", "sub_path": "building_post/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 1400, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 9, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 9, "usage_type": "name"}, {"api_name": "rest_framework.relations.StringRelatedField", "line_number": 10, "usage_type": "call"}, {"api_name": "building_post.models.BuildingPost", "line_number": 13, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.PermissionDenied", "line_number": 23, "usage_type": "call"}, {"api_name": "building_post.models.BuildingPostHistory.objects.create", "line_number": 25, "usage_type": "call"}, {"api_name": "building_post.models.BuildingPostHistory.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "building_post.models.BuildingPostHistory", "line_number": 25, "usage_type": "name"}, 
{"api_name": "building.serializers", "line_number": 36, "usage_type": "name"}, {"api_name": "building.serializers.BuildingSerializer", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "35329370595", "text": "import numpy as np\r\nimport time\r\nfrom scipy import interp\r\nfrom sklearn.metrics import roc_curve\r\nfrom sklearn.model_selection import RandomizedSearchCV\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.linear_model import LassoCV, LogisticRegression, ElasticNetCV\r\nfrom sklearn.feature_selection import RFECV, SelectFromModel\r\nfrom MLPipe.measures import Measures\r\n\r\nclass LR_Pipeline:\r\n\r\n def __init__(self, x_train=None, y_train=None, x_test=None, y_test=None, itera=None, cv=None, mean_tprr=None, select_feats_logit=False, T=0.1, method='rfc', run=False):\r\n self.measure = Measures(run)\r\n if run:\r\n self.run(x_train, y_train, x_test, y_test, itera, cv, mean_tprr, select_feats_logit=False, T=0.1, method='rfc')\r\n\r\n else:\r\n self.name = 'NONE'\r\n self.clf = 0\r\n\r\n def run_grid(self, x_train, y_train, x_test, y_test, itera, cv, mean_tprr, select_feats_logit=False, T=0.1, method='rfc'):\r\n \r\n self.run = True\r\n self.name = 'LR'\r\n self.measure.run = True\r\n\r\n feats = np.ones(x_train.shape[1])\r\n if select_feats_logit:\r\n feats = self.Feature_Selection(x_train, y_train, T, method, cv)\r\n print(\"Features Selected\", sum(feats))\r\n x_train = x_train[:, feats]\r\n x_test = x_test[:, feats]\r\n\r\n self.clf = self.TestLogistic(x_train, y_train, x_test, y_test, itera, feats)\r\n print(\"Done testing - LR\")\r\n\r\n def Feature_Selection(self, X, y, T, method, cv):\r\n \"\"\"\r\n This functions returns only the features selected by the method using the threshold selected.\r\n We advise to run this function with several thresholds and look for the best, put this function inside a loop and see how it goes\r\n Suggestions for the range of t, thresholds = np.linspace(0.00001, 0.1, num=10)\r\n Input: \r\n X=training set\r\n y=training labels\r\n T=threshold selected\r\n which method= 'rfc', 'lasso', 'elastik'\r\n cv= number of cross validation iterations\r\n Output:\r\n Boolean array with the selected features,with this you can X=X[feats] to select only the relevant features\r\n \"\"\"\r\n alphagrid = np.linspace(0.001, 0.99, num=cv)\r\n\r\n clf = {\r\n 'rfc': RandomForestClassifier(),\r\n 'lasso': LassoCV(), # alphas=alphagrid),\r\n 'elastik': ElasticNetCV(alphas=alphagrid),\r\n 'backward': RFECV(LogisticRegression(), cv=cv, n_jobs=-2)\r\n\r\n }[method]\r\n if method == 'backward':\r\n clf = clf.fit(X, y)\r\n feats = clf.support_\r\n else:\r\n clf.fit(X, y)\r\n sfm = SelectFromModel(clf) # , threshold=T)\r\n print(X.shape)\r\n sfm.fit(X, y)\r\n feats = sfm.get_support()\r\n\r\n return(feats)\r\n\r\n def TestLogistic(self, X_train, Y_train, X_test, Y_test, itera, feats):\r\n\r\n clf = LogisticRegression(C=100000, solver=\"liblinear\")\r\n\r\n clf.fit(X_train, Y_train)\r\n\r\n preds = clf.predict(X_test)\r\n probas = clf.predict_proba(X_test)[:, 1]\r\n\r\n odds = np.exp(clf.coef_)\r\n feats = np.array(feats, dtype='float64')\r\n pos = 0\r\n for i in range(0, feats.shape[0]):\r\n if feats[i] == 1:\r\n feats[i] = odds[0, pos]\r\n # print(odds[0,pos])\r\n pos = pos + 1\r\n # print(feats)\r\n self.measure.feat_imp.append(feats)\r\n # print(\"classes\", clf.classes_)\r\n # name=('Models/RFC'+str(itera)+'.pkl')\r\n # joblib.dump(clf,name)\r\n\r\n self.measure.calculate(Y_test, preds, probas)\r\n\r\n return clf\r\n", "repo_name": 
"rriccilopes/MLPipe", "sub_path": "LR.py", "file_name": "LR.py", "file_ext": "py", "file_size_in_byte": 3707, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "MLPipe.measures.Measures", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 52, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 55, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LassoCV", "line_number": 56, "usage_type": "call"}, {"api_name": "sklearn.linear_model.ElasticNetCV", "line_number": 57, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.RFECV", "line_number": 58, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 58, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectFromModel", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "22664030228", "text": "import subprocess\n\nfrom mock import MagicMock\n\n\n# The process mock can be retrieved by calling PopenMock().mock\nclass PopenMock:\n def __init__(\n self,\n return_code=0,\n poll_result=0,\n communicate_return_value=None,\n communicate_side_effect=None,\n kill_side_effect=None,\n ):\n self.return_code = return_code\n self.poll_result = poll_result\n self.communicate_return_value = communicate_return_value\n self.communicate_side_effect = communicate_side_effect\n self.kill_side_effect = kill_side_effect\n self.mock = self._create_mock()\n\n def _create_mock(self):\n popen_mock = MagicMock()\n if self.communicate_return_value:\n popen_mock.communicate.return_value = self.communicate_return_value\n elif self.communicate_side_effect:\n popen_mock.communicate.side_effect = self.communicate_side_effect\n if self.kill_side_effect:\n popen_mock.kill.side_effect = self.kill_side_effect\n popen_mock.returncode = self.return_code\n popen_mock.poll.return_value = self.poll_result\n return popen_mock\n\n\nDEFAULT_RETRYABLE_FAILURE_POPEN = PopenMock(\n return_code=1,\n poll_result=1,\n communicate_return_value=(b\"\", b\"mount.nfs4: Connection reset by peer\"),\n)\nDEFAULT_NON_RETRYABLE_FAILURE_POPEN = PopenMock(\n return_code=1,\n poll_result=1,\n communicate_return_value=(\n b\"\",\n b\"mount.nfs4: access denied by server while mounting 127.0.0.1:/\",\n ),\n)\nDEFAULT_SUCCESS_POPEN = PopenMock(communicate_return_value=(b\"\", b\"\"))\nDEFAULT_TIMEOUT_POPEN = PopenMock(\n return_code=1,\n poll_result=1,\n communicate_side_effect=subprocess.TimeoutExpired(\"cmd\", timeout=1),\n)\nDEFAULT_UNKNOWN_EXCEPTION_POPEN = PopenMock(\n return_code=1, poll_result=1, communicate_side_effect=Exception(\"Unknown error\")\n)\n", "repo_name": "aws/efs-utils", "sub_path": "test/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 1894, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 240, "dataset": "github-code", "pt": "12", "api": [{"api_name": "mock.MagicMock", "line_number": 24, "usage_type": "call"}, {"api_name": "subprocess.TimeoutExpired", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "25968420844", "text": "# encoding=utf-8\r\n\r\nimport time\r\n\r\nimport multiprocessing as mp\r\nimport 
threading as td\r\n\r\n\r\ncount = 1000000\r\n\r\ndef job(q, name):\r\n t = time.time()\r\n res = 0\r\n for i in range(count):\r\n res += i + i ** 2 + i ** 3\r\n q.put(res)\r\n t2 = time.time()\r\n print(\"%s - %s\" % (name, str(t2 - t)))\r\n\r\n\r\ndef multicore():\r\n q = mp.Queue()\r\n p1 = mp.Process(target=job, args=(q, \"multicore-1\"))\r\n p2 = mp.Process(target=job, args=(q, \"multicore-2\"))\r\n p1.start()\r\n p2.start()\r\n p1.join()\r\n p2.join()\r\n res1 = q.get()\r\n res2 = q.get()\r\n print('multicore:', res1 + res2)\r\n\r\ndef multithread():\r\n q = mp.Queue()\r\n t1 = td.Thread(target=job, args=(q, \"multithread-1\"))\r\n t2 = td.Thread(target=job, args=(q, \"multithread-2\"))\r\n t1.start()\r\n t2.start()\r\n t1.join()\r\n t2.join()\r\n res1 = q.get()\r\n res2 = q.get()\r\n print('multithread:', res1 + res2)\r\n \r\n\r\ndef normal():\r\n res = 0\r\n for _ in range(2):\r\n for i in range(count):\r\n res += i + i ** 2 + i ** 3\r\n print('normal:', res)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n st = time.time()\r\n normal()\r\n st1 = time.time()\r\n print('normal time:', st1 - st)\r\n multithread()\r\n st2 = time.time()\r\n print('multithread time:', st2 - st1)\r\n multicore()\r\n print('multicore time:', time.time() - st2)\r\n\r\n", "repo_name": "liangrengongzuoshi/pythonDemo", "sub_path": "com/thread/thread_test.py", "file_name": "thread_test.py", "file_ext": "py", "file_size_in_byte": 1353, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "12", "api": [{"api_name": "time.time", "line_number": 12, "usage_type": "call"}, {"api_name": "time.time", "line_number": 17, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 22, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 23, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 24, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 34, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 35, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 36, "usage_type": "call"}, {"api_name": "time.time", "line_number": 56, "usage_type": "call"}, {"api_name": "time.time", "line_number": 58, "usage_type": "call"}, {"api_name": "time.time", "line_number": 61, "usage_type": "call"}, {"api_name": "time.time", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "34266527551", "text": "import numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, optim\n\nimport kornia\n\n\nclass MyHomography(nn.Module):\n def __init__(self, init_homo: torch.Tensor) -> None:\n super().__init__()\n self.homo = nn.Parameter(init_homo.clone().detach())\n\n def forward(self) -> torch.Tensor:\n return torch.unsqueeze(self.homo, dim=0)\n\n\nclass TestWarping:\n # optimization\n lr = 1e-3\n num_iterations = 100\n\n def test_smoke(self, device):\n img_src_t: torch.Tensor = torch.rand(1, 3, 120, 120).to(device)\n img_dst_t: torch.Tensor = torch.rand(1, 3, 120, 120).to(device)\n\n init_homo: torch.Tensor = torch.from_numpy(\n np.array([[0.0415, 1.2731, -1.1731], [-0.9094, 0.5072, 0.4272], [0.0762, 1.3981, 1.0646]])\n ).float()\n\n height, width = img_dst_t.shape[-2:]\n warper = kornia.geometry.transform.HomographyWarper(height, width)\n dst_homo_src = MyHomography(init_homo=init_homo).to(device)\n\n learning_rate = self.lr\n optimizer = optim.Adam(dst_homo_src.parameters(), lr=learning_rate)\n\n for _ in range(self.num_iterations):\n # warp the 
reference image to the destiny with current homography\n img_src_to_dst = warper(img_src_t, dst_homo_src())\n\n # compute the photometric loss\n loss = F.l1_loss(img_src_to_dst, img_dst_t)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n assert not bool(torch.isnan(dst_homo_src.homo.grad).any())\n", "repo_name": "kornia/kornia", "sub_path": "test/integration/test_warp.py", "file_name": "test_warp.py", "file_ext": "py", "file_size_in_byte": 1555, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8834, "dataset": "github-code", "pt": "12", "api": [{"api_name": "torch.nn.Module", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.nn.Parameter", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.unsqueeze", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.rand", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.rand", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 27, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "kornia.geometry.transform.HomographyWarper", "line_number": 32, "usage_type": "call"}, {"api_name": "kornia.geometry", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.functional.l1_loss", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.isnan", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "31468330204", "text": "# import libraries\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import r2_score\n\n# Importing the dataset\nfrom sklearn.preprocessing import PolynomialFeatures, StandardScaler\nfrom sklearn.svm import SVR\nfrom sklearn.tree import DecisionTreeRegressor\n\ndataset = pd.read_csv(\"Data.csv\")\nx = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\n\n# Splitting the dataset into the Training set and Test set\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)\n\n# Training the Simple Linear Regression model on the Training set\nlinear_regressor = LinearRegression()\nlinear_regressor.fit(x_train, y_train)\n\n# Predicting the Test set results\ny_pred = linear_regressor.predict(x_test)\n\nlr = r2_score(y_test, y_pred)\nprint('R2 for linear regression')\nprint(lr)\n\n# Training the Polynomial Regression model on the whole dataset\npoly_reg = PolynomialFeatures(degree=4)\nx_poly = poly_reg.fit_transform(x_train)\nlin_reg_2 = LinearRegression()\nlin_reg_2.fit(x_poly,y_train)\n\nx_test_poly = poly_reg.transform(x_test)\ny_test_pred_poly = lin_reg_2.predict(x_test_poly)\npr = 
r2_score(y_test, y_test_pred_poly)\nprint('R2 for polynomial regression')\nprint(pr)\n\n\n# support vector\ny1 = y.reshape(len(y), 1)\nx_train1, x_test1, y_train1, y_test1 = train_test_split(x, y1, test_size=0.2, random_state=0)\n\nsc_x = StandardScaler()\nxt = sc_x.fit_transform(x_train1)\nsc_y = StandardScaler()\nyt = sc_y.fit_transform(y_train1)\nsvr_regressor = SVR(kernel='rbf')\nsvr_regressor.fit(xt, yt)\n\n\nx1t = sc_x.transform(x_test1)\ny1t = svr_regressor.predict(x1t)\ny_pred_svr = sc_y.inverse_transform(y1t)\nsvr_r2 = r2_score(y_test1, y_pred_svr)\nprint('R2 for support vector')\nprint(svr_r2)\n\n\n#decision tree\nregressor = DecisionTreeRegressor(random_state=0)\nregressor.fit(x_train, y_train)\n\n\ny_pred_dcn = regressor.predict(x_test)\ndcn_r2 = r2_score(y_test, y_pred_dcn)\nprint('R2 for decision tree')\nprint(dcn_r2)\n\n# random forest\nregressor = RandomForestRegressor(n_estimators=10, random_state=0)\nregressor.fit(x_train, y_train)\n\n\ny_pred_rndf = regressor.predict(x_test)\nrndf_r2 = r2_score(y_test, y_pred_rndf)\nprint('R2 for random forest')\nprint(rndf_r2)\n", "repo_name": "vnc-edu/machine-learning", "sub_path": "Regression/AllAtOnce/power.py", "file_name": "power.py", "file_ext": "py", "file_size_in_byte": 2352, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 41, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 48, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 52, "usage_type": "call"}, {"api_name": "sklearn.svm.SVR", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 61, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeRegressor", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 72, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 77, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "40566779247", "text": "from collections import defaultdict\ndef solution(s):\n a=defaultdict(list)\n answer = []\n for idx in range(len(s)):\n if len(a[s[idx]])==0:\n answer.append(-1)\n a[s[idx]].append(idx)\n continue\n a[s[idx]].append(idx)\n answer.append(idx-a[s[idx]][-2])\n return answer", "repo_name": "gudals-kim/Studyroom", "sub_path": "프로그래머스/unrated/142086. 
가장 가까운 같은 글자/가장 가까운 같은 글자.py", "file_name": "가장 가까운 같은 글자.py", "file_ext": "py", "file_size_in_byte": 326, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "collections.defaultdict", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "71107620822", "text": "from dbpool import DBPool\nfrom genutils import *\nfrom uv_decorators import *\nfrom config import UVConfig\nimport time\nimport re\n\n@singleton\nclass UVNormalizer:\n def __init__(self):\n self.init()\n \n def reload(self):\n self.init()\n\n def init(self):\n self.db_name = UVConfig().get_config_value(\"database\",\"db_name.core\") \n self.rowcount, self.normalize_rules = DBPool().execute_query(\"select id, in_pattern, out_pattern, telco_id, channel, remarks from tb_number_normalizer order by id desc\", self.db_name)\n\n logging.info(\"Normalized rules in search order top to bottom\")\n logging.info(\"id\tin_pattern\tout_pattern\ttelco_id\tchannel\t\tremarks\")\n logging.info(\"-\" * 70)\n for l_row in self.normalize_rules:\n logging.info(\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\".format(l_row['id'], l_row['in_pattern'], l_row['out_pattern'], l_row['telco_id'], l_row['channel'], l_row['remarks']))\n\n def normalize(self, p_msisdn, p_telco_id = \".*\", p_channel = \".*\"):\n logging.debug(\"params - p_msisdn {0}, p_telco_id {1}, p_channel {2}\".format(p_msisdn, p_telco_id, p_channel))\n for l_row in self.normalize_rules:\n if( (None != re.match(l_row['in_pattern'], p_msisdn)) and (None != re.match(l_row['telco_id'], p_telco_id)) and (None != re.match(l_row['channel'], p_channel)) ):\n l_norm_msisdn = re.sub(l_row['in_pattern'], l_row['out_pattern'], p_msisdn)\n logging.info(\"Matchfound. p_msisdn = {0}, l_norm_msisdn = {1}, id = {2}, in_pattern = {3}, out_pattern = {4}, telco_id = {5}, channel = {6}, p_telco_id = {7}. p_channel = {8}\".format(p_msisdn, l_norm_msisdn, l_row['id'], l_row['in_pattern'], l_row['out_pattern'], l_row['telco_id'], l_row['channel'], p_telco_id, p_channel) )\n return True, l_norm_msisdn\n\n #End of for loop. No match found. So return False\n logging.warn(\"No normalizer match not found. p_msisdn = {0}, p_telco_id = {1}. 
p_channel = {2}\".format(p_msisdn, p_telco_id, p_channel) )\n return False, p_msisdn\n\n#Run unit tests\nif __name__ == \"__main__\":\n init_logging(\"voiceapp.log\")\n conf = UVConfig()\n conf.init(\"/root/ucp/ucp/conf/ucp.conf\")\n\n l_normalizer = UVNormalizer()\n l_found, l_result = l_normalizer.normalize(\"9886161856\")\n l_found, l_result = l_normalizer.normalize(\"9886161856\", p_telco_id = \"91.*\")\n\n\n", "repo_name": "govardhan/ucp_beta", "sub_path": "ucp/core/number_normalize.py", "file_name": "number_normalize.py", "file_ext": "py", "file_size_in_byte": 2276, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "config.UVConfig", "line_number": 17, "usage_type": "call"}, {"api_name": "dbpool.DBPool", "line_number": 18, "usage_type": "call"}, {"api_name": "re.match", "line_number": 29, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 30, "usage_type": "call"}, {"api_name": "config.UVConfig", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "70589647383", "text": "import gym\nimport math\nimport numpy as np\n# import Expected_Sarsa as Agent\nimport Dyna_Q_plus as Agent\n\nnum_episodes = 500\nbuckets=(1, 1, 6, 12,)\nagent_info = {\"num_actions\": 2, \n \"num_states\": buckets, \n \"epsilon\": 0.1, \n \"step_size\": 0.01, \n \"discount\": 1.0,\n \"kappa\": 0.001,\n \"planning_steps\": 5,\n \"random_seed\": 0,\n \"planning_random_seed\": 0}\n\n# agent = Agent.ExpectedSarsaAgent()\nagent = Agent.DynaQPlusAgent()\n\ndef discretize(obs, env):\n upper_bounds = [env.observation_space.high[0], 0.5, env.observation_space.high[2], math.radians(50)]\n lower_bounds = [env.observation_space.low[0], -0.5, env.observation_space.low[2], -math.radians(50)]\n ratios = [(obs[i] + abs(lower_bounds[i])) / (upper_bounds[i] - lower_bounds[i]) for i in range(len(obs))]\n new_obs = [int(round((buckets[i] - 1) * ratios[i])) for i in range(len(obs))]\n new_obs = [min(buckets[i] - 1, max(0, new_obs[i])) for i in range(len(obs))]\n return tuple(new_obs)\n\nif __name__ == \"__main__\":\n # step_size = [0.01, 0.05, 0.1, 0.5]\n planning_steps = [0, 5, 10, 50]\n\n for step in planning_steps:\n agent_info[\"planning_steps\"] = step\n agent.agent_init(agent_info)\n env = gym.make(\"CartPole-v1\")\n Rewards = []\n \n for ep in range(num_episodes):\n total_rewards = 0\n last_state = discretize(env.reset(), env)\n done = False\n last_action = agent.agent_start(last_state)\n\n agent.epsilon = agent.get_epsilon(ep)\n agent.step_size = agent.get_alpha(ep)\n \n count_steps = 0\n while not done:\n count_steps += 1\n obs, reward, done, _ = env.step(last_action)\n total_rewards += reward\n last_state = discretize(obs, env)\n last_action = agent.agent_step(reward, last_state)\n \n print(\"Episode: {} with {} planning step(s) Total reward: {}\".format(ep, agent.planning_steps, total_rewards))\n Rewards.append(total_rewards)\n \n np.save(\"./DynaQ_plus_results/step_size_0.01/adaptive/planning_step_{}\".format(agent.planning_steps), Rewards)\n ", "repo_name": "yanshuolee/RL-implementation", "sub_path": "Cartpole/cartpole_game_parameter_search.py", "file_name": "cartpole_game_parameter_search.py", "file_ext": "py", "file_size_in_byte": 2282, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "Dyna_Q_plus.DynaQPlusAgent", "line_number": 20, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 23, "usage_type": "call"}, {"api_name": 
"math.radians", "line_number": 24, "usage_type": "call"}, {"api_name": "gym.make", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "27954757199", "text": "from setuptools import setup\n\ntry:\n with open('requirements.txt', 'r') as file:\n requirements = file.read()\nexcept FileNotFoundError:\n requirements = []\n\nsetup(name='gym_image_maze',\n version='0.0.1',\n install_requires=requirements)", "repo_name": "thanakorn/gym-image-maze", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 255, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "12", "api": [{"api_name": "setuptools.setup", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "69814813783", "text": "from bs4 import BeautifulSoup\nimport requests\n\n\ndef get_soup(url):\n html_soup = requests.get(url)\n soup = BeautifulSoup(html_soup.text, 'lxml')\n return soup.find_all(class_=\"message-userContent\")\n\n\ndef get_plans(posts):\n for el in posts:\n el = str(el.text)\n return el.split(\"\\n\")\n\n\ndef plan_counts(arr, plan_count):\n\n for i in arr:\n if i.startswith(\"[X]\") or i.startswith(\"[x]\"):\n if i in plan_count:\n plan_count[i] += 1\n else:\n plan_count[i] = 1\n return plan_count\n\n\ndef send_plan(plans, url):\n for plan in plans:\n data ={\"plan\":next(iter(plan)), \"votes\":plan.next(iter(plan))}\n requests.post(url, data)", "repo_name": "Hazel-J-Nova/discord-Bot", "sub_path": "python/soup_test.py", "file_name": "soup_test.py", "file_ext": "py", "file_size_in_byte": 712, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "requests.get", "line_number": 6, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "8363963415", "text": "# -*- coding: utf-8 -*-\n'''\nCreated on 18.06.2018\n\n@author: Kevin\n'''\n\n\n#Module importieren\nimport pandas as pd\nimport scipy.stats as stats\nimport lightgbm\nimport os\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn import preprocessing\nfrom bi2018.BI.data_handler import DataHandler\n\n\n\n\ndef main():\n #Hier werden alle verschiedenen Methoden aufgerufen, da es sonst wirklich ziemlich unübersichtlich wird\n #Einlesen des Files\n df = readF(\"train.csv\", True) # True wenn Index im File vorhanden, wie hier.\n test = readF('test.csv', False)\n #with pd.option_context('display.max_rows', 11, 'display.max_columns', 200):\n #print (df1)\n #print (test)\n cT = ChiSquare(df) #\n useChi(cT) #gibt aus, welche Columns \"important\" sind für \"Category\"; DESCRIPT is most important\n dh = DataHandler()\n dh.load_data(train=df, test=test)\n data_sets = dh.transform_data()\n with pd.option_context('display.max_rows', 11, 'display.max_columns', 200):\n print(\"datasets:\")\n print(data_sets)\n #exit()\n\n resulttrain= lgbm(data_sets)\n print(resulttrain)\n exit()\n\n\n#Data Understanding & Data Preparation von BI_martin.py, dort wird von train.csv die csv \"rewritten.csv\" erstellt, und hier wieder eingelesen zur Auswertung.\ndef readF(path, index): #index == True, wenn Index vorhanden\n print('Reading: ', path)\n if (index == True):\n df = pd.read_csv(path, delimiter= ',', quotechar='\"', header = 0, error_bad_lines=False, dtype={\"AddressSuffix\": str, 'X': float, 'Y': float}) # , 
dtype={\"Date\": str, \"Time\": str, \"Year\": int, \"Month\": int, \"Day\": int, \"Hour\": int, \"Season\": str, \"Descript\": str, \"DayOfWeek\": str, \"PdDistrict\": str, \"Resolution\": str, \"Address\": str, \"AdressSuffix\": str, \"X\": str, \"Y\": str} columns mit (delimiter\";\"), die headzeile ist die 0., dtype bestimmt datentyp der Columns\n else:\n #df = pd.read_csv(path, header = 0, sep='\\t' )\n #probably not needed anymore since bi_martin is fixed\n df = pd.read_csv(path, delimiter= ',', quotechar='\"', header = 0, error_bad_lines=False, dtype={\"AddressSuffix\": str, 'X': float, 'Y': float}, index_col=0) # , dtype={\"Date\": str, \"Time\": str, \"Year\": int, \"Month\": int, \"Day\": int, \"Hour\": int, \"Season\": str, \"Descript\": str, \"DayOfWeek\": str, \"PdDistrict\": str, \"Resolution\": str, \"Address\": str, \"AdressSuffix\": str, \"X\": str, \"Y\": str} columns mit (delimiter\";\"), die headzeile ist die 0., dtype bestimmt datentyp der Columns\n print('Transforming', path)\n #df['Date'], df['Time'] = df['Dates'].str.split(' ', 1).str\n df['Year'] = df['Dates'].str[:4]\n df['Month'] = df['Dates'].str[5:7]\n df['Day'] = df['Dates'].str[8:10]\n #df['Time'] = df['Dates'].str[11:16] # in stunde und minute aufgesplittet\n df['Hour'] = df['Dates'].str[11:13]\n df['Minute'] = df['Dates'].str[14:16]\n df['Season'] = df.apply(get_season, axis=1)\n #Note the axis=1 specifier, that means that the application is done at a row, rather than a column level.\n #df['AddressSuffix'] = df['Address'].str[-2:]\n df['DayOfWeek'] = df['DayOfWeek'].str.upper()\n #df['Address'] = df['Address'].str.upper()\n df['X'] = df['X'].apply(lambda x: 0 if float(x)>=-122.3649 or float(x)<=-122.5136 else x)\n df['Y'] = df['Y'].apply(lambda y: 0 if float(y)<=37.70788 or float(y)>=37.81998 else y)\n with pd.option_context('display.max_rows', 11, 'display.max_columns', 200):\n print (df)\n df = df.drop('Dates', 1)\n df = df.drop('Address', 1)\n if (path == 'train.csv'):\n df = df.drop('Descript', 1)\n df = df.drop('Resolution', 1)\n\n\n print (df)\n print('Success for ', path)\n\n #with pd.option_context('display.max_rows', 11, 'display.max_columns', 200):\n #print(df.ix[257059]) # --> Einige Zeilen sind abgeschnitten und ergeben nicht immer viel Sinn. So wie diese hier; Excel index + 2 = Python,,, index 257061 = 257059\n #print(df)\n # Abfrage für bestimmten Wert \"NONE\" in Spalte \"Resolution\"\n #print(output.loc[output['Resolution'] == 'NONE'])\n #Entfernt alle Einträge \"NONE\" aus der Spalte \"Resolution\"\n #print(\"Hier werden die zu löschenden Inhalte ausgegeben.\")\n #print(df.loc[~(df['Resolution'] != 'NONE')])\n #Will suchen nach 'OWNING' im Feld 'Descript'; um das zu tun müssen ggf. Descript Felder in Liste umgewandelt werden. 
oider einzelnd in CSV ausgelesen werden\n #print(df.loc[output['Descript'].isin('OWNING')])\n #Viele kompakte leicht zu verstehende Informationen auf Code Basis sind hier zu finden -v\n #further use: https://www.shanelynn.ie/using-pandas-dataframe-creating-editing-viewing-data-in-python/\n #existieren duplicates?\n #print (output.duplicated(subset='Dates', keep=False)) #Keep=False markiert alle Duplikate als True, keep=first, nur den ersten nicht\n #Gebe den Dataframe zurück, da wir nun alle Daten in der CSV wie gewünscht bearbeitet haben\n return df\n\ndef get_season(row):\n if 3 <= int(row['Dates'][5:7]) <= 5:\n return \"SPRING\"\n elif 6 <= int(row['Dates'][5:7]) <= 8:\n return \"SUMMER\"\n elif 9 <= int(row['Dates'][5:7]) <= 11:\n return \"AUTUMN\"\n else: return \"WINTER\"\n\n\n\"\"\"\nFeature Extraction\nFeature Extraction mit ChiSquare Test, welcher Wert nimmt am meisten Einfluß wenn Null Hypothese gilt\nChi-Square Erklärung 5-min YouTube: https://www.youtube.com/watch?v=VskmMgXmkMQ ;; Besser: https://www.youtube.com/watch?v=WXPBoFDqNVk (12 min)\nQuelle: http://www.handsonmachinelearning.com/blog/2AeuRL/chi-square-feature-selection-in-python\n\"\"\"\nclass ChiSquare: #Erstellen von chisquare-Klasse um Werte zu speichern\n def __init__(self, dataframe):\n self.df = dataframe\n self.p = None #P-Value\n self.chi2 = None #Chi Test Statistic\n self.dof = None\n self.dfTabular = None\n self.dfExpected = None\n\n\n # alpha is der Wert, der zur Bestimmung ob Null Hypothese angewendet zutrifft oder nicht\n def _print_chisquare_result(self, colX, alpha):\n result = \"\"\n if self.p calculated by ML soße: \"alpha_range = 10.0**-np.arange(1,7)\" ändert Outcome aber NICHT\n X = self.df[colX].astype(str) #Konvertierung zu String der unabhängigen Features\n Y = self.df[colY].astype(str) #Konvertierung zu String des abhängigen Features\n\n self.dfObserved = pd.crosstab(Y,X) #Anzahl für Observed in Abhängigkeit von Resolution\n chi2, p, dof, expected = stats.chi2_contingency(self.dfObserved.values)\n self.p = p\n self.chi2 = chi2\n self.dof = dof\n #print(\"Observed\")\n #print(self.dfObserved)\n\n self.dfExpected = pd.DataFrame(expected, columns=self.dfObserved.columns, index = self.dfObserved.index)\n #print(\"Expected\")\n #print(self.dfExpected)\n\n self._print_chisquare_result(colX, alpha)\n\n\n\n#Feature Selection\ndef useChi(cT):\n testColumns = ['Year', 'Month', 'Day','Time', 'Season', 'DayOfWeek', 'PdDistrict', 'X', 'Y']\n for var in testColumns: #Für jede einzelne Column wird Chi-Square ausgeführt\n cT.TestIndependence(colX=var,colY=\"Category\") #Aufruf des Chi-Square Test mit Resolution als abhängiges Features\n\ndef lgbm(data_set):\n #categorical_features = ['Year', 'Month', 'Day','Time', 'Season', 'DayOfWeek', 'PdDistrict'] funtzt net so\n params = {}\n params['task'] = 'train'\n params['learning_rate'] = 0.0005\n #params['num_boost_round'] = 'best_iteration'\n params['boosting_type'] = 'goss'\n params['objective'] = 'multiclass'\n params['num_class'] = '39'\n params['metric'] = 'multi_logloss'\n #params['categorical_feature'] = categorical_features\n #params['numerical_feature'] = ['X', 'Y']\n #params['sub_feature'] = 0.5\n\n #OVER/UNDERFITTING\n #params['min_data'] = 50\n params['max_depth'] = 4 # < 0 means no limit; some have 4-6\n params['subsample'] = 0.9\n params['num_leaves'] = 12 #38*2\n #https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst\n #min_data_in_leaf, default = 20, type = int, aliases: min_data_per_leaf, min_data, min_child_samples, constraints: 
min_data_in_leaf >= 0\n    #minimal number of data in one leaf. Can be used to deal with over-fitting\n\n    #LAST RESULT: 1: 3.68873 num_leaves = 8\n    #LAST RESULT: 1: valid_0's multi_logloss: 3.6638 num_leaves = 12; max_depth = 8\n\n    print ('Translating Datasets')\n    x_train = data_set['train_X']\n    y_train = data_set['train_Y']\n    x_test = data_set['test_X']\n\n\n    #http://lightgbm.readthedocs.io/en/latest/Python-Intro.html - how it should work\n    print ('setup training and eval')\n    lgb_train = lightgbm.Dataset(x_train, y_train)\n    lgb_eval = lightgbm.Dataset(x_test, reference=lgb_train)\n\n\n    print ('trying to perform')\n    clf = lightgbm.train(params, lgb_train, 100, valid_sets=lgb_eval)\n    print(\"Success, result: \", clf) #an output should be produced and returned here\n    for keys,values in clf.best_score.items():\n        print(keys)\n        print(values)\n\n    print(\"at iteration: \", clf.best_iteration)\n    return clf\n\n\n\n\n\n#Invoke the execution, keep this at the very bottom please\nmain()\n", "repo_name": "TiRoX/bi2018", "sub_path": "BI/BI.py", "file_name": "BI.py", "file_ext": "py", "file_size_in_byte": 9565, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "12", "api": [{"api_name": "bi2018.BI.data_handler.DataHandler", "line_number": 32, "usage_type": "call"}, {"api_name": "pandas.option_context", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 53, "usage_type": "call"}, {"api_name": "pandas.option_context", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.crosstab", "line_number": 138, "usage_type": "call"}, {"api_name": "scipy.stats.chi2_contingency", "line_number": 139, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 139, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 146, "usage_type": "call"}, {"api_name": "lightgbm.Dataset", "line_number": 194, "usage_type": "call"}, {"api_name": "lightgbm.Dataset", "line_number": 195, "usage_type": "call"}, {"api_name": "lightgbm.train", "line_number": 199, "usage_type": "call"}]} +{"seq_id": "71388473941", "text": "import sys\n# required for when running on a cluster\nsys.path.append('../')\nfrom typing import List\n\nimport sklearn\nfrom sklearn.linear_model import LinearRegression\nimport numpy as np\nfrom pathlib import Path\nimport pickle\n\nimport sdem\nfrom sdem import Experiment\nfrom sdem.utils import read_yaml, get_all_permutations, print_dict\n\n# Setup sacred experiment\nex = Experiment(__file__)\n\n@ex.configs\ndef get_config() -> List[dict]:\n    configs = {\n        'name': ['linear_model'],\n        'fold': list(range(5))\n    }\n    return get_all_permutations(configs)\n\ndef get_raw_data():\n    np.random.seed(0)\n\n    N = 50\n\n    x = np.linspace(0, 1, N)\n    y = x + 0.1*np.random.randn(N)\n\n    return x[:, None], y\n\ndef get_fold(fold):\n    X, y = get_raw_data()\n\n    kf_gen = sklearn.model_selection.KFold(n_splits=5, shuffle=False).split(X)\n\n    # kf is a generator, convert to list so we can index\n    kf = [k for k in kf_gen]\n\n    train_index, test_index = kf[fold]\n\n    X_train, X_test = X[train_index], X[test_index]\n    y_train, y_test = y[train_index], y[test_index]\n\n    return X_train, X_test, y_train, y_test\n\n\n@ex.automain\ndef main(config):\n    print_dict(config)\n\n    # Output format name. 
This must match the pattern defined in the experiment config.\n name = '{name}_{_id}'.format(name=config['name'], _id=config['experiment_id'])\n\n # Make sure folder for results exists\n results_root = Path('../results/')\n results_root.mkdir(exist_ok=True)\n\n # Get training data for current fold\n X_train, X_test, y_train, y_test = get_fold(config['fold'])\n\n # Make model\n m = LinearRegression().fit(X_train, y_train)\n\n # Log metrics\n def pred_fn(X):\n return m.predict(X)\n\n train_metrics, pred_train = ex.log_metrics(\n X_train, y_train, pred_fn, var_flag=False, prefix='train'\n )\n test_metrics, pred_test = ex.log_metrics(\n X_test, y_test, pred_fn, var_flag=False, prefix='test'\n )\n \n results = {\n 'metrics': {\n 'train': train_metrics,\n 'test': test_metrics\n },\n 'predictions': {\n 'train': pred_train,\n 'test': pred_test \n }\n }\n\n # save results\n print_dict(results['metrics'])\n\n pickle.dump(results, open(results_root/ f'{name}.pickle', \"wb\" ) )\n ex.add_artifact(results_root/ f'{name}.pickle')\n", "repo_name": "defaultobject/sdem", "sub_path": "example/example_exp/models/m_model.py", "file_name": "m_model.py", "file_ext": "py", "file_size_in_byte": 2340, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "sdem.Experiment", "line_number": 17, "usage_type": "call"}, {"api_name": "sdem.utils.get_all_permutations", "line_number": 25, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.random.seed", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 40, "usage_type": "call"}, {"api_name": "sklearn.model_selection", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sdem.utils.print_dict", "line_number": 55, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 61, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 68, "usage_type": "call"}, {"api_name": "sdem.utils.print_dict", "line_number": 93, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "37288123686", "text": "import cv2\nimport numpy as np\n\nclass ImageHandler:\n \n @staticmethod\n def crop(np_image, points):\n return np_image[points[0]:points[1], points[2]:points[3]]\n\n @staticmethod\n def write_to_file(filename, np_image):\n return cv2.imwrite(filename, np_image)\n\n @staticmethod\n def draw_vehicle_shape(np_image, points, color=(255,0,0), thickness=1):\n top_left = (points[2], points[0])\n bottom_right = (points[3], points[1])\n cv2.rectangle(np_image, top_left, bottom_right, color, thickness=thickness)\n\n @staticmethod\n def draw_losangle(np_image, points, color=(1.,1.,1.), thickness=1):\n for i in range(4):\n pt1 = tuple(points[:,i].astype(int).tolist())\n pt2 = tuple(points[:,(i+1)%4].astype(int).tolist())\n cv2.line(np_image,pt1,pt2,color,thickness)\n\n @staticmethod\n def 
write2img(np_image,points,strg,txt_color=(0,0,0),bg_color=(255,255,255),font_size=1):\n        wh_img = np.array(np_image.shape[1::-1])\n    \n        font = cv2.FONT_HERSHEY_SIMPLEX\n\n        wh_text,v = cv2.getTextSize(strg, font, font_size, 3)\n        rpoints = points / np.array(wh_img, dtype=float).reshape(2,1)\n    \n        bl_corner = rpoints.min(1) * wh_img\n        tl_corner = np.array([bl_corner[0],bl_corner[1]-wh_text[1]])/wh_img\n        br_corner = np.array([bl_corner[0]+wh_text[0],bl_corner[1]])/wh_img\n        bl_corner /= wh_img\n\n        if (tl_corner < 0.).any():\n            delta = 0. - np.minimum(tl_corner,0.)\n        elif (br_corner > 1.).any():\n            delta = 1. - np.maximum(br_corner,1.)\n        else:\n            delta = 0.\n\n        tl_corner += delta\n        br_corner += delta\n        bl_corner += delta\n\n        tpl = lambda x: tuple((x*wh_img).astype(int).tolist())\n\n        cv2.rectangle(np_image, tpl(tl_corner), tpl(br_corner), bg_color, -1)\t\n        cv2.putText(np_image,strg,tpl(bl_corner),font,font_size,txt_color,3) \n\n    ", "repo_name": "knetto/Pakeerplaats-lp-scanner-knetto-main", "sub_path": "alpr-unconstrained-master2/classes/ImageHandler.py", "file_name": "ImageHandler.py", "file_ext": "py", "file_size_in_byte": 1939, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "cv2.imwrite", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 31, "usage_type": "attribute"}, {"api_name": "cv2.getTextSize", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "26457729160", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 1 13:34:43 2022\r\n\r\n\"\"\"\r\n\r\n#the script executes an external SQL query against an Oracle database and measures the time\r\n\r\nimport os\r\nimport pandas as pd\r\nimport cx_Oracle\r\nimport time\r\n\r\nos.chdir(\"c:/python/bazy\") # format:'C:/folder/folder'\r\nfile='p.txt'\r\n\r\nline = []\r\nwith open(file, \"r\") as file:\r\n    line = file.readlines()\r\n\r\n#with open as\r\nuserpwd=line[0]\r\n \r\nconnection = cx_Oracle.connect(\"N1400274\", password=userpwd, dsn=\"KMB_PRE\") #\r\nquery = connection.cursor()\r\n\r\n\r\nwith open(\"c:/python/bazy/sql.txt\") as file_in:\r\n    lines = []\r\n    for line in file_in:\r\n        lines.append(line)\r\n\r\nprint(lines)\r\nsql_text=[]\r\n\r\nfor i in lines:\r\n    a=str(i)\r\n    sql_text.append(a)\r\n    \r\nsql = ''.join(sql_text)\r\n\r\nprint(sql)\r\n\r\n# rs=query.execute(sql)\r\nrs=query.execute(\"select max(end_dt) as end_Dt, min(end_dt) as end_dt from kpr.cust_ent_dim \\\r\n                 union all \\\r\n                 select max(end_dt) as end_Dt, min(end_dt) as end_dt from kpr.cust_ent_dim \")\r\nprint(\"Fetching data: started\")\r\nstart_time = time.time()\r\ndata=pd.DataFrame(rs.fetchall())\r\nprint(\"Fetching data: finished. 
time: {} s\".format(time.time() - start_time))\r\nprint(len(query.description))\r\nprint(data)\r\ncol_names=[]\r\nprint(query.description)\r\nfor i in range(0, len(query.description)):\r\n col_names.append(query.description[i][0])\r\nprint(col_names)\r\nprint(data.columns)\r\ndata.columns=col_names\r\nprint(data)\r\nconnection.close()\r\nend_time = time.time()\r\nprint (end_time - start_time)", "repo_name": "krzysiekgluch/python_test", "sub_path": "KPR_02.py", "file_name": "KPR_02.py", "file_ext": "py", "file_size_in_byte": 1541, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "os.chdir", "line_number": 14, "usage_type": "call"}, {"api_name": "cx_Oracle.connect", "line_number": 24, "usage_type": "call"}, {"api_name": "time.time", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 50, "usage_type": "call"}, {"api_name": "time.time", "line_number": 51, "usage_type": "call"}, {"api_name": "time.time", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "12052106573", "text": "import os\nimport requests\n\nfrom functools import lru_cache\n\nfrom . import BaseBackend\n\nISSUE_BACKEND_URL = os.environ[\"ISSUE_BACKEND_URL\"]\nISSUE_BACKEND_ENDPOINT = \"/issues/{issue}.json\"\nISSUE_BACKEND_API_KEY = os.environ[\"ISSUE_BACKEND_API_KEY\"]\n\n\nclass Backend(BaseBackend):\n @property\n @lru_cache()\n def session(self):\n s = requests.Session()\n s.headers.update({\"X-Redmine-API-Key\": ISSUE_BACKEND_API_KEY})\n\n return s\n\n @property\n @lru_cache()\n def issue(self):\n full_url = \"{}{}\".format(ISSUE_BACKEND_URL, ISSUE_BACKEND_ENDPOINT).format(\n issue=self.issue_number\n )\n\n response = self.session.get(full_url)\n\n response.raise_for_status()\n\n return response.json()[\"issue\"]\n\n @property\n def subject(self):\n return self.issue[\"subject\"]\n", "repo_name": "rca/issuebranch", "sub_path": "src/issuebranch/backends/redmine.py", "file_name": "redmine.py", "file_ext": "py", "file_size_in_byte": 834, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "12", "api": [{"api_name": "os.environ", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 17, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 15, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "71388473941", "text": "#!/bin/env python\n\n'''pyats_ios_example_job.py\n\nThis is an easypy job example intended to run the pyATS IOS example testscript.\n\n\nArguments:\n This script requires one script argument (testbed_file) and two optional\n script argument (ios1 and ios2) to be passed in when run under easypy for\n demonstration purposes.\n testbed_file: the path to testbed yaml file\n ios1: the device name defined in the testbed yaml file, if modified\n ios2: the device name defined in the testbed yaml file, if modified\n\nExamples:\n # to run under easypy\n bash$ easypy pyats_ios_example_job.py -testbed_file pyats_ios_example.yaml\n\nReferences:\n For the complete and up-to-date user guide on pyATS, visit:\n https://developer.cisco.com/site/pyats/docs/\n'''\n\n#\n# optional author information\n#\n__author__ = 'Wei Chen '\n__copyright__ = 'Copyright 2017, Cisco Systems'\n__email__ = 'pyats-support@cisco.com'\n__date__= 'Nov 15, 2017'\n\n\n#\n# import 
statements\n#\nimport os\nimport logging\nimport argparse\n\nfrom ats.easypy import run\n\n# easypy allows argument propagations\n# any unrecognized is left behind to allow custom parsers to handle\nparser = argparse.ArgumentParser()\nparser.add_argument('--ios1', dest = 'ios1_name', type = str, default = 'ios1')\nparser.add_argument('--ios2', dest = 'ios2_name', type = str, default = 'ios2')\n\ndef main():\n\n    # parse args\n    args, unknown = parser.parse_known_args()\n\n    # configure your log outputs level\n    #logging.getLogger('ats.connections').setLevel('DEBUG')\n\n    # Find the location of the script in relation to the job file\n    test_path = os.path.dirname(os.path.abspath(__file__))\n    testscript = os.path.join(test_path, 'pyats_ios_example.py')\n\n    # run it\n    run(testscript, **vars(args))\n", "repo_name": "CiscoDevNet/pyats-ios-sample", "sub_path": "pyats_ios_example_job.py", "file_name": "pyats_ios_example_job.py", "file_ext": "py", "file_size_in_byte": 1764, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 26, "dataset": "github-code", "pt": "12", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "ats.easypy.run", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "37288123686", "text": "import os\nimport re\nimport datetime\n\n#Data Settings\nUSING_WORDS = False\nMIN_UNIT_COUNT = 1\nANALYSIS_TYPE = \"word\" if USING_WORDS else \"character\"\n\n#File Settings\nROOT = \".\"\nDATA_DIR = os.path.join(ROOT, \"shakespeare_data\")\nPRINT_TO_FILE = True\nTIMESTAMP_FILE = False\n\ndef get_time_for_file():\n    return datetime.datetime.now().strftime(\"_%m.%d.%y-%H.%M.%S\")\n\nOUTPUT_FILE = ANALYSIS_TYPE + \"_data_analysis\"\nif TIMESTAMP_FILE:\n    OUTPUT_FILE += get_time_for_file()\nOUTPUT_FILE += \".txt\"\nOUTPUT_FILE = os.path.join(ROOT, \"data_analysis\", OUTPUT_FILE)\n\nfile_count = 0\ntext = \"\"\nfor file in os.listdir(DATA_DIR):\n    if file.endswith(\".txt\"):\n        file_count += 1\n        text += open(os.path.join(DATA_DIR, file)).read()\n\nif USING_WORDS:\n    text = text.lower()\n\nregex = r\"[A-Za-z']+(?:-[A-Za-z']+)*| |\\t|\\n\" if USING_WORDS else r\".|\\n\"\nunits = re.findall(regex, text)\nunit_counts = dict()\n\nfor unit in units: #create a dict mapping unit to count\n    unit_counts[unit] = unit_counts.get(unit, 0) + 1\n\nunit_counts = sorted(list(unit_counts.items()), key=lambda i: (-i[1], i[0])) #convert dict to list of tuples sort by count then unit\n\ntotal_units = 0\ntotal_top_units = 0\nnum_top_units = 0\nfor i in range(0, len(unit_counts)):\n    total_units += unit_counts[i][1]\n    if unit_counts[i][1] >= MIN_UNIT_COUNT:\n        num_top_units += 1\n        total_top_units += unit_counts[i][1]\n\npre_unk_len = len(unit_counts)\n\nless_than_min = 0\nfor i in range(len(unit_counts) - 1, -1, -1):\n    if unit_counts[i][1] < MIN_UNIT_COUNT:\n        less_than_min += unit_counts[i][1]\n        del unit_counts[i]\n\nunit_counts.append((\"<unk>\", less_than_min))\n\nnum_top_units_with_unk = num_top_units\nif less_than_min >= MIN_UNIT_COUNT:\n    num_top_units_with_unk += 1\n\nunique_percent = num_top_units / pre_unk_len * 100\ntotal_percent = total_top_units / total_units * 100\n\noutput = \"%d files analyzed\\n\\n\" % 
file_count\noutput += (\"%d unique \" + ANALYSIS_TYPE + \"s\\n%d total \" + ANALYSIS_TYPE + \"s\\n\\n\") % (pre_unk_len, total_units)\noutput += (\"Showing \" + ANALYSIS_TYPE + \"s with count >= %d (top %d)\\n\") % (MIN_UNIT_COUNT, num_top_units)\noutput += \"%.1f%% of unique, %.1f%% of total\\n\\n\" % (unique_percent, total_percent)\nif num_top_units_with_unk > num_top_units:\n    output += \"Sum of counts of non-top \" + ANALYSIS_TYPE + \"s included under <unk>\\n\"\n    output += \"<unk> not included in stats, but is ranked\\n\\n\"\noutput += \"%6s%16s%10s\\n\" % (\"Rank:\", \"Word:\", \"Count:\")\noutput += \"--------------------------------\"\n\nunit_counts.sort(key=lambda i: (-i[1], i[0])) #resort for <unk>\n\nfor i in range(0, num_top_units_with_unk):\n    w = unit_counts[i][0]\n    if w == \"\\n\":\n        w = \"<newline>\"\n    elif w == \"\\t\":\n        w = \"<tab>\"\n    elif w == \" \":\n        w = \"<space>\"\n    output += \"\\n%5d)%16s%10d\" % (i + 1, w, unit_counts[i][1])\n\nif PRINT_TO_FILE:\n    with open(OUTPUT_FILE, \"w\") as output_file:\n        output_file.write(output)\nelse:\n    print(output)", "repo_name": "brunofreeman/ShakespeareLSTM", "sub_path": "data_analysis.py", "file_name": "data_analysis.py", "file_ext": "py", "file_size_in_byte": 3004, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "72345009941", "text": "import sys, os\n\nCURRENT_TEST_DIR = os.getcwd()\nsys.path.append(CURRENT_TEST_DIR + \"/..\")\n\nfrom datetime import datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport slayerSNN as snn\nfrom learningStats import learningStats\nimport zipfile\nfrom slayer_layer import SlayerLayer\n\nnetParams = snn.params(\"network.yaml\")\n\n\ndef augmentData(event):\n    xs = 8\n    ys = 8\n    xjitter = np.random.randint(2 * xs) - xs\n    yjitter = np.random.randint(2 * ys) - ys\n    event.x += xjitter\n    event.y += yjitter\n    return event\n\n\n# Dataset definition\nclass nmnistDataset(Dataset):\n    def __init__(\n        self, datasetPath, sampleFile, samplingTime, sampleLength, augment=False\n    ):\n        self.path = datasetPath\n        self.samples = np.loadtxt(sampleFile).astype(\"int\")\n        self.samplingTime = samplingTime\n        self.nTimeBins = int(sampleLength / samplingTime)\n        self.augment = augment\n\n    def __getitem__(self, index):\n        inputIndex = self.samples[index, 0]\n        classLabel = self.samples[index, 1]\n\n        event = snn.io.read2Dspikes(self.path + str(inputIndex.item()) + \".bs2\")\n        if self.augment is True:\n            event = augmentData(event)\n        inputSpikes = event.toSpikeTensor(\n            torch.zeros((2, 34, 34, self.nTimeBins)), samplingTime=self.samplingTime\n        )\n\n        desiredClass = torch.zeros((10, 1, 1, 1))\n        desiredClass[classLabel, ...] 
= 1\n return inputSpikes, desiredClass, classLabel\n\n def __len__(self):\n return self.samples.shape[0]\n\n\n# Network definition\nclass Network(torch.nn.Module):\n def __init__(self, netParams):\n super(Network, self).__init__()\n # initialize slayer\n slayer = SlayerLayer(netParams[\"neuron\"], netParams[\"simulation\"])\n self.slayer = slayer\n\n # weight normalization\n self.conv1 = torch.nn.utils.weight_norm(\n slayer.conv(2, 16, 5, padding=1), name=\"weight\"\n )\n self.conv2 = torch.nn.utils.weight_norm(\n slayer.conv(16, 32, 3, padding=1), name=\"weight\"\n )\n self.conv3 = torch.nn.utils.weight_norm(\n slayer.conv(32, 64, 3, padding=1), name=\"weight\"\n )\n\n self.pool1 = slayer.pool(2)\n self.pool2 = slayer.pool(2)\n\n self.fc1 = torch.nn.utils.weight_norm(\n slayer.dense((8 * 8 * 64), 512), name=\"weight\"\n )\n self.fc2 = torch.nn.utils.weight_norm(slayer.dense(512, 10), name=\"weight\")\n\n # delays\n self.delay1 = slayer.delay(16)\n self.delay2 = slayer.delay(16)\n self.delay3 = slayer.delay(32)\n self.delay4 = slayer.delay(32)\n self.delay5 = slayer.delay(64 * 8 * 8)\n self.delay6 = slayer.delay(512)\n\n def forward(self, spike):\n # count.append(torch.sum(spike).item())\n\n spike = self.slayer.spike(self.conv1(self.slayer.psp(spike))) # 32, 32, 16\n spike = self.delay1(spike)\n\n spike = self.slayer.spike(self.pool1(self.slayer.psp(spike))) # 16, 16, 16\n spike = self.delay2(spike)\n\n spike = self.slayer.spike(self.conv2(self.slayer.psp(spike))) # 16, 16, 32\n spike = self.delay3(spike)\n\n spike = self.slayer.spike(self.pool2(self.slayer.psp(spike))) # 8, 8, 32\n spike = self.delay4(spike)\n\n spike = self.slayer.spike(self.conv3(self.slayer.psp(spike))) # 8, 8, 64\n spike = spike.reshape((spike.shape[0], -1, 1, 1, spike.shape[-1]))\n spike = self.delay5(spike)\n\n spike = self.slayer.spike(self.fc1(self.slayer.psp(spike))) # 10\n spike = self.delay6(spike)\n\n spike = self.slayer.spike(self.fc2(self.slayer.psp(spike))) # 10\n\n return spike\n\n def clamp(self):\n self.delay1.delay.data.clamp_(0, 64)\n self.delay2.delay.data.clamp_(0, 64)\n self.delay3.delay.data.clamp_(0, 64)\n self.delay4.delay.data.clamp_(0, 64)\n self.delay5.delay.data.clamp_(0, 64)\n self.delay6.delay.data.clamp_(0, 64)\n\n def gradFlow(self, path):\n gradNorm = lambda x: torch.norm(x).item() / torch.numel(x)\n\n grad = []\n grad.append(gradNorm(self.conv1.weight_g.grad))\n grad.append(gradNorm(self.conv2.weight_g.grad))\n grad.append(gradNorm(self.conv3.weight_g.grad))\n grad.append(gradNorm(self.fc1.weight_g.grad))\n grad.append(gradNorm(self.fc2.weight_g.grad))\n\n plt.figure()\n plt.semilogy(grad)\n plt.savefig(path + \"gradFlow.png\")\n plt.close()\n\n\nif __name__ == \"__main__\":\n # # Extract NMNIST samples\n # with zipfile.ZipFile(\"NMNISTsmall.zip\") as zip_file:\n # for member in zip_file.namelist():\n # if not os.path.exists(\"./\" + member):\n # zip_file.extract(member, \"./\")\n\n device = torch.device(\"cuda\")\n net = Network(netParams).to(device)\n error = snn.loss(netParams).to(device)\n\n # Custom NADAM optimizer\n optimizer = snn.utils.optim.Nadam(net.parameters(), lr=0.01, amsgrad=False)\n\n # Dataset and dataLoader instances.\n trainingSet = nmnistDataset(\n datasetPath=netParams[\"training\"][\"path\"][\"in\"],\n sampleFile=netParams[\"training\"][\"path\"][\"train\"],\n samplingTime=netParams[\"simulation\"][\"Ts\"],\n sampleLength=netParams[\"simulation\"][\"tSample\"],\n )\n trainLoader = DataLoader(\n dataset=trainingSet, batch_size=12, shuffle=False, num_workers=4\n 
)\n\n testingSet = nmnistDataset(\n datasetPath=netParams[\"training\"][\"path\"][\"in\"],\n sampleFile=netParams[\"training\"][\"path\"][\"test\"],\n samplingTime=netParams[\"simulation\"][\"Ts\"],\n sampleLength=netParams[\"simulation\"][\"tSample\"],\n )\n testLoader = DataLoader(\n dataset=testingSet, batch_size=12, shuffle=False, num_workers=4\n )\n\n # Learning stats instance.\n stats = learningStats()\n\n # # Visualize the network.\n # for i in range(5):\n # input, target, label = trainingSet[i]\n # snn.io.showTD(snn.io.spikeArrayToEvent(input.reshape((2, 34, 34, -1)).cpu().data.numpy()))\n\n # training loop\n for epoch in range(200):\n tSt = datetime.now()\n\n # Training loop.\n for i, (input, target, label) in enumerate(trainLoader, 0):\n # Move the input and target to correct GPU.\n input = input.to(device)\n target = target.to(device)\n\n # Forward pass of the network.\n output = net.forward(input)\n\n # Gather the training stats.\n stats.training.correctSamples += torch.sum(\n snn.predict.getClass(output) == label\n ).data.item()\n stats.training.numSamples += len(label)\n\n # Calculate loss.\n loss = error.numSpikes(output, target)\n\n # Reset gradients to zero.\n optimizer.zero_grad()\n\n # Backward pass of the network.\n loss.backward()\n\n # Update weights.\n optimizer.step()\n\n # Clamp delay\n net.clamp()\n\n # Gather training loss stats.\n stats.training.lossSum += loss.cpu().data.item()\n\n # Display training stats.\n stats.print(epoch, i, (datetime.now() - tSt).total_seconds())\n\n # Testing loop.\n # Same steps as Training loops except loss backpropagation and weight update.\n for i, (input, target, label) in enumerate(testLoader, 0):\n input = input.to(device)\n target = target.to(device)\n\n output = net.forward(input)\n\n stats.testing.correctSamples += torch.sum(\n snn.predict.getClass(output) == label\n ).data.item()\n stats.testing.numSamples += len(label)\n\n loss = error.numSpikes(output, target)\n stats.testing.lossSum += loss.cpu().data.item()\n stats.print(epoch, i)\n\n # Update stats.\n stats.update()\n\n # Plot the results.\n plt.figure(1)\n plt.semilogy(stats.training.lossLog, label=\"Training\")\n plt.semilogy(stats.testing.lossLog, label=\"Testing\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.legend()\n\n plt.figure(2)\n plt.plot(stats.training.accuracyLog, label=\"Training\")\n plt.plot(stats.testing.accuracyLog, label=\"Testing\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Accuracy\")\n plt.legend()\n\n plt.show()\n", "repo_name": "synsense/slayer-comparison", "sub_path": "archive/reproduce_slayer/nmnist.py", "file_name": "nmnist.py", "file_ext": "py", "file_size_in_byte": 8370, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "os.getcwd", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "slayerSNN.params", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.utils.data.Dataset", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 35, "usage_type": "call"}, {"api_name": 
"slayerSNN.io.read2Dspikes", "line_number": 44, "usage_type": "call"}, {"api_name": "slayerSNN.io", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "attribute"}, {"api_name": "slayer_layer.SlayerLayer", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 71, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "attribute"}, {"api_name": "torch.norm", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.numel", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.semilogy", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 151, "usage_type": "call"}, {"api_name": "slayerSNN.loss", "line_number": 153, "usage_type": "call"}, {"api_name": "slayerSNN.utils.optim.Nadam", "line_number": 156, "usage_type": "call"}, {"api_name": "slayerSNN.utils", "line_number": 156, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 175, "usage_type": "call"}, {"api_name": "learningStats.learningStats", "line_number": 180, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 189, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 189, "usage_type": "name"}, {"api_name": "torch.sum", "line_number": 201, "usage_type": "call"}, {"api_name": "slayerSNN.predict.getClass", "line_number": 202, "usage_type": "call"}, {"api_name": "slayerSNN.predict", "line_number": 202, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 225, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 225, "usage_type": "name"}, {"api_name": "torch.sum", "line_number": 235, "usage_type": "call"}, {"api_name": "slayerSNN.predict.getClass", "line_number": 236, "usage_type": "call"}, {"api_name": "slayerSNN.predict", "line_number": 236, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 248, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 248, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.semilogy", "line_number": 249, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 249, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.semilogy", "line_number": 250, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 250, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 251, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 251, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 255, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 257, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 259, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 259, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 260, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 262, "usage_type": "name"}]} +{"seq_id": "13939025990", "text": "\"\"\"Model setup for products.\"\"\"\nfrom django.db import models\nfrom sorl.thumbnail import ImageField\nfrom multiselectfield import MultiSelectField\nfrom taggit.managers import TaggableManager\nfrom django.contrib.auth.models import User\n\nPUB_STATUS = (\n ('PB', 'public'),\n ('PV', 'private'),\n)\n\nLENGTHS = (\n ('4\\\"', '4\\\"'),\n ('5\\\"', '5\\\"'),\n ('6\\\"', '6\\\"'),\n ('7\\\"', '7\\\"'),\n ('8\\\"', '8\\\"'),\n ('9\\\"', '9\\\"'),\n ('10\\\"', '10\\\"'),\n ('11\\\"', '11\\\"'),\n ('12\\\"', '12\\\"'),\n ('13\\\"', '13\\\"'),\n ('14\\\"', '14\\\"'),\n ('15\\\"', '15\\\"'),\n ('16\\\"', '16\\\"'),\n)\n\nDIAMS = (\n ('1/8\\\"', '1/8\\\"'),\n ('1/4\\\"', '1/4\\\"'),\n ('3/8\\\"', '3/8\\\"'),\n ('1/2\\\"', '1/2\\\"'),\n ('5/8\\\"', '5/8\\\"'),\n)\n\n\nclass Product(models.Model):\n \"\"\"Product model for store display.\"\"\"\n\n image = ImageField(upload_to='images')\n published = models.CharField(\n max_length=2,\n choices=PUB_STATUS,\n default='PV')\n date_created = models.DateTimeField(auto_now_add=True)\n date_published = models.DateTimeField(blank=True, null=True)\n name = models.CharField(max_length=100)\n price = models.DecimalField(null=True, max_digits=6, decimal_places=2)\n stock = models.IntegerField(null=True, blank=True)\n length = MultiSelectField(\n max_length=150,\n choices=LENGTHS,\n default='',\n blank=True)\n diameter = MultiSelectField(\n max_length=150,\n choices=DIAMS,\n default='',\n blank=True)\n is_knife = models.BooleanField(default=False)\n creator = models.ForeignKey(User,\n on_delete=models.CASCADE,\n )\n description = 
models.TextField(default='')\n color = models.TextField(\n max_length=500,\n blank=True)\n extras = models.TextField(\n max_length=500,\n blank=True)\n catagories = TaggableManager(blank=True)\n shipping_length = models.DecimalField(null=True, max_digits=5,\n decimal_places=2)\n shipping_width = models.DecimalField(null=True, max_digits=5,\n decimal_places=2)\n shipping_height = models.DecimalField(null=True, max_digits=5,\n decimal_places=2)\n shipping_weight = models.DecimalField(null=True, max_digits=5,\n decimal_places=2)\n\n def __str__(self):\n \"\"\"Print for admin.\"\"\"\n return self.name\n\n\nclass Service(models.Model):\n \"\"\"Service model for store display.\"\"\"\n\n image = ImageField(upload_to='images')\n published = models.CharField(\n max_length=2,\n choices=PUB_STATUS,\n default='PV')\n date_created = models.DateTimeField(auto_now_add=True)\n date_published = models.DateTimeField(blank=True, null=True)\n name = models.CharField(max_length=100)\n blurb = models.TextField(default='', blank=True)\n description = models.TextField(default='', blank=True)\n commission_fee = models.IntegerField(blank=True, default=0)\n price_range = models.CharField(\n max_length=15,\n default='',\n blank=True)\n limitations = models.TextField(max_length=500, default='', blank=True)\n extras = models.TextField(\n max_length=500,\n blank=True)\n warning = models.TextField(\n max_length=500,\n blank=True)\n\n def __str__(self):\n \"\"\"Print for admin.\"\"\"\n return self.name\n\n\nclass Discount(models.Model):\n \"\"\"Model for discount codes.\"\"\"\n\n code = models.CharField(max_length=30)\n code_type = models.CharField(max_length=20)\n value = models.CharField(max_length=10)\n code_state = models.BooleanField(default=True)\n description = models.CharField(max_length=250)\n prod = models.IntegerField(null=True, blank=True)\n prod_name = models.CharField(null=True, blank=True, max_length=30)\n\n\nclass UserServiceImage(models.Model):\n \"\"\"Model to store images uploaded for a requested service.\"\"\"\n\n image = ImageField(upload_to='service_images')\n used = models.BooleanField(default=False)\n\n def __str__(self):\n \"\"\"Print for admin.\"\"\"\n return str(self.id)\n", "repo_name": "cahudson94/Raven-Valley-Forge-Shop", "sub_path": "RVFS/catalog/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 4178, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "12", "api": [{"api_name": "django.db.models.Model", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "sorl.thumbnail.ImageField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 46, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 46, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": 
"django.db.models.IntegerField", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "multiselectfield.MultiSelectField", "line_number": 51, "usage_type": "call"}, {"api_name": "multiselectfield.MultiSelectField", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 61, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 61, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 62, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 63, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 65, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 65, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 66, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 66, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 69, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 69, "usage_type": "name"}, {"api_name": "taggit.managers.TaggableManager", "line_number": 72, "usage_type": "call"}, {"api_name": "django.db.models.DecimalField", "line_number": 73, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 73, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 75, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 75, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 77, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 77, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 79, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 79, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 87, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 87, "usage_type": "name"}, {"api_name": "sorl.thumbnail.ImageField", "line_number": 90, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 91, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 91, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 95, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 95, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 96, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 96, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 97, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 97, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 98, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 98, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 99, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 99, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 100, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 100, "usage_type": "name"}, {"api_name": 
"django.db.models.CharField", "line_number": 101, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 101, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 105, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 105, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 106, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 106, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 109, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 109, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 118, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 118, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 121, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 121, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 122, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 122, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 123, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 123, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 124, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 124, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 125, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 125, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 126, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 126, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 127, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 127, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 130, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 130, "usage_type": "name"}, {"api_name": "sorl.thumbnail.ImageField", "line_number": 133, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 134, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 134, "usage_type": "name"}]} +{"seq_id": "43892770", "text": "import serial\nimport serial.tools.list_ports\nfrom datetime import datetime\nimport calendar\n\nSETTIME = 6\n\n\nclass IMU_Watch(object):\n\n def __init__(self, serialrate=115200):\n # Initialise serial payload\n self.count = 0\n self.plSz = 0\n self.payload = bytearray()\n\n # Looks for a watch until it finds one\n running = False\n while True:\n ports = list(serial.tools.list_ports.comports())\n ports = [str(p.device) for p in ports if str(p.hwid).find('9D0F') > 0]\n if len(ports) == 0:\n print('Watch not found')\n else:\n break\n self.serialport = ports[0]\n # Initialise serial port\n self.ser = serial.Serial(self.serialport, serialrate)\n while not running:\n if self.ser.isOpen():\n print('Watch found at ', self.serialport)\n running = True\n else:\n print('Cannot open %s. Trying again...', self.serialport)\n self.ser.open()\n\n def serial_write(self, command, string=''):\n # Format:\n # | 255 | 255 | no. 
of bytes | command | filename/time | checksum |\n\n        header = [255, 255]\n        chksum = 254\n\n        payload_size = len(string) + 1\n\n        chksum += payload_size + command\n\n        self.ser.write(bytes([header[0]]))\n        self.ser.write(bytes([header[1]]))\n        self.ser.write(bytes([payload_size]))\n\n        self.ser.write(bytes([command]))\n\n        if string != '':\n            for i in range(len(string)):\n                self.ser.write(bytes([ord(string[i])]))\n                chksum += ord(string[i])\n\n        self.ser.write(bytes([chksum % 256]))\n\n    def serial_read(self):\n        if (self.ser.read() == b'\\xff') and (self.ser.read() == b'\\xff'):\n            self.count += 1\n            chksum = 255 + 255\n\n            sz = self.ser.read(2)\n            self.plSz = int.from_bytes(sz, 'little')\n            chksum += sum(sz)\n\n            self.payload = self.ser.read(self.plSz)\n            chksum += sum(self.payload)\n            chksum = bytes([chksum % 256])\n            _chksum = self.ser.read()\n\n            return _chksum == chksum\n        return False\n\n    def set_time(self):\n        # Sends current time from PC and reads the time set on the IMU watch\n        unix = calendar.timegm(datetime.now().timetuple())\n        self.serial_write(SETTIME, string=str(unix))\n        # self.statusBar().showMessage(\"Initialized IMU watch\")\n        print('Command sent: SETTIME - ', datetime.utcfromtimestamp(unix).strftime('%Y-%m-%d %H:%M:%S'))\n        if self.serial_read():\n            unix = int(self.payload.decode('utf-8'))\n            a = ('Time on Watch: ' + datetime.utcfromtimestamp(unix).strftime('%Y-%m-%d %H:%M:%S'))\n            return a", "repo_name": "SujithChristopher/MIRA", "sub_path": "support_py/timeset.py", "file_name": "timeset.py", "file_ext": "py", "file_size_in_byte": 2800, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "serial.tools.list_ports.comports", "line_number": 20, "usage_type": "call"}, {"api_name": "serial.tools", "line_number": 20, "usage_type": "attribute"}, {"api_name": "serial.Serial", "line_number": 28, "usage_type": "call"}, {"api_name": "calendar.timegm", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 80, "usage_type": "name"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "name"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 86, "usage_type": "name"}]} +{"seq_id": "7128639925", "text": "from telegram import Update, ReplyKeyboardRemove\nfrom telegram.ext import CallbackContext\n\nfrom tgbot.conversations import states\nfrom tgbot.conversations.core import JSON\nfrom tgbot.conversations.core import main_keyboard, siren_keyboard\nfrom tgbot.conversations.api import client as api\n\n\ndef district_choise(update: Update, context: CallbackContext[JSON, JSON, JSON]) -> int:\n    question = 'In which district should the RSU be found?'\n    context.user_data['choice'] = 'district'\n    update.message.reply_text(question, reply_markup=ReplyKeyboardRemove())\n\n    return states.DISTRICT_STATS\n\n\ndef district_stats(update: Update, context: CallbackContext[JSON, JSON, JSON]) -> int:\n    if not isinstance(update.message.text, str):\n        update.message.reply_text('Input text')\n        return states.DISTRICT_STATS\n\n    selected_district = update.message.text\n    districts = api.districts.get_by_name(selected_district)\n\n    if not districts:\n        update.message.reply_text('There is no such district in Tatarstan, please check your input')\n        return 
states.DISTRICT_STATS\n\n    district = districts[0]\n    district_sirens = api.districts.get_for_district(district.uid)\n    siren_name = [siren.name for siren in district_sirens]\n\n    update.message.reply_text(f'{district.name} district:', reply_markup=siren_keyboard(siren_name))\n\n    context.user_data['district_id'] = district.uid\n\n    return states.SIREN_STATS\n", "repo_name": "mchs-rsu/tgbot", "sub_path": "tgbot/conversations/districts.py", "file_name": "districts.py", "file_ext": "py", "file_size_in_byte": 1490, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "telegram.Update", "line_number": 10, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 10, "usage_type": "name"}, {"api_name": "tgbot.conversations.core.JSON", "line_number": 10, "usage_type": "name"}, {"api_name": "telegram.ReplyKeyboardRemove", "line_number": 13, "usage_type": "call"}, {"api_name": "tgbot.conversations.states.DISTRICT_STATS", "line_number": 15, "usage_type": "attribute"}, {"api_name": "tgbot.conversations.states", "line_number": 15, "usage_type": "name"}, {"api_name": "telegram.Update", "line_number": 18, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 18, "usage_type": "name"}, {"api_name": "tgbot.conversations.core.JSON", "line_number": 18, "usage_type": "name"}, {"api_name": "tgbot.conversations.states.DISTRICT_STATS", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tgbot.conversations.states", "line_number": 21, "usage_type": "name"}, {"api_name": "tgbot.conversations.api.client.districts.get_by_name", "line_number": 24, "usage_type": "call"}, {"api_name": "tgbot.conversations.api.client.districts", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tgbot.conversations.api.client", "line_number": 24, "usage_type": "name"}, {"api_name": "tgbot.conversations.states.DISTRICT_STATS", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tgbot.conversations.states", "line_number": 28, "usage_type": "name"}, {"api_name": "tgbot.conversations.api.client.districts.get_for_district", "line_number": 31, "usage_type": "call"}, {"api_name": "tgbot.conversations.api.client.districts", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tgbot.conversations.api.client", "line_number": 31, "usage_type": "name"}, {"api_name": "tgbot.conversations.core.siren_keyboard", "line_number": 34, "usage_type": "call"}, {"api_name": "tgbot.conversations.states.SIREN_STATS", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tgbot.conversations.states", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "42266291565", "text": "import pygame\r\nfrom pygame.sprite import Sprite\r\n\r\n\r\nclass Bullet(Sprite):\r\n    \"\"\"A class to manage bullets fired from the ship\"\"\"\r\n\r\n    def __init__(self, game_settings, screen, ship):\r\n        super().__init__()\r\n\r\n        self.screen = screen\r\n        # Create a bullet rect at (0, 0) and then set correct position.\r\n        self.rect = pygame.Rect(0, 0, game_settings.bullet_width, game_settings.bullet_height)\r\n\r\n        self.rect.centerx = ship.rect.centerx\r\n\r\n        self.rect.top = ship.rect.top\r\n\r\n        # Store the bullet's position as a decimal value.\r\n        self.y = float(self.rect.y)\r\n\r\n        self.color = game_settings.bullet_color\r\n        self.speed_factor = game_settings.bullet_speed_factor\r\n\r\n    def update(self):\r\n        \"\"\"Move the bullet upward on the screen.\"\"\"\r\n\r\n        self.y -= self.speed_factor # Update decimal position of the 
bullet.\r\n        self.rect.y = self.y # Update rect position.\r\n\r\n    def draw_bullet(self):\r\n        \"\"\"Draw the bullet to the screen.\"\"\"\r\n\r\n        pygame.draw.rect(self.screen, self.color, self.rect)\r\n\r\n\r\nif __name__ == '__main__':\r\n    print(\"Go to main file and run from there.\")\r\n", "repo_name": "skinan/Alien-Shooter-Game", "sub_path": "bullet.py", "file_name": "bullet.py", "file_ext": "py", "file_size_in_byte": 1144, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "12", "api": [{"api_name": "pygame.sprite.Sprite", "line_number": 5, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 34, "usage_type": "attribute"}]} +{"seq_id": "8066610641", "text": "import os\nimport time\n\nimport mysql.connector\nfrom dotenv import load_dotenv\nfrom slack_sdk import WebClient\nfrom slack_sdk.errors import SlackApiError\n\nfrom merchants_data import get_merchants_data\n\nload_dotenv()\nprevious_statuses = {}\n\nMYSQL_HOST = os.environ[\"MYSQL_HOST\"]\nMYSQL_PORT = int(os.environ[\"MYSQL_PORT\"])\nMYSQL_USER = os.environ[\"MYSQL_USER\"]\nMYSQL_PASSWORD = os.environ[\"MYSQL_PASSWORD\"]\nMYSQL_DB_NAME = os.environ[\"MYSQL_DB_NAME\"]\n\nSLACK_BOT_TOKEN = os.environ[\"SLACK_BOT_TOKEN\"]\nSLACK_CHANNEL_ID = os.environ[\"SLACK_CHANNEL_ID\"]\n\ncurrency_decimal_places = {\n    'TRX': 2,\n    'ETH': 6,\n    'BTC': 7,\n    'DOGE': 2,\n    'USDT': 2,\n    'USDC': 2,\n}\n\ndef format_amount(amount, currency):\n    decimal_places = currency_decimal_places.get(currency.upper(), 2)\n    return f\"{amount:.{decimal_places}f}\"\n\ndef create_db_connection():\n    return mysql.connector.connect(\n        host=MYSQL_HOST,\n        port=MYSQL_PORT,\n        user=MYSQL_USER,\n        password=MYSQL_PASSWORD,\n        database=MYSQL_DB_NAME,\n    )\n\nslack_client = WebClient(token=SLACK_BOT_TOKEN)\n\ndef get_status_text(status):\n    if status == 'in_progress':\n        return ':large_yellow_circle: Transaction in progress'\n    elif status == 'success':\n        return ':large_green_circle: Transaction success'\n    elif status == 'rejected':\n        return ':red_circle: Transaction decline'\n    elif status == 'pending':\n        return ':exclamation: Transaction awaiting provider approval @operations'\n    else:\n        return status\n\ndef send_slack_message(transaction, project_name, merchant_name):\n    amount_from_formatted = format_amount(transaction['amount_from'], transaction['currency_from'])\n    amount_to_formatted = format_amount(transaction['amount_to'], transaction['currency_to'])\n\n    message_template = f\"\"\">*Exchange*\n:man_in_tuxedo: {merchant_name} | {project_name}\n:currency_exchange: {amount_from_formatted} {transaction['currency_from'].upper()} -> {amount_to_formatted} {transaction['currency_to'].upper()}\n:chart_with_upwards_trend: Rate: {transaction['rate']}\n:money_with_wings: Fee: {transaction['fee_exchange']} {transaction['currency_from'].upper()}\n\n{get_status_text(transaction['status'])}\n\"\"\"\n    try:\n        response = slack_client.chat_postMessage(\n            channel=SLACK_CHANNEL_ID,\n            text=message_template\n        )\n        return response['ts']\n    except SlackApiError as e:\n        print(f\"Error sending message: {e}\")\n\ndef post_status_in_thread(transaction, ts):\n    status_text = get_status_text(transaction['status'])\n\n    try:\n        slack_client.chat_postMessage(\n            channel=SLACK_CHANNEL_ID,\n            text=status_text,\n            thread_ts=ts\n        )\n    except SlackApiError as e:\n        print(f\"Error posting status in thread: {e}\")\n\ndef update_slack_message(transaction, 
ts):\n current_status = transaction[\"status\"]\n previous_status = previous_statuses.get(transaction[\"id\"])\n\n if previous_status is None:\n previous_statuses[transaction[\"id\"]] = current_status\n elif current_status != previous_status:\n post_status_in_thread(transaction, ts)\n previous_statuses[transaction[\"id\"]] = current_status\n\ndef get_current_last_id():\n conn = create_db_connection()\n cursor = conn.cursor(dictionary=True)\n\n query = \"SELECT id FROM project_exchange_transactions ORDER BY id DESC LIMIT 1\"\n cursor.execute(query)\n result = cursor.fetchone()\n\n cursor.close()\n conn.close()\n\n if result:\n return result['id']\n return None\n\ndef monitor_transactions():\n merchants = get_merchants_data()\n last_processed_id = get_current_last_id()\n message_ts_map = {}\n\n while True:\n conn = create_db_connection()\n cursor = conn.cursor(dictionary=True)\n\n query = \"SELECT * FROM project_exchange_transactions\"\n if last_processed_id:\n query += f\" WHERE id > {last_processed_id}\"\n query += \" ORDER BY id DESC\"\n\n cursor.execute(query)\n result = cursor.fetchall()\n\n for row in result:\n merchant_name = merchants.get(row['owner_merchant_id'], 'Unknown')\n project_query = f\"SELECT name FROM projects WHERE id = {row['project_id']}\"\n cursor.execute(project_query)\n project = cursor.fetchone()\n project_name = project['name'] if project else 'Unknown'\n\n ts = send_slack_message(row, project_name, merchant_name)\n\n if row['id'] not in message_ts_map:\n message_ts_map[row['id']] = ts\n last_processed_id = row['id']\n else:\n ts = message_ts_map.get(row['id'])\n if ts:\n update_slack_message(row, ts)\n\n for transaction_id, ts in message_ts_map.items():\n query = f\"SELECT * FROM project_exchange_transactions WHERE id = {transaction_id}\"\n cursor.execute(query)\n row = cursor.fetchone()\n\n if row:\n update_slack_message(row, ts)\n\n cursor.close()\n conn.close()\n\n time.sleep(5)\n\nif __name__ == \"__main__\":\n monitor_transactions()\n\n", "repo_name": "nodeLogs/notify", "sub_path": "exchange_transactions.py", "file_name": "exchange_transactions.py", "file_ext": "py", "file_size_in_byte": 5386, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "mysql.connector.connector.connect", "line_number": 37, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 37, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 37, "usage_type": "name"}, {"api_name": "slack_sdk.WebClient", "line_number": 45, "usage_type": "call"}, {"api_name": "slack_sdk.errors.SlackApiError", "line_number": 77, "usage_type": "name"}, {"api_name": "slack_sdk.errors.SlackApiError", "line_number": 89, "usage_type": "name"}, {"api_name": "merchants_data.get_merchants_data", "line_number": 118, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 162, "usage_type": "call"}]} 
+{"seq_id": "37032162107", "text": "import time\n\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom temba.contacts.models import ContactGroup\nfrom temba.mailroom import queue_populate_dynamic_group\n\n\nclass Command(BaseCommand):\n help = \"Re-evaluates a smart group\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"group_uuid\", help=\"UUID of contact group to re-evaluate.\")\n\n def handle(self, group_uuid: str, *args, **kwargs):\n group = ContactGroup.objects.filter(uuid=group_uuid, group_type=ContactGroup.TYPE_SMART).first()\n if not group:\n raise CommandError(\"no such group\")\n\n self.stdout.write(\n f\"Queueing re-evaluation for group {group.name} with query '{group.query}' \"\n f\"and {group.get_member_count()} members...\"\n )\n\n # mark group as evaluating\n group.status = ContactGroup.STATUS_EVALUATING\n group.save(update_fields=(\"status\",))\n\n queue_populate_dynamic_group(group)\n\n while True:\n time.sleep(2)\n\n group.refresh_from_db()\n if group.status == ContactGroup.STATUS_READY:\n break\n\n self.stdout.write(f\" > {group.get_member_count()} members...\")\n\n self.stdout.write(f\"Re-evaluation complete with {group.get_member_count()} members.\")\n", "repo_name": "rapidpro/rapidpro", "sub_path": "temba/contacts/management/commands/reeval_group.py", "file_name": "reeval_group.py", "file_ext": "py", "file_size_in_byte": 1312, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 832, "dataset": "github-code", "pt": "12", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 9, "usage_type": "name"}, {"api_name": "temba.contacts.models.ContactGroup.objects.filter", "line_number": 16, "usage_type": "call"}, {"api_name": "temba.contacts.models.ContactGroup.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "temba.contacts.models.ContactGroup", "line_number": 16, "usage_type": "name"}, {"api_name": "temba.contacts.models.ContactGroup.TYPE_SMART", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.core.management.base.CommandError", "line_number": 18, "usage_type": "call"}, {"api_name": "temba.contacts.models.ContactGroup.STATUS_EVALUATING", "line_number": 26, "usage_type": "attribute"}, {"api_name": "temba.contacts.models.ContactGroup", "line_number": 26, "usage_type": "name"}, {"api_name": "temba.mailroom.queue_populate_dynamic_group", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "temba.contacts.models.ContactGroup.STATUS_READY", "line_number": 35, "usage_type": "attribute"}, {"api_name": "temba.contacts.models.ContactGroup", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "26638704288", "text": "# create model that classifies the data in data/winequality.csv\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import metrics\nimport pickle\nimport numpy as np\n\ndef create_model():\n # import the data from data/winequality.csv\n data = pd.read_csv('model/data/winequality.csv')\n\n # split the data into training and test sets\n train, test = train_test_split(data, test_size=0.2, random_state=42)\n\n # separate the features from the labels\n train_features = train.drop('quality', axis=1)\n train_labels = train['quality']\n\n test_features = test.drop('quality', axis=1)\n test_labels = test['quality']\n\n # save test data in file 
test_wine.csv\n test.to_csv('model/data/test_wine.csv', index=False)\n sc = StandardScaler()\n\n train_features = sc.fit_transform(train_features)\n test_features = sc.transform(test_features)\n\n # create a model\n model = GradientBoostingClassifier()\n\n # train the model\n model.fit(train_features, train_labels)\n\n # evaluate the model\n predictions = model.predict(test_features)\n\n # save the metrics in file metrics.txt\n with open('model/metrics.txt', 'w') as f:\n f.write('For the accuracy:' + str(metrics.accuracy_score(test_labels, predictions)) + '\\n')\n f.write('For the precision: ' + str(metrics.precision_score(test_labels, predictions, average='weighted')) + '\\n')\n f.write('For the recall:' + str(metrics.recall_score(test_labels, predictions, average='weighted')) + '\\n')\n f.write('For the F1 score:' + str(metrics.f1_score(test_labels, predictions, average='weighted')))\n\n # save the model in file model.pkl\n pickle.dump(model, open('model/model.pkl', 'wb'))\n", "repo_name": "Unikarah/Wine-MLOps-project", "sub_path": "src/model/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1826, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 42, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 42, "usage_type": "name"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 43, "usage_type": "name"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 44, "usage_type": "name"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 45, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "21758253341", "text": "import os \nfrom glob import glob\nfrom setuptools import setup\n\npackage_name = 'robot_spawner_pkg'\ncur_directory_path = os.path.abspath(os.path.dirname(__file__))\n\nsetup(\n name=package_name,\n version='0.0.0',\n packages=[package_name],\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/' + package_name, ['package.xml']),\n (os.path.join('share', package_name,'launch'), glob('launch/*.launch.py')),\n (os.path.join('share', package_name,'worlds/'), glob('./worlds/*')),\n (os.path.join('share', package_name,'models/Maze_ql_1/'), glob('./models/Maze_ql_1/*')),\n (os.path.join('share', package_name,'models/basic_robot'), glob('./models/basic_robot/*')),\n (os.path.join('share', package_name,'models/globe'), glob('./models/globe/*'))\n\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='ubuntu',\n maintainer_email='ubuntu@todo.todo',\n description='TODO: Package description',\n license='TODO: License declaration',\n tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n 'spawn_demo = robot_spawner_pkg.spawn_demo:main',\n 'spawn_scenario = 
robot_spawner_pkg.spawn_scenario:main'\n\n ],\n },\n)\n", "repo_name": "laurencourtney/Robotics", "sub_path": "test_final/src/template/robot_spawner_pkg/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1300, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "23560749711", "text": "import discord\nfrom discord.ext import commands\nimport os\nimport random\nfrom discord.ext.commands.core import command\nimport praw\n\nreddit = praw.Reddit(client_id=\"5-_GzjyTOOhukQ\",\n client_secret=os.environ['REDDIT_SECRET'],\n username=\"idioticspaceman\",\n password=os.environ['REDDIT_PASS'],\n user_agent=\"Economy-BOT\")\n\nclass Images(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(help=\"Use the command to get a meme\", usage=\"`#meme`\", aliases=['memes'])\n async def meme(self, ctx):\n async with ctx.typing():\n subreddit = reddit.subreddit(\"memes\")\n all_subs = []\n top = subreddit.top(limit=50)\n\n for submission in top:\n all_subs.append(submission)\n random_sub = random.choice(all_subs)\n name = random_sub.title\n url = random_sub.url\n meme_embed = discord.Embed(title=name, colour=discord.Colour.blue())\n meme_embed.set_image(url=url)\n await ctx.send(embed=meme_embed)\n\n @commands.command(aliases=['dogs', 'bark'], help=\"Use the command to see cute pictures of dogs!\", usage=\"`#dog`\")\n async def dog(self, ctx):\n async with ctx.typing():\n subreddit = reddit.subreddit(\"dogs\")\n all_subs = []\n top = subreddit.top(limit=50)\n\n for submission in top:\n all_subs.append(submission)\n random_sub = random.choice(all_subs)\n name = random_sub.title\n url = random_sub.url\n dog_embed = discord.Embed(title=name, colour=discord.Colour.teal())\n dog_embed.set_image(url=url)\n await ctx.send(embed=dog_embed)\n\n @commands.command(aliases=['cats', 'meow'], help=\"Use the command to see cats! 
MEOWWWW!\", usage=\"`#cat`\")\n async def cat(self, ctx):\n async with ctx.typing():\n subreddit = reddit.subreddit(\"cats\")\n all_subs = []\n top = subreddit.top(limit=50)\n\n for submission in top:\n all_subs.append(submission)\n random_sub = random.choice(all_subs)\n name = random_sub.title\n url = random_sub.url\n cat_embed = discord.Embed(title=name, colour=discord.Colour.teal())\n cat_embed.set_image(url=url)\n await ctx.send(embed=cat_embed)\n\n @commands.command(aliases=['hoot', 'owls'], help=\"Use the command to see owls! HOOT HOOT\", usage=\"`#owl`\")\n async def owl(self, ctx):\n async with ctx.typing():\n subreddit = reddit.subreddit(\"owls\")\n all_subs = []\n top = subreddit.top(limit=50)\n\n for submission in top:\n all_subs.append(submission)\n random_sub = random.choice(all_subs)\n name = random_sub.title\n url = random_sub.url\n owl_embed = discord.Embed(title=name, colour=discord.Colour.teal())\n owl_embed.set_image(url=url)\n await ctx.send(embed=owl_embed)\n\n @commands.command(aliases=['foxxy'], help=\"Use the command to see foxes\", usage=\"`#fox`\")\n async def fox(self, ctx):\n async with ctx.typing():\n subreddit = reddit.subreddit(\"foxes\")\n all_subs = []\n top = subreddit.top(limit=50)\n\n for submission in top:\n all_subs.append(submission)\n random_sub = random.choice(all_subs)\n name = random_sub.title\n url = random_sub.url\n fox_embed = discord.Embed(title=name, colour=discord.Colour.teal())\n fox_embed.set_image(url=url)\n await ctx.send(embed=fox_embed)\n \n @commands.command(aliases=['lizzards', 'lizzard'], help=\"Use the command to see a lizzard\", usage=\"`#lizziboi`\")\n async def lizziboi(self, ctx):\n async with ctx.typing():\n subreddit = reddit.subreddit(\"lizards\")\n all_subs = []\n top = subreddit.top(limit=50)\n\n for submission in top:\n all_subs.append(submission)\n random_sub = random.choice(all_subs)\n name = random_sub.title\n url = random_sub.url\n liz_embed = discord.Embed(title=name, colour=discord.Colour.teal())\n liz_embed.set_image(url=url)\n await ctx.send(embed=liz_embed)\n\n\n\ndef setup(bot):\n bot.add_cog(Images(bot))\n", "repo_name": "nothingButSyntaxError/DisBot", "sub_path": "cogs/images.py", "file_name": "images.py", "file_ext": "py", "file_size_in_byte": 4458, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "praw.Reddit", "line_number": 8, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Cog", "line_number": 14, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 14, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 27, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 30, "usage_type": "call"}, {"api_name": "discord.Colour.blue", "line_number": 30, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 30, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 18, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 18, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 43, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 46, "usage_type": "call"}, {"api_name": "discord.Colour.teal", "line_number": 46, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 46, "usage_type": "attribute"}, {"api_name": 
"discord.ext.commands.command", "line_number": 34, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 34, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 59, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 62, "usage_type": "call"}, {"api_name": "discord.Colour.teal", "line_number": 62, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 62, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 50, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 50, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 75, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 78, "usage_type": "call"}, {"api_name": "discord.Colour.teal", "line_number": 78, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 78, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 66, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 66, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 91, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 94, "usage_type": "call"}, {"api_name": "discord.Colour.teal", "line_number": 94, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 94, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 82, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 82, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 107, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 110, "usage_type": "call"}, {"api_name": "discord.Colour.teal", "line_number": 110, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 110, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 98, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 98, "usage_type": "name"}]} +{"seq_id": "33881553415", "text": "#!/bin/python3.8\nfrom typing import Tuple\n\nimport torch\nimport torch.nn as nn\n\nfrom src.ai.architectural_components import ResidualBlock\nfrom src.ai.architectures.bc_deeply_supervised_auto_encoder import Net as BaseNet\n#from src.ai.architectures.auto_encoder_deeply_supervised_share_weights import Net as BaseNet\nfrom src.ai.base_net import ArchitectureConfig\nfrom src.ai.utils import mlp_creator, conv_creator\nfrom src.core.data_types import Action\nfrom src.core.logger import get_logger, cprint\nfrom src.core.utils import get_filename_without_extension\n\n\"\"\"\nDeep Supervision net with discriminator.\nDiscriminator is used to improve the predictions from the network on unlabeled real data.\nDiscriminator discriminates between simulated (training) data prediction (0) and real (test) data prediction (1).\nThe main network can then be trained also on unlabeled real data to minimize the discriminators output.\n\"\"\"\n\n\nclass Net(BaseNet):\n\n def __init__(self, config: ArchitectureConfig, quiet: bool = False):\n super().__init__(config=config, quiet=True)\n self._deeply_supervised_parameter_names = [name for name, _ in self.named_parameters()]\n self._discriminator = conv_creator(channels=[1, 3, 6, 9],\n kernel_sizes=[5, 5, 5],\n strides=[3, 3, 3],\n activation=nn.LeakyReLU(),\n output_activation=nn.LeakyReLU(),\n batch_norm=self._config.batch_normalisation)\n self._discriminator_decision = mlp_creator([9*6*6, 1], output_activation=nn.Sigmoid(),\n 
bias_in_last_layer=False)\n if not quiet:\n self._logger = get_logger(name=get_filename_without_extension(__file__),\n output_path=config.output_path,\n quiet=False)\n self.initialize_architecture()\n cprint(f'Started.', self._logger)\n\n def deeply_supervised_parameters(self, recurse=True):\n for name, param in self.named_parameters(recurse=recurse):\n if name in self._deeply_supervised_parameter_names:\n yield param\n\n def discriminator_parameters(self, recurse=True):\n for p in self._discriminator.parameters(recurse=recurse):\n yield p\n for p in self._discriminator_decision.parameters(recurse=recurse):\n yield p\n\n def forward_with_all_outputs(self, inputs, train: bool = False) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor,\n torch.Tensor, torch.Tensor]:\n for p in self.deeply_supervised_parameters():\n p.requires_grad = train\n return super().forward_with_all_outputs(inputs, train=train)\n\n def discriminate(self, predictions, train: bool = False) -> torch.Tensor:\n \"\"\"\n Evaluate predictions on whether they come from simulated (0) or real (1) data\n :param predictions: NxCxHxW with CxHxW corresponding to the output size\n :param train: train the discriminator part or evaluate\n :return: output 0 --> simulated, 1 --> real\n \"\"\"\n self._discriminator.train(train)\n for p in self.discriminator_parameters():\n p.requires_grad = train\n feature = self._discriminator(predictions).view(-1, 9*6*6)\n return self._discriminator_decision(feature)\n", "repo_name": "kkelchte/imitation-learning-codebase", "sub_path": "src/ai/architectures/auto_encoder_deeply_supervised_with_discriminator.py", "file_name": "auto_encoder_deeply_supervised_with_discriminator.py", "file_ext": "py", "file_size_in_byte": 3546, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "12", "api": [{"api_name": "src.ai.architectures.bc_deeply_supervised_auto_encoder.Net", "line_number": 24, "usage_type": "name"}, {"api_name": "src.ai.base_net.ArchitectureConfig", "line_number": 26, "usage_type": "name"}, {"api_name": "src.ai.utils.conv_creator", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "src.ai.utils.mlp_creator", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn.Sigmoid", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "src.core.logger.get_logger", "line_number": 38, "usage_type": "call"}, {"api_name": "src.core.utils.get_filename_without_extension", "line_number": 38, "usage_type": "call"}, {"api_name": "src.core.logger.cprint", "line_number": 42, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 56, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 61, "usage_type": "attribute"}]} +{"seq_id": "32130576011", "text": "import time\nimport pandas as pd\nfrom mpi4py import MPI\n\nstart = time.time()\ncomm = MPI.COMM_WORLD\nsize = comm.Get_size()\nrank = comm.Get_rank()\ndataset = 'datasets/Combined_Flights_2021.csv'\n\n\n# find_max fn. 
iterates over the aggregated input and picks up the key corresponding to the highest integer value\ndef find_max(final_out: dict):\n most_cancelled = \"\"\n maximum = 0\n for k, val in final_out.items():\n if val > maximum:\n most_cancelled = k\n maximum = val\n\n print(f'{most_cancelled} had the most canceled flights in September 2021')\n end = time.time()\n print(f'time taken with (MPI execution): {round(end - start, 2)} second(s)')\n\n\nif rank == 0:\n start = time.time()\n \"\"\"\n Master worker (with rank 0) is responsible for distributing the workload evenly \n between slave workers.\n \"\"\"\n\n def distribute_rows(n_rows: int, n_processes):\n reading_info = []\n skip_rows = 1\n reading_info.append([n_rows - skip_rows, skip_rows])\n skip_rows = n_rows\n\n for _ in range(1, n_processes - 1):\n reading_info.append([n_rows, skip_rows])\n skip_rows = skip_rows + n_rows\n reading_info.append([None, skip_rows])\n return reading_info\n\n\n slave_workers = size - 1\n # distributing data among the slave workers\n chunk_distribution = distribute_rows(n_rows=1600000, n_processes=slave_workers)\n\n # distribute tasks to slaves\n for worker in range(1, size):\n chunk_to_process = worker - 1\n comm.send(chunk_distribution[chunk_to_process], dest=worker)\n\n # receive and aggregate results from slaves\n results = []\n for worker in range(1, size): # receive\n result = comm.recv(source=worker)\n results.append(result)\n\n out = {}\n for r in results:\n for key, value in r.to_dict().items():\n if key in out:\n out[key] = out[key] + value\n else:\n out[key] = value\n find_max(out)\n\n\n# All workers perform processing on the given chunk of data and return the output to master\nelif rank > 0:\n chunk_to_process = comm.recv()\n inp = pd.read_csv(dataset, nrows=chunk_to_process[0], skiprows=chunk_to_process[1], header=None)\n # In order to filter out date values using \".dt.month\" and \".dt.year\" changing datatype of FlightDate column to\n # datetime\n inp.isetitem(0, pd.to_datetime(inp.iloc[:, 0]))\n # filtering dataframe to fetch cancelled flights in Sep. 
2021\n filtered_data = inp[(inp.iloc[:, 4] == True) & (inp.iloc[:, 0].dt.month == 9) & (inp.iloc[:, 0].dt.year == 2021)]\n # calculating number of flights cancelled per airline\n result = filtered_data.iloc[:, 1].value_counts()\n # sending processed result to master\n comm.send(result, dest=0)\n\n", "repo_name": "msrana25/Distributed-System-Concepts-1", "sub_path": "Implementation/Q1/T3.py", "file_name": "T3.py", "file_ext": "py", "file_size_in_byte": 2797, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "time.time", "line_number": 5, "usage_type": "call"}, {"api_name": "mpi4py.MPI.COMM_WORLD", "line_number": 6, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 6, "usage_type": "name"}, {"api_name": "time.time", "line_number": 22, "usage_type": "call"}, {"api_name": "time.time", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 74, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "72305378900", "text": "''' script contains preprocessing pipelines for linear and tree based models, all transformers and helper functions used in pipelines ''' \n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.preprocessing import OrdinalEncoder, StandardScaler\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.metrics import accuracy_score, log_loss\n\n''' helper functions '''\n\ndef feature_reduction_pipeline(model, X, y):\n ''' reduce number of features using perturbation technique'''\n\n model.fit(X,y)\n \n base_acc = accuracy_score(y, model.predict(X))\n base_log_loss = log_loss( y, model.predict_proba(X)[:,1] )\n \n best_features_idx = []\n \n for i in range(X.shape[1]):\n\n hold = X.copy()\n np.random.shuffle(X[:, i])\n\n curr_acc = accuracy_score( y, model.predict(X) )\n diff_acc = curr_acc - base_acc\n\n curr_log_loss = log_loss( y, model.predict_proba(X)[:,1] )\n diff_log_loss = curr_log_loss - base_log_loss\n \n if diff_log_loss > 0: # if diff_acc < 0 and diff_log_loss > 0:\n best_features_idx.append(i)\n\n X = hold\n \n if not best_features_idx:\n best_features_idx = list(range(X.shape[1]))\n \n return np.array(best_features_idx)\n\n\ndef feature_reduction_ann_pipeline(model, X, y):\n ''' reduce number of features for ann using perturbation technique'''\n \n model.set_params(input_shape=X.shape[1:])\n model.fit(X,y)\n \n base_acc = accuracy_score(y, model.predict(X))\n base_log_loss = log_loss( y, model.predict_proba(X)[:,1] )\n \n best_features_idx = []\n \n for i in range(X.shape[1]):\n\n hold = X.copy()\n np.random.shuffle(X[:, i])\n\n curr_acc = accuracy_score( y, model.predict(X) )\n diff_acc = curr_acc - base_acc\n\n curr_log_loss = log_loss( y, model.predict_proba(X)[:,1] )\n diff_log_loss = curr_log_loss - base_log_loss\n\n if diff_log_loss > 0: # if diff_acc < 0 and diff_log_loss > 0:\n best_features_idx.append(i)\n\n X = hold\n \n if not best_features_idx:\n best_features_idx = list(range(X.shape[1]))\n \n return np.array(best_features_idx)\n\n\ndef feature_reduction_rnn_pipeline(model, X, y):\n ''' reduce number of features for rnn using perturbation technique'''\n\n X_reshaped = X.reshape(X.shape[0], 1, X.shape[1])\n \n model.set_params(input_shape=X_reshaped.shape[1:])\n model.fit(X_reshaped, y)\n \n base_acc = accuracy_score(y, model.predict(X_reshaped))\n base_log_loss = log_loss( y, 
model.predict_proba(X_reshaped)[:,1] )\n best_features_idx = []\n \n for i in range(X.shape[1]):\n\n hold = X_reshaped.copy()\n np.random.shuffle(X_reshaped[:, :, i])\n\n curr_acc = accuracy_score( y, model.predict(X_reshaped) )\n diff_acc = curr_acc - base_acc\n curr_log_loss = log_loss( y, model.predict_proba(X_reshaped)[:,1] ) \n diff_log_loss = curr_log_loss - base_log_loss\n\n if diff_log_loss > 0: # if diff_acc < 0 and diff_log_loss > 0:\n best_features_idx.append(i)\n\n X_reshaped = hold\n \n if not best_features_idx:\n best_features_idx = list(range(X.shape[1]))\n \n return np.array(best_features_idx)\n\n \ndef target_mean_encoding(df, cat_name, target, weight=10):\n ''' function returns smoothed target mean encoding '''\n\n # Compute the global mean\n mean = df[target].mean()\n\n # Compute the number of values and the mean of each group\n agg = df.groupby(cat_name)[target].agg(['count', 'mean'])\n\n counts = agg['count']\n means = agg['mean']\n\n # Compute the \"smoothed\" means\n smooth = (counts * means + weight * mean) / (counts + weight)\n\n return smooth, mean\n\n\n''' pipeline transformers '''\n\nclass DataFrameSelector(BaseEstimator, TransformerMixin):\n ''' select columns from dataframe and return numpy array '''\n def __init__(self, attribute_names):\n self.attribute_names = attribute_names\n \n def fit(self, X, y=None):\n return self\n \n def transform(self, X, y=None):\n return np.array(X[self.attribute_names])\n\n\nclass TwoColumnScaler(BaseEstimator, TransformerMixin):\n ''' take two columns and scale them together, keeping the original ratio between them '''\n def __init__(self, scaler):\n self.scaler = scaler\n \n def fit(self, X, y=None):\n columns_merged = np.concatenate((X[:,0], X[:,1]), axis=0)\n self.scaler.fit(columns_merged.reshape(-1,1))\n return self\n \n def transform(self, X, y=None):\n X1 = self.scaler.transform(X[:, 0].reshape(-1,1))\n X2 = self.scaler.transform(X[:, 1].reshape(-1,1))\n X_new = np.concatenate((X1, X2), axis=1)\n return X_new\n\n \nclass DictionaryEncoder(BaseEstimator, TransformerMixin):\n ''' encoding labels using dictionary '''\n def __init__(self, dictionary):\n self.dictionary = dictionary\n \n def fit(self, X, y=None):\n return self\n \n def transform(self, X, y=None):\n return X.replace(self.dictionary).values\n \n\nclass ToDataFrame(BaseEstimator, TransformerMixin):\n ''' transform numpy array to dataframe '''\n def __init__(self, columns):\n self.columns = columns\n \n def fit(self, X, y=None):\n return self\n \n def transform(self, X, y=None):\n return pd.DataFrame(X, columns=self.columns)\n\n \nclass Array3dTransformer(BaseEstimator, TransformerMixin):\n ''' transform 2d numpy array to 3d numpy array '''\n def fit(self, X, y=None):\n return self\n \n def transform(self, X, y=None):\n return X.reshape(*X.shape,1)\n\n \nclass ImportantFeaturesSelector(BaseEstimator, TransformerMixin):\n ''' select most important features from numpy array'''\n def __init__(self, model, model_type):\n self.model = model\n self.model_type = model_type\n \n def fit(self, X, y=None):\n if self.model_type == 'basic':\n self.important_features = feature_reduction_pipeline(self.model, X, y)\n elif self.model_type == 'ann':\n self.important_features = feature_reduction_ann_pipeline(self.model, X, y)\n elif self.model_type == 'rnn':\n self.important_features = feature_reduction_rnn_pipeline(self.model, X, y)\n else:\n raise TypeError('model_type has to be basic, ann or rnn')\n return self\n \n def transform(self, X, y=None):\n return X[:, self.important_features]\n 
\n\nclass TargetMeanEncodingTransformer(BaseEstimator, TransformerMixin):\n ''' transform feature using target mean encoding'''\n def __init__(self, cat_name, target):\n self.cat_name = cat_name\n self.target = target\n \n def fit(self, X, y=None):\n self.target_dict, self.global_mean = target_mean_encoding(X, self.cat_name, self.target)\n return self\n\n def transform(self, X, y=None):\n X_arr = np.zeros(len(X)).reshape(-1,1)\n for i in range(len(X_arr)):\n try:\n X_arr[i] = self.target_dict.loc[ X[self.cat_name].iloc[i] ]\n except KeyError: # category doesnt occur in training set\n X_arr[i] = self.global_mean\n return X_arr\n \n \n''' basic pipelines ''' \n\n# read raw data\nX_train_set = pd.read_csv('./preprocessed_data/train_set_stage2.csv', index_col=0)\n\n# create list of team names for ordinal encoder\nhome_team_names = np.unique(X_train_set['HomeTeam'])\naway_team_names = np.unique(X_train_set['AwayTeam'])\nteam_names=[home_team_names, away_team_names]\n\n# assign manually features to the groups\ntarget_col = ['FTR']\n\nteams_cols =['HomeTeam','AwayTeam']\n\nteams_ratio_cols = ['HomeTeamWinRatio', 'AwayTeamWinRatio']\n\nteams_ratio_cat_cols = ['HomeTeamWinRatio_Cat', 'AwayTeamWinRatio_Cat']\n\nlast_year_postion_cols = ['HomeTeamLastYearPosition', 'AwayTeamLastYearPosition']\n\ntotal_cols = ['HomeTeamGoalsScored','AwayTeamGoalsScored','HomeTeamGoalsLost','AwayTeamGoalsLost','HomeTeamShootsMade', \n 'AwayTeamShootsMade','HomeTeamTargetShootsMade','AwayTeamTargetShootsMade','HomeTeamCorners','AwayTeamCorners',\n 'HomeTeamTotalPoints','AwayTeamTotalPoints']\n\ntotal_cat_cols = ['HomeTeamTargetShootsMade_Cat', 'AwayTeamTargetShootsMade_Cat', 'HomeTeamGoalsScored_Cat',\n 'AwayTeamGoalsScored_Cat', 'HomeTeamGoalsLost_Cat','AwayTeamGoalsLost_Cat', 'HomeTeamShootsMade_Cat',\n 'AwayTeamShootsMade_Cat','HomeTeamCorners_Cat', 'AwayTeamCorners_Cat', 'HomeTeamTotalPoints_Cat',\n 'AwayTeamTotalPoints_Cat',]\n\nlast_matches_results_cols = ['HomeTeamLast1Match','AwayTeamLast1Match', 'HomeTeamLast2Match', 'AwayTeamLast2Match',\n 'HomeTeamLast3Match', 'AwayTeamLast3Match', 'HomeTeamLast4Match','AwayTeamLast4Match', \n 'HomeTeamLast5Match', 'AwayTeamLast5Match',]\n\nlast_matches_points_cols = ['HomeTeamPointsFromLast3Matches','AwayTeamPointsFromLast3Matches', \n 'HomeTeamPointsFromLast5Matches','AwayTeamPointsFromLast5Matches', \n 'HomeTeamPointsFromLast10Matches','AwayTeamPointsFromLast10Matches']\n\nbinary_cols = ['HomeTeamWinStreak3', 'HomeTeamWinStreak5', 'HomeTeamLossStreak3','HomeTeamLossStreak5', \n 'AwayTeamWinStreak3', 'AwayTeamWinStreak5','AwayTeamLossStreak3', 'AwayTeamLossStreak5',\n 'IsHomeTeamRegulars', 'IsAwayTeamRegulars', 'IsHomeTeamRookie', 'IsAwayTeamRookie']\n\ndiff_cols = ['HomeTeamGoalsDifference', 'AwayTeamGoalsDifference','TotalGoalsDifference','DifferenceTotalPoints',\n 'Difference1MatchPoints', 'Difference3MatchesPoints','Difference5MatchesPoints','Difference10MatchesPoints',\n 'DifferenceInShoots', 'DifferenceInTargetShoots', 'DifferenceInCorners','DifferenceInLastYearPosition'] \n\ndiff_cat_cols = ['HomeTeamGoalsDifference_Cat','AwayTeamGoalsDifference_Cat', 'TotalGoalsDifference_Cat',\n 'DifferenceTotalPoints_Cat', 'Difference10MatchesPoints_Cat','DifferenceInShoots_Cat',\n 'DifferenceInTargetShoots_Cat','DifferenceInCorners_Cat']\n\n\n''' Base pipeline for tree-based models '''\n\nstandard_scaling_base_pipeline = Pipeline([\n ('select_cols', DataFrameSelector([*binary_cols, *teams_ratio_cols, *last_matches_points_cols, \n *last_matches_results_cols, 
*last_year_postion_cols, *diff_cols]) ),\n ('standard_scaler', StandardScaler() )\n])\n\n# label enocoding team names\nordinal_encoder_pipeline = Pipeline([\n ('select_cols', DataFrameSelector([*teams_cols]) ),\n ('ordinal_encoder', OrdinalEncoder(categories=team_names) ),\n ('standard_scaler', StandardScaler() )\n])\n\n# process two features to the same scale(leaving dependencies between them)\ngoals_scored_pipeline = Pipeline([\n ('select_cols', DataFrameSelector([total_cols[0], total_cols[1]]) ),\n ('two_column_scaler', TwoColumnScaler(scaler=StandardScaler() ))\n])\n\ngoals_lost_pipeline = Pipeline([\n ('select_cols', DataFrameSelector([total_cols[2], total_cols[3]]) ),\n ('two_column_scaler', TwoColumnScaler(scaler=StandardScaler() ))\n])\n\nshoot_made_pipeline = Pipeline([\n ('select_cols', DataFrameSelector([total_cols[4], total_cols[5]]) ),\n ('two_column_scaler', TwoColumnScaler(scaler=StandardScaler() ))\n])\n\ntotal_shoot_made_pipeline = Pipeline([\n ('select_cols', DataFrameSelector([total_cols[6], total_cols[7]]) ),\n ('two_column_scaler', TwoColumnScaler(scaler=StandardScaler() ))\n])\n\ncorners_pipeline = Pipeline([\n ('select_cols', DataFrameSelector([total_cols[8], total_cols[9]]) ),\n ('two_column_scaler', TwoColumnScaler(scaler=StandardScaler() ))\n])\n\ntotal_points_pipeline = Pipeline([\n ('select_cols', DataFrameSelector([total_cols[10], total_cols[11]]) ),\n ('two_column_scaler', TwoColumnScaler(scaler=StandardScaler() ))\n])\n\nbasic_preprocess_pipeline = FeatureUnion(transformer_list=[\n ('standard_scaling_pipeline', standard_scaling_base_pipeline),\n ('ordinal_encoder_pipeline', ordinal_encoder_pipeline),\n ('goals_scored_pipeline', goals_scored_pipeline),\n ('goals_lost_pipeline', goals_lost_pipeline),\n ('shoot_made_pipeline', shoot_made_pipeline),\n ('total_shoot_made_pipeline', total_shoot_made_pipeline),\n ('corners_pipeline', corners_pipeline),\n ('total_points_pipeline', total_points_pipeline),\n])\n\n\n''' Pipeline for linear models '''\n\nbase_cat_pipeline = Pipeline([\n ('select_cols', DataFrameSelector([*binary_cols]) ),\n ('standard_scaler', StandardScaler() )\n])\n\nhome_team_encoding_pipeline = Pipeline([\n ('encoding', TargetMeanEncodingTransformer(teams_cols[0], *target_col) ),\n ('standard_scaler', StandardScaler() )\n])\n\naway_team_encoding_pipeline = Pipeline([\n ('encoding', TargetMeanEncodingTransformer(teams_cols[1], *target_col) ),\n ('standard_scaler', StandardScaler() )\n])\n\nstandard_scaling_cat_pipeline = Pipeline([\n ('select_cols', DataFrameSelector([*teams_ratio_cat_cols, *last_matches_points_cols, *last_matches_results_cols,\n *last_year_postion_cols, *diff_cat_cols, *total_cat_cols]) ),\n ('standard_scaler', StandardScaler() )\n])\n\ncategorical_preprocess_pipeline = FeatureUnion(transformer_list=[\n ('home_teams_encoding', home_team_encoding_pipeline),\n ('away_teams_encoding', away_team_encoding_pipeline),\n ('base_pipeline ', base_cat_pipeline),\n ('standard_scaling_pipeline', standard_scaling_cat_pipeline),\n])", "repo_name": "Cyki89/Predicting_Winning_Team", "sub_path": "preprocessing_pipelines.py", "file_name": "preprocessing_pipelines.py", "file_ext": "py", "file_size_in_byte": 13959, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "12", "api": [{"api_name": "sklearn.metrics.accuracy_score", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.metrics.log_loss", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 25, 
"usage_type": "call"}, {"api_name": "numpy.random", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 27, "usage_type": "call"}, {"api_name": "sklearn.metrics.log_loss", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.metrics.log_loss", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 58, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 60, "usage_type": "call"}, {"api_name": "sklearn.metrics.log_loss", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 74, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 85, "usage_type": "call"}, {"api_name": "sklearn.metrics.log_loss", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 92, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 94, "usage_type": "call"}, {"api_name": "sklearn.metrics.log_loss", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 107, "usage_type": "call"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 130, "usage_type": "name"}, {"api_name": "sklearn.base.TransformerMixin", "line_number": 130, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 139, "usage_type": "call"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 142, "usage_type": "name"}, {"api_name": "sklearn.base.TransformerMixin", "line_number": 142, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 155, "usage_type": "call"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 159, "usage_type": "name"}, {"api_name": "sklearn.base.TransformerMixin", "line_number": 159, "usage_type": "name"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 171, "usage_type": "name"}, {"api_name": "sklearn.base.TransformerMixin", "line_number": 171, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 180, "usage_type": "call"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 183, "usage_type": "name"}, {"api_name": "sklearn.base.TransformerMixin", "line_number": 183, "usage_type": "name"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 192, "usage_type": "name"}, {"api_name": "sklearn.base.TransformerMixin", "line_number": 192, "usage_type": "name"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 213, "usage_type": "name"}, {"api_name": "sklearn.base.TransformerMixin", "line_number": 213, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 224, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 239, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 240, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 286, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 289, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 293, "usage_type": 
"call"}, {"api_name": "sklearn.preprocessing.OrdinalEncoder", "line_number": 295, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 296, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 300, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 302, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 305, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 307, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 310, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 312, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 315, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 317, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 320, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 322, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 325, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 327, "usage_type": "call"}, {"api_name": "sklearn.pipeline.FeatureUnion", "line_number": 330, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 344, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 346, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 349, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 351, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 354, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 356, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 359, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 362, "usage_type": "call"}, {"api_name": "sklearn.pipeline.FeatureUnion", "line_number": 365, "usage_type": "call"}]} +{"seq_id": "24343857221", "text": "import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import (AutoMinorLocator, MultipleLocator)\n\n# Definitions\nMESSAGE_STATISTICS_FILENAME = './../../data/camstat/message_statistics.csv'\nPLOT_START_TIME = 300.0\nPLOT_END_TIME = 1200.0\n\n# Define data dictionaries\narrInterarrivalTime = [ ]\narrCamSize = [ ]\nmapMaxLatency = { }\nmapMaxDistance = { }\nmapReliableDistance100 = { }\nmapReliableDistance95 = { }\nmapReliableDistance80 = { }\n\n# Read from station statistics file\nline_number = 0\nrow_time = 0\nprev_time = 0\ninterarrival_time = 0\ncam_size = 0\nmax_latency = 0\nmax_distance = 0\nreliable_distance_100_sum = 0\nreliable_distance_95_sum = 0\nreliable_distance_80_sum = 0\nreliable_distance_data_counter = 0\nwith open(MESSAGE_STATISTICS_FILENAME, 'r') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n line_number += 1\n if line_number == 1:\n continue\n row_time = int(float(row[0]) / 10) * 10\n if row_time < PLOT_START_TIME or row_time > PLOT_END_TIME:\n continue\n if row_time > prev_time:\n if prev_time > 0:\n mapMaxLatency[prev_time] = max_latency\n mapMaxDistance[prev_time] = max_distance\n if reliable_distance_data_counter > 0:\n mapReliableDistance100[prev_time] = reliable_distance_100_sum / 
float(reliable_distance_data_counter)\n mapReliableDistance95[prev_time] = reliable_distance_95_sum / float(reliable_distance_data_counter)\n mapReliableDistance80[prev_time] = reliable_distance_80_sum / float(reliable_distance_data_counter)\n prev_time = row_time\n max_latency = 0\n max_distance = 0\n reliable_distance_100_sum = 0\n reliable_distance_95_sum = 0\n reliable_distance_80_sum = 0\n reliable_distance_data_counter = 0\n arrInterarrivalTime.append(float(row[2]))\n arrCamSize.append(int(row[4]))\n if float(row[7]) > max_latency:\n max_latency = float(row[7])\n if float(row[8]) > max_distance:\n max_distance = float(row[8])\n if int(row[5]) > 0 and int(row[6]) > 0:\n reliable_distance_100_sum += float(row[9])\n reliable_distance_95_sum += float(row[10])\n reliable_distance_80_sum += float(row[11])\n reliable_distance_data_counter += 1\n\n# Plot interarrival time\nfigure1, axes1 = plt.subplots(figsize=(8, 6))\nfigure1.tight_layout(pad=5.0)\naxes1.hist(arrInterarrivalTime, bins = np.arange(0.1, 0.51, 0.01) - 0.005, rwidth=0.5)\naxes1.xaxis.set_major_locator(MultipleLocator(0.05))\naxes1.xaxis.set_minor_locator(AutoMinorLocator(5))\naxes1.grid(which='major', color='#CCCCCC', linestyle='--')\naxes1.grid(which='minor', color='#CCCCCC', linestyle=':')\naxes1.set_title('Interarrival Times')\naxes1.set_xlabel('time (s)')\n\n# Plot cam size\nfigure2, axes2 = plt.subplots(figsize=(8, 6))\nfigure2.tight_layout(pad=5.0)\naxes2.hist(arrCamSize)\naxes2.set_title('Cam Lengths')\naxes2.set_xlabel('size (bytes)')\n\n# Plot maximum latency\nX = mapMaxLatency.keys()\nY = mapMaxLatency.values()\nfigure3, axes3 = plt.subplots(figsize=(8, 6))\nfigure3.tight_layout(pad=5.0)\naxes3.set_xlim(PLOT_START_TIME, PLOT_END_TIME)\naxes3.xaxis.set_major_locator(MultipleLocator(300))\naxes3.xaxis.set_minor_locator(AutoMinorLocator(5))\naxes3.set_ylim(0, 0.005)\naxes3.yaxis.set_major_locator(MultipleLocator(0.001))\naxes3.yaxis.set_minor_locator(AutoMinorLocator(5))\naxes3.grid(which='major', color='#CCCCCC', linestyle='--')\naxes3.grid(which='minor', color='#CCCCCC', linestyle=':')\naxes3.set_title('Maximum Latency')\naxes3.set_xlabel('time (s)')\naxes3.set_ylabel('maximum transmission latency\\nmeasured per 10 seconds intervals')\naxes3.plot(X, Y)\n\n# Plot maximum distance\nX = mapMaxDistance.keys()\nY = mapMaxDistance.values()\nfigure4, axes4 = plt.subplots(figsize=(8, 6))\nfigure4.tight_layout(pad=5.0)\naxes4.set_xlim(PLOT_START_TIME, PLOT_END_TIME)\naxes4.xaxis.set_major_locator(MultipleLocator(300))\naxes4.xaxis.set_minor_locator(AutoMinorLocator(5))\naxes4.set_ylim(0, 2500)\naxes4.yaxis.set_major_locator(MultipleLocator(500))\naxes4.yaxis.set_minor_locator(AutoMinorLocator(5))\naxes4.grid(which='major', color='#CCCCCC', linestyle='--')\naxes4.grid(which='minor', color='#CCCCCC', linestyle=':')\naxes4.set_title('Maximum Distance')\naxes4.set_xlabel('time (s)')\naxes4.set_ylabel('maximum transmission distance (meters)\\nmeasured per 10 seconds intervals')\naxes4.plot(X, Y)\n\n# Plot reliable distance 80\nX = mapReliableDistance80.keys()\nY = mapReliableDistance80.values()\nfigure5, axes5 = plt.subplots(figsize=(8, 6))\nfigure5.tight_layout(pad=5.0)\naxes5.set_xlim(PLOT_START_TIME, PLOT_END_TIME)\naxes5.xaxis.set_major_locator(MultipleLocator(300))\naxes5.xaxis.set_minor_locator(AutoMinorLocator(5))\naxes5.set_ylim(0, 250)\naxes5.yaxis.set_major_locator(MultipleLocator(50))\naxes5.yaxis.set_minor_locator(AutoMinorLocator(5))\naxes5.grid(which='major', color='#CCCCCC', linestyle='--')\naxes5.grid(which='minor', 
color='#CCCCCC', linestyle=':')\naxes5.set_title('80% Distance for Transmitted CAMs')\naxes5.set_xlabel('time (s)')\naxes5.set_ylabel('mean of 80% distances (meters)\\nmeasured per 10 seconds intervals')\naxes5.plot(X, Y)\n\n# Plot reliable distance 95\nX = mapReliableDistance95.keys()\nY = mapReliableDistance95.values()\nfigure6, axes6 = plt.subplots(figsize=(8, 6))\nfigure6.tight_layout(pad=5.0)\naxes6.set_xlim(PLOT_START_TIME, PLOT_END_TIME)\naxes6.xaxis.set_major_locator(MultipleLocator(300))\naxes6.xaxis.set_minor_locator(AutoMinorLocator(5))\naxes6.set_ylim(0, 250)\naxes6.yaxis.set_major_locator(MultipleLocator(50))\naxes6.yaxis.set_minor_locator(AutoMinorLocator(5))\naxes6.grid(which='major', color='#CCCCCC', linestyle='--')\naxes6.grid(which='minor', color='#CCCCCC', linestyle=':')\naxes6.set_title('95% Distance for Transmitted CAMs')\naxes6.set_xlabel('time (s)')\naxes6.set_ylabel('mean of 95% distances (meters)\\nmeasured per 10 seconds intervals')\naxes6.plot(X, Y)\n\n# Plot reliable distance 100\nX = mapReliableDistance100.keys()\nY = mapReliableDistance100.values()\nfigure7, axes7 = plt.subplots(figsize=(8, 6))\nfigure7.tight_layout(pad=5.0)\naxes7.set_xlim(PLOT_START_TIME, PLOT_END_TIME)\naxes7.xaxis.set_major_locator(MultipleLocator(300))\naxes7.xaxis.set_minor_locator(AutoMinorLocator(5))\naxes7.set_ylim(0, 250)\naxes7.yaxis.set_major_locator(MultipleLocator(50))\naxes7.yaxis.set_minor_locator(AutoMinorLocator(5))\naxes7.grid(which='major', color='#CCCCCC', linestyle='--')\naxes7.grid(which='minor', color='#CCCCCC', linestyle=':')\naxes7.set_title('100% Distance for Transmitted CAMs')\naxes7.set_xlabel('time (s)')\naxes7.set_ylabel('mean of 100% distances (meters)\\nmeasured per 10 seconds intervals')\naxes7.plot(X, Y)\n\n# Plot reliable distances all in one\nX100 = mapReliableDistance100.keys()\nY100 = mapReliableDistance100.values()\nX95 = mapReliableDistance95.keys()\nY95 = mapReliableDistance95.values()\nX80 = mapReliableDistance80.keys()\nY80 = mapReliableDistance80.values()\nfigure8, axes8 = plt.subplots(figsize=(8, 6))\nfigure8.tight_layout(pad=5.0)\naxes8.set_xlim(PLOT_START_TIME, PLOT_END_TIME)\naxes8.xaxis.set_major_locator(MultipleLocator(300))\naxes8.xaxis.set_minor_locator(AutoMinorLocator(5))\naxes8.set_ylim(0, 250)\naxes8.yaxis.set_major_locator(MultipleLocator(50))\naxes8.yaxis.set_minor_locator(AutoMinorLocator(5))\naxes8.grid(which='major', color='#CCCCCC', linestyle='--')\naxes8.grid(which='minor', color='#CCCCCC', linestyle=':')\naxes8.set_title('Comparison of XY% Distances for Transmitted CAMs')\naxes8.set_xlabel('time (s)')\naxes8.set_ylabel('mean of XY% distances (meters)\\nmeasured per 10 seconds intervals')\naxes8.plot(X100, Y100)\naxes8.plot(X95, Y95)\naxes8.plot(X80, Y80)\naxes8.legend(['100% Distance', '95% Distance', '80% Distance'], loc =\"lower right\")\n\n\n# Show plotted figures\nplt.show()", "repo_name": "kctnky/v2x-work", "sub_path": "code/python/plotMessageStatistics.py", "file_name": "plotMessageStatistics.py", "file_ext": "py", "file_size_in_byte": 7891, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "csv.reader", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 72, 
"usage_type": "call"}, {"api_name": "matplotlib.ticker.AutoMinorLocator", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.ticker.AutoMinorLocator", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.ticker.AutoMinorLocator", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.ticker.AutoMinorLocator", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.ticker.AutoMinorLocator", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.ticker.AutoMinorLocator", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.ticker.AutoMinorLocator", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.ticker.AutoMinorLocator", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.ticker.AutoMinorLocator", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.ticker.AutoMinorLocator", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.ticker.AutoMinorLocator", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.ticker.AutoMinorLocator", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.ticker.AutoMinorLocator", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 203, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}]} +{"seq_id": "28156695411", "text": "import tensorflow as tf\nfrom keras import backend as K\nimport numpy as np\nfrom tensorflow.keras import datasets, layers, models\nfrom copy import copy\nimport os,sys,inspect\nimport time\nimport math\nfrom tqdm import tqdm\nfrom tqdm import trange\ncurrent_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparent_dir = os.path.dirname(current_dir)\nsys.path.insert(0, parent_dir)\n\nfrom Models.test_model import make_prediction\n\ndef zip_and_shuffle(img, lab):\n if len(img) != len(lab):\n raise IndexError(\"The image list and lable list does not have the same length\")\n try:\n zip_test = [(img[i], lab[i]) for i in range(len(img))]\n np.random.shuffle(zip_test)\n img = [zip_test[i][0] for i in range(len(zip_test))] \n lab = [zip_test[i][1] for i in range(len(zip_test))] \n except Exception as e:\n print(f\"ERROR: {e}\")\n raise Exception\n return img, lab\n\ndef get_batch(images, lables, batch_size, noises, noise_method, shuffle=True,\n drop_last=True, augmentation=False):\n idx = len(images)\n \n \n if drop_last:\n n_batches = idx // batch_size\n else:\n n_batches = np.ceil(idx / batch_size).astype(np.int32)\n \n if shuffle:\n images, lables = zip_and_shuffle(images, lables)\n \n for b in range(n_batches):\n left_idx = b * batch_size\n right_idx = min((b+1)*batch_size, idx)\n img_batch, lab_batch = images[left_idx:right_idx], lables[left_idx:right_idx]\n\n if augmentation:\n try:\n img_batch = noise_method(img_batch, noises, batch_size)\n except Exception as e:\n print(f\"ERROR: {e}\")\n raise Exception\n\n yield img_batch, lab_batch\n\n\n\ndef apply_noise_evenly(img_batch, noises, batch_size):\n global_idx = 0\n aug_bs = batch_size // len(noises)\n \n for i, noise in enumerate(noises):\n for img in img_batch[i*aug_bs:i+1*aug_bs]:\n img_batch[global_idx] = noise + img\n global_idx += 1\n\n return img_batch\n\ndef should_early_stop(best_epoch, epoch, patience):\n return best_epoch + patience <= epoch\n\ndef sum_accuracy(right, wrong):\n if len(right) != len(wrong):\n raise IndexError(\"The list 'right' and 'wrong' are not the same lenght\")\n \n return [100*(right[i]/(right[i]+wrong[i])) for i in range(len(right))]\n\ndef lr_exp_decay(epoch, lr):\n k = 0.1\n return lr * math.exp(-k*epoch)\n\ndef validate_monitor(monitor, best_accuracy, accuracy, best_loss, loss):\n if monitor == 'val_loss':\n return loss < best_loss or best_loss == -1\n elif monitor == 'val_acc':\n return accuracy > best_accuracy or best_accuracy == -1\n else:\n raise TypeError(f'{monitor} is not a valid evaluation monitor')\n\ndef calc_accuracy(right, wrong):\n return right / (right + wrong)\n\ndef average(tim):\n return sum(tim) / len(tim)\n\ndef fit_model(model, train_img, train_lab, val_img, val_lab, filter_names, apply_noise_method, monitor='val_loss',\n delta_value=None, patience=10, epochs=100, restore_weights=False, augmentation=False\n ):\n scce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n history_monitor = []\n batch_size = 32\n \n best_epoch = -1\n best_loss = -1\n best_accuracy = -1\n best_model = copy(model)\n \n current_learning_rate = 0.001\n \n right, wrong = [], []\n \n done = epochs\n progress = trange(done, desc='epoch stuff', leave=True)\n loss = 0\n \n times = []\n tik = time.perf_counter()\n for epoch in progress:\n progress.set_description(f\"E = {epoch}, LR = {current_learning_rate}, LOSS = {loss}\")\n 
progress.refresh()\n \n if epoch != 0:\n current_learning_rate = lr_exp_decay(epoch, current_learning_rate)\n K.set_value(model.optimizer.learning_rate, current_learning_rate)\n \n for (xb, yb) in get_batch(train_img, train_lab, batch_size, filter_names, apply_noise_method, augmentation=augmentation):\n \n xb = np.array(xb)\n yb = np.array(yb)\n try:\n _ = model.train_on_batch(tf.convert_to_tensor(xb) , tf.convert_to_tensor(yb))\n except Exception as e:\n print(f\"ERROR: {e}\")\n raise Exception\n \n img_predict = []\n img_true = []\n \n right.append(0)\n wrong.append(0)\n \n for xb, yb in get_batch(val_img, val_lab, batch_size, filter_names, apply_noise_method, augmentation=augmentation):\n for i in range(len(xb)):\n prediction = make_prediction(model, xb[i], (52, 52, 3)).numpy()[0] #TODO resolution is hard coded. pls fix\n img_predict.append(prediction)\n img_true.append(int(yb[i]))\n \n predicted_label = np.argmax(prediction)\n \n if predicted_label == int(yb[i]):\n right[-1] += 1\n else:\n wrong[-1] += 1\n\n loss = scce(img_true, img_predict).numpy()\n history_monitor.append(loss)\n \n current_accuracy = calc_accuracy(right[-1], wrong[-1])\n if validate_monitor(monitor, best_accuracy, current_accuracy, best_loss, loss) and should_early_stop:\n # if loss < best_loss or best_loss == -1 and should_early_stop:\n best_epoch = epoch\n best_loss = loss\n best_accuracy = calc_accuracy(right[-1], wrong[-1])\n best_model = copy(model)\n elif should_early_stop(best_epoch, epoch, patience) and should_early_stop:\n return best_model, history_monitor, sum_accuracy(right, wrong)\n\n\n tok = time.perf_counter()\n print(f\"EPOCH TIME: {tok-tik}\")\n \n return_acuracy = sum_accuracy(right, wrong)\n \n if should_early_stop:\n return best_model, history_monitor, return_acuracy\n else:\n return model, history_monitor, return_acuracy\n \n \n # return best_model, history_monitor, return_acuracy if should_early_stop else model, history_monitor, return_acuracy\n", "repo_name": "Biksbois/BiksTurePy", "sub_path": "phase_one/fit_model_on_batch.py", "file_name": "fit_model_on_batch.py", "file_ext": "py", "file_size_in_byte": 6136, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 11, "usage_type": "call"}, {"api_name": "inspect.getfile", "line_number": 11, "usage_type": "call"}, {"api_name": "inspect.currentframe", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.ceil", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 38, "usage_type": "attribute"}, {"api_name": "math.exp", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.SparseCategoricalCrossentropy", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 100, "usage_type": "attribute"}, {"api_name": "copy.copy", "line_number": 107, "usage_type": "call"}, 
{"api_name": "tqdm.trange", "line_number": 114, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 118, "usage_type": "call"}, {"api_name": "keras.backend.set_value", "line_number": 125, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 125, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 130, "usage_type": "call"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 132, "usage_type": "call"}, {"api_name": "Models.test_model.make_prediction", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 149, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 165, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 170, "usage_type": "call"}]} +{"seq_id": "16800830903", "text": "import codecs\nimport time\nimport json\nimport threading\nfrom os import listdir, SEEK_CUR\nfrom os.path import join, isdir, basename\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\nimport events\nfrom config import config\n\n\nclass JournalHandler(FileSystemEventHandler):\n\n def __init__(self):\n self.journal_dir = config['journal_dir']\n self.logfile = None\n self.loghandle = None\n self.observer = None\n self.thread = None\n self.event_queue = []\n self.state = {\n 'Commander': None,\n 'Ship_Localised': None,\n 'ShipName': None,\n 'ShipIdent': None,\n 'FuelLevel': None,\n 'FuelCapacity': None,\n 'GameMode': None,\n 'Credits': None,\n\n 'Docked': None,\n 'StarSystem': None,\n 'StarSystemBodies': {},\n 'SystemSecurity_Localised': None,\n 'Population': 0,\n 'Body': None,\n 'BodyType': None,\n\n 'Latitude': None,\n 'Longitude': None,\n\n 'StationName': None,\n 'StationType': None,\n\n 'Raw': {},\n 'Manufactured': {},\n 'Encoded': {},\n }\n\n def start(self):\n if not self.journal_dir or not isdir(self.journal_dir):\n self.stop()\n return False\n\n try:\n logfiles = sorted(\n [f for f in listdir(self.journal_dir)\n if f.startswith('Journal') and f.endswith('.log')],\n key=lambda x: x.split('.')[1:]\n )\n if logfiles:\n self.logfile = join(self.journal_dir, logfiles[-1]) or None\n except OSError:\n self.logfile = None\n return False\n\n self.observer = Observer()\n self.observer.daemon = True\n self.observer.schedule(self, self.journal_dir)\n self.observer.start()\n\n if not self.running():\n self.thread = threading.Thread(\n target=self.worker,\n name='Journal worker')\n self.thread.daemon = True\n self.thread.start()\n\n return True\n\n def stop(self):\n self.thread = None\n if self.observer:\n self.observer.stop()\n self.observer.join()\n self.observer = None\n\n def running(self):\n return self.thread and self.thread.is_alive()\n\n def on_created(self, event):\n cond1 = not event.is_directory\n cond2 = basename(event.src_path).startswith('Journal')\n cond3 = basename(event.src_path).endswith('.log')\n if cond1 and cond2 and cond3:\n newlogfile = event.src_path\n\n if self.loghandle:\n self.loghandle.close()\n\n self.logfile = newlogfile\n self.loghandle = open(newlogfile, 'r')\n\n print(self.logfile)\n\n def worker(self):\n if not self.logfile:\n return\n self.loghandle = codecs.open(join(self.journal_dir, self.logfile), 'r', encoding='utf-8')\n\n while True:\n loghandle = self.loghandle\n if loghandle:\n loghandle.seek(0, SEEK_CUR)\n for line in loghandle:\n self.parse(line)\n\n time.sleep(1)\n\n if threading.current_thread() != self.thread:\n return\n\n def parse(self, 
line):\n entry = json.loads(line)\n event = entry['event']\n\n if event == 'FSDJump':\n self.state['BodyType'] = 'Star'\n for k, v in entry.items():\n if k in self.state:\n self.state[k] = v\n entry.update({'FuelCapacity': self.state['FuelCapacity']})\n line = json.dumps(entry, separators=(', ', ':'))\n\n elif event == 'FuelScoop':\n self.state['FuelLevel'] = entry['Total']\n\n elif event in ['RefuelAll', 'RefuelPartial']:\n self.state['FuelLevel'] += entry['Amount']\n\n elif event == 'Scan':\n body_scan = events.Scan(entry).body_scan\n self.state['StarSystemBodies'].update({\n (self.state['StarSystem'], entry['BodyID']): body_scan,\n })\n\n elif event == 'SupercruiseEntry':\n self.state['BodyType'] = 'Null'\n\n elif event == 'SupercruiseExit':\n for k, v in entry.items():\n if k in self.state:\n self.state[k] = v\n\n elif event == 'ApproachBody':\n self.state['BodyType'] = 'Planet'\n for k, v in entry.items():\n if k in self.state:\n self.state[k] = v\n\n elif event == 'LeaveBody':\n self.state['BodyType'] = 'Null'\n for k, v in entry.items():\n if k in self.state:\n self.state[k] = v\n\n elif event == 'Touchdown':\n self.state['Latitude'] = entry.get('Latitude')\n self.state['Longitude'] = entry.get('Longitude')\n\n elif event == 'Liftoff':\n self.state['Latitude'] = None\n self.state['Longitude'] = None\n\n elif event == 'Materials':\n for category in ['Raw', 'Manufactured', 'Encoded']:\n for material in entry.get(category, []):\n count = material['Count']\n name = material.get('Name_Localised')\n if not name:\n name = material['Name']\n self.state[category].update({name: count})\n\n elif event in ['MaterialCollected', 'MaterialDiscarded']:\n category = entry['Category']\n count = entry['Count']\n name = entry.get('Name_Localised')\n if not name:\n name = entry['Name']\n\n if event == 'MaterialCollected':\n total = self.state[category].get(name, 0) + count\n elif event == 'MaterialDiscarded':\n total = self.state[category][name] - count\n\n self.state[category].update({name: total})\n\n entry.update({'Total': total})\n line = json.dumps(entry, separators=(', ', ':'))\n\n elif event == 'Docked':\n self.state['Docked'] = True\n self.state['StationName'] = entry['StationName']\n self.state['StationType'] = entry['StationType']\n\n elif event == 'Undocked':\n self.state['Docked'] = False\n self.state['StationName'] = None\n self.state['StationType'] = None\n\n elif event == 'SetUserShipName':\n self.state['ShipName'] = entry['UserShipName']\n self.state['ShipIdent'] = entry['UserShipId']\n\n elif event in ['ShipyardNew', 'ShipyardSwap']:\n self.state['Ship_Localised'] = entry.get('ShipType')\n self.state['ShipName'] = None\n self.state['ShipIdent'] = None\n\n elif event == 'Commander':\n self.state['Commander'] = entry['Name']\n\n elif event in ['LoadGame', 'Location']:\n for k, v in entry.items():\n if k in self.state:\n self.state[k] = v\n\n elif (event == 'Loadout' and\n not entry['Ship'].lower().endswith('fighter')):\n self.state['ShipName'] = entry['ShipName']\n self.state['ShipIdent'] = entry['ShipIdent']\n fuel_capacity = 0\n for module in entry['Modules']:\n if module['Item'].lower().find('fueltank') > -1:\n item = module['Item'].split('_')\n size = int(item[2][-1])\n fuel_capacity += 2 ** size\n self.state['FuelCapacity'] = fuel_capacity\n\n elif event == 'NewCommander':\n self.state['Commander'] = entry['Name']\n\n self.event_queue.append(line)\n\n def get_entry(self):\n if not self.event_queue:\n return None\n\n entry = self.event_queue.pop(0)\n\n return entry\n\n\nmonitor = 
JournalHandler()\n", "repo_name": "alturus/EDLogPrint", "sub_path": "monitor.py", "file_name": "monitor.py", "file_ext": "py", "file_size_in_byte": 8009, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "watchdog.events.FileSystemEventHandler", "line_number": 14, "usage_type": "name"}, {"api_name": "config.config", "line_number": 17, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 53, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "watchdog.observers.Observer", "line_number": 69, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 96, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 111, "usage_type": "call"}, {"api_name": "os.SEEK_CUR", "line_number": 116, "usage_type": "argument"}, {"api_name": "time.sleep", "line_number": 120, "usage_type": "call"}, {"api_name": "threading.current_thread", "line_number": 122, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 126, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 135, "usage_type": "call"}, {"api_name": "events.Scan", "line_number": 144, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 201, "usage_type": "call"}]} +{"seq_id": "31415487840", "text": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# datetime:2020/5/29 17:33\nfrom pyspark.sql import functions as f\nfrom pyspark.sql import SparkSession\nfrom delta.tables import DeltaTable\n\n\ndef merge(spark, update, tableName, cols, key):\n \"\"\"\n 将DataFrame和delta表进行merge操作,insert操作要求DataFrame必须包含delta表所有的列(0.5版本)\n 当我们使用merge操作更新/插入delta表其中几列时,指定在DataFrame中不存在的列的值为null。\n\n 注:DataFrame中要写入delta表的列要和delta表一样\n :param spark,SparkSession实例\n :param update,spark DataFrame\n :param tableName,要更新的delta表\n \"\"\"\n # 如果没有dt列,创建当前日期的dt列\n if \"dt\" not in cols:\n update = update.withColumn(\"dt\", f.current_date())\n cols.append(\"dt\")\n\n # 1.构建merge条件\n mergeExpr = f\"origin.{key}=update.{key}\"\n print(f\"merge expression:{mergeExpr}\")\n\n # 2.构建更新表达式\n updateExpr = {}\n for c in cols:\n updateExpr[c] = f\"update.{c}\"\n\n print(f\"update expression:{updateExpr}\")\n\n origin = DeltaTable.forPath(spark, tableName)\n origin_cols = origin.toDF().columns\n\n # 3.构建插入表达式\n insertExpr = {}\n for origin_col in origin_cols:\n if origin_col in cols:\n insertExpr[origin_col] = f\"update.{origin_col}\"\n else:\n # 不存在,插入null值(不是字符串)\n insertExpr[origin_col] = \"null\"\n\n print(f\"insert expression:{insertExpr}\")\n\n # for origin_col in origin_cols:\n # if origin_col not in cols:\n # update=update.withColumn(origin_col,f.lit(None))\n\n origin.alias(\"origin\") \\\n .merge(update.alias(\"update\"), mergeExpr) \\\n .whenMatchedUpdate(set=updateExpr) \\\n .whenNotMatchedInsert(values=insertExpr) \\\n .execute()\n\nif __name__==\"__main__\":\n deltaTable = \"/user/delta/test\"\n\n spark = SparkSession.builder.appName(\"delta\").master(\"local[2]\").getOrCreate()\n\n #创建delta表\n df = spark.createDataFrame(data=[[None for i in range(8)]],\n schema=\"id long,c0 int,c1 long,c2 float,c3 double,c4 string,c5 date,c6 timestamp\") \\\n .withColumn(\"dt\", f.current_date())\n 
df.limit(0).write.partitionBy(\"dt\").format(\"delta\").mode(\"append\").save(deltaTable)\n\n    # Insert data\n    update = spark.range(0, 10)\\\n        .withColumn(\"dt\", f.current_date()) \\\n        .withColumn(\"c1\", f.lit(0).cast(\"long\"))\n    update.write.partitionBy(\"dt\").format(\"delta\").mode(\"append\").save(deltaTable)\n\n\n    update = spark.range(5, 15) \\\n        .withColumn(\"dt\", f.current_date()) \\\n        .withColumn(\"c1\", f.lit(1).cast(\"long\"))\n\n\n    merge(spark,update,deltaTable,[\"id\",\"dt\",\"c1\"],\"id\")", "repo_name": "ZhiYinZhang/study", "sub_path": "pysparkDemo/delta/mergeOpt.py", "file_name": "mergeOpt.py", "file_ext": "py", "file_size_in_byte": 2783, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "pyspark.sql.functions.current_date", "line_number": 21, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 21, "usage_type": "name"}, {"api_name": "delta.tables.DeltaTable.forPath", "line_number": 35, "usage_type": "call"}, {"api_name": "delta.tables.DeltaTable", "line_number": 35, "usage_type": "name"}, {"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 62, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 62, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.current_date", "line_number": 67, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 67, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.current_date", "line_number": 72, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 72, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.lit", "line_number": 73, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 73, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.current_date", "line_number": 78, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 78, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.lit", "line_number": 79, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "11888102651", "text": "from pynwb.form.build import GroupBuilder, DatasetBuilder\n\nfrom pynwb import TimeSeries\n\nfrom . 
import base\n\n\n@base.container_test(TimeSeries)\nclass TestTimeSeriesIO(base.TestMapRoundTrip):\n\n def setUpContainer(self):\n return TimeSeries('test_timeseries', 'example_source', list(range(100, 200, 10)),\n 'SIunit', timestamps=list(range(10)), resolution=0.1)\n\n def setUpBuilder(self):\n return GroupBuilder('test_timeseries',\n attributes={'source': 'example_source',\n 'namespace': base.CORE_NAMESPACE,\n 'neurodata_type': 'TimeSeries',\n 'description': 'no description',\n 'comments': 'no comments',\n 'help': 'General time series object'},\n datasets={'data': DatasetBuilder('data', list(range(100, 200, 10)),\n attributes={'unit': 'SIunit',\n 'conversion': 1.0,\n 'resolution': 0.1}),\n 'timestamps': DatasetBuilder('timestamps', list(range(10)),\n attributes={'unit': 'Seconds', 'interval': 1})})\n", "repo_name": "q0j0p/pynwb", "sub_path": "tests/integration/ui_write/test_base.py", "file_name": "test_base.py", "file_ext": "py", "file_size_in_byte": 1487, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "pynwb.TimeSeries", "line_number": 12, "usage_type": "call"}, {"api_name": "pynwb.form.build.GroupBuilder", "line_number": 16, "usage_type": "call"}, {"api_name": "pynwb.form.build.DatasetBuilder", "line_number": 23, "usage_type": "call"}, {"api_name": "pynwb.form.build.DatasetBuilder", "line_number": 27, "usage_type": "call"}, {"api_name": "pynwb.TimeSeries", "line_number": 8, "usage_type": "argument"}]} +{"seq_id": "28346394216", "text": "import argparse\nimport cv2\nimport time\nimport math\nimport onnxruntime\nimport numpy as np\nfrom math import cos, sin\nimport mediapipe as mp\nimport os\n\n# HEADPOSE DRAW FUNC\ndef draw_axis(img, yaw, pitch, roll, tdx=None, tdy=None, size=50, img_size=50):\n # Referenced from HopeNet https://github.com/natanielruiz/deep-head-pose\n if math.isnan(yaw) or math.isnan(pitch):\n return img\n pitch = pitch * np.pi / 180\n yaw = -(yaw * np.pi / 180)\n if tdx != None and tdy != None:\n tdx = tdx\n tdy = tdy\n else:\n height, width = img.shape[:2]\n tdx = width / 2\n tdy = height / 2\n if math.isnan(roll):\n print('roll is nan')\n else:\n roll = roll * np.pi / 180\n # X-Axis pointing to right. 
drawn in red\n x1 = size * (cos(yaw) * cos(roll)) + tdx\n y1 = size * (cos(pitch) * sin(roll) + cos(roll) * sin(pitch) * sin(yaw)) + tdy\n # Y-Axis | drawn in green\n # v\n x2 = size * (-cos(yaw) * sin(roll)) + tdx\n y2 = size * (cos(pitch) * cos(roll) - sin(pitch) * sin(yaw) * sin(roll)) + tdy\n cv2.line(img, (int(tdx), int(tdy)), (int(x1), int(y1)), (0, 0, 255), 2)\n cv2.line(img, (int(tdx), int(tdy)), (int(x2), int(y2)), (0, 255, 0), 2)\n # Z-Axis (out of the screen) drawn in blue\n # x3 = size * (sin(yaw)) + tdx\n # y3 = size * (-cos(yaw) * sin(pitch)) + tdy\n x3 = img_size * (sin(yaw)) + tdx\n y3 = img_size * (-cos(yaw) * sin(pitch)) + tdy\n cv2.line(img, (int(tdx), int(tdy)), (int(x3),int(y3)),(255,0,0),2)\n\n return img\n\n#\ndef plot_pose_cube(img, yaw, pitch, roll, tdx=None, tdy=None, size=150.):\n # Input is a cv2 image\n # pose_params: (pitch, yaw, roll, tdx, tdy)\n # Where (tdx, tdy) is the translation of the face.\n # For pose we have [pitch yaw roll tdx tdy tdz scale_factor]\n if math.isnan(yaw) or math.isnan(pitch):\n return img\n\n p = pitch * np.pi / 180\n y = -(yaw * np.pi / 180)\n r = roll * np.pi / 180\n if tdx != None and tdy != None:\n tdx = tdx\n tdy = tdy\n face_x = tdx - 0.50 * size\n face_y = tdy - 0.50 * size\n else:\n height, width = img.shape[:2]\n tdx = width / 2\n tdy = height / 2\n face_x = width / 2 - 0.5 * size\n face_y = height / 2 - 0.5 * size\n\n x1 = size * (cos(y) * cos(r)) + face_x\n y1 = size * (cos(p) * sin(r) + cos(r) * sin(p) * sin(y)) + face_y\n x2 = size * (-cos(y) * sin(r)) + face_x\n y2 = size * (cos(p) * cos(r) - sin(p) * sin(y) * sin(r)) + face_y\n x3 = size * (sin(y)) + face_x\n y3 = size * (-cos(y) * sin(p)) + face_y\n\n # Draw base in red\n cv2.line(img, (int(face_x), int(face_y)), (int(x1),int(y1)),(0,0,255),3)\n cv2.line(img, (int(face_x), int(face_y)), (int(x2),int(y2)),(0,0,255),3)\n cv2.line(img, (int(x2), int(y2)), (int(x2+x1-face_x),int(y2+y1-face_y)),(0,0,255),3)\n cv2.line(img, (int(x1), int(y1)), (int(x1+x2-face_x),int(y1+y2-face_y)),(0,0,255),3)\n # Draw pillars in blue\n cv2.line(img, (int(face_x), int(face_y)), (int(x3),int(y3)),(255,0,0),2)\n cv2.line(img, (int(x1), int(y1)), (int(x1+x3-face_x),int(y1+y3-face_y)),(255,0,0),2)\n cv2.line(img, (int(x2), int(y2)), (int(x2+x3-face_x),int(y2+y3-face_y)),(255,0,0),2)\n cv2.line(img, (int(x2+x1-face_x),int(y2+y1-face_y)), (int(x3+x1+x2-2*face_x),int(y3+y2+y1-2*face_y)),(255,0,0),2)\n # Draw top in green\n cv2.line(img, (int(x3+x1-face_x),int(y3+y1-face_y)), (int(x3+x1+x2-2*face_x),int(y3+y2+y1-2*face_y)),(0,255,0),2)\n cv2.line(img, (int(x2+x3-face_x),int(y2+y3-face_y)), (int(x3+x1+x2-2*face_x),int(y3+y2+y1-2*face_y)),(0,255,0),2)\n cv2.line(img, (int(x3), int(y3)), (int(x3+x1-face_x),int(y3+y1-face_y)),(0,255,0),2)\n cv2.line(img, (int(x3), int(y3)), (int(x3+x2-face_x),int(y3+y2-face_y)),(0,255,0),2)\n\n return img\n\n# BBOX, HEADPOSE DRAW\ndef draw_bbox_axis(frame, face_pos, add_face, yaw, pitch, roll, draw_bbox=0, draw_cube=1, draw_line=0):\n\n (x, y, w, h) = face_pos\n (x2, y2) = add_face\n w = x2-x\n h = y2-y\n\n # Draw bbox\n if draw_bbox:\n deg_norm = 1.0 - abs(yaw / 180)\n blue = int(255 * deg_norm)\n cv2.rectangle(frame, (int(x), int(y)), (int(x2), int(y2)), color=(blue, 0, 255 - blue), thickness=2)\n\n # Draw pose cube\n if draw_cube:\n frame = plot_pose_cube(frame, yaw, pitch, roll, tdx=x + w / 2, tdy=y + h / 2, size=w)\n\n # Draw pose axis\n if draw_line:\n frame = draw_axis(frame, yaw, pitch, roll, tdx=x + w / 2, tdy=y + h / 2, size=w // 2)\n\n return frame\n\n# ONNX 
LOAD\ndef load_onnx_model(path, name):\n    onnx_model = onnxruntime.InferenceSession(path_or_bytes=os.path.join((os.getcwd() + os.path.sep).split('src')[0], 'models', path))\n    globals()['onnx_input_{}'.format(name)] = onnx_model.get_inputs()[0].name\n    print(\">>> onnx model load : {}\".format(name))\n    print(\">>> input name : {}\".format(onnx_model.get_inputs()[0].name))\n    print(\">>> input shape : {}\".format(onnx_model.get_inputs()[0].shape))\n    print(\">>> done.\\n\")\n    return onnx_model\n\n# 6DREPNET\ndef headpose_6drepnet2(rgb_img, x, y, x2, y2, onnx_input_sixdrepnet, sixdrepnet_model):\n\n    face_img = rgb_img[y:y2, x:x2, :]\n\n    face_img = cv2.resize(face_img, (256, 256))\n    face_img = face_img[16:240,16:240,0:3]\n\n    # Normalization as implemented in the official GitHub repo\n    face_img = np.array(face_img, dtype=np.uint8)\n    face_img = face_img / 255\n    face_img[:,:,0] = (face_img[:,:,0] - 0.485) / 0.229\n    face_img[:,:,1] = (face_img[:,:,1] - 0.456) / 0.224\n    face_img[:,:,2] = (face_img[:,:,2] - 0.406) / 0.225\n\n    face_img = face_img.transpose(2, 0, 1)\n\n    face_img = np.expand_dims(face_img, axis=0)\n    face_img = np.array(face_img, dtype=np.float32)\n\n    st_time = time.time()\n    outputs = sixdrepnet_model.run(None, input_feed={onnx_input_sixdrepnet: face_img})[0]\n\n    R = outputs\n    sy = np.sqrt(R[:, 0, 0] * R[:, 0, 0] + R[:, 1, 0] * R[:, 1, 0])\n    singular = sy < 1e-6\n\n    x = np.arctan2(R[:, 2, 1], R[:, 2, 2])\n    y = np.arctan2(-R[:, 2, 0], sy)\n    z = np.arctan2(R[:, 1, 0], R[:, 0, 0])\n    xs = np.arctan2(-R[:,1,2], R[:,1,1])\n    ys = np.arctan2(-R[:,2,0], sy)\n    zs = R[:, 1, 0] * 0\n\n    pitch = (x * (1 - singular) + xs * singular)[0] * 180 / np.pi\n    yaw = (y * (1 - singular) + ys * singular)[0] * 180 / np.pi\n    roll = (z * (1 - singular) + zs * singular)[0] * 180 / np.pi\n\n    print(\">>> 6DREPNET Use Time : {}\".format(time.time() - st_time))\n    print(yaw, pitch, roll)\n\n    return yaw, pitch, roll\n\n# MAIN\ndef main(draw_bbox, draw_cube, draw_line):\n\n    # Load 6DRepNet\n    sixdrepnet_model = load_onnx_model(path='sixdrepnet.onnx', name='sixdrepnet')\n\n    # Load Mediapipe to predict Face\n    face_detection = mp.solutions.face_detection.FaceDetection(min_detection_confidence=0.9)\n\n    # Capture\n    cap = cv2.VideoCapture(0)\n\n    # Start Loop\n    while 1:\n        ret, frame = cap.read()\n        if not ret:\n            break\n        frame = cv2.flip(frame, 1)\n        output_frame = frame.copy()\n        rgb_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n        start_time, st_time = time.time(), time.time()\n\n        # Face Detect (using Mediapipe)\n        detected = face_detection.process(rgb_img)\n        print(\">>> BlazeFace Use Time : {}\".format(time.time() - st_time))\n\n        if detected.detections:\n\n            face_pos = detected.detections[0].location_data.relative_bounding_box\n            x = int(rgb_img.shape[1] * max(face_pos.xmin, 0))\n            y = int(rgb_img.shape[0] * max(face_pos.ymin, 0))\n            w = int(rgb_img.shape[1] * min(face_pos.width, 1))\n            h = int(rgb_img.shape[0] * min(face_pos.height, 1))\n\n            # bbox\n            face_plus_scalar = 5\n            x2 = min(x + w + face_plus_scalar, rgb_img.shape[1])\n            y2 = min(y + h + face_plus_scalar, rgb_img.shape[0])\n            x = max(0, x - face_plus_scalar)\n            y = max(0, y - face_plus_scalar)\n            face_pos = (x, y, w, h)\n\n            # headpose\n            yaw, pitch, roll = headpose_6drepnet2(rgb_img, x, y, x2, y2, onnx_input_sixdrepnet, sixdrepnet_model)\n\n            # draw bbox, axis\n            draw_bbox_axis(output_frame, face_pos, (x2, y2), yaw, pitch, roll,\n                           draw_bbox=draw_bbox, draw_cube=draw_cube, draw_line=draw_line)\n\n        print(\">>> Total Loop Time : {}\\n\".format(time.time() - start_time))\n        cv2.imshow('demo', output_frame)\n\n        if cv2.waitKey(1) == 27:\n
cap.release()\n cv2.destroyAllWindows()\n break\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='6DRepNet to ONNX')\n parser.add_argument('--draw_bbox', default=1, type=int)\n parser.add_argument('--draw_cube', default=0, type=int)\n parser.add_argument('--draw_line', default=1, type=int)\n args = parser.parse_args()\n\n main(args.draw_bbox, args.draw_cube, args.draw_line)", "repo_name": "saeu5407/6drepnet-onnx", "sub_path": "src/demo.py", "file_name": "demo.py", "file_ext": "py", "file_size_in_byte": 8872, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "12", "api": [{"api_name": "math.isnan", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 17, "usage_type": "attribute"}, {"api_name": "math.isnan", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 28, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 30, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 31, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 31, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 34, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 34, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 35, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 37, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 41, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 42, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 43, "usage_type": "call"}, {"api_name": "math.isnan", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 57, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 58, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 71, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 72, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 72, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 73, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 73, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 74, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 74, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 75, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 76, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 79, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 86, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 87, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 89, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 90, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 
91, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 92, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 108, "usage_type": "call"}, {"api_name": "onnxruntime.InferenceSession", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 122, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 148, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 164, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 165, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 166, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 168, "usage_type": "call"}, {"api_name": "mediapipe.solutions.face_detection.FaceDetection", "line_number": 180, "usage_type": "call"}, {"api_name": "mediapipe.solutions", "line_number": 180, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 183, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 190, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 192, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 192, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 193, "usage_type": "call"}, {"api_name": "time.time", "line_number": 197, "usage_type": "call"}, {"api_name": "time.time", "line_number": 222, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 223, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 225, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 227, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 232, "usage_type": "call"}]} +{"seq_id": "16516879805", "text": "from imdb import IMDb\nfrom pyrogram import Client, filters\nfrom pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup\n\nfrom Stark import error_handler\n\nia = IMDb()\n\n\n@Client.on_message(filters.command([\"imdb\", \"IMDb\"]))\n@error_handler\nasync def search_movie(client, message):\n if len(message.command) < 2:\n await client.send_message(\n chat_id=message.chat.id,\n text=\"`Please provide a movie or TV series name after the /imdb command.`\"\n )\n return\n # Get the movie name from the user's message \n movie_name = message.text.split(\" \", 1)[1]\n if len(movie_name) < 1:\n await client.send_message(\n chat_id=message.chat.id,\n text=\"`Please provide a movie or TV series name after the /imdb command.`\"\n )\n return\n if len(str(movie_name)) > 40:\n await client.send_message(\n 
chat_id=message.chat.id,\n            text=\"`Please provide a movie or TV series name. Not a paragraph! :)`\"\n        )\n        return\n    mv = await message.reply_photo(\"https://exchange4media.gumlet.io/news-photo/123661-93930-IMDbAmazon.jpg\", caption=f\"`Searching for {movie_name}`\")\n    movies = ia.search_movie(movie_name, results=10)\n    if len(movies) == 0:\n        await mv.edit(\"**__No movies found with that name!__**\")\n        return\n    button_list = []\n    for i, movie in enumerate(movies[:10]):\n        button_list.append([InlineKeyboardButton(text=movie['title'], callback_data=f\"{message.from_user.id}.more_details {movie.movieID} :{movie_name}:\")])\n        # button_list.append([InlineKeyboardButton(text=\"\", callback_data=f\"more_details {movie.movieID}\")])\n    # Add the buttons to an InlineKeyboardMarkup object\n    keyboard = InlineKeyboardMarkup(button_list)\n\n    # Send a message to the user with the search results and buttons\n    message_text = f\"Found {len(movies)} results. Please select a movie:\"\n    await mv.edit(\n        text=message_text,\n        reply_markup=keyboard,\n        disable_web_page_preview=True\n    )\n", "repo_name": "Naveen-X/Mr.Stark", "sub_path": "Stark/Plugins/movie.py", "file_name": "movie.py", "file_ext": "py", "file_size_in_byte": 2033, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "12", "api": [{"api_name": "imdb.IMDb", "line_number": 7, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardButton", "line_number": 40, "usage_type": "call"}, {"api_name": "pyrogram.types.InlineKeyboardMarkup", "line_number": 43, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_message", "line_number": 10, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 10, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 10, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 10, "usage_type": "name"}, {"api_name": "Stark.error_handler", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "3269883429", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport time\nimport hashlib\nimport requests\n\n\nclass AttendanceUtil(object):\n\n    def calculate_sign(self):\n        \"\"\"\n        Get the md5 signature\n        :return: the signature\n        \"\"\"\n        timestamp = self.get_timestamp()\n        app_secret, app_key = \"47F9B660196F0F23B55908786E8A327B\", \"E1B559D014E90F7EF8047949A7440F3E\"\n        md5_val = hashlib.md5((app_key + timestamp + app_secret).lower().encode(\"utf-8\")).hexdigest()\n        return md5_val, timestamp\n\n    def app_auth(self, host, path):\n        \"\"\"\n        Get a token\n        :param host: request domain\n        :param path: request path\n        :return: the token\n        \"\"\"\n        url = host + path\n        data = {\n            'app_id': '15676497800668552d',\n            'app_key': 'E1B559D014E90F7EF8047949A7440F3E',\n            'timestamp': self.calculate_sign()[-1],\n            'sign': self.calculate_sign()[0]\n        }\n        rs = requests.post(url, json=data)\n        return rs.json()['data']\n\n    def get_timestamp(self):\n        \"\"\"\n        Get a timestamp\n        :return: the timestamp\n        \"\"\"\n        timestamp = str(round(time.time() * 1000))\n        return timestamp\n\n\nif __name__ == '__main__':\n    host = 'http://attendance.yooticloud.cn/api/v1/'\n    path = 'app/auth'\n    au = AttendanceUtil()\n    token = au.app_auth(host, path)\n    print(token)\n", "repo_name": "caijianwei01/tlischool_robotframework_api", "sub_path": "verification_library/attendance_util.py", "file_name": "attendance_util.py", "file_ext": "py", "file_size_in_byte": 1388, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "hashlib.md5", "line_number": 17, "usage_type": 
"call"}, {"api_name": "requests.post", "line_number": 34, "usage_type": "call"}, {"api_name": "time.time", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "14987232999", "text": "from typing import List\nfrom pydantic import BaseModel\n\n\nclass CreateSchool(BaseModel):\n school_name: str\n address: str\n\n\nclass CreateStudent(BaseModel):\n first_name: str\n last_name: str\n address: str\n school: int\n email: str\n\n\nclass UserInfoBase(BaseModel):\n username: str\n fullname: str\n\n\nclass UserCreate(UserInfoBase):\n password: str\n\n\nclass UserInfo(UserInfoBase):\n id: int\n username: str\n fullname: str\n\n class Config:\n orm_mode = True\n\n\nclass SchoolInfo(CreateSchool):\n id: int\n school_name: str\n address: str\n\n class Config:\n orm_mode = True\n\n\nclass StudentInfo(CreateStudent):\n id: int\n\n class Config:\n orm_mode = True\n", "repo_name": "Allwin12/student-management-system-using-fast-api", "sub_path": "sql_app/schemas.py", "file_name": "schemas.py", "file_ext": "py", "file_size_in_byte": 709, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "12", "api": [{"api_name": "pydantic.BaseModel", "line_number": 5, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 10, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "7418309882", "text": "import logging\nimport sys\nfrom typing import Optional\n\nimport click\n\nsys.path.append(\"..\")\n\nfrom pulse_jig.config import settings\nfrom lib.jig_client import JigClient\nfrom lib.ui.jig_gui import JigGUI\nfrom lib.provisioner.provisioner import Provisioner\nfrom lib.registrar import Registrar\nfrom lib.pulse_manager import PulseManager\n\n\ndef _configure_logging(debug):\n logging.basicConfig(\n level=logging.DEBUG if debug else logging.INFO,\n format=\"[%(asctime)s] [%(levelname)-5s] [%(name)s.%(funcName)s:%(lineno)d] %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n logging.getLogger(\"transitions\").setLevel(logging.INFO if debug else logging.ERROR)\n logging.getLogger(\"botocore\").setLevel(logging.WARN if debug else logging.ERROR)\n\n\n@click.command()\n@click.option(\"--dev\", default=lambda: JigClient.find_device())\n@click.option(\"--reset-pin\", default=6)\n@click.option(\"--pcb-sense-pin\", default=5)\n@click.option(\"--xdot-volume\", default=\"/media/pi/XDOT\")\ndef main(dev: Optional[str], reset_pin: int, pcb_sense_pin: int, xdot_volume: str):\n if dev is None:\n print(\"Could not detect device\")\n exit(1)\n\n _configure_logging(settings.app.debug)\n\n registrar = Registrar()\n registrar.network_check()\n\n pulse_manager = PulseManager(reset_pin, pcb_sense_pin, xdot_volume)\n provisioner_factory = Provisioner.build_factory(registrar, pulse_manager, dev)\n\n app = JigGUI()\n app.run(provisioner_factory, registrar)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "syamkg/pulse-production-jig-app", "sub_path": "pulse_jig/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1507, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 19, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 19, 
"usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 23, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 23, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.WARN", "line_number": 24, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 24, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 32, "usage_type": "name"}, {"api_name": "pulse_jig.config.settings.app", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pulse_jig.config.settings", "line_number": 37, "usage_type": "name"}, {"api_name": "lib.registrar.Registrar", "line_number": 39, "usage_type": "call"}, {"api_name": "lib.pulse_manager.PulseManager", "line_number": 42, "usage_type": "call"}, {"api_name": "lib.provisioner.provisioner.Provisioner.build_factory", "line_number": 43, "usage_type": "call"}, {"api_name": "lib.provisioner.provisioner.Provisioner", "line_number": 43, "usage_type": "name"}, {"api_name": "lib.ui.jig_gui.JigGUI", "line_number": 45, "usage_type": "call"}, {"api_name": "click.command", "line_number": 27, "usage_type": "call"}, {"api_name": "click.option", "line_number": 28, "usage_type": "call"}, {"api_name": "lib.jig_client.JigClient.find_device", "line_number": 28, "usage_type": "call"}, {"api_name": "lib.jig_client.JigClient", "line_number": 28, "usage_type": "name"}, {"api_name": "click.option", "line_number": 29, "usage_type": "call"}, {"api_name": "click.option", "line_number": 30, "usage_type": "call"}, {"api_name": "click.option", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "20785533287", "text": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport base64\nimport textwrap\nimport time\nfrom os.path import join as pjoin\n\nimport requests\nimport numpy as np\nimport pandas as pd\nimport yaml\nfrom yaml import Loader\nfrom regions import Regions\nfrom astropy.table import Table\n\nfrom baselayer.app.env import load_env, parser\n\nfrom skyportal.tests import api\nfrom skyportal.tests.patch_requests import patch_requests\n\n\npatch_requests()\n\n\nif __name__ == \"__main__\":\n parser.description = 'Load data into SkyPortal'\n parser.add_argument(\n 'data_files', type=str, nargs='+', help='YAML files with data to load'\n )\n parser.add_argument(\n '--host',\n help=textwrap.dedent(\n '''Fully specified URI of the running SkyPortal instance.\n E.g., https://myserver.com:9000.\n\n Defaults to http://localhost on the port specified\n in the SkyPortal configuration file.'''\n ),\n )\n parser.add_argument(\n '--token',\n help=textwrap.dedent(\n '''Token required for accessing the SkyPortal API.\n\n By default, SkyPortal produces a token that is\n written to .tokens.yaml. 
If no token is specified\n here, that token will be used.'''\n ),\n )\n parser.add_argument(\n '--create_tables',\n action='store_true',\n help=\"Set to create the SkyPortal database tables before inserting data.\",\n )\n\n env, cfg = load_env()\n\n # TODO: load multiple files\n if len(env.data_files) > 1:\n raise NotImplementedError(\"Cannot yet handle multiple data files\")\n\n fname = env.data_files[0]\n src = yaml.load(open(fname), Loader=Loader)\n src_path = os.path.dirname(fname)\n\n if env.create_tables:\n from baselayer.app.model_util import create_tables\n from skyportal.models import init_db\n\n RETRIES = 6\n timeout = 3\n for i in range(RETRIES):\n try:\n print(f\"Connecting to database {cfg['database']['database']}\")\n init_db(**cfg['database'])\n except TimeoutError:\n if i == RETRIES - 1:\n print('FAIL')\n print()\n print(\n f'Error: Could not connect to SkyPortal database; trying again in {timeout}s'\n )\n sys.exit(-1)\n else:\n time.sleep(timeout)\n timeout = max(timeout * 2, 30)\n print('Retrying connection...')\n\n print(\"Creating tables\")\n create_tables()\n\n def get_token():\n if env.token:\n return env.token\n\n try:\n token = yaml.load(open('.tokens.yaml'), Loader=yaml.Loader)['INITIAL_ADMIN']\n return token\n except (FileNotFoundError, TypeError, KeyError):\n return None\n\n print('Testing connection...', end='')\n\n RETRIES = 30\n timeout = 3\n admin_token = None\n status = None\n for i in range(RETRIES):\n try:\n previous_admin_token = admin_token\n admin_token = get_token()\n if admin_token != previous_admin_token:\n print('Loaded token from SkyPortal provisioned .tokens.yaml')\n\n def get(endpoint, token=admin_token):\n response_status, data = api(\"GET\", endpoint, token=token, host=env.host)\n return response_status, data\n\n def post(endpoint, data, token=admin_token):\n response_status, data = api(\n \"POST\", endpoint, data=data, token=token, host=env.host\n )\n return response_status, data\n\n def assert_post(endpoint, data, token=admin_token):\n response_status, data = post(endpoint, data, token)\n if not response_status == 200 and data[\"status\"] == \"success\":\n raise RuntimeError(\n f'API call to {endpoint} failed with status {status}: {data[\"message\"]}'\n )\n return data\n\n if admin_token:\n status, data = get('sysinfo')\n else:\n print('No token specified; reading from ', end='')\n print('SkyPortal generated .tokens.yaml')\n time.sleep(timeout)\n continue\n\n if status == 200 and data['status'] == 'success':\n break\n else:\n if i == RETRIES - 1:\n print('FAIL')\n else:\n time.sleep(timeout)\n print(f'Expected HTTP 200, received {status}. Trying again.')\n continue\n except requests.exceptions.ConnectionError:\n host = env.host or f'http://localhost:{cfg[\"ports.app\"]}'\n if i == RETRIES - 1:\n print('FAIL')\n print()\n print('Error: Could not connect to SkyPortal instance; please ensure ')\n print(f' it is running at the given host/port [{host}]')\n sys.exit(-1)\n else:\n time.sleep(timeout)\n print(f'Could not connect to {host}. 
Trying again.')\n\n if status not in (200, 400):\n print(f'Fatal: could not connect to server (HTTP status {status})')\n sys.exit(-1)\n\n if data['status'] != 'success':\n print(\n 'Error: Could not authenticate against SkyPortal; please specify a valid token.'\n )\n sys.exit(-1)\n\n status, response = get('groups/public')\n if status != 200 or response['status'] != 'success':\n print('Error: no public group found; aborting')\n sys.exit(-1)\n public_group_id = response['data']['id']\n\n error_log = []\n\n references = {'public_group_id': public_group_id}\n\n def inject_references(obj):\n if isinstance(obj, dict):\n if 'file' in obj:\n filename = pjoin(src_path, obj['file'])\n if filename.endswith('csv'):\n df = pd.read_csv(filename).replace({np.nan: None})\n obj.pop('file')\n obj.update(df.to_dict(orient='list'))\n elif filename.endswith('.png'):\n return base64.b64encode(open(filename, 'rb').read())\n elif filename.endswith('xml'):\n with open(filename, 'rb') as fid:\n payload = fid.read()\n return payload\n elif filename.endswith('reg'):\n return Regions.read(filename).serialize(format='ds9')\n elif filename.endswith('h5') or filename.endswith('hdf5'):\n try:\n payload = (\n Table.read(filename)\n .to_pandas()\n .replace({np.nan: None})\n .to_dict(orient='list')\n )\n except Exception as e:\n # sometimes we save HDF5 files using an HDFStore.\n # in this case we read it as a binary file and return it as \"data\"\n if 'values_block_0' in str(e):\n with open(filename, 'rb') as fid:\n payload = base64.b64encode(fid.read())\n else:\n raise e\n return payload\n elif filename.endswith('bz2'):\n payload = (\n pd.read_csv(filename, compression='bz2')\n .replace({np.nan: None})\n .to_dict(orient='list')\n )\n return payload\n elif filename.endswith('log'):\n with open(filename) as f:\n return f.read()\n else:\n raise NotImplementedError(\n f'{filename}: Only CSV, PNG, xml, reg, and hdf5 files '\n 'currently supported for extending individual objects'\n )\n\n for k, v in obj.items():\n obj[k] = inject_references(v)\n return obj\n elif isinstance(obj, str) and obj.startswith('='):\n try:\n return references[obj[1:]]\n except KeyError:\n print(\n f'\\nReference {obj[1:]} not found while posting to {endpoint}; skipping'\n )\n raise\n elif isinstance(obj, list):\n return [inject_references(item) for item in obj]\n else:\n return obj\n\n ENDPOINT_RETRIES = 3\n\n for endpoint, to_post in src.items():\n # Substitute references in path\n endpoint_parts = endpoint.split('/')\n try:\n for i, part in enumerate(endpoint_parts):\n if part.startswith('='):\n endpoint_parts[i] = str(references[part[1:]])\n except KeyError:\n print(\n f'\\nReference {part[1:]} not found while interpolating endpoint {endpoint}; skipping'\n )\n continue\n\n endpoint = '/'.join(endpoint_parts)\n\n print(f'Posting to {endpoint}: ', end='')\n if 'file' in to_post:\n filename = pjoin(src_path, to_post['file'])\n post_objs = yaml.load(open(filename), Loader=yaml.Loader)\n else:\n post_objs = to_post\n\n for obj in post_objs:\n # Fields that start with =, such as =id, get saved for using as\n # references later on\n saved_fields = {v: k[1:] for k, v in obj.items() if k.startswith('=')}\n\n # Remove all such fields from the object to be posted\n obj = {k: v for k, v in obj.items() if not k.startswith('=')}\n\n # Replace all references of the format field: =key or [=key, ..]\n # with the appropriate reference value\n try:\n inject_references(obj)\n except KeyError:\n continue\n\n if \"payload\" in obj:\n date_keys = [\"start_date\", 
\"end_date\"]\n for key in date_keys:\n if key in obj[\"payload\"]:\n obj[\"payload\"][key] = obj[\"payload\"][key].isoformat()\n\n ntries = 0\n posted_success = False\n while (ntries < ENDPOINT_RETRIES) and not posted_success:\n status, response = post(endpoint, data=obj)\n\n print('.' if status == 200 else 'X', end='')\n if status != 200:\n ntries = ntries + 1\n continue\n else:\n posted_success = True\n\n if status != 200:\n error_log.append(\n f\"/{endpoint}: {response['message'] if response else None}\"\n )\n else:\n # Save all references from the response\n for target, field in saved_fields.items():\n references[target] = response['data'][field]\n\n print()\n\n if error_log:\n print(\"\\nError log:\")\n print(\"----------\")\n print(\"\\n\".join(error_log))\n\n sys.exit(-1)\n", "repo_name": "skyportal/skyportal", "sub_path": "tools/data_loader.py", "file_name": "data_loader.py", "file_ext": "py", "file_size_in_byte": 11403, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 79, "dataset": "github-code", "pt": "12", "api": [{"api_name": "skyportal.tests.patch_requests.patch_requests", "line_number": 24, "usage_type": "call"}, {"api_name": "baselayer.app.env.parser.description", "line_number": 28, "usage_type": "attribute"}, {"api_name": "baselayer.app.env.parser", "line_number": 28, "usage_type": "name"}, {"api_name": "baselayer.app.env.parser.add_argument", "line_number": 29, "usage_type": "call"}, {"api_name": "baselayer.app.env.parser", "line_number": 29, "usage_type": "name"}, {"api_name": "baselayer.app.env.parser.add_argument", "line_number": 32, "usage_type": "call"}, {"api_name": "baselayer.app.env.parser", "line_number": 32, "usage_type": "name"}, {"api_name": "textwrap.dedent", "line_number": 34, "usage_type": "call"}, {"api_name": "baselayer.app.env.parser.add_argument", "line_number": 42, "usage_type": "call"}, {"api_name": "baselayer.app.env.parser", "line_number": 42, "usage_type": "name"}, {"api_name": "textwrap.dedent", "line_number": 44, "usage_type": "call"}, {"api_name": "baselayer.app.env.parser.add_argument", "line_number": 52, "usage_type": "call"}, {"api_name": "baselayer.app.env.parser", "line_number": 52, "usage_type": "name"}, {"api_name": "baselayer.app.env.load_env", "line_number": 58, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 65, "usage_type": "call"}, {"api_name": "yaml.Loader", "line_number": 65, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "skyportal.models.init_db", "line_number": 77, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 85, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 87, "usage_type": "call"}, {"api_name": "baselayer.app.model_util.create_tables", "line_number": 92, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 99, "usage_type": "call"}, {"api_name": "yaml.Loader", "line_number": 99, "usage_type": "attribute"}, {"api_name": "skyportal.tests.api", "line_number": 118, "usage_type": "call"}, {"api_name": "skyportal.tests.api", "line_number": 122, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 140, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 149, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 152, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 159, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 161, "usage_type": 
"call"}, {"api_name": "sys.exit", "line_number": 166, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 172, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 187, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 189, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 193, "usage_type": "call"}, {"api_name": "regions.Regions.read", "line_number": 199, "usage_type": "call"}, {"api_name": "regions.Regions", "line_number": 199, "usage_type": "name"}, {"api_name": "astropy.table.Table.read", "line_number": 203, "usage_type": "call"}, {"api_name": "astropy.table.Table", "line_number": 203, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 205, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 213, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 220, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 268, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 269, "usage_type": "call"}, {"api_name": "yaml.Loader", "line_number": 269, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 322, "usage_type": "call"}]} +{"seq_id": "41344382840", "text": "from flask import Blueprint, request, jsonify\n\nfrom myshop.controllers import basket as basket_ctrl\nfrom myshop.exceptions import BadRequest, NotFound\nfrom myshop.libs import auth\n\n\nbp = Blueprint(__name__, \"basket\")\n\n@bp.route(\"/basket/create_or_add\", methods=[\"POST\"])\ndef basket_create():\n product_id = request.form.get(\"product_id\")\n total = request.form.get(\"total\")\n\n product_ids = []\n # product_id separate with comma if more than one\n for i in product_id.split(\",\"):\n product_ids.append(int(i))\n\n totals = []\n # total separate with comma if more than one\n for i in total.split(\",\"):\n totals.append(int(i))\n\n if None in (product_id, total):\n raise BadRequest(\"terdapat komponen yang kosong\")\n\n basket = basket_ctrl.create(\n user_id=auth.user.id,\n product_ids=product_ids,\n totals=totals,\n )\n\n response = {\n \"status\": 200,\n \"id\": basket.id,\n }\n\n return jsonify(response)\n\n\n@bp.route(\"/basket/user/\", methods=[\"GET\"])\ndef basket_by_user(user_id):\n \"\"\"Get basket\n\n \"\"\"\n basket = basket_ctrl.get_by_user(\n user_id=user_id\n )\n\n if not basket:\n response = {\n \"status\": 204,\n \"message\": \"Keranjang tidak ditemukan\"\n }\n\n else:\n response = {\n \"status\": 200,\n \"id\": basket.id,\n \"user\": basket.user_json,\n \"basket_product\": basket.basket_product_json,\n \"total_product\": basket.total_product,\n \"sub_total\": basket.sub_total,\n \"created_on\": basket.created_on.timestamp(),\n }\n\n return jsonify(response)\n\n\n@bp.route(\"/basket/item/delete\", methods=[\"POST\"])\ndef basket_delete():\n basket_id = request.form.get(\"basket_id\")\n product_id = request.form.get(\"product_id\")\n\n product_ids = []\n # product_id separate with comma if more than one\n for i in product_id.split(\",\"):\n product_ids.append(int(i))\n\n if None in (basket_id, product_id):\n raise BadRequest(\"terdapat komponen yang kosong\")\n\n basket = basket_ctrl.item_delete(\n basket_id=basket_id,\n product_ids=product_ids,\n )\n\n response = {\n \"status\": 200,\n \"id\": basket.id,\n \"user\": basket.user_json,\n 
\"basket_product\": basket.basket_product_json,\n \"updated_on\": basket.updated_on.timestamp(),\n }\n\n return jsonify(response)", "repo_name": "IsnandaZain/e-commerce-api", "sub_path": "myshop/routes/v1/basket.py", "file_name": "basket.py", "file_ext": "py", "file_size_in_byte": 2425, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "flask.Blueprint", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 12, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 13, "usage_type": "name"}, {"api_name": "myshop.exceptions.BadRequest", "line_number": 26, "usage_type": "call"}, {"api_name": "myshop.controllers.basket.create", "line_number": 28, "usage_type": "call"}, {"api_name": "myshop.controllers.basket", "line_number": 28, "usage_type": "name"}, {"api_name": "myshop.libs.auth.user", "line_number": 29, "usage_type": "attribute"}, {"api_name": "myshop.libs.auth", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 39, "usage_type": "call"}, {"api_name": "myshop.controllers.basket.get_by_user", "line_number": 47, "usage_type": "call"}, {"api_name": "myshop.controllers.basket", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 73, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "myshop.exceptions.BadRequest", "line_number": 82, "usage_type": "call"}, {"api_name": "myshop.controllers.basket.item_delete", "line_number": 84, "usage_type": "call"}, {"api_name": "myshop.controllers.basket", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "74629136020", "text": "import pandas as pd\nimport pyglet\nimport animation_manager\nimport data_functions\nimport headings\nimport read_database\nimport readout\nimport trace\nimport minimap\n\n\npyglet.font.add_directory(\"fonts\")\n\nwindow = pyglet.window.Window(640, 640)\nbatch = pyglet.graphics.Batch()\ngroups = [\n \"background\",\n \"midground\",\n \"foreground\",\n \"overlay\",\n \"GUI_back\",\n \"GUI_mid\",\n \"GUI_front\"\n]\ngroup_dict = {}\nfor i, group_name in enumerate(groups):\n group_dict[group_name] = pyglet.graphics.OrderedGroup(i)\n\nanimation_manager = animation_manager.AnimationManager(window)\n\nstatic_elements = []\n\n\ndef full_lap_follow(session_date, session_name, driver_lap_tcam_tracked_tuples, heading1, heading2, buffer_seconds, master_lap_index=0):\n raw_frames = []\n smooth_frames = []\n tracked_traces = []\n\n # Driver traces\n for driver, lap, tcam, tracked in driver_lap_tcam_tracked_tuples:\n # Get frames\n frame = read_database.read_lap_samples(session_date, session_name, driver, lap, 
buffer_seconds)\n raw_frame = frame.copy()\n raw_frame = data_functions.interpolate_gaps(raw_frame)\n raw_frame = data_functions.add_animation_time(raw_frame)\n raw_frames.append(raw_frame)\n\n smooth_frame = frame.copy()\n smooth_frame = data_functions.interpolate_gaps(smooth_frame)\n smooth_frame = data_functions.sample_smoothing(smooth_frame)\n smooth_frame = data_functions.add_animation_time(smooth_frame)\n smooth_frames.append(smooth_frame)\n\n tracking_window = data_functions.get_tracking_window(smooth_frame)\n\n # Make traces\n raw_trace = trace.Trace(\n batch=batch, \n group_dict=group_dict, \n radius=5, \n frame=raw_frame, \n animation_manager=animation_manager, \n tracking_window=None, \n tla=False, \n tcam=tcam, \n tail=True\n )\n smooth_trace = trace.Trace(\n batch=batch,\n group_dict=group_dict,\n radius=10,\n frame=smooth_frame,\n animation_manager=animation_manager,\n tracking_window=tracking_window,\n tla=True,\n tcam=tcam,\n tail=False\n )\n\n if tracked: tracked_traces.append(smooth_trace)\n\n animation_manager.tracked_traces = tracked_traces\n\n # Racing line and start/finish marker based on master lap\n trace.RollingRacingLine(\n batch=batch, \n group_dict=group_dict, \n width=3, \n frame=raw_frames[master_lap_index], \n rolling_samples=50, \n animation_manager=animation_manager\n )\n start_finish_point = data_functions.make_start_finish_point(raw_frames[master_lap_index])\n trace.StartFinishPoint(\n world_point=start_finish_point, \n radius=5, \n color=(0, 0, 0), \n batch=batch, \n group_dict=group_dict, \n animation_manager=animation_manager\n )\n\n # Minimap, headings, etc.\n minimap.Minimap((20, 20), 180, raw_frames[master_lap_index], batch, group_dict, animation_manager)\n\n h1 = headings.Heading(window, window.height - 40, 40, heading1, 18, (255, 255, 255, 255), (255, 30, 0), batch, group_dict)\n h2 = headings.Heading(window, window.height - 70, 30, heading2, 14, (255, 255, 255, 255), (0, 0, 0), batch, group_dict)\n for h in (h1, h2): static_elements.append(h)\n\n note_text = \"Note: This animation contains imprecisions due to source telemetry's low sample rate (~5Hz) and significant jitter.\" \\\n \"Small markers follow an interpolated version of the raw signal. 
Large markers represent a smooth, heavily filtered signal.\"\n \n note_doc = pyglet.text.document.UnformattedDocument(note_text)\n note_doc.set_style(0, 100, attributes={\n \"font_name\": \"TitilliumWeb-Regular\",\n \"font_size\": 9,\n \"color\": (21, 21, 30, 255)\n })\n note_layout = pyglet.text.layout.TextLayout(note_doc, 350, 60, True, batch=batch, group=group_dict[\"GUI_front\"], wrap_lines=True)\n note_layout.position = (250, 10)\n\n static_elements.append(note_layout)\n\n # Lap/sector time readouts\n readout_frames = []\n for driver, lap, tcam, tracked in driver_lap_tcam_tracked_tuples:\n readout_data = read_database.read_times(session_date, session_name, driver, lap)\n readout_data = data_functions.add_readout_animation_times(readout_data)\n readout_frames.append(readout_data)\n readout_frame = pd.concat(readout_frames)\n readout_frame.reset_index(inplace=True, drop=True)\n readout_frame = data_functions.add_readout_deltas(readout_frame)\n\n readout.Readout(readout_frame, (420, 540), animation_manager, batch, group_dict)\n\n\ndriver_lap_tcam_tracked_tuples = [\n (16, 12, False, True),\n (1, 14, False, True),\n (55, 11, True, True),\n (11, 14, True, False),\n (44, 16, True, False),\n (63, 14, False, False),\n (4, 18, True, False),\n (3, 19, False, False)\n]\nfull_lap_follow(\"2022-09-10\", \"Qualifying\", driver_lap_tcam_tracked_tuples, \"Italian Grand Prix 2022\", \"Final Qualifying Laps\", 3, 0)\n\n\npyglet.options[\"vsync\"] = False\npyglet.gl.glClearColor(247/255, 244/255, 241/255, 1)\n\npyglet.clock.schedule(animation_manager.update_traces)\n\n@window.event\ndef on_draw():\n window.clear()\n batch.draw()\n\n\nif __name__ == \"__main__\":\n animation_manager.run()\n pyglet.app.run()", "repo_name": "FraserTarbet/F1Tracer", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5369, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "12", "api": [{"api_name": "pyglet.font.add_directory", "line_number": 12, "usage_type": "call"}, {"api_name": "pyglet.font", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pyglet.window.Window", "line_number": 14, "usage_type": "call"}, {"api_name": "pyglet.window", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pyglet.graphics.Batch", "line_number": 15, "usage_type": "call"}, {"api_name": "pyglet.graphics", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pyglet.graphics.OrderedGroup", "line_number": 27, "usage_type": "call"}, {"api_name": "pyglet.graphics", "line_number": 27, "usage_type": "attribute"}, {"api_name": "animation_manager.AnimationManager", "line_number": 29, "usage_type": "call"}, {"api_name": "read_database.read_lap_samples", "line_number": 42, "usage_type": "call"}, {"api_name": "data_functions.interpolate_gaps", "line_number": 44, "usage_type": "call"}, {"api_name": "data_functions.add_animation_time", "line_number": 45, "usage_type": "call"}, {"api_name": "data_functions.interpolate_gaps", "line_number": 49, "usage_type": "call"}, {"api_name": "data_functions.sample_smoothing", "line_number": 50, "usage_type": "call"}, {"api_name": "data_functions.add_animation_time", "line_number": 51, "usage_type": "call"}, {"api_name": "data_functions.get_tracking_window", "line_number": 54, "usage_type": "call"}, {"api_name": "trace.Trace", "line_number": 57, "usage_type": "call"}, {"api_name": "trace.Trace", "line_number": 68, "usage_type": "call"}, {"api_name": "animation_manager.tracked_traces", "line_number": 82, 
"usage_type": "attribute"}, {"api_name": "trace.RollingRacingLine", "line_number": 85, "usage_type": "call"}, {"api_name": "data_functions.make_start_finish_point", "line_number": 93, "usage_type": "call"}, {"api_name": "trace.StartFinishPoint", "line_number": 94, "usage_type": "call"}, {"api_name": "minimap.Minimap", "line_number": 104, "usage_type": "call"}, {"api_name": "headings.Heading", "line_number": 106, "usage_type": "call"}, {"api_name": "headings.Heading", "line_number": 107, "usage_type": "call"}, {"api_name": "pyglet.text.document.UnformattedDocument", "line_number": 113, "usage_type": "call"}, {"api_name": "pyglet.text", "line_number": 113, "usage_type": "attribute"}, {"api_name": "pyglet.text.layout.TextLayout", "line_number": 119, "usage_type": "call"}, {"api_name": "pyglet.text", "line_number": 119, "usage_type": "attribute"}, {"api_name": "read_database.read_times", "line_number": 127, "usage_type": "call"}, {"api_name": "data_functions.add_readout_animation_times", "line_number": 128, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 130, "usage_type": "call"}, {"api_name": "data_functions.add_readout_deltas", "line_number": 132, "usage_type": "call"}, {"api_name": "readout.Readout", "line_number": 134, "usage_type": "call"}, {"api_name": "pyglet.options", "line_number": 150, "usage_type": "attribute"}, {"api_name": "pyglet.gl.glClearColor", "line_number": 151, "usage_type": "call"}, {"api_name": "pyglet.gl", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pyglet.clock.schedule", "line_number": 153, "usage_type": "call"}, {"api_name": "pyglet.clock", "line_number": 153, "usage_type": "attribute"}, {"api_name": "animation_manager.update_traces", "line_number": 153, "usage_type": "attribute"}, {"api_name": "animation_manager.run", "line_number": 162, "usage_type": "call"}, {"api_name": "pyglet.app.run", "line_number": 163, "usage_type": "call"}, {"api_name": "pyglet.app", "line_number": 163, "usage_type": "attribute"}]} +{"seq_id": "5384605499", "text": "import tqdm\nimport os\nimport numpy as np\nimport random\nimport pandas as pd\nimport random\n\n# DiveFace data are divided in six folders (demographic groups)\nfolders = ['AM4K', 'AW4K', 'BM4K', 'BW4K', 'CM4K', 'CW4K' ]\n\n\ndef _get_label_coded(label):\n\tsex = 0 if label[1] == 'M' else 1\n\teth = 0 if label[0] == 'A' else 1 if label[0] == 'B' else 2\n\treturn [sex, None, eth]\n\n\ndef get_embeddings(embeddings_path):\n\tembeddings = []\n\tlabels = []\n\tfilenames = []\n\tusers = []\n\t# return embeddings and the list of filenames, in the same df\n\tfor path, subdirs, files in tqdm.tqdm(os.walk(embeddings_path)):\n\t\tfor name in [f for f in files if f.find('.npy') >= 0]:\n\t\t\t# store embeddings normalized with L2-norm\n\t\t\tembedding = np.load(os.path.join(path, name))\n\t\t\tembedding = embedding / np.linalg.norm(embedding, ord=2)\n\t\t\tembeddings.append(embedding)\n\t\t\t# get the user name\n\t\t\tuser = path.split('\\\\')[-1]\n\t\t\tusers.append(user)\n\t\t\t# get the labels (contained in the name of the second last folder)\n\t\t\tlabel = _get_label_coded(path.split('\\\\')[-2])\n\t\t\tlabels.append(label)\n\t\t\t# remove file extension (npy)\n\t\t\tfilename = name[:-4]\n\t\t\tfilenames.append(filename)\n\n\t# create DataFrame with filenames, embeddings, users, and files\n\tembeddings = np.array(embeddings)\n\tfiles_embeddings_df = pd.DataFrame(embeddings, columns=['f'+str(i) for i in range(len(embedding))])\n\tfiles_embeddings_df['filename'] = 
filenames\n\tfiles_embeddings_df['users'] = users\n\n\tlabels = np.array(labels)\n\tlabels_df = pd.DataFrame(labels, columns=['sex', 'age', 'ethnicity'])\n\tfiles_embeddings_df = pd.concat([files_embeddings_df, labels_df], axis=1)\n\n\treturn files_embeddings_df, len(embedding)\n\n\ndef get_diveface_df(embeddings_path, seed, save_files=False, limit_size=False):\n\tordered_filenames_lab_df, length_embeddings = get_embeddings(embeddings_path)\n\tif limit_size:\n\t\trandom.seed(seed)\n\t\tpats_to_keep = []\n\t\t# specific for diveface\n\t\tfor sex_code in range(2):\n\t\t\tfor eth_code in range(3):\n\t\t\t\ttmp = ordered_filenames_lab_df.loc[(ordered_filenames_lab_df['sex'] == sex_code) &\n\t\t\t\t\t\t\t\t\t\t\t\t (ordered_filenames_lab_df['ethnicity'] == eth_code)]\n\t\t\t\tpats = list(tmp['users'].unique())\n\t\t\t\trandom.shuffle(pats)\n\t\t\t\tpats_to_keep += pats[:1000]\n\n\t\tordered_filenames_lab_df = ordered_filenames_lab_df[ordered_filenames_lab_df['users'].isin(pats_to_keep)]\n\n\tordered_filenames_lab_df = ordered_filenames_lab_df.sample(frac=1, random_state=seed).reset_index(drop=True)\n\n\tif save_files:\n\t\tordered_filenames_lab_df.to_csv('data/diveface_df.csv', index=False)\n\n\treturn ordered_filenames_lab_df, length_embeddings\n\n\ndef get_sb_train_test_indexes(diveface_df, seed, length_embedding, spl=0.7):\n\ttrain_indexes = []\n\ttest_indexes = []\n\trandom.seed(seed)\n\t# here we do not need the embeddings\n\tdiveface_df = diveface_df.drop(['f' + str(i) for i in range(length_embedding)], axis=1)\n\t# to maintain the original indexes\n\tdiveface_df['initialIndex'] = diveface_df.index.values\n\t# consider one random sample for each subject\n\tdf_gby = diveface_df.groupby('users').apply(lambda x: x.sample(1, random_state=seed)).reset_index(drop=True)\n\t# specific for diveface\n\tfor sex_code in range(2):\n\t\tfor eth_code in range(3):\n\t\t\tref = df_gby.loc[(df_gby['sex']==sex_code) & (df_gby['ethnicity']==eth_code)]\n\t\t\tindexes = list(ref['initialIndex'])\n\t\t\trandom.shuffle(indexes)\n\t\t\ttrain_indexes += indexes[:int(spl * len(indexes))]\n\t\t\ttest_indexes += indexes[int(spl * len(indexes)):]\n\n\trandom.shuffle(train_indexes)\n\treturn train_indexes, test_indexes\n\n\ndef get_verification_indexes(diveface_df, seed, length_embedding, genuine=3):\n\tdict_verification_indexes = {}\n\trandom.seed(seed)\n\t# here we do not need the embeddings\n\tdiveface_df = diveface_df.drop(['f' + str(i) for i in range(length_embedding)], axis=1)\n\t# consider three random sample for each subject\n\tdf_gby = diveface_df.groupby('users').apply(lambda x: x.sample(min(genuine, len(x)), random_state=seed))\n\n\tfor ax, _ in df_gby.iterrows():\n\t\tuser = ax[0]\n\t\tinitial_index = ax[1]\n\t\tif user not in list(dict_verification_indexes.keys()):\n\t\t\tdict_verification_indexes[user] = []\n\t\tdict_verification_indexes[user].append(initial_index)\n\n\treturn dict_verification_indexes\n\n\ndef get_x_ready(df, length_embeddings):\n\t# get the embeddings\n\tx = df[['f'+str(i) for i in range(length_embeddings)]]\n\tx = x.to_numpy()\n\treturn x\n\n\ndef get_y_ready(df, labels):\n\t# get the labels\n\ty = df[labels]\n\ty = y.to_numpy()\n\treturn y\n", "repo_name": "otroshi/multi-ive", "sub_path": "evaluation/load_diveface.py", "file_name": "load_diveface.py", "file_ext": "py", "file_size_in_byte": 4389, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "12", "api": [{"api_name": "tqdm.tqdm", "line_number": 24, "usage_type": "call"}, 
{"api_name": "os.walk", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 48, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 56, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 64, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 80, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 92, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 96, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "1962653804", "text": "from django.urls import path\n\nfrom . import views\n\napp_name = 'annotate'\n\nurlpatterns = [\n path('index', views.index, name='index'),\n path('task/', views.task, name='task'),\n path('getimage/', views.getimage, name='getimage'),\n path('tokenlogin', views.tokenlogin, name='tokenlogin'),\n]\n", "repo_name": "yuantailing/qrcode-annotate", "sub_path": "server/annotate/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 315, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "40965746508", "text": "\"Code retrieved from: https://stackoverflow.com/a/64682734/5647511\"\nfrom typing import Type, Any, TypeVar\n\n\nT = TypeVar(\"T\")\n\n\nclass NoPublicConstructor(type):\n \"\"\"Metaclass that ensures a private constructor\n\n If a class uses this metaclass like this:\n\n class SomeClass(metaclass=NoPublicConstructor):\n pass\n\n If you try to instantiate your class (`SomeClass()`),\n a `TypeError` will be thrown.\n \"\"\"\n\n def __call__(cls, *args, **kwargs):\n raise TypeError(\n f\"{cls.__module__}.{cls.__qualname__} has no public constructor. 
\"\n f\"Use one of the create methods instead.\"\n )\n\n def _create(cls: Type[T], *args: Any, **kwargs: Any) -> T:\n return super().__call__(*args, **kwargs) # type: ignore\n", "repo_name": "SURGroup/UQpy", "sub_path": "src/UQpy/utilities/NoPublicConstructor.py", "file_name": "NoPublicConstructor.py", "file_ext": "py", "file_size_in_byte": 772, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 216, "dataset": "github-code", "pt": "12", "api": [{"api_name": "typing.TypeVar", "line_number": 5, "usage_type": "call"}, {"api_name": "typing.Type", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "72414764820", "text": "# firestore test\nfrom google.cloud import firestore\nfrom google.cloud.firestore_v1beta1 import GeoPoint\nimport json\n\n\n# 모든 waypoints를 업로드한다. \ndef upload_all_waypoints(db):\n upload_subway_stations(db)\n upload_subway_gates(db)\n upload_bus_stations(db)\n upload_bicycle_stations(db)\n upload_car_stations(db)\n\n\n# 지하철 역 waypoints\ndef upload_subway_stations(db):\n with open('./data/subway_station.json', 'r') as f:\n subway_station = json.loads(f.read())\n\n ref = db.collection('waypoints')\n for n in subway_station:\n lat = subway_station[n]['location']['latitude']\n lon = subway_station[n]['location']['longitude']\n point = GeoPoint(lat, lon)\n metadata = {}\n metadata.update({\n 'fr_code': subway_station[n]['fr_code'],\n 'line_num': subway_station[n]['line_num']\n })\n\n ref.document(n).set({\n 'type': subway_station[n]['type'],\n 'name': subway_station[n]['name'],\n 'address': subway_station[n]['address'],\n 'location': point,\n 'metadata': metadata\n })\n\n\n# 지하철 출구 waypoints\ndef upload_subway_gates(db):\n with open('./data/subway_gate.json', 'r') as f:\n subway_gates = json.loads(f.read())\n\n ref = db.collection('waypoints')\n for n in subway_gates:\n lat = subway_gates[n]['location']['latitude']\n lon = subway_gates[n]['location']['longitude']\n point = GeoPoint(lat, lon)\n\n ref.document(n).set({\n 'type': subway_gates[n]['type'],\n 'name': subway_gates[n]['name'],\n 'address': subway_gates[n]['address'],\n 'location': point\n })\n\n\n# 버스 정류장 waypoints\ndef upload_bus_stations(db):\n with open('./data/bus_station.json', 'r') as f:\n bus_stations = json.loads(f.read())\n\n ref = db.collection('waypoints')\n for n in bus_stations:\n lat = bus_stations[n]['location']['latitude']\n lon = bus_stations[n]['location']['longitude']\n point = GeoPoint(lat, lon)\n metadata = {}\n metadata.update({\n 'ars_id': bus_stations[n]['id']\n })\n\n ref.document(n).set({\n 'type': bus_stations[n]['type'],\n 'name': bus_stations[n]['name'],\n 'address': bus_stations[n]['address'],\n 'location': point,\n 'metadata': metadata\n })\n\n\n# 자전거 정류장 waypoints\ndef upload_bicycle_stations(db):\n with open('./data/bicycle_node.json', 'r') as f:\n bicycle_stations = json.loads(f.read())\n\n ref = db.collection('nodes')\n for n in bicycle_stations:\n lat = bicycle_stations[n]['location']['latitude']\n lon = bicycle_stations[n]['location']['longitude']\n point = GeoPoint(lat, lon)\n metadata = {}\n metadata.update({\n 'id': bicycle_stations[n]['id']\n })\n\n ref.document(n).set({\n 'type': bicycle_stations[n]['type'],\n 'name': bicycle_stations[n]['name'],\n 'address': bicycle_stations[n]['address'],\n 'location': point,\n 'metadata': metadata\n })\n\n\n# 나눔카 정류장 waypoints\ndef upload_car_stations(db):\n with open('./data/car_node.json', 'r') as f:\n car_stations = json.loads(f.read())\n\n ref = 
db.collection('waypoints')\n for n in car_stations:\n lat = car_stations[n]['location']['latitude']\n lon = car_stations[n]['location']['longitude']\n point = GeoPoint(lat, lon)\n metadata = {}\n metadata.update({\n 'id': car_stations[n]['id']\n })\n\n ref.document(n).set({\n 'type': car_stations[n]['type'],\n 'name': car_stations[n]['name'],\n 'address': car_stations[n]['address'],\n 'location': point,\n 'metadata': metadata\n })\n\n\n# 모든 라인을 업로드\ndef upload_all_lines(db):\n upload_bus_lines(db)\n upload_subway_lines(db)\n\n\ndef upload_bus_lines(db):\n with open('./data/bus_line.json', 'r') as f:\n bus_lines = json.loads(f.read())\n\n ref = db.collection('lines')\n for n in bus_lines:\n ref.document(n).set({\n 'type': bus_lines[n]['type'],\n 'name': bus_lines[n]['name'],\n 'id': bus_lines[n]['id'],\n })\n\n\ndef upload_subway_lines(db):\n with open('./data/subway_line.json', 'r') as f:\n subway_lines = json.loads(f.read())\n\n ref = db.collection('lines')\n for n in subway_lines:\n ref.document(n).set({\n 'type': subway_lines[n]['type'],\n 'name': subway_lines[n]['name'],\n 'id': subway_lines[n]['id'],\n })\n\n\n# 모든 directions를 업로드\ndef upload_all_directions(db):\n # 지하철 링크\n upload_subway_links(db)\n # 지하철 환승\n upload_subway_transfer_links(db)\n # 지하철역 - 출구\n upload_gate_links(db)\n # 지전거\n upload_bicycle_links(db)\n # 버스\n upload_bus_links(db)\n # 걸어서 닿을 수 있는 거리\n upload_bus_walk_link(db)\n\n\n# 지하철 링크\ndef upload_subway_links(db):\n with open('./data/subway_link.json', 'r') as f:\n subway_links = json.loads(f.read())\n\n ref = db.collection('directions')\n for n in subway_links:\n ref.document(n).set(subway_links[n])\n\n\n# 지하철 환승\ndef upload_subway_transfer_links(db):\n with open('./data/subway_transfer.json', 'r') as f:\n subway_transfer_links = json.loads(f.read())\n\n ref = db.collection('directions')\n for n in subway_transfer_links:\n ref.document(n).set(subway_transfer_links[n])\n\n\n# 지하철역 - 출구\ndef upload_gate_links(db):\n with open('./data/gate_link.json', 'r') as f:\n gate_links = json.loads(f.read())\n\n ref = db.collection('directions')\n for n in gate_links:\n ref.document(n).set(gate_links[n])\n\n\n# 지전거\ndef upload_bicycle_links(db):\n with open('./data/bicycle_link.json', 'r') as f:\n bicycle_links = json.loads(f.read())\n\n ref = db.collection('directions')\n for n in bicycle_links:\n ref.document(n).set(bicycle_links[n])\n\n\n# 버스\ndef upload_bus_links(db):\n with open('./data/bus_link.json', 'r') as f:\n bus_links = json.loads(f.read())\n\n ref = db.collection('directions')\n for n in bus_links:\n ref.document(n).set(bus_links[n])\n\n\n# 걸어서 닿을 수 있는 거리\ndef upload_bus_walk_link(db):\n with open('./data/walk_link.json', 'r') as f:\n walk_links = json.loads(f.read())\n\n ref = db.collection('directions')\n for n in walk_links:\n ref.document(n).set(walk_links[n])\n\n\ndef run():\n db = firestore.Client()\n upload_all_waypoints(db)\n upload_all_lines(db)\n upload_all_directions(db)\n", "repo_name": "notesquare/zigmap-tool", "sub_path": "proj/upload/firestore.py", "file_name": "firestore.py", "file_ext": "py", "file_size_in_byte": 6760, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "json.loads", "line_number": 19, "usage_type": "call"}, {"api_name": "google.cloud.firestore_v1beta1.GeoPoint", "line_number": 25, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 44, "usage_type": "call"}, {"api_name": "google.cloud.firestore_v1beta1.GeoPoint", "line_number": 50, 
"usage_type": "call"}, {"api_name": "json.loads", "line_number": 63, "usage_type": "call"}, {"api_name": "google.cloud.firestore_v1beta1.GeoPoint", "line_number": 69, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 87, "usage_type": "call"}, {"api_name": "google.cloud.firestore_v1beta1.GeoPoint", "line_number": 93, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 111, "usage_type": "call"}, {"api_name": "google.cloud.firestore_v1beta1.GeoPoint", "line_number": 117, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 140, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 153, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 183, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 193, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 203, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 213, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 223, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 233, "usage_type": "call"}, {"api_name": "google.cloud.firestore.Client", "line_number": 241, "usage_type": "call"}, {"api_name": "google.cloud.firestore", "line_number": 241, "usage_type": "name"}]} +{"seq_id": "14634993638", "text": "# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n\nfrom xml.dom.minidom import parseString\nimport urllib.request, urllib.error, urllib.parse\nimport saldo_util\nimport urllib.request, urllib.parse, urllib.error\nimport re\n\n\ndef sblex(sense):\n senses = \"|\".join([saldo.encode(\"UTF-8\") for saldo in saldo_util.lookup_md1(sense)])\n sblex_address = \"http://demosb.spraakdata.gu.se/ws/lexikon\"\n params = {}\n params[\"lexikon\"] = \"dalin\"\n params[\"saldo\"] = senses\n data = urllib.parse.urlencode(params)\n req = urllib.request.Request(sblex_address, data)\n content = urllib.request.urlopen(req).read()\n dom = parseString(content)\n result = []\n for entry in dom.getElementsByTagName(\"LexicalEntry\"):\n eid = entry.getElementsByTagName(\"eid\")[0].childNodes[0].data\n wfs = set()\n for wf in entry.getElementsByTagName(\"wf\"):\n wfs.add(wf.childNodes[0].data)\n result.append((eid, list(wfs)))\n return result\n", "repo_name": "spraakbanken/saldo-dalin-ws", "sub_path": "dalin-ws/sblex.py", "file_name": "sblex.py", "file_ext": "py", "file_size_in_byte": 959, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "12", "api": [{"api_name": "saldo_util.lookup_md1", "line_number": 12, "usage_type": "call"}, {"api_name": "urllib.request.parse.urlencode", "line_number": 17, "usage_type": "call"}, {"api_name": "urllib.request.parse", "line_number": 17, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 17, "usage_type": "name"}, {"api_name": "urllib.request.request.Request", "line_number": 18, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 18, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 18, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 19, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 19, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 19, "usage_type": "name"}, {"api_name": "xml.dom.minidom.parseString", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "13143011861", "text": "import torch\nfrom pathlib import Path\n\nMODEL_NAME = 'range3/textgen'\ndevice = torch.device('cuda' if 
torch.cuda.is_available() else 'cpu')\n\nmax_length_src = 30\nmax_length_target = 300\n\nbatch_size_train = 8\nbatch_size_valid = 8\n\nepochs = 1000\npatience = 20\n\nWORKSPACE_ROOT_DIR = Path(__file__).parent.parent \nNOVEL_DATA_PATH = (WORKSPACE_ROOT_DIR / 'data/novels/narou').resolve()\nSENTENCEPIECE_MODEL_DIR = WORKSPACE_ROOT_DIR / 'models/sentencepiece'\n", "repo_name": "range3/pytorch-practice", "sub_path": "textgen/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 449, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "torch.device", "line_number": 5, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 5, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "16930480751", "text": "from setuptools import setup\n\nwith open(\"README.md\", 'r') as f:\n long_description = f.read()\n\nsetup(\n name='mlutil',\n version='0.1',\n description='Util for ML',\n author='Neil Jie Yan',\n author_email='yanjie@ict.ac.cn, jiey@msr',\n packages=['mlutil'],\n url=\"http://weristdas\",\n install_requires=['numpy', 'pandas', 'pykalman'], #external dependent packages\n)\n", "repo_name": "weristdas/mlutil", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 377, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "setuptools.setup", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "18244718086", "text": "from datetime import datetime, timedelta\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nfrom sklearn.base import clone\nfrom graph_traffic.model_selection import timeseries_cv\nfrom graph_traffic.custom_transformer import transform_df\nfrom graph_traffic.config import project_path\nfrom graph_traffic.merge_data import merge_data\nfrom graph_traffic.get_data import get_mmagns\nimport itertools\nfrom time import time\nimport pickle\nimport matplotlib as mpl\nimport numpy as np\n\nmpl.rcParams['axes.grid'] = False\n\n\n\ndef get_combinations(dict_possible):\n keys, values = zip(*dict_possible.items())\n return [dict(zip(keys, v)) for v in itertools.product(*values)]\n\n\ndef try_combinations(data_dict, meteo_combinations, temporal_combinations, pipeline):\n training_datetime = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n training_folder = f\"{project_path}/training_history/regression\"\n\n meteo_values = {}\n temporal_values = {}\n results = {}\n training_time = {}\n alpha = {}\n\n for i, meteo_dict in enumerate(meteo_combinations):\n print(f\"\\n{i}\")\n meteo_values[i] = meteo_dict\n\n mmagns = get_mmagns(meteo_dict)\n\n df = merge_data(data_dict[\"ids_list\"][0], data_dict[\"from_date\"], data_dict[\"to_date\"], data_dict[\"target\"], mmagns)\n\n with open(f\"{training_folder}/{training_datetime}_meteo_values.pkl\", \"wb\") as f:\n pickle.dump(meteo_values, f)\n\n for j, temporal_dict in enumerate(temporal_combinations):\n df_t = transform_df(df, meteo_dict, temporal_dict, data_dict[\"interactions\"], data_dict[\"target\"])\n\n data_size = df_t.shape[0]\n\n train_x = df_t[:int(0.8 * data_size):11, 1:]\n train_y = df_t[:int(0.8 * data_size):11, 0].ravel()\n\n if np.linalg.matrix_rank(train_x) != train_x.shape[1]:\n continue\n\n temporal_values[j] = 
temporal_dict\n print(j, end=\"\\r\")\n\n start_time = time()\n _, _, results[(i, j)], alpha[(i, j)] = timeseries_cv(pipeline, train_x, train_y, with_previous_timesteps=False,\n with_alpha=True)\n training_time[(i, j)] = time() - start_time\n\n if i == 0:\n with open(f\"{training_folder}/{training_datetime}_temporal_values.pkl\", \"wb\") as f:\n pickle.dump(temporal_values, f)\n\n with open(f\"{training_folder}/{training_datetime}_results.pkl\", \"wb\") as f:\n pickle.dump(results, f)\n\n with open(f\"{training_folder}/{training_datetime}_times.pkl\", \"wb\") as f:\n pickle.dump(training_time, f)\n\n with open(f\"{training_folder}/{training_datetime}_alphas.pkl\", \"wb\") as f:\n pickle.dump(alpha, f)\n\n\ndef train_with_args(data_dict, meteo_dict, temporal_dict, pipeline_class, train_until=None):\n mmagns = get_mmagns(meteo_dict)\n #dates = pd.date_range(data_dict[\"from_date\"], data_dict[\"to_date\"], freq=\"15min\")\n dfs_dict = {}\n ids_used = []\n train_sizes = {}\n test_sizes = {}\n for i in data_dict[\"ids_list\"]:\n print(i, end=\"\\r\")\n dfs_dict[i] = merge_data(i, data_dict[\"from_date\"], data_dict[\"to_date\"], data_dict[\"target\"], mmagns)\n if train_until is None:\n train_sizes[i] = int(0.8 * dfs_dict[i].shape[0])\n test_sizes[i] = int(0.2 * dfs_dict[i].shape[0])\n else:\n train_sizes[i] = len(dfs_dict[i][dfs_dict[i].date <= train_until])\n test_sizes[i] = len(dfs_dict[i][(dfs_dict[i].date > train_until) &\n (dfs_dict[i].date <= train_until + timedelta(days=30))])\n #if dates.intersection(dfs_dict[i].date).empty:\n # continue\n #dates = dates.intersection(dfs_dict[i].date)\n #ids_used.append(i)\n\n for i in data_dict[\"ids_list\"]:\n df = dfs_dict[i]\n #df = df[df.date.isin(dates)]\n dfs_dict[i] = transform_df(df, meteo_dict, temporal_dict, data_dict[\"interactions\"], data_dict[\"target\"])\n\n #data_size = dfs_dict[i].shape[0]\n\n #all_hours = dates.hour + dates.minute / 60\n\n #test_dates = all_hours.values[int(0.8 * data_size):]\n\n estimators = {}\n maes = {}\n mses = {}\n for sensor_id in data_dict[\"ids_list\"]:\n print(sensor_id)\n train_x = dfs_dict[sensor_id][:train_sizes[sensor_id], 1:]\n train_y = dfs_dict[sensor_id][:train_sizes[sensor_id], 0].ravel()\n\n test_x = dfs_dict[sensor_id][train_sizes[sensor_id]:train_sizes[sensor_id]+test_sizes[sensor_id], 1:]\n test_y = dfs_dict[sensor_id][train_sizes[sensor_id]:train_sizes[sensor_id]+test_sizes[sensor_id], 0].ravel()\n pipeline = clone(pipeline_class)\n print(\"Shape of train predictors and labels:\", train_x.shape, train_y.shape)\n pipeline.fit(train_x, train_y)\n\n estimators[sensor_id] = pipeline\n\n test_pred = pipeline.predict(test_x)\n maes[sensor_id] = mean_absolute_error(test_y, test_pred)\n mses[sensor_id] = mean_squared_error(test_y, test_pred)\n print(\"MAE:\", maes[sensor_id])\n print(\"MSE:\", mses[sensor_id])\n\n return ids_used, estimators, dfs_dict, maes, mses\n\n\ndef coefs_plot(ids_used, estimators, column_names, title=\"Model coefficients\"):\n fig, axs = plt.subplots(1, len(ids_used), figsize=(8, 10), sharey=True)\n for j, i in enumerate(ids_used):\n ax = axs[j]\n coefs = estimators[i][-1].coef_\n pd.DataFrame(zip(coefs, column_names)).iloc[::-1].rename(columns={0: \"importances\", 1: \"features\"}).plot.barh(\n x=1, ax=ax, legend=False)\n ax.set_title(f\"{i}\")\n fig.suptitle(title)\n plt.show()\n\n", "repo_name": "elena-sg/madrid-traffic", "sub_path": "graph_traffic/graph_traffic/regression.py", "file_name": "regression.py", "file_ext": "py", "file_size_in_byte": 5712, "program_lang": 
"python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "matplotlib.rcParams", "line_number": 18, "usage_type": "attribute"}, {"api_name": "itertools.product", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "graph_traffic.config.project_path", "line_number": 29, "usage_type": "name"}, {"api_name": "graph_traffic.get_data.get_mmagns", "line_number": 41, "usage_type": "call"}, {"api_name": "graph_traffic.merge_data.merge_data", "line_number": 43, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 46, "usage_type": "call"}, {"api_name": "graph_traffic.custom_transformer.transform_df", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.linalg.matrix_rank", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 56, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 62, "usage_type": "call"}, {"api_name": "graph_traffic.model_selection.timeseries_cv", "line_number": 63, "usage_type": "call"}, {"api_name": "time.time", "line_number": 65, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 69, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 72, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 75, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 78, "usage_type": "call"}, {"api_name": "graph_traffic.get_data.get_mmagns", "line_number": 82, "usage_type": "call"}, {"api_name": "graph_traffic.merge_data.merge_data", "line_number": 90, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 97, "usage_type": "call"}, {"api_name": "graph_traffic.custom_transformer.transform_df", "line_number": 106, "usage_type": "call"}, {"api_name": "sklearn.base.clone", "line_number": 124, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 131, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}]} +{"seq_id": "29248165674", "text": "import pymongo\nimport json\n\nclient = pymongo.MongoClient(\"mongodb+srv://shawn:shawn@cluster0.uebyo.mongodb.net/plannerbee?retryWrites=true&w=majority\")\ndb = client[\"plannerbee\"]\ncol = db[\"transactions_users\"]\n\ndef initiateTransactions():\n #raw data\n usertrans = {\n \"_id\": \"16cecd11-2f83-4864-bf6b-f270f4be88cb\",\n \"local_currency_code\": \"SGD\",\n \"transactions\": {\n \"250773972570868310\": {\n \"base_currency_amount\": -1.3,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": -1.3,\n \"transacted_at\": \"2020-04-03T00:00:00Z\",\n \"description\": \"COLD STORAGE-BJ SINGAPORE SG\",\n \"category\": \"shopping\"\n },\n \"250773972570868311\": {\n \"base_currency_amount\": -2.62,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": -2.62,\n \"transacted_at\": \"2020-04-03T00:00:00Z\",\n \"description\": \"BUS/MRT 33803686 SINGAPORE SG\",\n \"category\": \"transfers\"\n },\n 
\"250773972570868312\": {\n \"base_currency_amount\": -11.8,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": -11.8,\n \"transacted_at\": \"2020-04-03T00:00:00Z\",\n \"description\": \"UNIQLO BUGIS+ SINGAPORE SG\",\n \"category\": \"shopping\"\n },\n \"250773972570868313\": {\n \"base_currency_amount\": -4.32,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": -4.32,\n \"transacted_at\": \"2020-04-05T00:00:00Z\",\n \"description\": \"POPULAR BOOK COMPANY-M SINGAPORE SG\",\n \"category\": \"education\"\n },\n \"250773972570868314\": {\n \"base_currency_amount\": -50.29,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": -50.29,\n \"transacted_at\": \"2020-05-05T00:00:00Z\",\n \"description\": \"SWENSEN'S-PWP SINGAPORE SG\",\n \"category\": \"shopping\"\n },\n \"250773972570868315\": {\n \"base_currency_amount\": 271.86,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": 271.86,\n \"transacted_at\": \"2020-05-07T00:00:00Z\",\n \"description\": \"GIRO PAYMENT\",\n \"category\": \"transfers\"\n },\n \"250773972570868316\": {\n \"base_currency_amount\": -138.0,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": -138.0,\n \"transacted_at\": \"2020-05-09T00:00:00Z\",\n \"description\": \"EU YAN SANG SINGAPORE SINGAPORE SG\",\n \"category\": \"personal_care\"\n },\n \"250773972579256925\": {\n \"base_currency_amount\": -1.5,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": -1.5,\n \"transacted_at\": \"2020-05-11T00:00:00Z\",\n \"description\": \"HAO MART - MANDARIN GA SINGAPORE SG\",\n \"category\": \"groceries\"\n },\n \"250773972579256926\": {\n \"base_currency_amount\": 9.36,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": 9.36,\n \"transacted_at\": \"2020-06-16T00:00:00Z\",\n \"description\": \"30CASHBACK\",\n \"category\": \"transfers\"\n },\n \"250773972579256927\": {\n \"base_currency_amount\": -17.19,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": -17.19,\n \"transacted_at\": \"2020-06-20T00:00:00Z\",\n \"description\": \"DELIVEROO SINGAPORE SG\",\n \"category\": \"shopping\"\n },\n \"250773972579256928\": {\n \"base_currency_amount\": 614.87,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": 614.87,\n \"transacted_at\": \"2020-06-21T00:00:00Z\",\n \"description\": \"PAYMENT - THANK YOU\",\n \"category\": \"income\"\n },\n \"250773972579256929\": {\n \"base_currency_amount\": -46.53,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": -46.53,\n \"transacted_at\": \"2020-06-07T00:00:00Z\",\n \"description\": \"FAIRPRICE FINEST-MARIN SINGAPORE SG\",\n \"category\": \"groceries\"\n },\n \"250773972579256930\": {\n \"base_currency_amount\": 63.72,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": 63.72,\n \"transacted_at\": \"2020-07-05T00:00:00Z\",\n \"description\": \"PAYMENT - THANK YOU\",\n \"category\": \"transfers\"\n },\n \"250773972579256931\": {\n \"base_currency_amount\": -33.89,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": -33.89,\n \"transacted_at\": \"2020-07-05T00:00:00Z\",\n \"description\": \"DELIVEROO SINGAPORE SG\",\n \"category\": \"shopping\"\n },\n \"250773972579256932\": {\n \"base_currency_amount\": -55.27,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": -55.27,\n \"transacted_at\": \"2020-07-05T00:00:00Z\",\n \"description\": \"DELIVEROO SINGAPORE SG\",\n \"category\": \"shopping\"\n },\n \"250773972579256933\": {\n \"base_currency_amount\": 33.89,\n \"base_currency_code\": 
\"SGD\",\n \"local_currency_amount\": 33.89,\n \"transacted_at\": \"2020-07-05T00:00:00Z\",\n \"description\": \"PAYMENT - THANK YOU\",\n \"category\": \"transfers\"\n },\n \"250773972587645542\": {\n \"base_currency_amount\": -13.65,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": -13.65,\n \"transacted_at\": \"2020-07-06T00:00:00Z\",\n \"description\": \"NTUC FP-BEDOK B SINGAPORE SG\",\n \"category\": \"groceries\"\n },\n \"250773972587645543\": {\n \"base_currency_amount\": 2.5,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": 2.5,\n \"transacted_at\": \"2020-07-06T00:00:00Z\",\n \"description\": \"30CASHBACK\",\n \"category\": \"transfers\"\n },\n \"250773972587645544\": {\n \"base_currency_amount\": -10.5,\n \"base_currency_code\": \"SGD\",\n \"local_currency_amount\": -10.5,\n \"transacted_at\": \"2020-07-06T00:00:00Z\",\n \"description\": \"HOMEGROUND COFFEE ROAS SINGAPORE SG\",\n \"category\": \"shopping\"\n }\n }\n }\n #insert or update data to mongodb\n x = col.update_one(\n {\"_id\": \"16cecd11-2f83-4864-bf6b-f270f4be88cb\"},\n {\"$setOnInsert\":usertrans},\n upsert = True\n )\n \n return x.modified_count", "repo_name": "ShawnWon/MyFirstFastAPI", "sub_path": "project/functions/transactions.py", "file_name": "transactions.py", "file_ext": "py", "file_size_in_byte": 7224, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "14", "api": [{"api_name": "pymongo.MongoClient", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "15699515317", "text": "import os\nimport re\n\nfrom sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom pilotscope.Common.Index import Index\nfrom pilotscope.Common.SSHConnector import SSHConnector\nfrom pilotscope.DBController.BaseDBController import BaseDBController\nfrom pilotscope.Exception.Exception import DBStatementTimeoutException, DatabaseCrashException, DatabaseStartException, \\\n PilotScopeInternalError\nfrom pilotscope.PilotConfig import PostgreSQLConfig\n\n\nclass PostgreSQLController(BaseDBController):\n _instances = set()\n\n def __new__(cls, *args, **kwargs):\n instance = super().__new__(cls)\n cls._instances.add(instance)\n return instance\n\n def __del__(self):\n self._disconnect()\n type(self)._instances.remove(self)\n\n def __init__(self, config: PostgreSQLConfig, echo=True, enable_simulate_index=False):\n super().__init__(config, echo)\n self.config: PostgreSQLConfig = config\n\n self.enable_simulate_index = enable_simulate_index\n self._add_extension()\n if self.enable_simulate_index:\n self.simulate_index_visitor = SimulateIndexVisitor(self)\n for index in super().get_all_indexes():\n sql = f\"SELECT hypopg_hide_index('{index.index_name}'::REGCLASS)\"\n self.execute(sql)\n\n def _add_extension(self):\n extensions = self.get_available_extensions()\n if \"pg_buffercache\" not in extensions:\n self.execute(\"create extension pg_buffercache\")\n if \"pg_hint_plan\" not in extensions:\n self.execute(\"create extension pg_hint_plan\")\n if self.enable_simulate_index and \"hypopg\" not in extensions:\n self.execute(\"create extension hypopg\")\n\n def get_available_extensions(self):\n \"\"\"\n Get all extensions that have installed in the connected database\n :return: the list of extension names\n \"\"\"\n sql = (\"SELECT name, default_version, installed_version FROM\"\n \" pg_available_extensions WHERE installed_version is not NULL ORDER BY name;\")\n res = self.execute(sql, fetch=True)\n extensions = []\n for row in res:\n 
extensions.append(row[0])\n return extensions\n\n def _create_conn_str(self):\n return \"{}://{}:{}@{}:{}/{}?{}\".format(\"postgresql\", self.config.db_user, self.config.db_user_pwd,\n self.config.db_host,\n self.config.db_port, self.config.db, \"connect_timeout=2\")\n\n def execute(self, sql, fetch=False, fetch_column_name=False):\n \"\"\"\n Execute a SQL query.\n\n :param sql: the SQL query to execute\n :param fetch: it indicates whether to fetch the result of the query\n :param fetch_column_name: it indicates whether to fetch the column names of the result.\n :return: the result of the query if fetch is True, otherwise None\n \"\"\"\n row = None\n try:\n self._connect_if_loss()\n conn = self._get_connection()\n result = conn.execute(text(sql) if isinstance(sql, str) else sql)\n if fetch:\n row = result.all()\n if fetch_column_name:\n row = [tuple(result.keys()), *row]\n except OperationalError as e:\n if \"canceling statement due to statement timeout\" in str(e):\n raise DBStatementTimeoutException(str(e))\n else:\n raise e\n except Exception as e:\n if \"Can not find the corresponding sub-plan query in push anchor\" in str(e):\n raise PilotScopeInternalError(str(e))\n if \"PilotScopePullEnd\" not in str(e):\n raise e\n return row\n\n def set_hint(self, key, value):\n \"\"\"\n Set the value of each hint (i.e., the run-time config) when execute SQL queries.\n The hints can be used to control the behavior of the database system in a session.\n For PostgreSQL, you can find all valid hints in https://www.postgresql.org/docs/13/runtime-config.html.\n\n :param key: the name of the hint\n :param value: the value of the hint\n \"\"\"\n sql = \"SET {} TO {}\".format(key, value)\n self.execute(sql)\n\n def create_index(self, index: Index):\n \"\"\"\n Create an index on columns `index.columns` of table `index.table` with name `index.index_name`.\n\n :param index: a Index object including the information of the index\n \"\"\"\n if self.enable_simulate_index:\n self.simulate_index_visitor.create_index(index)\n else:\n column_names = index.joined_column_names()\n sql = f\"create index {index.index_name} on {index.table} ({column_names});\"\n self.execute(sql, fetch=False)\n\n def drop_index(self, index: Index):\n \"\"\"\n Drop an index by its index name.\n\n :param index: an index that will be dropped\n \"\"\"\n if self.enable_simulate_index:\n self.simulate_index_visitor.drop_index(index)\n else:\n statement = (\n f\"DROP INDEX IF EXISTS {index.index_name};\"\n )\n self.execute(statement, fetch=False)\n\n def drop_all_indexes(self):\n \"\"\"\n Drop all indexes across all tables in the database. 
This will not delete the system indexes and unique indexes.\n \"\"\"\n if self.enable_simulate_index:\n self.simulate_index_visitor.drop_all_indexes()\n else:\n indexes = self.get_all_indexes()\n for index in indexes:\n self.drop_index(index)\n\n def get_all_indexes_byte(self):\n \"\"\"\n Get the size of all indexes across all tables in the database in bytes.\n This will include the system indexes and unique indexes.\n\n :return: the size of all indexes in bytes\n \"\"\"\n if self.enable_simulate_index:\n result = self.simulate_index_visitor.get_all_indexes_byte()\n else:\n sql = (\"select sum(pg_indexes_size(table_name::text)) from \"\n \"(select table_name from information_schema.tables \"\n \"where table_schema='public') as all_tables;\")\n result = float(self.execute(sql, fetch=True)[0][0])\n return result\n\n def get_table_indexes_byte(self, table_name):\n \"\"\"\n Get the size of all indexes on a table in bytes.\n This will include the system indexes and unique indexes.\n\n :param table_name: a table name that the indexes belong to\n :return: the size of all indexes on the table in bytes\n \"\"\"\n if self.enable_simulate_index:\n result = self.simulate_index_visitor.get_table_indexes_byte(table_name)\n else:\n sql = f\"select pg_indexes_size('{table_name}');\"\n result = float(self.execute(sql, fetch=True)[0][0])\n return result\n\n def get_index_byte(self, index: Index):\n \"\"\"\n Get the size of an index in bytes by its index name.\n\n :param index: the index to get size\n :return: the size of the index in bytes\n \"\"\"\n if self.enable_simulate_index:\n return self.simulate_index_visitor.get_index_byte(index)\n sql = f\"select pg_table_size('{index.get_index_name()}');\"\n result = int(self.execute(sql, fetch=True)[0][0])\n return result\n\n def get_existed_indexes(self, table):\n if self.enable_simulate_index:\n return self.simulate_index_visitor.get_existed_index(table)\n else:\n return super().get_existed_indexes(table)\n\n def get_all_indexes(self):\n \"\"\"\n Get all indexes across all tables in the database.\n\n :return: A collection containing the details of all indexes.\n \"\"\"\n if self.enable_simulate_index:\n return self.simulate_index_visitor.get_all_indexes()\n else:\n return super().get_all_indexes()\n\n def get_index_number(self, table):\n \"\"\"\n Get the number of indexes built on the specified table.\n\n :param table: The name of the table for which to count indexes.\n :return: The number of indexes on the specified table.\n \"\"\"\n if self.enable_simulate_index:\n return self.simulate_index_visitor.get_index_number(table)\n else:\n return super().get_index_number(table)\n\n def explain_physical_plan(self, sql, comment=\"\"):\n \"\"\"\n Get the physical plan from database's optimizer of a SQL query.\n\n :param sql: The SQL query to be explained.\n :param comment: A SQL comment will be added to the beginning of the SQL query.\n :return: The physical plan of the SQL query.\n \"\"\"\n return self._explain(sql, comment, False)\n\n def explain_execution_plan(self, sql, comment=\"\"):\n \"\"\"\n Get the execution plan from database's optimizer of a SQL query.\n\n :param sql: The SQL query to be explained.\n :param comment: A SQL comment will be added to the beginning of the SQL query.\n :return: The execution plan of the SQL query.\n \"\"\"\n return self._explain(sql, comment, True)\n\n def _explain(self, sql, comment, execute: bool):\n return self.execute(text(self.get_explain_sql(sql, execute, comment)), True)[0][0][0]\n\n def get_estimated_cost(self, sql, 
comment=\"\"):\n \"\"\"\n Get an estimated cost of a SQL query.\n\n :param sql: The SQL query for which to estimate the cost.\n :param comment: A SQL comment will be added to the beginning of the SQL query.\n :return: The estimated total cost of executing the SQL query.\n \"\"\"\n plan = self.explain_physical_plan(sql, comment=comment)\n return plan[\"Plan\"][\"Total Cost\"]\n\n def get_explain_sql(self, sql, execute: bool, comment=\"\"):\n \"\"\"\n Constructs an EXPLAIN SQL statement for a given SQL query.\n\n :param sql: The SQL query to explain.\n :param execute: A boolean flag indicating whether to execute the query plan.\n :param comment: A SQL comment will be added to the beginning of the SQL query.\n :return: The result of executing the `EXPLAIN` SQL statement.\n \"\"\"\n return \"{} explain ({} VERBOSE, SETTINGS, SUMMARY, FORMAT JSON) {}\".format(comment,\n \"ANALYZE,\" if execute else \"\",\n sql)\n\n def get_buffercache(self):\n \"\"\"\n Get the numbers of buffer per table in the shared buffer cache in real time.\n\n :return: a dict, where keys are the names of table and values are the numbers of buffer per table\n \"\"\"\n sql = \"\"\"\n SELECT c.relname, count(*) AS buffers\n FROM pg_buffercache b JOIN pg_class c\n ON b.relfilenode = pg_relation_filenode(c.oid) AND\n b.reldatabase IN (0, (SELECT oid FROM pg_database\n WHERE datname = current_database()))\n JOIN pg_namespace n ON n.oid = c.relnamespace\n GROUP BY c.relname;\n \"\"\"\n res = self.execute(sql, fetch=True)\n return {k: v for k, v in res if not k.startswith(\"pg_\")}\n\n def shutdown(self):\n \"\"\"\n Shutdown the database\n \"\"\"\n\n self._check_enable_deep_control()\n\n for instance in type(self)._instances:\n # if hasattr(instance, \"engine\"):\n instance._disconnect() # to set DBController's self.connection_thread.conn is None\n instance.engine.dispose(close=True)\n # del instance.engine\n self._surun(\"{} stop -P {} -D {} 2>&1 > /dev/null\".format(self.config.pg_ctl, self.config.db_port, self.config.pgdata))\n\n def start(self):\n \"\"\"\n Try to start DBMS. 
If the database is not running after the start command, raise DatabaseCrashException.\n\n        :raises DatabaseCrashException\n        \"\"\"\n\n        self._check_enable_deep_control()\n\n        self._surun(\"{} start -P {} -D {} 2>&1 > /dev/null\".format(self.config.pg_ctl, self.config.db_port, self.config.pgdata))\n        if not self.is_running():\n            raise DatabaseCrashException\n\n        for instance in type(self)._instances:\n            instance._connect_if_loss()\n\n    def is_running(self):\n        \"\"\"\n        Check whether the database is running.\n\n        :return: True if the database is running, False otherwise.\n        \"\"\"\n        self._check_enable_deep_control()\n\n        check_db_running_cmd = \"echo {} | su {} -c '{} status -P {} -D {}'\".format(self.config.db_host_pwd, self.config.db_host_user, \n                                                                                self.config.pg_ctl, self.config.db_port, self.config.pgdata)\n        if self.config._is_local:\n            with os.popen(check_db_running_cmd) as res:\n                status = res.read()\n        else:\n            ssh_conn = SSHConnector(self.config.db_host, self.config.db_host_user, self.config.db_host_pwd,\n                                    self.config.db_host_port)\n            ssh_conn.connect()\n            res_out, res_err = ssh_conn.remote_exec_cmd(check_db_running_cmd)\n            ssh_conn.close()\n            status = \"{},{}\".format(res_out, res_err)\n\n        return \"server is running\" in status\n\n    def write_knob_to_file(self, key_2_value_knob: dict):\n        \"\"\"\n        Write knobs to the config file; you should restart the database to make them take effect.\n\n        :param key_2_value_knob: a dict with keys as the names of the knobs and values as the values to be set.\n        \"\"\"\n\n        self._check_enable_deep_control()\n\n        with open(self.config.db_config_path, \"a\") as f:\n            f.write(\"\\n\")\n            for k, v in key_2_value_knob.items():\n                f.write(\"{} = {}\\n\".format(k, v))\n\n    def recover_config(self):\n        \"\"\"\n        Recover the database config file to the last config file saved by `backup_config()`\n        \"\"\"\n\n        self._check_enable_deep_control()\n\n        with open(self.config.backup_db_config_path, \"r\") as f:\n            db_config_file = f.read()\n        with open(self.config.db_config_path, \"w\") as f:\n            f.write(db_config_file)\n\n    def backup_config(self):\n        \"\"\"\n        Creates a backup of the database configuration file.\n        \"\"\"\n\n        self._check_enable_deep_control()\n\n        with open(self.config.db_config_path, \"r\") as f:\n            with open(self.config.backup_db_config_path, \"w\") as w:\n                w.write(f.read())\n\n    def get_table_columns(self, table_name, enable_all_schema=False):\n        \"\"\"\n        Retrieves all column names for a given table. 
If enable_all_schema is true,\n Pilotscope will search it across all schemas in the database.\n Otherwise, Pilotscope will only search it in the public schema.\n\n :param table_name: The name of the table for which to retrieve column names.\n :param enable_all_schema:\n :return: A list of column names for the specified table.\n \"\"\"\n if enable_all_schema:\n sql = \"SELECT column_name FROM information_schema.columns WHERE table_name = '{}';\".format(table_name)\n else:\n sql = \"SELECT column_name FROM information_schema.columns WHERE table_name = '{}' and table_schema='public';\".format(\n table_name)\n return [x[0] for x in self.execute(sql, fetch=True)]\n\n def get_number_of_distinct_value(self, table_name, column_name):\n \"\"\"\n Get the number of distinct value of a column\n\n :param table_name: the name of the table that the column belongs to\n :param column_name: the name of the column\n :return: the number of distinct value, type of which is same as the data of the column\n \"\"\"\n return self.execute(f\"select count(distinct {column_name}) from {table_name};\", True)[0][0]\n\n # switch user and run\n def _surun(self, cmd):\n su_and_cmd = \"echo {} | su {} -c '{}'\".format(self.config.db_host_pwd, self.config.db_host_user, cmd)\n if self.config._is_local:\n return os.system(su_and_cmd)\n else:\n ssh_conn = SSHConnector(self.config.db_host, self.config.db_host_user, self.config.db_host_pwd,\n self.config.db_host_port)\n ssh_conn.connect()\n ssh_conn.remote_exec_cmd(su_and_cmd)\n ssh_conn.close()\n\n\nclass SimulateIndexVisitor:\n\n def __init__(self, db_controller: PostgreSQLController):\n super().__init__()\n self.db_controller = db_controller\n\n def create_index(self, index: Index):\n columns = index.joined_column_names()\n statement = (\n \"select * from hypopg_create_index( \"\n f\"'create index on {index.table} \"\n f\"({columns})')\"\n )\n result = self.db_controller.execute(statement, fetch=True)[0]\n index.hypopg_oid = result[0]\n index.hypopg_name = result[1]\n\n def _get_oid_by_indexname(self, index_name):\n sql = f\"SELECT indexrelid FROM hypopg_list_indexes WHERE index_name like '%{index_name}%'\"\n res = self.db_controller.execute(sql, fetch=True)\n assert len(res) == 1, f\"No oid or more than one oid named like '%{index_name}%'\"\n return res[0][0]\n\n def _get_oid_of_index(self, index: Index):\n if index.hypopg_oid is not None:\n return index.hypopg_oid\n elif index.hypopg_name is not None:\n return self._get_oid_by_indexname(index_name=index.hypopg_name)\n else:\n return self._get_oid_by_indexname(index_name=index.index_name)\n\n def drop_index(self, index: Index):\n oid = self._get_oid_of_index(index)\n statement = f\"select * from hypopg_drop_index({oid})\"\n result = self.db_controller.execute(statement, fetch=True)\n assert result[0][0] is True, f\"Could not drop simulated index with oid = {oid}.\"\n\n def drop_all_indexes(self):\n sql = \"select hypopg_reset()\"\n self.db_controller.execute(sql)\n\n def get_all_indexes_byte(self):\n return self.get_table_indexes_byte(\"1' or '1'='1\")\n\n def get_table_indexes_byte(self, table):\n sql = f\"SELECT sum(hypopg_relation_size(h.indexrelid)) from hypopg() h left join pg_class t on h.indrelid=t.oid where t.relname = '{table}'\"\n res = self.db_controller.execute(sql, fetch=True)[0][0]\n return 0 if res is None else float(res)\n\n def get_index_byte(self, index: Index):\n try:\n oid = self._get_oid_of_index(index)\n statement = f\"select hypopg_relation_size({oid})\"\n result = self.db_controller.execute(statement, 
fetch=True)[0][0]\n assert result > 0, \"Hypothetical index does not exist.\"\n return float(result)\n except:\n raise RuntimeError\n\n def get_index_number(self, table):\n sql = f\"SELECT COUNT(*) from hypopg() h left join pg_class t on h.indrelid=t.oid where t.relname = '{table}'\"\n return int(self.db_controller.execute(sql, fetch=True)[0][0])\n\n def get_all_indexes(self):\n return self.get_existed_index(\"1' or '1'='1\")\n\n def get_existed_index(self, table):\n sql = f\"SELECT h.indexrelid, h.indexname, hypopg_get_indexdef(h.indexrelid), t.relname from hypopg() h left join pg_class t on h.indrelid=t.oid where t.relname = '{table}'\"\n res = self.db_controller.execute(sql, fetch=True)\n indexes = []\n for indexrelid, indexname, indexdef, relname in res:\n col = [col.strip() for col in re.search(r\"\\([\\S\\s]*\\)\", indexdef).group(0)[1:-1].split(\",\")]\n index = Index(columns=col, table=relname, index_name=None)\n index.hypopg_name = indexname\n index.hypopg_oid = indexrelid\n indexes.append(index)\n return indexes\n", "repo_name": "alibaba/pilotscope", "sub_path": "pilotscope/DBController/PostgreSQLController.py", "file_name": "PostgreSQLController.py", "file_ext": "py", "file_size_in_byte": 20047, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "12", "api": [{"api_name": "pilotscope.DBController.BaseDBController.BaseDBController", "line_number": 15, "usage_type": "name"}, {"api_name": "pilotscope.PilotConfig.PostgreSQLConfig", "line_number": 27, "usage_type": "name"}, {"api_name": "pilotscope.PilotConfig.PostgreSQLConfig", "line_number": 29, "usage_type": "name"}, {"api_name": "sqlalchemy.text", "line_number": 79, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.OperationalError", "line_number": 84, "usage_type": "name"}, {"api_name": "pilotscope.Exception.Exception.DBStatementTimeoutException", "line_number": 86, "usage_type": "call"}, {"api_name": "pilotscope.Exception.Exception.PilotScopeInternalError", "line_number": 91, "usage_type": "call"}, {"api_name": "pilotscope.Common.Index.Index", "line_number": 108, "usage_type": "name"}, {"api_name": "pilotscope.Common.Index.Index", "line_number": 121, "usage_type": "name"}, {"api_name": "pilotscope.Common.Index.Index", "line_number": 177, "usage_type": "name"}, {"api_name": "sqlalchemy.text", "line_number": 240, "usage_type": "call"}, {"api_name": "pilotscope.Exception.Exception.DatabaseCrashException", "line_number": 310, "usage_type": "name"}, {"api_name": "os.popen", "line_number": 326, "usage_type": "call"}, {"api_name": "pilotscope.Common.SSHConnector.SSHConnector", "line_number": 329, "usage_type": "call"}, {"api_name": "os.system", "line_number": 406, "usage_type": "call"}, {"api_name": "pilotscope.Common.SSHConnector.SSHConnector", "line_number": 408, "usage_type": "call"}, {"api_name": "pilotscope.Common.Index.Index", "line_number": 421, "usage_type": "name"}, {"api_name": "pilotscope.Common.Index.Index", "line_number": 438, "usage_type": "name"}, {"api_name": "pilotscope.Common.Index.Index", "line_number": 446, "usage_type": "name"}, {"api_name": "pilotscope.Common.Index.Index", "line_number": 464, "usage_type": "name"}, {"api_name": "re.search", "line_number": 486, "usage_type": "call"}, {"api_name": "pilotscope.Common.Index.Index", "line_number": 487, "usage_type": "call"}]} +{"seq_id": "34341371424", "text": "from django.db import DatabaseError, OperationalError\nfrom django.http import HttpResponseServerError\nimport time\n\nclass DatabaseErrorMiddleware:\n def 
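For readers unfamiliar with hypopg, here is a compact sketch of the round-trip `SimulateIndexVisitor` performs, driven as plain SQL through psycopg2. It assumes the hypopg extension is installed in the target database; the `orders` table and `customer_id` column are placeholders:

```python
# Assumes the hypopg extension is installed and a table named "orders"
# with a "customer_id" column exists; everything else is standard psycopg2.
import psycopg2

with psycopg2.connect("dbname=test") as conn, conn.cursor() as cur:
    # hypopg_create_index returns (oid, generated index name).
    cur.execute(
        "SELECT * FROM hypopg_create_index("
        "'CREATE INDEX ON orders (customer_id)')"
    )
    oid, name = cur.fetchone()
    # Estimated size of the hypothetical index, in bytes.
    cur.execute("SELECT hypopg_relation_size(%s)", (oid,))
    print(name, cur.fetchone()[0], "bytes")
    # Plain EXPLAIN (not EXPLAIN ANALYZE) now considers the hypothetical index.
    cur.execute("EXPLAIN SELECT * FROM orders WHERE customer_id = 42")
    print("\n".join(row[0] for row in cur.fetchall()))
    # Drop just this index, or wipe all hypothetical indexes at once.
    cur.execute("SELECT hypopg_drop_index(%s)", (oid,))
    cur.execute("SELECT hypopg_reset()")
```

This mirrors the `hypopg_create_index` / `hypopg_relation_size` / `hypopg_drop_index` / `hypopg_reset` calls issued by the visitor above, without the surrounding class machinery.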
__init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n response = self.get_response(request)\n return response\n\n def process_exception(self, request, exception):\n if isinstance(exception, (DatabaseError, OperationalError)):\n retries = 3\n delay = 0.5\n \n while retries > 0:\n try:\n response = self.get_response(request)\n return response\n except (DatabaseError, OperationalError):\n print(\"Database connection error. Retrying...\")\n retries -= 1\n if retries == 0:\n return HttpResponseServerError(\"Database connection error after multiple retries.\")\n time.sleep(delay)\n", "repo_name": "arkterra90/giftzilla", "sub_path": "giftzillaenv/giftzilla/giftzilla/middleware/data_error_middleware.py", "file_name": "data_error_middleware.py", "file_ext": "py", "file_size_in_byte": 1001, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "django.db.DatabaseError", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.OperationalError", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.DatabaseError", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.OperationalError", "line_number": 22, "usage_type": "name"}, {"api_name": "django.http.HttpResponseServerError", "line_number": 26, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "70302272974", "text": "import torch\nimport os\nimport random\nimport numpy as np\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nfrom torch import nn\nimport torchvision.transforms as T\nimport torch.nn.functional as F\nimport kornia.augmentation as K\nimport kornia\nimport torchvision\nimport argparse\nfrom wrappers.dataset_selector import DatasetSelector\nfrom vit_pytorch import ViT\nfrom vit_pytorch.cross_vit import CrossViT\nfrom contrastive_framework.byol import BYOL\n\nfrom torchvision import models\n\nfrom sklearn.metrics import auc, roc_curve, recall_score, precision_score\nfrom sklearn.covariance import EmpiricalCovariance, LedoitWolf, ShrunkCovariance\nfrom utils.train_utils import AverageMeter\nfrom models.unet import UNet\nimport pdb\nfrom PIL import Image\nimport optuna\nfrom vit_pytorch.nest import NesT\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nimport torch.multiprocessing\ntorch.multiprocessing.set_sharing_strategy('file_system')\n\nclass RandomApply(nn.Module):\n def __init__(self, fn, p):\n super().__init__()\n self.fn = fn\n self.p = p\n def forward(self, x):\n if random.random() > self.p:\n return x\n return self.fn(x)\n\ndef load_image(img_path):\n with open(img_path, 'rb') as f:\n img = Image.open(f)\n return img.convert('RGB')\n\ndef train(args, learner, optimizer, loader, epoch, lr_scheduler=None):\n\n losses = AverageMeter(f\"Epoch {epoch +1}\")\n learner = learner.train()\n \n local_progress= tqdm(loader, desc=f'Epoch {epoch+1}/{args.epochs}')\n for idx, (x, _) in enumerate(local_progress):\n optimizer.zero_grad()\n x = x.to(device)\n\n loss = learner(x)\n\n loss.backward()\n optimizer.step()\n if lr_scheduler:\n lr_scheduler.step()\n losses.update(loss.item(), x.size(0))\n\n data_dict = {\"avg loss\": losses.avg}\n local_progress.set_postfix(data_dict)\n\n return losses.avg\n\ndef get_features(model, dataloader):\n extracted_features, labels = [], []\n with torch.no_grad():\n # extract features\n for x, y in dataloader:\n x = T.Resize(args.image_size)(x)\n x = x.to(device)\n \n _, 
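The middleware above retries with a fixed half-second delay. A hypothetical variant (not the project's actual code) that adds exponential backoff and explicitly passes non-database exceptions through to Django's normal handling might look like this:

```python
# Hypothetical variant, not the giftzilla project's code: same idea as the
# middleware above, with exponential backoff between retries.
import time
from django.db import DatabaseError, OperationalError
from django.http import HttpResponseServerError

class BackoffDatabaseErrorMiddleware:
    def __init__(self, get_response, retries=3, base_delay=0.5):
        self.get_response = get_response
        self.retries = retries
        self.base_delay = base_delay

    def __call__(self, request):
        return self.get_response(request)

    def process_exception(self, request, exception):
        if not isinstance(exception, (DatabaseError, OperationalError)):
            return None  # let Django's normal exception handling run
        for attempt in range(self.retries):
            time.sleep(self.base_delay * (2 ** attempt))  # 0.5s, 1s, 2s, ...
            try:
                return self.get_response(request)
            except (DatabaseError, OperationalError):
                continue
        return HttpResponseServerError("Database unavailable after retries.")
```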
features = model(x, return_embedding=True)\n\n extracted_features += list(features)\n labels += list(y)\n\n labels = np.array(labels)\n \n \n out_dim = extracted_features[0].size(-1)\n return torch.stack(extracted_features).reshape(-1, out_dim).to(device), labels\n\n\ndef val(args, model, train_dataloader, val_dataloader, epoch):\n\n group_lasso = LedoitWolf(assume_centered=False)\n\n model = model.eval()\n\n train_features, _ = get_features(model, train_dataloader)\n val_features, labels = get_features(model, val_dataloader)\n\n train_features = F.normalize(train_features, dim=-1, p=2)\n val_features = F.normalize(val_features, dim=-1, p=2)\n cov = group_lasso.fit(train_features.cpu().numpy())\n # pdb.set_trace()\n scores = cov.mahalanobis(val_features.cpu().numpy())\n fpr, tpr, threshold = roc_curve(labels, scores)\n auc_score = auc(fpr, tpr)\n\n return auc_score\n\ndef train_model(args, model, train_dataloader, val_dataloader, trial=None):\n model = model.to(device)\n \n print(args)\n\n # if args.optname in [\"SGD\"]:\n # optimizer = getattr(torch.optim, args.optname)(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n # else:\n optimizer = getattr(torch.optim, args.optname)(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, amsgrad=args.amsgrad)\n # lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n # optimizer, args.epochs * len(train_dataloader), 1e-4\n # )\n # lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(\n # optimizer, milestones=[5], gamma=0.1\n # )\n\n best_auc = 0\n for epoch in range(args.epochs):\n avg_loss = train(args, model, optimizer, train_dataloader, epoch)\n auc_score = val(args, model, train_dataloader, val_dataloader, epoch)\n\n if trial:\n trial.report(auc_score, epoch+1)\n\n if trial.should_prune():\n raise optuna.exceptions.TrialPruned()\n\n print(f'auc: {auc_score:.6f}')\n if auc_score > best_auc:\n best_auc = auc_score\n print(f'Saving Model AUC: {best_auc:.6f}')\n model_path = os.path.join(args.model_path)\n torch.save(model.state_dict(), model_path)\n\n return best_auc \n\ndef run(args, trial=None):\n from utils.kornia_utils import GaussianBlur\n kornia_transforms = nn.Sequential(\n K.ColorJitter(0.8, 0.8, 0.8, 0.2, p = 0.3),\n K.RandomGrayscale(p=0.2),\n K.RandomHorizontalFlip(p=.5),\n GaussianBlur((3, 3), (1.0, 2.0), p=0.2),\n K.RandomResizedCrop((args.image_size, args.image_size), p=.5),\n K.Normalize(mean=torch.tensor([0.485, 0.456, 0.406]), std=torch.tensor([0.229, 0.224, 0.225])) # )\n )\n\n # transform = torch.nn.Sequential(\n # T.RandomHorizontalFlip(),\n # RandomApply(\n # T.GaussianBlur((3, 3), (1.0, 2.0)),\n # p = 0.2\n # ),\n # RandomApply(\n # T.RandomResizedCrop((args.image_size // 2, args.image_size // 2)),\n # p = 0.5\n # ),\n # )\n\n in_channels = 3 if args.dataset == 'cifar-10' or args.dataset == 'mvtech-ad' else 1\n\n # model = ViT(\n # image_size = args.image_size,\n # patch_size = 16,\n # num_classes = 10,\n # dim = 512, # 512\n # depth = 6,\n # heads = 16,\n # mlp_dim = 1024, # 1024\n # dropout = 0.5,\n # emb_dropout = 0.1,\n # channels = in_channels\n # )\n\n model = NesT(\n image_size = args.image_size,\n patch_size = 4,\n dim = 96,\n heads = 3,\n num_hierarchies = 3, # number of hierarchies\n block_repeats = (8, 4, 1), # the number of transformer blocks at each heirarchy, starting from the bottom\n num_classes = 512\n )\n\n\n # model = models.resnet50(pretrained=False)\n # model.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)\n\n 
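As a standalone illustration of the batch-level Kornia pipeline configured above: the augmentation parameters below are copied from the snippet, while the 64x64 image size and batch of 8 are illustrative.

```python
# Kornia augmentations are nn.Modules that operate on whole batches on the
# same device as the input tensor, which is why they compose in nn.Sequential.
import torch
from torch import nn
import kornia.augmentation as K

aug = nn.Sequential(
    K.ColorJitter(0.8, 0.8, 0.8, 0.2, p=0.3),
    K.RandomGrayscale(p=0.2),
    K.RandomHorizontalFlip(p=0.5),
    K.RandomResizedCrop((64, 64), p=0.5),
)

batch = torch.rand(8, 3, 64, 64)  # Kornia augments whole batches at once
out = aug(batch)
print(out.shape)  # torch.Size([8, 3, 64, 64])
```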
learner = BYOL(\n model,\n augment_fn=kornia_transforms,\n image_size = args.image_size,\n hidden_layer='mlp_head', #hidden_layer= 'to_latent',\n use_momentum = False # turn off momentum in the target encoder\n )\n\n train_dataloader, val_dataloader, _ = DatasetSelector.select_dataset(args)\n\n best_auc = train_model(args, learner, train_dataloader, val_dataloader, trial)\n return best_auc\n\ndef objective(args):\n\n def final(trial):\n lr = trial.suggest_float(\"lr\", 1e-5, 1e-1, log=True)\n weight_decay = trial.suggest_float(\"weight_decay\", 0, 0.9)\n momentum = trial.suggest_float(\"momentum\", 0, 0.9)\n amsgrad = trial.suggest_categorical(\"amsgrad\", [True, False])\n args.lr = lr\n # args.optname = optname\n args.amsgrad = amsgrad\n args.weight_decay = weight_decay\n args.momentum = momentum\n return run(args, trial)\n\n return final\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='RIAD anomaly detection')\n parser.add_argument('--pdata', type=float, default=1.0, help='learning rate of Adam')\n parser.add_argument('--obj', type=str, default='screw')\n parser.add_argument('--model_path', default='saved_models/contrastive/best_model_resnet_mvtech', type=str)\n parser.add_argument('--eval', default=False, type=bool)\n parser.add_argument('--dataset', type=str, default='mvtech-ad') #kaggle_pneumonia\n parser.add_argument('--epochs', type=int, default=20, help='maximum training epochs')\n parser.add_argument('--batch_size', type=int, default=12) # 12\n parser.add_argument('--test_batch_size', type=int, default=1)\n parser.add_argument('--val_batch_size', type=int, default=1)\n parser.add_argument('--image_size', type=int, default=256) # 256\n parser.add_argument('--alpha', type=float, default=1.0)\n parser.add_argument('--belta', type=float, default=1.0)\n parser.add_argument('--gamma', type=float, default=1.0)\n parser.add_argument('--lr', type=float, default=0.0006949058882671142, help='learning rate of Adam') #0.0006949058882671142\n parser.add_argument('--num_workers', type=int, default=2)\n parser.add_argument('--in_cls', default=0, type=int)\n parser.add_argument('--seed', default=123, type=int)\n parser.add_argument('--optname', default='Adam', type=str)\n parser.add_argument('--weight-decay', default=0, type=float)\n parser.add_argument('--momentum', default=0, type=float)\n parser.add_argument('--amsgrad', default=False, type=bool)\n\n args = parser.parse_args()\n\n seed = args.seed\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n \n # study = optuna.create_study(direction=\"maximize\", storage=\"sqlite:///mvtech_experiments.db\", study_name=\"mvtech_cable_vit_adam\", load_if_exists=True)\n # study.optimize(objective(args), n_trials=100)\n\n # # pruned_trials = study.get_trials(deepcopy=False, states=[TrialState.PRUNED])\n # complete_trials = study.get_trials(deepcopy=False, states=[TrialState.COMPLETE])\n\n # print(\"Study statistics: \")\n # print(\" Number of finished trials: \", len(study.trials))\n # # print(\" Number of pruned trials: \", len(pruned_trials))\n # print(\" Number of complete trials: \", len(complete_trials))\n\n # print(\"Best trial:\")\n # trial = study.best_trial\n\n # print(\" Value: \", trial.value)\n\n # print(\" Params: \")\n # for key, value in trial.params.items():\n # print(\" {}: {}\".format(key, value))\n auc = run(args)\n\n with 
open(f'contrastive_results_{args.seed}.txt', 'a') as fl:\n print(f'obj={args.obj} auc: {auc:.3f}', file=fl)\n", "repo_name": "esdrascosta/anomaly-detection", "sub_path": "contrastive_train.py", "file_name": "contrastive_train.py", "file_ext": "py", "file_size_in_byte": 10045, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "14", "api": [{"api_name": "torch.device", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.multiprocessing.set_sharing_strategy", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.multiprocessing", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "random.random", "line_number": 41, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 47, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 47, "usage_type": "name"}, {"api_name": "utils.train_utils.AverageMeter", "line_number": 52, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 75, "usage_type": "call"}, {"api_name": "torchvision.transforms.Resize", "line_number": 78, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 78, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 90, "usage_type": "call"}, {"api_name": "sklearn.covariance.LedoitWolf", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 103, "usage_type": "name"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 107, "usage_type": "call"}, {"api_name": "sklearn.metrics.auc", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 120, "usage_type": "attribute"}, {"api_name": "optuna.exceptions.TrialPruned", "line_number": 137, "usage_type": "call"}, {"api_name": "optuna.exceptions", "line_number": 137, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 150, "usage_type": "name"}, {"api_name": "kornia.augmentation.ColorJitter", "line_number": 151, "usage_type": "call"}, {"api_name": "kornia.augmentation", "line_number": 151, "usage_type": "name"}, {"api_name": "kornia.augmentation.RandomGrayscale", "line_number": 152, "usage_type": "call"}, {"api_name": "kornia.augmentation", "line_number": 152, "usage_type": "name"}, {"api_name": "kornia.augmentation.RandomHorizontalFlip", "line_number": 153, "usage_type": "call"}, {"api_name": "kornia.augmentation", "line_number": 153, "usage_type": "name"}, {"api_name": "utils.kornia_utils.GaussianBlur", "line_number": 154, "usage_type": "call"}, {"api_name": 
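The evaluation path in `val()` above is easy to check in isolation: fit a Ledoit-Wolf shrinkage covariance on features of normal data only, score validation features by Mahalanobis distance, and read off ROC AUC. A self-contained sketch on synthetic features (the real pipeline additionally L2-normalizes the embeddings first):

```python
# Self-contained check of the scoring logic used in val() above, on
# synthetic Gaussian features rather than learned embeddings.
import numpy as np
from sklearn.covariance import LedoitWolf
from sklearn.metrics import auc, roc_curve

rng = np.random.default_rng(0)
train = rng.normal(size=(500, 64))              # features of "normal" data only
val_normal = rng.normal(size=(100, 64))
val_anom = rng.normal(loc=1.5, size=(100, 64))  # shifted cluster = anomalies
val = np.vstack([val_normal, val_anom])
labels = np.array([0] * 100 + [1] * 100)

cov = LedoitWolf(assume_centered=False).fit(train)
scores = cov.mahalanobis(val)                   # larger distance = more anomalous
fpr, tpr, _ = roc_curve(labels, scores)
print(f"AUC: {auc(fpr, tpr):.3f}")              # should be well above 0.5
```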
"kornia.augmentation.RandomResizedCrop", "line_number": 155, "usage_type": "call"}, {"api_name": "kornia.augmentation", "line_number": 155, "usage_type": "name"}, {"api_name": "kornia.augmentation.Normalize", "line_number": 156, "usage_type": "call"}, {"api_name": "kornia.augmentation", "line_number": 156, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 156, "usage_type": "call"}, {"api_name": "vit_pytorch.nest.NesT", "line_number": 186, "usage_type": "call"}, {"api_name": "contrastive_framework.byol.BYOL", "line_number": 200, "usage_type": "call"}, {"api_name": "wrappers.dataset_selector.DatasetSelector.select_dataset", "line_number": 208, "usage_type": "call"}, {"api_name": "wrappers.dataset_selector.DatasetSelector", "line_number": 208, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 229, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 255, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 257, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 259, "usage_type": "call"}, {"api_name": "torch.cuda.manual_seed_all", "line_number": 260, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 260, "usage_type": "attribute"}, {"api_name": "torch.backends", "line_number": 261, "usage_type": "attribute"}, {"api_name": "torch.backends", "line_number": 262, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.auc", "line_number": 283, "usage_type": "name"}, {"api_name": "sklearn.metrics.auc", "line_number": 286, "usage_type": "name"}]} +{"seq_id": "5411074921", "text": "from django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Post\nfrom django.utils import timezone\nfrom .forms import PostForm\nfrom rest_framework.response import Response\nfrom rest_framework import generics\nfrom .serializers import PostSerializer\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom rest_framework import status\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views.generic.edit import CreateView\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import login\n\ndef home(request):\n if settings.DEBUG:\n template_name = \"index-dev.html\"\n else:\n template_name = \"index.html\"\n return render(request, template_name)\n\ndef register(request): \n if request.POST == 'POST': \n form = UserCreationForm(request.POST) \n if form.is_valid(): \n user = form.save()\n login(request, user)\n return redirect('home') \n else:\n console.log(\"not valid\")\n else: \n form = UserCreationForm() \n return render(request, 'registration/signup.html', {'form':form} )\n\nclass PostView(generics.RetrieveAPIView):\n queryset = Post.objects.all()\n serializer_class = PostSerializer\n\n #get post by Id or get all posts if there is no Id\n def get(self, request, *args, **kwargs):\n try:\n id = request.query_params[\"id\"]\n if id != None:\n post = Post.objects.get(id=id)\n serializer = PostSerializer(post)\n except:\n queryset = self.get_queryset()\n serializer = PostSerializer(queryset, many=True)\n \n return Response(serializer.data)\n\n #add new post\n @csrf_protect\n def post(self, request, *args, **kwargs):\n new_post_data = request.data\n\n user = User.objects.get(id=new_post_data[\"author\"])\n\n new_post = 
Post.objects.create(\n author= user,\n title=new_post_data[\"title\"], text=new_post_data[\"text\"],\n created_date=timezone.now(), published_date=timezone.now())\n\n queryset = self.get_queryset()\n serializer = PostSerializer(queryset, many=True)\n \n return Response(serializer.data)\n\n #change post by id\n @csrf_protect\n def put(self, request, *args, **kwargs):\n id = request.query_params[\"id\"]\n \n if id != None:\n post_object = Post.objects.get(id=id) \n data = request.data\n user = User.objects.get(id=data[\"author\"])\n\n post_object.author = user\n post_object.text = data[\"text\"]\n post_object.created_date = data['created_date']\n post_object.published_date = timezone.now()\n post_object.title = data[\"title\"]\n\n post_object.save()\n\n serializer = PostSerializer(post_object)\n \n return Response(serializer.data)\n\n #delete post by id\n @csrf_protect\n def delete(self, request, *args, **kwargs):\n id = request.query_params[\"id\"]\n \n if id != None:\n post_to_delete=Post.objects.get(id=id)\n post_to_delete.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n# from django.contrib.auth import login as auth_login\n# from django.contrib.auth.forms import UserCreationForm\n# from django.shortcuts import render, redirect\n\n# def signup(request):\n# if request.method == 'POST':\n# form = UserCreationForm(request.POST)\n# if form.is_valid():\n# user = form.save()\n# auth_login(request, user)\n# return redirect('home')\n# else:\n# form = UserCreationForm()\n# return render(request, 'signup.html', {'form': form})", "repo_name": "Sandreykina/testProjectOnDjango2", "sub_path": "blog/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3827, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "django.conf.settings.DEBUG", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.UserCreationForm", "line_number": 26, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 30, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.UserCreationForm", "line_number": 34, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.generics.RetrieveAPIView", "line_number": 37, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 37, "usage_type": "name"}, {"api_name": "models.Post.objects.all", "line_number": 38, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 38, "usage_type": "name"}, {"api_name": "serializers.PostSerializer", "line_number": 39, "usage_type": "name"}, {"api_name": "models.Post.objects.get", "line_number": 46, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 46, "usage_type": "name"}, {"api_name": "serializers.PostSerializer", "line_number": 47, "usage_type": "call"}, {"api_name": "serializers.PostSerializer", "line_number": 50, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 52, "usage_type": "call"}, {"api_name": 
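The `PostSerializer` that the view above imports from `.serializers` is not shown in this file. A plausible minimal definition, with fields inferred from how the view reads and writes the `Post` model; this is a guess, not the project's actual serializer:

```python
# Hypothetical sketch -- the project's real serializers.py is not included
# here; field names are inferred from the view code above.
from rest_framework import serializers
from .models import Post

class PostSerializer(serializers.ModelSerializer):
    class Meta:
        model = Post
        fields = ["id", "author", "title", "text",
                  "created_date", "published_date"]
```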
"django.contrib.auth.models.User.objects.get", "line_number": 59, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 59, "usage_type": "name"}, {"api_name": "models.Post.objects.create", "line_number": 61, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 61, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 64, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 64, "usage_type": "name"}, {"api_name": "serializers.PostSerializer", "line_number": 67, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 69, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_protect", "line_number": 55, "usage_type": "name"}, {"api_name": "models.Post.objects.get", "line_number": 77, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 77, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 77, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 79, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 79, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 84, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 84, "usage_type": "name"}, {"api_name": "serializers.PostSerializer", "line_number": 89, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 91, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_protect", "line_number": 72, "usage_type": "name"}, {"api_name": "models.Post.objects.get", "line_number": 99, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 99, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 99, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 101, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 101, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 101, "usage_type": "name"}, {"api_name": "django.views.decorators.csrf.csrf_protect", "line_number": 94, "usage_type": "name"}]} +{"seq_id": "43036747833", "text": "import pandas as pd\n\nfrom pyspark.sql import functions as F, types as T, SparkSession\n\n# initialize spark session\nspark = SparkSession.builder.getOrCreate()\n\n# read data\nbase_path = \"/home/brett/git/earnings_call_predictor/docs\"\ndf = pd.read_csv(f\"{base_path}/sample_price_data.csv\")\nsdf = spark.read.csv(f\"{base_path}/sample_price_data.csv\",\n header=True)\n\n# spark is a lazy executor, so it doesn't actually run the process until you cache/some other \"trigger\" operation\nsdf.cache().count()\n\n# add column\ndf.loc[:, \"price_vol\"] = df.loc[:, \"close_price\"] + df.loc[:, \"volume\"]\nsdf = sdf.withColumn(\"price_vol\",\n F.col(\"close_price\") + F.col(\"volume\"))\nsdf.cache().count()\n\n# rename column\ndf.rename({\"price_vol\": \"pv\"},\n axis=1,\n inplace=True)\n\nsdf = sdf.withColumnRenamed(\"price_vol\",\n \"pv\")\nsdf.cache().count()\n", "repo_name": "brian-nebeker/earnings_call_predictor", "sub_path": "docs/pyspark_examples.py", 
"file_name": "pyspark_examples.py", "file_ext": "py", "file_size_in_byte": 899, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "pyspark.sql.SparkSession.builder.getOrCreate", "line_number": 6, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 6, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.col", "line_number": 20, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "71907552982", "text": "import os\nimport crud, models, schemas\n\nfrom database import SessionLocal\nfrom fastapi import FastAPI, Depends, HTTPException, Request, Form\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import HTMLResponse\nfrom sqlalchemy.orm import Session\nfrom typing import List\n\n\napp = FastAPI(root_path=os.environ['ROOT_PATH'])\n\norigins = ['*']\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=['*'],\n allow_headers=['*']\n)\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\n# MAIN\n\n@app.get(\"/\")\ndef root():\n return {\"message\": \"Welcome to Smart Inventory\"}\n\n# USERS\n\n@app.get(\"/users/\", response_model=List[schemas.User])\ndef read_all_users(db: Session = Depends(get_db)):\n return crud.get_all_users(db)\n\n@app.get(\"/user/{uid}/\", response_model=schemas.User)\ndef read_user_by_uid(uid: str, db: Session = Depends(get_db)):\n db_user = crud.get_user_by_uid(db, uid)\n if db_user is None:\n raise HTTPException(status_code=404, detail=\"User not found\")\n return db_user\n\n@app.post(\"/user/\", response_model=schemas.User)\ndef create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):\n db_user = crud.get_user_by_uid(db, user.uid)\n if db_user:\n raise HTTPException(status_code=400, detail=\"User already exists\")\n return crud.create_user(db=db, user=user)\n\n@app.delete(\"/user/{uid}/\")\ndef delete_user_by_uid(uid: str, db: Session = Depends(get_db)):\n db_user = crud.get_user_by_uid(db, uid)\n if db_user is None:\n raise HTTPException(status_code=404, detail=\"User not found\")\n db.delete(db_user)\n db.commit()\n return {'Deleted user with uid': uid}\n\n# CABINETS\n\n@app.get(\"/cabinets/\", response_model=List[schemas.Cabinet])\ndef read_all_cabinets(db: Session = Depends(get_db)):\n return crud.get_all_cabinets(db)\n\n@app.get(\"/cabinet/{id}/\", response_model=schemas.Cabinet)\ndef read_cabinet_by_id(id: str, db: Session = Depends(get_db)):\n db_cabinet = crud.get_cabinet_by_id(db, id)\n if db_cabinet is None:\n raise HTTPException(status_code=404, detail=\"Cabinet not found\")\n return db_cabinet\n\n@app.post(\"/cabinet/\", response_model=schemas.Cabinet)\ndef create_cabinet(cabinet: schemas.CabinetCreate, db: Session = Depends(get_db)):\n db_cabinet = crud.get_cabinet_by_id(db, cabinet.id)\n if db_cabinet:\n raise HTTPException(status_code=400, detail=\"Cabinet already exists\")\n return crud.create_cabinet(db, cabinet)\n\n@app.delete(\"/cabinet/{id}/\")\ndef delete_cabinet_by_id(id: str, db: Session = Depends(get_db)):\n db_cabinet = crud.get_cabinet_by_id(db, id)\n if db_cabinet is None:\n raise HTTPException(status_code=404, detail=\"Cabinet not found\")\n db.delete(db_cabinet)\n db.commit()\n 
return {'Deleted cabinet with id': id}\n\n# CATEGORIES\n\n@app.get(\"/categories/\", response_model=List[schemas.Category]) # reads all categories\ndef read_all_categories(db: Session = Depends(get_db)):\n return crud.get_all_categories(db)\n\n@app.get(\"/categories/root/\", response_model=List[schemas.Category]) # reads all root categories\ndef read_root_categories(db: Session = Depends(get_db)):\n return crud.get_root_categories(db)\n\n@app.get(\"/category/{id}/\", response_model=schemas.Category)\ndef read_category_by_id(id: str, db: Session = Depends(get_db)):\n db_category = crud.get_category_by_id(db, id)\n if db_category is None:\n raise HTTPException(status_code=404, detail=\"Category not found\")\n return db_category\n\n@app.get(\"/categories/subcategories/{parent_id}/\", response_model=List[schemas.Category]) # reads all sub-categories of a category\ndef read_sub_categories(parent_id: int, db: Session = Depends(get_db)):\n parent_category = crud.get_category_by_id(db, parent_id)\n if not parent_category:\n raise HTTPException(status_code=404, detail=\"Parent category not found\")\n return crud.get_sub_categories(db, parent_id)\n\n@app.post(\"/category/\", response_model=schemas.Category)\ndef create_category(category: schemas.CategoryCreate, db: Session = Depends(get_db)):\n db_category = crud.get_category_by_title(db, category.title)\n if db_category:\n raise HTTPException(status_code=400, detail=\"Category already exists\")\n if category.parent_id is not None:\n db_parent_category = crud.get_category_by_id(db, category.parent_id)\n if db_parent_category is None:\n raise HTTPException(status_code=404, detail=\"Parent category not found\")\n return crud.create_category(db, category)\n\n@app.delete(\"/category/{id}/\")\ndef delete_category_by_id(id: int, db: Session = Depends(get_db)):\n db_category = crud.get_category_by_id(db, id)\n if db_category is None:\n raise HTTPException(status_code=404, detail=\"Category not found\")\n db.delete(db_category)\n db.commit()\n return {'Deleted category with id': id}\n\n# ITEMS\n\n@app.get(\"/items/\", response_model=List[schemas.Item])\ndef read_all_items(db: Session = Depends(get_db)):\n return crud.get_all_items(db)\n\n@app.get(\"/item/{id}/\", response_model=schemas.Item)\ndef read_item_by_id(id: int, db: Session = Depends(get_db)):\n db_item = crud.get_item_by_id(db, id)\n if db_item is None:\n raise HTTPException(status_code=404, detail=\"Item not found\")\n return db_item\n\n@app.get(\"/categories/{category_id}/items/\", response_model=List[schemas.Item]) # reads all items under a category\ndef read_all_items(category_id: int, db: Session = Depends(get_db)):\n category = crud.get_category_by_id(db, category_id)\n if not category:\n raise HTTPException(status_code=404, detail=\"Category not found\")\n return crud.get_items_by_category_id(db, category_id)\n\n@app.post(\"/item/\", response_model=schemas.Item)\ndef create_item(item: schemas.ItemCreate, db: Session = Depends(get_db)):\n if item.category_id is not None:\n db_category = crud.get_category_by_id(db, item.category_id)\n if not db_category:\n raise HTTPException(status_code=404, detail=\"Category not found\")\n db_item = crud.get_item_by_title(db, item.title)\n if db_item:\n raise HTTPException(status_code=400, detail=\"Item already exists\")\n return crud.create_item(db, item)\n\n@app.delete(\"/item/{id}/\")\ndef delete_item_by_id(id: int, db: Session = Depends(get_db)):\n db_item = crud.get_item_by_id(db, id)\n if db_item is None:\n raise HTTPException(status_code=404, 
detail=\"Item not found\")\n db.delete(db_item)\n db.commit()\n return {'Deleted item with id': id}\n\n# ORDER REQUESTS\n\n@app.get(\"/order-requests/\", response_model=List[schemas.OrderRequest])\ndef read_all_order_requests(db: Session = Depends(get_db)):\n return crud.get_all_order_requests(db)\n\n@app.get(\"/order-requests/item/{id}/\", response_model=List[schemas.OrderRequest])\ndef read_order_requests_by_item_id(id: int, db: Session = Depends(get_db)):\n db_item = crud.get_item_by_id(db, id)\n if db_item is None:\n raise HTTPException(status_code=404, detail=\"Item not found\")\n return crud.get_order_requests_by_item_id(db, id)\n\n@app.get(\"/order-requests/user/{uid}/\", response_model=List[schemas.OrderRequest])\ndef read_order_requests_by_user_id(uid: str, db: Session = Depends(get_db)):\n db_user = crud.get_user_by_uid(db, uid)\n if db_user is None:\n raise HTTPException(status_code=404, detail=\"User not found\")\n return crud.get_order_requests_by_user_id(db, uid)\n\n@app.get(\"/order-requests/state/{state}/\", response_model=List[schemas.OrderRequest])\ndef read_order_requests_by_state(state: int, db: Session = Depends(get_db)):\n return crud.get_order_requests_by_state(db, state)\n\n@app.post(\"/order-request/\", response_model=schemas.OrderRequest)\ndef create_order_request(order_request: schemas.OrderRequestCreate, db: Session = Depends(get_db)):\n db_item = crud.get_item_by_id(db, order_request.item_id)\n db_user = crud.get_user_by_uid(db, order_request.user_id)\n if db_item is None or db_user is None:\n raise HTTPException(status_code=404, detail=\"Item or user not found\")\n db_order_request = crud.get_order_requests_by_item_and_user_id(db, order_request.item_id, order_request.user_id)\n if db_order_request:\n raise HTTPException(status_code=400, detail=\"Order already requested by this user\")\n return crud.create_order_request(db, order_request)\n\n@app.delete(\"/order-request/{id}/\")\ndef delete_order_request_by_id(id: int, db: Session = Depends(get_db)):\n db_order_request = crud.get_order_request_by_id(db, id)\n if db_order_request is None:\n raise HTTPException(status_code=404, detail=\"Order request not found\")\n db.delete(db_order_request)\n db.commit()\n return {'Deleted order request with id': id}\n\n# STORAGE UNITS\n\n@app.get(\"/storage-units/\", response_model=List[schemas.StorageUnit])\ndef read_all_storage_units(db: Session = Depends(get_db)):\n return crud.get_all_storage_units(db)\n\n@app.get(\"/storage-unit/{id}/\", response_model=schemas.StorageUnit)\ndef read_storage_unit_by_id(id: int, db: Session = Depends(get_db)):\n db_storage_unit = crud.get_storage_unit_by_id(db, id)\n if db_storage_unit is None:\n raise HTTPException(status_code=404, detail=\"Storage unit not found\")\n return db_storage_unit\n\n@app.get(\"/storage-units/cabinet/{cabinet_id}/\", response_model=List[schemas.StorageUnit])\ndef read_storage_units_by_cabinet_id(cabinet_id: str, db: Session = Depends(get_db)):\n db_cabinet = crud.get_cabinet_by_id(db, cabinet_id)\n if db_cabinet is None:\n raise HTTPException(status_code=404, detail=\"Cabinet not found\")\n return crud.get_storage_units_by_cabinet_id(db, cabinet_id) \n\n@app.post(\"/storage-unit/\", response_model=schemas.StorageUnit)\ndef create_storage_unit(storage_unit: schemas.StorageUnitCreate, db: Session = Depends(get_db)):\n db_item = crud.get_item_by_id(db, storage_unit.item_id)\n if db_item is None:\n raise HTTPException(status_code=404, detail=\"Item not found\")\n if storage_unit.cabinet_id is not None:\n db_cabinet = 
crud.get_cabinet_by_id(db, storage_unit.cabinet_id)\n if db_cabinet is None:\n raise HTTPException(status_code=404, detail=\"Cabinet not found\")\n db_storage_unit = crud.get_storage_unit_by_id(db, storage_unit.id)\n if db_storage_unit:\n raise HTTPException(status_code=400, detail=\"Storage unit ID already assigned\")\n return crud.create_storage_unit(db, storage_unit)\n\n@app.delete(\"/storage-unit/{id}/\")\ndef delete_storage_unit_by_id(id: int, db: Session = Depends(get_db)):\n db_storage_unit = crud.get_storage_unit_by_id(db, id)\n if db_storage_unit is None:\n raise HTTPException(status_code=404, detail=\"Storage unit not found\")\n db.delete(db_storage_unit)\n db.commit()\n return {'Deleted storage unit with id': id}\n\n# CABINETS UNLOCK ATTEMPTS\n\n@app.get(\"/unlock-attempts/\", response_model=List[schemas.CabinetUnlockAttempt])\ndef read_all_unlock_attempts(db: Session = Depends(get_db)):\n return crud.get_all_unlock_attempts(db)\n\n@app.get(\"/unlock-attempts/cabinet/{cabinet_id}/\", response_model=List[schemas.CabinetUnlockAttempt])\ndef read_unlock_attempts_by_cabinet_id(cabinet_id: str, db: Session = Depends(get_db)):\n db_cabinet = crud.get_cabinet_by_id(db, cabinet_id)\n if db_cabinet is None:\n raise HTTPException(status_code=404, detail=\"Cabinet not found\")\n return crud.get_unlock_attempts_by_cabinet_id(db, cabinet_id)\n\n@app.get(\"/unlock-attempts/user/{uid}/\", response_model=List[schemas.CabinetUnlockAttempt])\ndef read_unlock_attempts_by_user_id(uid: str, db: Session = Depends(get_db)):\n db_user = crud.get_user_by_uid(db, uid)\n if db_user is None:\n raise HTTPException(status_code=404, detail=\"User not found\")\n return crud.get_unlock_attempts_by_user_id(db, uid)\n\n@app.get(\"/unlock-attempts/cabinet/{cabinet_id}/user/{uid}/\", response_model=List[schemas.CabinetUnlockAttempt])\ndef read_unlock_attempts_by_cabinet_and_user_id(cabinet_id, uid: str, db: Session = Depends(get_db)):\n db_user = crud.get_user_by_uid(db, uid)\n db_cabinet = crud.get_cabinet_by_id(db, cabinet_id)\n if db_user is None or db_cabinet is None:\n raise HTTPException(status_code=404, detail=\"User or cabinet not found\")\n return crud.get_unlock_attempts_by_cabinet_and_user_id(db, cabinet_id, uid)\n\n@app.post(\"/unlock-attempt/\", response_model=schemas.CabinetUnlockAttempt)\ndef create_unlock_attempt(unlock_attempt: schemas.CabinetUnlockAttemptCreate , db: Session = Depends(get_db)):\n db_user = crud.get_user_by_uid(db, unlock_attempt.user_id)\n db_cabinet = crud.get_cabinet_by_id(db, unlock_attempt.cabinet_id)\n if db_user is None or db_cabinet is None:\n raise HTTPException(status_code=404, detail=\"User or cabinet not found\")\n return crud.create_unlock_attempt(db, unlock_attempt)\n\n@app.delete(\"/unlock-attempts/days/{n}/\")\ndef delete_unlock_attempts_older_than(n: int, db: Session = Depends(get_db)):\n db.execute(f\"delete from cabinets_unlock_attempts where date < now() - interval '{n} days';\")\n db.commit()\n return {'Deleted all cabinets unlock attempts older than number of days': n}", "repo_name": "DeVinci-Innovation-Center/SMART-INVENTORY-DB-API", "sub_path": "API/src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 13159, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "fastapi.FastAPI", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "fastapi.middleware.cors.CORSMiddleware", 
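A quick way to exercise endpoints like the ones above is FastAPI's `TestClient`. The sketch below assumes the service module is importable as `main`, that `ROOT_PATH` is set in the environment, and that the database behind `SessionLocal` is reachable, so treat it as a template rather than a ready-to-run test:

```python
# Smoke-test sketch using FastAPI's TestClient (requires httpx).
from fastapi.testclient import TestClient
from main import app  # assumes the service file is importable as main.py

client = TestClient(app)

def test_root():
    resp = client.get("/")
    assert resp.status_code == 200
    assert resp.json() == {"message": "Welcome to Smart Inventory"}

def test_unknown_user_is_404():
    # read_user_by_uid raises HTTPException(404) when the uid is missing
    assert client.get("/user/no-such-uid/").status_code == 404
```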
"line_number": 17, "usage_type": "argument"}, {"api_name": "database.SessionLocal", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 41, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 41, "usage_type": "call"}, {"api_name": "crud.get_all_users", "line_number": 42, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 40, "usage_type": "name"}, {"api_name": "schemas.User", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 45, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 45, "usage_type": "call"}, {"api_name": "crud.get_user_by_uid", "line_number": 46, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 48, "usage_type": "call"}, {"api_name": "schemas.User", "line_number": 44, "usage_type": "attribute"}, {"api_name": "schemas.UserCreate", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 52, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 52, "usage_type": "call"}, {"api_name": "crud.get_user_by_uid", "line_number": 53, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 55, "usage_type": "call"}, {"api_name": "crud.create_user", "line_number": 56, "usage_type": "call"}, {"api_name": "schemas.User", "line_number": 51, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 59, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 59, "usage_type": "call"}, {"api_name": "crud.get_user_by_uid", "line_number": 60, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 62, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 70, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 70, "usage_type": "call"}, {"api_name": "crud.get_all_cabinets", "line_number": 71, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 69, "usage_type": "name"}, {"api_name": "schemas.Cabinet", "line_number": 69, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 74, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 74, "usage_type": "call"}, {"api_name": "crud.get_cabinet_by_id", "line_number": 75, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 77, "usage_type": "call"}, {"api_name": "schemas.Cabinet", "line_number": 73, "usage_type": "attribute"}, {"api_name": "schemas.CabinetCreate", "line_number": 81, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 81, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 81, "usage_type": "call"}, {"api_name": "crud.get_cabinet_by_id", "line_number": 82, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 84, "usage_type": "call"}, {"api_name": "crud.create_cabinet", "line_number": 85, "usage_type": "call"}, {"api_name": "schemas.Cabinet", "line_number": 80, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 88, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 88, "usage_type": "call"}, {"api_name": "crud.get_cabinet_by_id", "line_number": 89, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 91, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 99, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 99, "usage_type": 
"call"}, {"api_name": "crud.get_all_categories", "line_number": 100, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 98, "usage_type": "name"}, {"api_name": "schemas.Category", "line_number": 98, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 103, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 103, "usage_type": "call"}, {"api_name": "crud.get_root_categories", "line_number": 104, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 102, "usage_type": "name"}, {"api_name": "schemas.Category", "line_number": 102, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 107, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 107, "usage_type": "call"}, {"api_name": "crud.get_category_by_id", "line_number": 108, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 110, "usage_type": "call"}, {"api_name": "schemas.Category", "line_number": 106, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 114, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 114, "usage_type": "call"}, {"api_name": "crud.get_category_by_id", "line_number": 115, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 117, "usage_type": "call"}, {"api_name": "crud.get_sub_categories", "line_number": 118, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 113, "usage_type": "name"}, {"api_name": "schemas.Category", "line_number": 113, "usage_type": "attribute"}, {"api_name": "schemas.CategoryCreate", "line_number": 121, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 121, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 121, "usage_type": "call"}, {"api_name": "crud.get_category_by_title", "line_number": 122, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 124, "usage_type": "call"}, {"api_name": "crud.get_category_by_id", "line_number": 126, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 128, "usage_type": "call"}, {"api_name": "crud.create_category", "line_number": 129, "usage_type": "call"}, {"api_name": "schemas.Category", "line_number": 120, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 132, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 132, "usage_type": "call"}, {"api_name": "crud.get_category_by_id", "line_number": 133, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 135, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 143, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 143, "usage_type": "call"}, {"api_name": "crud.get_all_items", "line_number": 144, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 142, "usage_type": "name"}, {"api_name": "schemas.Item", "line_number": 142, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 147, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 147, "usage_type": "call"}, {"api_name": "crud.get_item_by_id", "line_number": 148, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 150, "usage_type": "call"}, {"api_name": "schemas.Item", "line_number": 146, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 154, "usage_type": "name"}, {"api_name": "fastapi.Depends", 
"line_number": 154, "usage_type": "call"}, {"api_name": "crud.get_category_by_id", "line_number": 155, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 157, "usage_type": "call"}, {"api_name": "crud.get_items_by_category_id", "line_number": 158, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 153, "usage_type": "name"}, {"api_name": "schemas.Item", "line_number": 153, "usage_type": "attribute"}, {"api_name": "schemas.ItemCreate", "line_number": 161, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 161, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 161, "usage_type": "call"}, {"api_name": "crud.get_category_by_id", "line_number": 163, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 165, "usage_type": "call"}, {"api_name": "crud.get_item_by_title", "line_number": 166, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 168, "usage_type": "call"}, {"api_name": "crud.create_item", "line_number": 169, "usage_type": "call"}, {"api_name": "schemas.Item", "line_number": 160, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 172, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 172, "usage_type": "call"}, {"api_name": "crud.get_item_by_id", "line_number": 173, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 175, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 183, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 183, "usage_type": "call"}, {"api_name": "crud.get_all_order_requests", "line_number": 184, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 182, "usage_type": "name"}, {"api_name": "schemas.OrderRequest", "line_number": 182, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 187, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 187, "usage_type": "call"}, {"api_name": "crud.get_item_by_id", "line_number": 188, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 190, "usage_type": "call"}, {"api_name": "crud.get_order_requests_by_item_id", "line_number": 191, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 186, "usage_type": "name"}, {"api_name": "schemas.OrderRequest", "line_number": 186, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 194, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 194, "usage_type": "call"}, {"api_name": "crud.get_user_by_uid", "line_number": 195, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 197, "usage_type": "call"}, {"api_name": "crud.get_order_requests_by_user_id", "line_number": 198, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 193, "usage_type": "name"}, {"api_name": "schemas.OrderRequest", "line_number": 193, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 201, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 201, "usage_type": "call"}, {"api_name": "crud.get_order_requests_by_state", "line_number": 202, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 200, "usage_type": "name"}, {"api_name": "schemas.OrderRequest", "line_number": 200, "usage_type": "attribute"}, {"api_name": "schemas.OrderRequestCreate", "line_number": 205, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", 
"line_number": 205, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 205, "usage_type": "call"}, {"api_name": "crud.get_item_by_id", "line_number": 206, "usage_type": "call"}, {"api_name": "crud.get_user_by_uid", "line_number": 207, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 209, "usage_type": "call"}, {"api_name": "crud.get_order_requests_by_item_and_user_id", "line_number": 210, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 212, "usage_type": "call"}, {"api_name": "crud.create_order_request", "line_number": 213, "usage_type": "call"}, {"api_name": "schemas.OrderRequest", "line_number": 204, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 216, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 216, "usage_type": "call"}, {"api_name": "crud.get_order_request_by_id", "line_number": 217, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 219, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 227, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 227, "usage_type": "call"}, {"api_name": "crud.get_all_storage_units", "line_number": 228, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 226, "usage_type": "name"}, {"api_name": "schemas.StorageUnit", "line_number": 226, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 231, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 231, "usage_type": "call"}, {"api_name": "crud.get_storage_unit_by_id", "line_number": 232, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 234, "usage_type": "call"}, {"api_name": "schemas.StorageUnit", "line_number": 230, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 238, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 238, "usage_type": "call"}, {"api_name": "crud.get_cabinet_by_id", "line_number": 239, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 241, "usage_type": "call"}, {"api_name": "crud.get_storage_units_by_cabinet_id", "line_number": 242, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 237, "usage_type": "name"}, {"api_name": "schemas.StorageUnit", "line_number": 237, "usage_type": "attribute"}, {"api_name": "schemas.StorageUnitCreate", "line_number": 245, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 245, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 245, "usage_type": "call"}, {"api_name": "crud.get_item_by_id", "line_number": 246, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 248, "usage_type": "call"}, {"api_name": "crud.get_cabinet_by_id", "line_number": 250, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 252, "usage_type": "call"}, {"api_name": "crud.get_storage_unit_by_id", "line_number": 253, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 255, "usage_type": "call"}, {"api_name": "crud.create_storage_unit", "line_number": 256, "usage_type": "call"}, {"api_name": "schemas.StorageUnit", "line_number": 244, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 259, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 259, "usage_type": "call"}, {"api_name": "crud.get_storage_unit_by_id", "line_number": 260, 
"usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 262, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 270, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 270, "usage_type": "call"}, {"api_name": "crud.get_all_unlock_attempts", "line_number": 271, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 269, "usage_type": "name"}, {"api_name": "schemas.CabinetUnlockAttempt", "line_number": 269, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 274, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 274, "usage_type": "call"}, {"api_name": "crud.get_cabinet_by_id", "line_number": 275, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 277, "usage_type": "call"}, {"api_name": "crud.get_unlock_attempts_by_cabinet_id", "line_number": 278, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 273, "usage_type": "name"}, {"api_name": "schemas.CabinetUnlockAttempt", "line_number": 273, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 281, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 281, "usage_type": "call"}, {"api_name": "crud.get_user_by_uid", "line_number": 282, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 284, "usage_type": "call"}, {"api_name": "crud.get_unlock_attempts_by_user_id", "line_number": 285, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 280, "usage_type": "name"}, {"api_name": "schemas.CabinetUnlockAttempt", "line_number": 280, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 288, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 288, "usage_type": "call"}, {"api_name": "crud.get_user_by_uid", "line_number": 289, "usage_type": "call"}, {"api_name": "crud.get_cabinet_by_id", "line_number": 290, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 292, "usage_type": "call"}, {"api_name": "crud.get_unlock_attempts_by_cabinet_and_user_id", "line_number": 293, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 287, "usage_type": "name"}, {"api_name": "schemas.CabinetUnlockAttempt", "line_number": 287, "usage_type": "attribute"}, {"api_name": "schemas.CabinetUnlockAttemptCreate", "line_number": 296, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 296, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 296, "usage_type": "call"}, {"api_name": "crud.get_user_by_uid", "line_number": 297, "usage_type": "call"}, {"api_name": "crud.get_cabinet_by_id", "line_number": 298, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 300, "usage_type": "call"}, {"api_name": "crud.create_unlock_attempt", "line_number": 301, "usage_type": "call"}, {"api_name": "schemas.CabinetUnlockAttempt", "line_number": 295, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 304, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 304, "usage_type": "call"}]} +{"seq_id": "32158804864", "text": "from datetime import datetime, timedelta\nfrom itertools import chain\nfrom django.utils import timezone\nfrom .models import Task, Event, Routine, TimeSlot\n\n\n# We'll need to be able to figure out a concrete date to place routine events on\n# by deriving it from the current date and their assigned weekday\ndef 
get_date_from_weekday(day):\n today = datetime.today()\n delta = (day - today.weekday()) % 7\n date = datetime.today() + timedelta(days=delta)\n return date\n\n\n# When we run the scheduling algorithm, we'll want to clean out any time slots\n# from last time\ndef clean_time_slots(date):\n TimeSlot.objects.filter(date=date).delete()\n\n\n# Here's the scheduler, it runs based on a given weekday rather than a date.\n# This makes things easier\ndef update_schedule(day):\n # Get the date and clean out time slots\n date = get_date_from_weekday(day)\n clean_time_slots(date)\n\n # Get all events and routine events for today, ordered by start time\n routines = Routine.objects.filter(day=day).order_by(\"start_time\")\n events = Event.objects.filter(date=date).order_by(\"start_time\")\n\n # Sort them together\n all_events = sorted(\n chain(routines, events), key=lambda instance: instance.start_time\n )\n\n # Get all the tasks, ordered by due date, time estimate and descending priority level\n tasks = Task.objects.filter(done=False).order_by(\n \"due_date\", \"time_estimate\", \"-priority\"\n )\n\n # Convert the iterable into a list, this is easier to handle and we can remove tasks\n # from the list once they have been allocated a time slot\n task_list = list(tasks)\n\n # Initialise an empty list for holding the time slots, we'll write them all to the\n # database at the end\n time_slots = []\n\n # Iterate over all the events and routines,\n # creating corresponding time slots\n for item in all_events:\n if isinstance(item, Event):\n ts = TimeSlot(\n date=date,\n start_time=item.get_start(),\n end_time=item.get_end(),\n associated_type=\"E\",\n associated_event=item,\n )\n\n elif isinstance(item, Routine):\n ts = TimeSlot(\n date=date,\n start_time=item.get_start(),\n end_time=item.get_end(),\n associated_type=\"R\",\n associated_routine=item,\n )\n\n # Before adding the timeslot to the list,\n # check that it has sensible timings.\n # If it doesn't, we can just discard it.\n if ts.start_time <= ts.end_time:\n time_slots.append(ts)\n\n # We can't use a for loop to iterate through the time slots,\n # because we're going to be changing the length of the list.\n # So we have to use a while loop and a counter to keep track of our position.\n pos = 1\n\n # Iterate through the time slots\n while pos < len(time_slots):\n # Start by assuming that there is at least one task which will fit in this time gap\n is_room = True\n\n # As long as tasks keep getting inserted,\n # we need to stay here.\n while is_room:\n # Take a note of where we are\n pos_start_loop = pos\n # Iterate over the tasks which are not yet assigned\n for task in task_list:\n # Get the time gap between this timeslot and the last\n prev = time_slots[pos - 1]\n curr = time_slots[pos]\n tdelta = datetime.combine(date, curr.get_start()) - datetime.combine(\n date, prev.get_end()\n )\n\n # If the gap is large enough,\n # create a time slot corresponding to the task and put it here\n if tdelta > task.time_estimate:\n start = prev.get_end()\n end = (\n datetime.combine(date, prev.get_end()) + task.time_estimate\n ).time()\n time_slots.insert(\n pos,\n TimeSlot(\n date=date,\n start_time=start,\n end_time=end,\n associated_type=\"T\",\n associated_task=task,\n ),\n )\n\n task_list.remove(task)\n\n # Increment the position,\n # unless we've reached the end of the list,\n # in which case there are no more spaces so stop.\n if pos <= len(time_slots):\n pos += 1\n else:\n break\n\n # If we reach the end of the loop and the position is the same,\n 
# that means there's no more room for tasks here,\n # so flag that there is no room and increment position.\n # Otherwise, we go for another loop.\n if pos == pos_start_loop:\n is_room = False\n pos += 1\n\n # Finally, we save all the time slots to the database\n for item in time_slots:\n item.save()\n", "repo_name": "highgateschool/MyTime", "sub_path": "mysite/tasks/scheduler.py", "file_name": "scheduler.py", "file_ext": "py", "file_size_in_byte": 5171, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "datetime.datetime.today", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 10, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 12, "usage_type": "call"}, {"api_name": "models.TimeSlot.objects.filter", "line_number": 19, "usage_type": "call"}, {"api_name": "models.TimeSlot.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "models.TimeSlot", "line_number": 19, "usage_type": "name"}, {"api_name": "models.Routine.objects.filter", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Routine.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.Routine", "line_number": 30, "usage_type": "name"}, {"api_name": "models.Event.objects.filter", "line_number": 31, "usage_type": "call"}, {"api_name": "models.Event.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.Event", "line_number": 31, "usage_type": "name"}, {"api_name": "itertools.chain", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Task.objects.filter", "line_number": 39, "usage_type": "call"}, {"api_name": "models.Task.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "models.Task", "line_number": 39, "usage_type": "name"}, {"api_name": "models.Event", "line_number": 54, "usage_type": "argument"}, {"api_name": "models.TimeSlot", "line_number": 55, "usage_type": "call"}, {"api_name": "models.Routine", "line_number": 63, "usage_type": "argument"}, {"api_name": "models.TimeSlot", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 98, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 98, "usage_type": "name"}, {"api_name": "datetime.datetime.combine", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 107, "usage_type": "name"}, {"api_name": "models.TimeSlot", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "20196751457", "text": "from django.db import models\nfrom django.contrib.auth.models import AbstractUser\nfrom django.utils import timezone\n# Create your models here.\n\n\nclass User(AbstractUser):\n pass\n\n\nclass Departamento(models.Model):\n nome = models.CharField(max_length=100)\n\n def __str__(self):\n return self.nome\n\n\nclass Secretaria(models.Model):\n nome = models.CharField(max_length=50)\n sigla = models.CharField(max_length=10)\n\n def __str__(self):\n return self.nome\n\n\nclass Setor(models.Model):\n\n class Meta:\n verbose_name_plural = 'Setores'\n\n nome = models.CharField(max_length=50)\n secretaria = models.ForeignKey(Secretaria, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.nome + ' - ' + self.secretaria.sigla\n\n\nclass Ticket(models.Model):\n ABERTO = 0\n EM_ATENDIMENTO 
= 1\n ENCERRADO = 2\n CANCELADO = 3\n\n STATUS = (\n (ABERTO, 'Aberto'),\n (EM_ATENDIMENTO, 'Em atendimento'),\n (ENCERRADO, 'Encerrado'),\n (CANCELADO, 'Cancelado')\n )\n\n departamento = models.ForeignKey(Departamento, on_delete=models.PROTECT)\n responsavel = models.ForeignKey(\n User, on_delete=models.PROTECT, null=True, blank=True,\n related_name='responsavel_por', editable=False)\n criado_em = models.DateTimeField(auto_now_add=True, editable=False)\n iniciado_em = models.DateTimeField(null=True, blank=True, editable=False)\n encerrado_em = models.DateTimeField(null=True, blank=True, editable=False)\n setor = models.ForeignKey(Setor, on_delete=models.PROTECT)\n status = models.SmallIntegerField(\n choices=STATUS, default=ABERTO, editable=False)\n patrimonio = models.CharField(max_length=5)\n contato = models.CharField(max_length=10, null=True, blank=True)\n\n class Meta:\n ordering = [\"criado_em\"]\n\n def iniciar_atendimento(self, user):\n self.responsavel = user\n self.status = self.EM_ATENDIMENTO\n self.iniciado_em = timezone.localtime()\n self.save()\n\n def encerrar_atendimento(self):\n self.status = self.ENCERRADO\n self.encerrado_em = timezone.localtime()\n self.save()\n\n def get_absolute_url(self):\n from django.shortcuts import reverse\n return reverse(\"ticket_detail\", kwargs={\"pk\": self.pk})\n\n\nclass Comentario(models.Model):\n ticket = models.ForeignKey(Ticket, on_delete=models.CASCADE)\n criado_em = models.DateTimeField(auto_now_add=True)\n texto = models.TextField()\n autor = models.ForeignKey(\n User, on_delete=models.PROTECT, null=True, blank=True, editable=False)\n\n class Meta:\n ordering = [\"-criado_em\"]\n\n def __str__(self):\n return self.texto\n", "repo_name": "cctquissama/tiqt", "sub_path": "tiqt/apps/core/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2681, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "14", "api": [{"api_name": "django.contrib.auth.models.AbstractUser", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.db.models.Model", "line_number": 38, "usage_type": "attribute"}, {"api_name": 
"django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.PROTECT", "line_number": 51, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 52, "usage_type": "name"}, {"api_name": "django.db.models.PROTECT", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 55, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 57, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": "django.db.models.PROTECT", "line_number": 58, "usage_type": "attribute"}, {"api_name": "django.db.models.SmallIntegerField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 61, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 61, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.utils.timezone.localtime", "line_number": 70, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 70, "usage_type": "name"}, {"api_name": "django.utils.timezone.localtime", "line_number": 75, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 75, "usage_type": "name"}, {"api_name": "django.shortcuts.reverse", "line_number": 80, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 83, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 83, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 84, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 84, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 84, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 85, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 85, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 86, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 86, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 87, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 87, "usage_type": "name"}, {"api_name": "django.db.models.PROTECT", "line_number": 88, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 88, "usage_type": "name"}]} +{"seq_id": "19792764667", "text": "import itertools\nfrom random import randint\n\n_DEFAULT_PRIME = 1073750017\n\n\ndef maximum_matching(edges, mod=_DEFAULT_PRIME):\n \"\"\"\n Returns the maximum cardinality 
matching of any simple graph (undirected, unweighted, no self-loops)\n Uses a randomized algorithm to compute the rank of the Tutte matrix\n The rank of the Tutte matrix is equal to twice the size of the maximum matching with high probability\n The probability for error is not more than n/mod\n\n Complexity: O(n ^ 3) worst case, O(n * |matching_size|) on average\n\n :param edges: a list of edges, assume nodes can be anything numbered from 0 to max number in edges\n :param mod: optional, a large random prime\n :return: the size of the maximum cardinality matching of the graph\n \"\"\"\n\n n = max(itertools.chain(*edges)) + 1\n matrix = _get_tutte_matrix(n, edges, mod)\n return _gauss(n, matrix, mod) // 2\n\n\ndef _get_tutte_matrix(n, edges, mod):\n matrix = [[0] * n for _ in range(n)]\n\n for u, v in edges:\n val = randint(1, mod - 1)\n matrix[u][v], matrix[v][u] = val, mod - val\n\n return matrix\n\n\ndef _gauss(n, matrix, mod):\n r = 0\n for j in range(n):\n k = r\n while k < n and not matrix[k][j]:\n k += 1\n\n if k == n:\n continue\n\n inv = pow(matrix[k][j], mod - 2, mod)\n for i in range(n):\n matrix[k][i] = inv * matrix[k][i] % mod\n matrix[k], matrix[r] = matrix[r], matrix[k]\n\n for u in range(r + 1, n):\n # reducing indexing costs to gain performance boost for the next loop\n matrix_u, matrix_r = matrix[u], matrix[r]\n if matrix_u[j]:\n for v in range(j + 1, n):\n if matrix_r[v]:\n matrix_u[v] = (matrix_u[v] - matrix_r[v] * matrix_u[j]) % mod\n\n r += 1\n\n return r\n", "repo_name": "cheran-senthil/PyRival", "sub_path": "pyrival/graphs/maximum_matching.py", "file_name": "maximum_matching.py", "file_ext": "py", "file_size_in_byte": 1845, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1039, "dataset": "github-code", "pt": "12", "api": [{"api_name": "itertools.chain", "line_number": 21, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "38363545334", "text": "from PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout, QVBoxLayout, QGridLayout, QLabel, QComboBox, \\\n QPushButton, QLineEdit, QListWidget\n\nfrom PyQt5.QtCore import Qt,QTimer\n\nimport sys\nimport serial\nimport serial.tools.list_ports as listport\n\nport=serial.Serial()\n\n\nclass Pencere(QWidget):\n def __init__(self):\n super().__init__()\n self.arayuz()\n self.show()\n\n\n def arayuz(self): # UI design\n self.setWindowTitle(\"Modbus RTU by Python\")\n vboxAna=QVBoxLayout()\n hbox1=QHBoxLayout()\n grid1=QGridLayout()\n labelComport=QLabel(\"COM Port\")\n grid1.addWidget(labelComport,1,1,Qt.AlignLeft)\n self.comboboxComPort = QComboBox()\n grid1.addWidget(self.comboboxComPort,2,1,Qt.AlignLeft)\n labelBaudrate=QLabel(\"Baudrate\")\n grid1.addWidget(labelBaudrate, 1, 2, Qt.AlignLeft)\n self.comboboxBaudrate = QComboBox()\n grid1.addWidget(self.comboboxBaudrate, 2, 2, Qt.AlignLeft)\n labelAyarlar = QLabel(\"Ayarlar\")\n grid1.addWidget(labelAyarlar, 1, 3, Qt.AlignLeft)\n self.comboboxAyarlar = QComboBox()\n grid1.addWidget(self.comboboxAyarlar, 2, 3, Qt.AlignLeft)\n self.pushbuttonBaglan = QPushButton(\"Bağlan\")\n grid1.addWidget(self.pushbuttonBaglan, 1, 4, Qt.AlignLeft)\n self.pushbuttonBaglantiKes = QPushButton(\"Bağlantı Kes\") # Close Connection Button\n grid1.addWidget(self.pushbuttonBaglantiKes, 2, 4, Qt.AlignLeft)\n\n hbox1.addLayout(grid1)\n\n vboxAna.addLayout(hbox1)\n vboxAna.addSpacing(40)\n hbox2=QHBoxLayout()\n grid2= QGridLayout()\n\n labelAdres = QLabel(\"Adres\")\n grid2.addWidget(labelAdres, 1, 1, Qt.AlignLeft)\n 
self.lineeditAdres = QLineEdit()\n self.lineeditAdres.setText(\"01\")\n self.lineeditAdres.setFixedWidth(40)\n grid2.addWidget(self.lineeditAdres, 2, 1, Qt.AlignLeft)\n\n labelKomut = QLabel(\"Komut ?\")\n labelKomut.setToolTip(\"01- Tek Bobin Durumu Oku \\n\" +\n \"02- Giriş Durumu Oku\\n03- Tutucu Registerleri Oku \\n\" +\n \"04- Giriş Registerleri Oku \\n\" +\n \"05- Sadece Bir bobin durumu değiştir \\n\" +\n \"06- Sadece Bir Register durumunu değiştir \\n\" +\n \"0F- Birden fazla Bobin içeriği değiştir \\n\" +\n \"10- Birden fazla Registere Değer atamak \")\n grid2.addWidget(labelKomut, 1, 2, Qt.AlignLeft)\n self.lineeditKomut = QLineEdit()\n self.lineeditKomut.setText(\"06\")\n self.lineeditKomut.setFixedWidth(40)\n grid2.addWidget(self.lineeditKomut, 2, 2, Qt.AlignLeft)\n labelParametre = QLabel(\"Parametre\")\n grid2.addWidget(labelParametre, 1, 3, Qt.AlignLeft)\n self.lineeditParametre = QLineEdit()\n self.lineeditParametre.setText(\"20010DAC\")\n self.lineeditParametre.setFixedWidth(160)\n grid2.addWidget(self.lineeditParametre, 2, 3, Qt.AlignLeft)\n labelCrc = QLabel(\"CRC\")\n grid2.addWidget(labelCrc, 1, 4, Qt.AlignLeft)\n self.lineeditCrc = QLineEdit()\n self.lineeditCrc.setFixedWidth(40)\n grid2.addWidget(self.lineeditCrc, 2, 4, Qt.AlignLeft)\n hbox2.addLayout(grid2)\n\n vboxAna.addLayout(hbox2)\n vbox1 = QVBoxLayout()\n self.pushbuttonGonder = QPushButton(\"Gönder\")\n self.listCevap = QListWidget()\n labelCevap=QLabel(\"Gelen Cevap\")\n vbox1.addWidget(self.pushbuttonGonder) # Send Data\n vbox1.addWidget(labelCevap)\n vbox1.addWidget(self.listCevap)\n vboxAna.addLayout(vbox1)\n\n self.setLayout(vboxAna)\n self.ilkdurum()\n self.olaylar()\n\n def ilkdurum(self): #initialize\n portlar=listport.comports()\n # Put all serial interfaces in combobox\n for cp in portlar:\n self.comboboxComPort.addItem(str(cp.device))\n ayarliste= [\"8,O,1\",\"8,E,1\",\"8,N,2\"]\n liste=[\"9600\",\"14400\", \"19200\", \"38400\", \"57600\", \"115200\"]\n self.comboboxBaudrate.addItems(liste)\n self.comboboxAyarlar.addItems(ayarliste)\n self.pushbuttonBaglantiKes.setEnabled(False)\n self.pushbuttonGonder.setEnabled(False)\n\n def olaylar(self): #Events\n self.pushbuttonBaglan.clicked.connect(self.baglan) #open serialport\n self.pushbuttonBaglantiKes.clicked.connect(self.baglantikes) #close serialport\n self.pushbuttonGonder.clicked.connect(self.gonder) # send data\n \n\n def baglan(self):\n\n port.baudrate = int(self.comboboxBaudrate.currentText())\n ayar=self.comboboxAyarlar.currentText() # take settings from setting combobox\n\n port.bytesize = serial.EIGHTBITS\n\n if ayar[2] == \"E\":\n port.parity = serial.PARITY_EVEN\n if ayar[2] == \"O\":\n port.parity = serial.PARITY_ODD\n if ayar[2] == \"N\":\n port.parity = serial.PARITY_NONE\n if ayar[4] == \"1\":\n port.stopbits = serial.STOPBITS_ONE\n if ayar[4] == \"2\":\n port.stopbits = serial.STOPBITS_TWO\n port.port = self.comboboxComPort.currentText()\n if not port.is_open:\n port.open()\n if port.is_open:\n self.pushbuttonBaglan.setEnabled(False)\n self.pushbuttonGonder.setEnabled(True)\n self.pushbuttonBaglantiKes.setEnabled(True)\n self.timer=QTimer()\n self.timer.timeout.connect(self.verial)\n self.timer.start(100)\n\n\n\n\n def baglantikes(self): #close connection\n\n if port.is_open:\n port.close()\n if not port.is_open:\n self.pushbuttonBaglan.setEnabled(True)\n self.pushbuttonGonder.setEnabled(False)\n self.pushbuttonBaglantiKes.setEnabled(False)\n self.timer.stop()\n\n def verial(self): #read data from serialport\n veri=\"\"\n if 
port.is_open:\n gelenVeri = port.read(port.in_waiting)\n\n if not gelenVeri==b'':\n for a in gelenVeri:\n if len(str(hex(a))[2:4].upper())==1:\n veri+=\"0\"+str(hex(a))[2:4].upper()+\"-\"\n\n else:\n veri+= str(hex(a))[2:4].upper()+\"-\"\n\n self.listCevap.insertItem(0, veri)\n\n\n\n def gonder(self): #send data from serialport\n data=self.lineeditAdres.text()+self.lineeditKomut.text()+self.lineeditParametre.text()\n\n data1=[]\n\n for a in range(0,len(data), 2):\n data1.append(int(data[a:a+2],16))\n msbyte, lsbyte =self.crc16(data1)\n self.lineeditCrc.setText(str(hex(msbyte))[2:4].upper()+str(hex(lsbyte))[2:4].upper())\n\n data1.append(msbyte)\n data1.append(lsbyte)\n\n\n port.write(data1)\n\n #calculation of crc16\n def crc16(self,data: bytes, poly=0xA001):\n\n crc = 0xFFFF\n for b in data:\n\n cur_byte = 0xFF & b\n\n for _ in range(0, 8):\n if (crc & 0x0001) ^ (cur_byte & 0x0001):\n crc = (crc >> 1) ^ poly\n else:\n crc >>= 1\n cur_byte >>= 1\n\n crc = (crc << 8) | ((crc >> 8) & 0xFF)\n msbyte = crc >> 8\n lsbyte = crc & 0x00FF\n #returns tuple\n return msbyte & 0xFF, lsbyte & 0xFF\n\n#main\n\nif __name__==\"__main__\":\n app=QApplication(sys.argv)\n pen=Pencere()\n sys.exit(app.exec())", "repo_name": "eaglebjkbv/PythonExamples", "sub_path": "ModbusPythonQt/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7421, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "12", "api": [{"api_name": "serial.Serial", "line_number": 10, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 13, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 22, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 24, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 25, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 26, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 26, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 27, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 28, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 28, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 29, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 30, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 30, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 31, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 32, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 32, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 33, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 34, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 34, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 35, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 36, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 36, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 37, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 38, "usage_type": "attribute"}, {"api_name": 
"PyQt5.QtCore.Qt", "line_number": 38, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 39, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 40, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 40, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 46, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 47, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 49, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 50, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 50, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 51, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 54, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 54, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 56, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 64, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 64, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 65, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 68, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 68, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 69, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 70, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 70, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 71, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 74, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 74, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 75, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 76, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 76, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 77, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 79, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 79, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 83, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 84, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QListWidget", "line_number": 85, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 86, "usage_type": "call"}, {"api_name": "serial.tools.list_ports.comports", "line_number": 97, "usage_type": "call"}, {"api_name": "serial.tools.list_ports", "line_number": 97, "usage_type": "name"}, {"api_name": "serial.EIGHTBITS", "line_number": 119, "usage_type": "attribute"}, {"api_name": "serial.PARITY_EVEN", "line_number": 122, "usage_type": "attribute"}, {"api_name": "serial.PARITY_ODD", "line_number": 124, "usage_type": "attribute"}, {"api_name": "serial.PARITY_NONE", "line_number": 126, "usage_type": "attribute"}, {"api_name": "serial.STOPBITS_ONE", "line_number": 128, "usage_type": "attribute"}, {"api_name": "serial.STOPBITS_TWO", "line_number": 130, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 138, "usage_type": "call"}, {"api_name": 
"PyQt5.QtWidgets.QApplication", "line_number": 212, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 212, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 214, "usage_type": "call"}]} +{"seq_id": "38516921232", "text": "# @file WindowsVsToolChain.py\r\n# Plugin to configures paths for the VS2017 and VS2019 tool chain\r\n##\r\n# This plugin works in conjuncture with the tools_def\r\n#\r\n# Copyright (c) Microsoft Corporation\r\n# SPDX-License-Identifier: BSD-2-Clause-Patent\r\n##\r\nimport os\r\nimport logging\r\nfrom edk2toolext.environment.plugintypes.uefi_build_plugin import IUefiBuildPlugin\r\nimport edk2toollib.windows.locate_tools as locate_tools\r\nfrom edk2toollib.windows.locate_tools import FindWithVsWhere\r\nfrom edk2toolext.environment import shell_environment\r\nfrom edk2toolext.environment import version_aggregator\r\nfrom edk2toollib.utility_functions import GetHostInfo\r\n\r\n\r\nclass WindowsVsToolChain(IUefiBuildPlugin):\r\n\r\n def do_post_build(self, thebuilder):\r\n return 0\r\n\r\n def do_pre_build(self, thebuilder):\r\n self.Logger = logging.getLogger(\"WindowsVsToolChain\")\r\n interesting_keys = [\"ExtensionSdkDir\", \"INCLUDE\", \"LIB\", \"LIBPATH\", \"UniversalCRTSdkDir\",\r\n \"UCRTVersion\", \"WindowsLibPath\", \"WindowsSdkBinPath\", \"WindowsSdkDir\", \"WindowsSdkVerBinPath\",\r\n \"WindowsSDKVersion\", \"VCToolsInstallDir\", \"Path\"]\r\n\r\n #\r\n # VS2017 - Follow VS2017 where there is potential for many versions of the tools.\r\n # If a specific version is required then the user must set both env variables:\r\n # VS150INSTALLPATH: base install path on system to VC install dir. Here you will find the VC folder, etc\r\n # VS150TOOLVER: version number for the VC compiler tools\r\n # VS2017_PREFIX: path to MSVC compiler folder with trailing slash (can be used instead of two vars above)\r\n # VS2017_HOST: set the host architecture to use for host tools, and host libs, etc\r\n if thebuilder.env.GetValue(\"TOOL_CHAIN_TAG\") == \"VS2017\":\r\n\r\n # check to see if host is configured\r\n # HostType for VS2017 should be (defined in tools_def):\r\n # x86 == 32bit Intel\r\n # x64 == 64bit Intel\r\n # arm == 32bit Arm\r\n # arm64 == 64bit Arm\r\n #\r\n HostType = shell_environment.GetEnvironment().get_shell_var(\"VS2017_HOST\")\r\n if HostType is not None:\r\n HostType = HostType.lower()\r\n self.Logger.info(\r\n f\"HOST TYPE defined by environment. Host Type is {HostType}\")\r\n else:\r\n HostInfo = GetHostInfo()\r\n if HostInfo.arch == \"x86\":\r\n if HostInfo.bit == \"32\":\r\n HostType = \"x86\"\r\n elif HostInfo.bit == \"64\":\r\n HostType = \"x64\"\r\n else:\r\n raise NotImplementedError()\r\n\r\n # VS2017_HOST options are not exactly the same as QueryVcVariables. 
This translates.\r\n VC_HOST_ARCH_TRANSLATOR = {\r\n \"x86\": \"x86\", \"x64\": \"AMD64\", \"arm\": \"not supported\", \"arm64\": \"not supported\"}\r\n\r\n # check to see if full path already configured\r\n if shell_environment.GetEnvironment().get_shell_var(\"VS2017_PREFIX\") != None:\r\n self.Logger.info(\"VS2017_PREFIX is already set.\")\r\n\r\n else:\r\n install_path = self._get_vs_install_path(\r\n \"VS2017\".lower(), \"VS150INSTALLPATH\")\r\n vc_ver = self._get_vc_version(install_path, \"VS150TOOLVER\")\r\n\r\n if install_path is None or vc_ver is None:\r\n self.Logger.error(\r\n \"Failed to configure environment for VS2017\")\r\n return -1\r\n\r\n version_aggregator.GetVersionAggregator().ReportVersion(\r\n \"Visual Studio Install Path\", install_path, version_aggregator.VersionTypes.INFO)\r\n version_aggregator.GetVersionAggregator().ReportVersion(\r\n \"VC Version\", vc_ver, version_aggregator.VersionTypes.TOOL)\r\n\r\n # make VS2017_PREFIX to align with tools_def.txt\r\n prefix = os.path.join(install_path, \"VC\",\r\n \"Tools\", \"MSVC\", vc_ver)\r\n prefix = prefix + os.path.sep\r\n shell_environment.GetEnvironment().set_shell_var(\"VS2017_PREFIX\", prefix)\r\n shell_environment.GetEnvironment().set_shell_var(\"VS2017_HOST\", HostType)\r\n\r\n shell_env = shell_environment.GetEnvironment()\r\n # Use the tools lib to determine the correct values for the vars that interest us.\r\n vs_vars = locate_tools.QueryVcVariables(\r\n interesting_keys, VC_HOST_ARCH_TRANSLATOR[HostType], vs_version=\"vs2017\")\r\n for (k, v) in vs_vars.items():\r\n shell_env.set_shell_var(k, v)\r\n\r\n # now confirm it exists\r\n if not os.path.exists(shell_environment.GetEnvironment().get_shell_var(\"VS2017_PREFIX\")):\r\n self.Logger.error(\"Path for VS2017 toolchain is invalid\")\r\n return -2\r\n\r\n #\r\n # VS2019 - Follow VS2019 where there is potential for many versions of the tools.\r\n # If a specific version is required then the user must set both env variables:\r\n # VS160INSTALLPATH: base install path on system to VC install dir. Here you will find the VC folder, etc\r\n # VS160TOOLVER: version number for the VC compiler tools\r\n # VS2019_PREFIX: path to MSVC compiler folder with trailing slash (can be used instead of two vars above)\r\n # VS2019_HOST: set the host architecture to use for host tools, and host libs, etc\r\n elif thebuilder.env.GetValue(\"TOOL_CHAIN_TAG\") == \"VS2019\":\r\n\r\n # check to see if host is configured\r\n # HostType for VS2019 should be (defined in tools_def):\r\n # x86 == 32bit Intel\r\n # x64 == 64bit Intel\r\n # arm == 32bit Arm\r\n # arm64 == 64bit Arm\r\n #\r\n HostType = shell_environment.GetEnvironment().get_shell_var(\"VS2019_HOST\")\r\n if HostType is not None:\r\n HostType = HostType.lower()\r\n self.Logger.info(\r\n f\"HOST TYPE defined by environment. Host Type is {HostType}\")\r\n else:\r\n HostInfo = GetHostInfo()\r\n if HostInfo.arch == \"x86\":\r\n if HostInfo.bit == \"32\":\r\n HostType = \"x86\"\r\n elif HostInfo.bit == \"64\":\r\n HostType = \"x64\"\r\n else:\r\n raise NotImplementedError()\r\n\r\n # VS2019_HOST options are not exactly the same as QueryVcVariables. 
This translates.\r\n VC_HOST_ARCH_TRANSLATOR = {\r\n \"x86\": \"x86\", \"x64\": \"AMD64\", \"arm\": \"not supported\", \"arm64\": \"not supported\"}\r\n\r\n # check to see if full path already configured\r\n if shell_environment.GetEnvironment().get_shell_var(\"VS2019_PREFIX\") != None:\r\n self.Logger.info(\"VS2019_PREFIX is already set.\")\r\n\r\n else:\r\n install_path = self._get_vs_install_path(\r\n \"VS2019\".lower(), \"VS160INSTALLPATH\")\r\n vc_ver = self._get_vc_version(install_path, \"VS160TOOLVER\")\r\n\r\n if install_path is None or vc_ver is None:\r\n self.Logger.error(\r\n \"Failed to configure environment for VS2019\")\r\n return -1\r\n\r\n version_aggregator.GetVersionAggregator().ReportVersion(\r\n \"Visual Studio Install Path\", install_path, version_aggregator.VersionTypes.INFO)\r\n version_aggregator.GetVersionAggregator().ReportVersion(\r\n \"VC Version\", vc_ver, version_aggregator.VersionTypes.TOOL)\r\n\r\n # make VS2019_PREFIX to align with tools_def.txt\r\n prefix = os.path.join(install_path, \"VC\",\r\n \"Tools\", \"MSVC\", vc_ver)\r\n prefix = prefix + os.path.sep\r\n shell_environment.GetEnvironment().set_shell_var(\"VS2019_PREFIX\", prefix)\r\n shell_environment.GetEnvironment().set_shell_var(\"VS2019_HOST\", HostType)\r\n\r\n shell_env = shell_environment.GetEnvironment()\r\n # Use the tools lib to determine the correct values for the vars that interest us.\r\n vs_vars = locate_tools.QueryVcVariables(\r\n interesting_keys, VC_HOST_ARCH_TRANSLATOR[HostType], vs_version=\"vs2019\")\r\n for (k, v) in vs_vars.items():\r\n shell_env.set_shell_var(k, v)\r\n\r\n # now confirm it exists\r\n if not os.path.exists(shell_environment.GetEnvironment().get_shell_var(\"VS2019_PREFIX\")):\r\n self.Logger.error(\"Path for VS2019 toolchain is invalid\")\r\n return -2\r\n\r\n return 0\r\n\r\n def _get_vs_install_path(self, vs_version, varname):\r\n # check if already specified\r\n path = None\r\n if varname is not None:\r\n path = shell_environment.GetEnvironment().get_shell_var(varname)\r\n\r\n if(path is None):\r\n # Not specified...find latest\r\n try:\r\n path = FindWithVsWhere(vs_version=vs_version)\r\n except (EnvironmentError, ValueError, RuntimeError) as e:\r\n self.Logger.error(str(e))\r\n return None\r\n\r\n if path is not None and os.path.exists(path):\r\n self.Logger.debug(\"Found VS instance for %s\", vs_version)\r\n else:\r\n self.Logger.error(\r\n f\"VsWhere successfully executed, but could not find VS instance for {vs_version}.\")\r\n return path\r\n\r\n def _get_vc_version(self, path, varname):\r\n # check if already specified\r\n vc_ver = shell_environment.GetEnvironment().get_shell_var(varname)\r\n if (path is None):\r\n self.Logger.critical(\r\n \"Failed to find Visual Studio tools. Might need to check for VS install\")\r\n return vc_ver\r\n if(vc_ver is None):\r\n # Not specified...find latest\r\n p2 = os.path.join(path, \"VC\", \"Tools\", \"MSVC\")\r\n if not os.path.isdir(p2):\r\n self.Logger.critical(\r\n \"Failed to find VC tools. 
Might need to check for VS install\")\r\n return vc_ver\r\n vc_ver = os.listdir(p2)[-1].strip() # get last in list\r\n self.Logger.debug(\"Found VC Tool version is %s\" % vc_ver)\r\n return vc_ver\r\n", "repo_name": "tianocore/edk2", "sub_path": "BaseTools/Plugin/WindowsVsToolChain/WindowsVsToolChain.py", "file_name": "WindowsVsToolChain.py", "file_ext": "py", "file_size_in_byte": 10621, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3931, "dataset": "github-code", "pt": "12", "api": [{"api_name": "edk2toolext.environment.plugintypes.uefi_build_plugin.IUefiBuildPlugin", "line_number": 19, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 25, "usage_type": "call"}, {"api_name": "edk2toolext.environment.shell_environment.GetEnvironment", "line_number": 46, "usage_type": "call"}, {"api_name": "edk2toolext.environment.shell_environment", "line_number": 46, "usage_type": "name"}, {"api_name": "edk2toollib.utility_functions.GetHostInfo", "line_number": 52, "usage_type": "call"}, {"api_name": "edk2toolext.environment.shell_environment.GetEnvironment", "line_number": 66, "usage_type": "call"}, {"api_name": "edk2toolext.environment.shell_environment", "line_number": 66, "usage_type": "name"}, {"api_name": "edk2toolext.environment.version_aggregator.GetVersionAggregator", "line_number": 79, "usage_type": "call"}, {"api_name": "edk2toolext.environment.version_aggregator", "line_number": 79, "usage_type": "name"}, {"api_name": "edk2toolext.environment.version_aggregator.VersionTypes", "line_number": 80, "usage_type": "attribute"}, {"api_name": "edk2toolext.environment.version_aggregator", "line_number": 80, "usage_type": "name"}, {"api_name": "edk2toolext.environment.version_aggregator.GetVersionAggregator", "line_number": 81, "usage_type": "call"}, {"api_name": "edk2toolext.environment.version_aggregator", "line_number": 81, "usage_type": "name"}, {"api_name": "edk2toolext.environment.version_aggregator.VersionTypes", "line_number": 82, "usage_type": "attribute"}, {"api_name": "edk2toolext.environment.version_aggregator", "line_number": 82, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "edk2toolext.environment.shell_environment.GetEnvironment", "line_number": 88, "usage_type": "call"}, {"api_name": "edk2toolext.environment.shell_environment", "line_number": 88, "usage_type": "name"}, {"api_name": "edk2toolext.environment.shell_environment.GetEnvironment", "line_number": 89, "usage_type": "call"}, {"api_name": "edk2toolext.environment.shell_environment", "line_number": 89, "usage_type": "name"}, {"api_name": "edk2toolext.environment.shell_environment.GetEnvironment", "line_number": 91, "usage_type": "call"}, {"api_name": "edk2toolext.environment.shell_environment", "line_number": 91, "usage_type": "name"}, {"api_name": "edk2toollib.windows.locate_tools.QueryVcVariables", "line_number": 93, "usage_type": "call"}, {"api_name": "edk2toollib.windows.locate_tools", "line_number": 93, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "edk2toolext.environment.shell_environment.GetEnvironment", "line_number": 99, "usage_type": "call"}, {"api_name": "edk2toolext.environment.shell_environment", "line_number": 99, "usage_type": "name"}, 
{"api_name": "edk2toolext.environment.shell_environment.GetEnvironment", "line_number": 119, "usage_type": "call"}, {"api_name": "edk2toolext.environment.shell_environment", "line_number": 119, "usage_type": "name"}, {"api_name": "edk2toollib.utility_functions.GetHostInfo", "line_number": 125, "usage_type": "call"}, {"api_name": "edk2toolext.environment.shell_environment.GetEnvironment", "line_number": 139, "usage_type": "call"}, {"api_name": "edk2toolext.environment.shell_environment", "line_number": 139, "usage_type": "name"}, {"api_name": "edk2toolext.environment.version_aggregator.GetVersionAggregator", "line_number": 152, "usage_type": "call"}, {"api_name": "edk2toolext.environment.version_aggregator", "line_number": 152, "usage_type": "name"}, {"api_name": "edk2toolext.environment.version_aggregator.VersionTypes", "line_number": 153, "usage_type": "attribute"}, {"api_name": "edk2toolext.environment.version_aggregator", "line_number": 153, "usage_type": "name"}, {"api_name": "edk2toolext.environment.version_aggregator.GetVersionAggregator", "line_number": 154, "usage_type": "call"}, {"api_name": "edk2toolext.environment.version_aggregator", "line_number": 154, "usage_type": "name"}, {"api_name": "edk2toolext.environment.version_aggregator.VersionTypes", "line_number": 155, "usage_type": "attribute"}, {"api_name": "edk2toolext.environment.version_aggregator", "line_number": 155, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "edk2toolext.environment.shell_environment.GetEnvironment", "line_number": 161, "usage_type": "call"}, {"api_name": "edk2toolext.environment.shell_environment", "line_number": 161, "usage_type": "name"}, {"api_name": "edk2toolext.environment.shell_environment.GetEnvironment", "line_number": 162, "usage_type": "call"}, {"api_name": "edk2toolext.environment.shell_environment", "line_number": 162, "usage_type": "name"}, {"api_name": "edk2toolext.environment.shell_environment.GetEnvironment", "line_number": 164, "usage_type": "call"}, {"api_name": "edk2toolext.environment.shell_environment", "line_number": 164, "usage_type": "name"}, {"api_name": "edk2toollib.windows.locate_tools.QueryVcVariables", "line_number": 166, "usage_type": "call"}, {"api_name": "edk2toollib.windows.locate_tools", "line_number": 166, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "edk2toolext.environment.shell_environment.GetEnvironment", "line_number": 172, "usage_type": "call"}, {"api_name": "edk2toolext.environment.shell_environment", "line_number": 172, "usage_type": "name"}, {"api_name": "edk2toolext.environment.shell_environment.GetEnvironment", "line_number": 182, "usage_type": "call"}, {"api_name": "edk2toolext.environment.shell_environment", "line_number": 182, "usage_type": "name"}, {"api_name": "edk2toollib.windows.locate_tools.FindWithVsWhere", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 192, "usage_type": "call"}, {"api_name": "os.path", "line_number": 192, "usage_type": "attribute"}, {"api_name": "edk2toolext.environment.shell_environment.GetEnvironment", "line_number": 201, "usage_type": "call"}, {"api_name": "edk2toolext.environment.shell_environment", "line_number": 201, "usage_type": "name"}, 
{"api_name": "os.path.join", "line_number": 208, "usage_type": "call"}, {"api_name": "os.path", "line_number": 208, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 209, "usage_type": "call"}, {"api_name": "os.path", "line_number": 209, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 213, "usage_type": "call"}]} +{"seq_id": "41129978990", "text": "#!/usr/bin/env python\n\n# - \"curses\" menu based on https://stackoverflow.com/a/14205494\n\nimport curses,sys,time\nfrom bluetool import Bluetooth\nfrom curses import panel\n\nclass Menu(object):\n def __init__(self, items, stdscreen):\n self.window = stdscreen.subwin(0, 0)\n self.window.keypad(1)\n self.panel = panel.new_panel(self.window)\n self.panel.hide()\n panel.update_panels()\n\n self.position = 0\n self.items = items\n self.items.append((\"Back / Exit\", \"exit\"))\n\n def navigate(self, n):\n self.position += n\n if self.position < 0:\n self.position = 0\n elif self.position >= len(self.items):\n self.position = len(self.items) - 1\n\n def display(self):\n self.panel.top()\n self.panel.show()\n self.window.clear()\n\n while True:\n self.window.refresh()\n curses.doupdate()\n for index, item in enumerate(self.items):\n if index == self.position:\n mode = curses.A_REVERSE\n else:\n mode = curses.A_NORMAL\n\n msg = \"%d. %s\" % (index, item[0])\n self.window.addstr(1 + index, 1, msg, mode)\n\n key = self.window.getch()\n\n if key in [curses.KEY_ENTER, ord(\"\\n\")]:\n if self.position == len(self.items) - 1:\n break\n else:\n self.items[self.position][1]()\n\n elif key == curses.KEY_UP:\n self.navigate(-1)\n\n elif key == curses.KEY_DOWN:\n self.navigate(1)\n\n self.window.clear()\n self.panel.hide()\n panel.update_panels()\n curses.doupdate()\n\nclass MyApp(object):\n def __init__(self, stdscreen):\n self.scan_timeout = 90\n self.bt = Bluetooth()\n self.bt.start_scanning(self.scan_timeout)\n\n self.screen = stdscreen\n curses.curs_set(0)\n mainMenu = [\n ('Rescan devices\\t\\t(scans for {} seconds in background, system bus will be processed every 10 seconds)'.format(self.scan_timeout), self.rescan_devices),\n ('Trust controller\\t\\t(shows only untrusted pairable controllers)', self.trust_controller_menu),\n ('Pair controller\\t\\t(shows only unpaired pairable controllers)', self.pair_controller_menu),\n ('Connect controller\\t\\t(shows only paired and trusted connectable controllers)', self.connect_device_menu),\n ('Disconnect controller\\t(shows only connected controllers)', self.disconnect_device_menu),\n ('Remove controller\\t\\t(shows only trusted, paired OR connected controllers)', self.remove_device_menu),\n ]\n self.make_menu(mainMenu)\n self.menu.display()\n\n def make_menu(self, menulist):\n self.menu = Menu(menulist, self.screen)\n\n def trust_controller_menu(self):\n properties = [\n 'Icon',\n 'RSSI',\n 'Trusted',\n ]\n menu = []\n for device in self.bt.get_available_devices():\n mac_address = device['mac_address']\n for property in properties:\n device[property] = self.bt.get_device_property(mac_address,property)\n if ((device['Icon'] == 'input-gaming') and (device['Trusted'] == 0)):\n menu.append(('{}\\t{}\\tRSSI: {}'.format(device['mac_address'],device['name'],device['RSSI']),self.trust_controller))\n self.make_menu(menu)\n self.menu.display()\n\n def trust_controller(self):\n mac = self.get_selected_device()[0]\n self.bt.trust(mac)\n if self.bt.get_device_property(mac,'Trusted') == 1:\n self.menu.items[self.menu.position] = ('MAC {} ({}) 
trusted!\\n'.format(mac,self.get_selected_device()[1]),self.navigate_to_back)\n else:\n self.menu.items[self.menu.position] = ('Error trusting MAC {} ({})!\\n'.format(mac,self.get_selected_device()[1]),self.navigate_to_back)\n\n def pair_controller_menu(self):\n properties = [\n 'Icon',\n 'Paired',\n 'RSSI',\n 'Trusted',\n ]\n menu = []\n for device in self.bt.get_devices_to_pair():\n mac_address = device['mac_address']\n for property in properties:\n device[property] = self.bt.get_device_property(mac_address,property)\n if ((device['Icon'] == 'input-gaming') and (device['Trusted'] == 1) and device['Paired'] == 0):\n menu.append(('{}\\t{}\\tRSSI: {}'.format(device['mac_address'],device['name'],device['RSSI']),self.pair_controller))\n self.make_menu(menu)\n self.menu.display()\n\n def pair_controller(self):\n mac = self.get_selected_device()[0]\n self.bt.pair(mac)\n if self.bt.get_device_property(mac,'Paired') == 1:\n self.menu.items[self.menu.position] = ('MAC {} ({}) paired!\\n'.format(mac,self.get_selected_device()[1]),self.navigate_to_back)\n else:\n self.menu.items[self.menu.position] = ('Error pairing MAC {} ({})!\\n'.format(mac,self.get_selected_device()[1]),self.navigate_to_back) \n\n def connect_device_menu(self):\n properties = [\n 'Icon',\n 'RSSI',\n 'Connected',\n 'Paired',\n 'Trusted',\n ]\n menu = []\n for device in self.bt.get_available_devices():\n mac_address = device['mac_address']\n for property in properties:\n device[property] = self.bt.get_device_property(mac_address,property)\n if ((device['Icon'] == 'input-gaming') and (device['Paired'] == 1) and (device['Trusted'] == 1) and (device['Connected'] == 0)):\n menu.append(('{}\\t{}\\tRSSI: {}'.format(device['mac_address'],device['name'],device['RSSI']),self.connect_device))\n self.make_menu(menu)\n self.menu.display()\n\n def connect_device(self):\n mac = self.get_selected_device()[0]\n self.bt.connect(mac)\n if self.bt.get_device_property(mac,'Connected') == 1:\n self.menu.items[self.menu.position] = ('MAC {} ({}) connected!\\n'.format(mac,self.get_selected_device()[1]),self.navigate_to_back)\n else:\n self.menu.items[self.menu.position] = ('Error connecting MAC {} ({})!\\n'.format(mac,self.get_selected_device()[1]),self.navigate_to_back) \n\n\n def disconnect_device_menu(self):\n properties = [\n 'Icon',\n 'Connected',\n 'RSSI',\n ]\n menu = []\n for device in self.bt.get_connected_devices():\n mac_address = device['mac_address']\n for property in properties:\n device[property] = self.bt.get_device_property(mac_address,property)\n if ((device['Icon'] == 'input-gaming') and (device['Connected'] == 1)):\n menu.append(('{}\\t{}\\tRSSI: {}'.format(device['mac_address'],device['name'],device['RSSI']),self.disconnect_device))\n self.make_menu(menu)\n self.menu.display()\n\n def disconnect_device(self):\n mac = self.get_selected_device()[0]\n self.bt.disconnect(mac)\n if self.bt.get_device_property(mac,'Connected') == 0:\n self.menu.items[self.menu.position] = ('MAC {} ({}) disconnected!\\n'.format(mac,self.get_selected_device()[1]),self.navigate_to_back)\n else:\n self.menu.items[self.menu.position] = ('Error disconnecting MAC {} ({})!\\n'.format(mac,self.get_selected_device()[1]),self.navigate_to_back) \n\n def remove_device_menu(self):\n properties = [\n 'Icon',\n 'Paired',\n 'Trusted',\n 'RSSI',\n 'Blocked',\n 'Connected',\n ]\n menu = []\n for device in self.bt.get_available_devices():\n mac_address = device['mac_address']\n for property in properties:\n device[property] = 
self.bt.get_device_property(mac_address,property)\n if ((device['Icon'] == 'input-gaming') and ((device['Paired'] == 1) or (device['Trusted'] == 1) or (device['Blocked'] == 1))):\n menu.append(('{}\\t{}\\tRSSI: {}\\tTrusted: {}\\tPaired: {}\\tConnected: {}\\tBlocked: {}'.format(device['mac_address'],device['name'],device['RSSI'],device['Trusted'],device['Paired'],device['Connected'],device['Blocked']),self.remove_device))\n self.make_menu(menu)\n self.menu.display()\n\n def remove_device(self):\n mac = self.get_selected_device()[0]\n self.bt.remove(mac)\n self.menu.items[self.menu.position] = ('MAC {} ({}) removed!\\n'.format(mac,self.get_selected_device()[1]),self.navigate_to_back)\n\n def rescan_devices(self):\n self.menu.window.addstr(9, 1, 'Scanning for device for {} seconds in background now, please refresh views...'.format(self.scan_timeout), curses.A_NORMAL)\n self.bt.start_scanning(self.scan_timeout)\n\n def get_selected_device(self):\n return(self.menu.items[self.menu.position][0].split('\\t'))\n\n def navigate_to_back(self):\n self.menu.navigate(len(self.menu.items) -1)\n\nif __name__ == \"__main__\":\n if (len(sys.argv) == 1):\n bt = Bluetooth()\n print('Scanning for available devices for 15 seconds, please wait...')\n bt.start_scanning(15)\n time.sleep(15)\n print('Getting pairable devices, please wait...')\n devices = bt.get_devices_to_pair()\n print(devices)\n for device in devices:\n mac = device['mac_address']\n name = device['name']\n print('Found MAC: {}\\tName: {}'.format(mac,name))\n if bt.get_device_property(mac,'Icon') == 'input-gaming':\n print('Found controller {} Name: {}, trusting...'.format(mac,name))\n bt.trust(mac)\n if bt.get_device_property(mac,'Trusted') == 1:\n print('Trusted {}, quick pause, then pairing...'.format(name))\n time.sleep(5)\n bt.pair(mac)\n if bt.get_device_property(mac,'Paired') == 1:\n print('Paired {}, quick pause, then connecting...'.format(name))\n time.sleep(5)\n bt.connect(mac)\n if bt.get_device_property(mac,'Connected') == 1:\n print('Connected {}, exiting...'.format(name))\n else:\n curses.wrapper(MyApp)\n", "repo_name": "AmberELEC/AmberELEC", "sub_path": "packages/amberelec/config/distribution/scriptmodules/supplementary/bluetoothcontroller.py", "file_name": "bluetoothcontroller.py", "file_ext": "py", "file_size_in_byte": 10426, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 971, "dataset": "github-code", "pt": "12", "api": [{"api_name": "curses.panel.new_panel", "line_number": 13, "usage_type": "call"}, {"api_name": "curses.panel", "line_number": 13, "usage_type": "name"}, {"api_name": "curses.panel.update_panels", "line_number": 15, "usage_type": "call"}, {"api_name": "curses.panel", "line_number": 15, "usage_type": "name"}, {"api_name": "curses.doupdate", "line_number": 35, "usage_type": "call"}, {"api_name": "curses.A_REVERSE", "line_number": 38, "usage_type": "attribute"}, {"api_name": "curses.A_NORMAL", "line_number": 40, "usage_type": "attribute"}, {"api_name": "curses.KEY_ENTER", "line_number": 47, "usage_type": "attribute"}, {"api_name": "curses.KEY_UP", "line_number": 53, "usage_type": "attribute"}, {"api_name": "curses.KEY_DOWN", "line_number": 56, "usage_type": "attribute"}, {"api_name": "curses.panel.update_panels", "line_number": 61, "usage_type": "call"}, {"api_name": "curses.panel", "line_number": 61, "usage_type": "name"}, {"api_name": "curses.doupdate", "line_number": 62, "usage_type": "call"}, {"api_name": "bluetool.Bluetooth", "line_number": 67, "usage_type": "call"}, {"api_name": 
"curses.curs_set", "line_number": 71, "usage_type": "call"}, {"api_name": "curses.A_NORMAL", "line_number": 211, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 221, "usage_type": "attribute"}, {"api_name": "bluetool.Bluetooth", "line_number": 222, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 225, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 238, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 242, "usage_type": "call"}, {"api_name": "curses.wrapper", "line_number": 247, "usage_type": "call"}]} +{"seq_id": "70944001302", "text": "from __future__ import annotations\r\nfrom typing import List, Dict, Any, Union\r\nfrom collections import deque\r\n\r\nJSONDict = Dict[str, Any]\r\n\r\nclass Node:\r\n \"\"\"\r\n Representation of one step into a JSON Tree\r\n \"\"\"\r\n\r\n def __init__(self, json_data: Any, tree: 'JsonTree', linked_list: deque = deque([]), prior_keys: List[Union[str, int]] = []) -> None:\r\n self.json_data = json_data\r\n self.tree = tree\r\n self.linked_list = linked_list\r\n self.prior_keys = prior_keys\r\n\r\n self.dtype = type(self.json_data)\r\n\r\n self.nodes = []\r\n\r\n #If the node is a leaf then it has no edges\r\n if self.is_leaf:\r\n self.json_data = {prior_keys[-1]: self.json_data}\r\n self.tree.leaf_nodes.append(self)\r\n\r\n else:\r\n self.get_edges()\r\n\r\n @property\r\n def is_leaf(self):\r\n \"\"\"\r\n If the dtype of self.json_data is not a dict or a list then it must be\r\n a leaf node\r\n \"\"\"\r\n return self.dtype is not list and self.dtype is not dict\r\n\r\n def get_edges(self):\r\n \"\"\"\r\n Get all edges connected to current Node\r\n \"\"\"\r\n iter_arr = zip(range(len(self.json_data)),\r\n self.json_data) if self.dtype is list else self.json_data.items()\r\n\r\n for key, value in iter_arr:\r\n next_linked_list = self.linked_list + deque([self])\r\n next_key = self.prior_keys + [key]\r\n node = Node(value, self.tree, next_linked_list, next_key)\r\n self.nodes.append(node)\r\n\r\n def __repr__(self):\r\n return str(self.json_data)\r\n", "repo_name": "chris-greening/json-tree-flattener", "sub_path": "python3/node.py", "file_name": "node.py", "file_ext": "py", "file_size_in_byte": 1614, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "typing.Dict", "line_number": 5, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 5, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 12, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 12, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "5743793128", "text": "import tkinter as tk\r\nfrom tkinter import messagebox\r\nfrom predict import predictor\r\nfrom tkcalendar import Calendar, DateEntry\r\nfrom datetime import datetime\r\nfrom plot_load import plot_load\r\nimport time\r\nimport os\r\nimport sys\r\nimport subprocess\r\nimport pickle\r\n\r\ncurrent_dir = os.path.dirname(__file__)\r\n\r\nclass GUI(tk.Tk):\r\n PRIMARY_COLOR = \"#fff\"\r\n SECONDARY_COLOR = \"#BDBDBD\"\r\n BUTTON_COLOR = \"#FF5733\"\r\n NOW = datetime.now()\r\n DAY= NOW.day\r\n MONTH = NOW.month\r\n YEAR = NOW.year\r\n # PREDICTOR = predictor()\r\n def __init__(self):\r\n self.ignore_warning = False\r\n self.model = 
os.path.join(current_dir,\"model/m.h5\")\r\n self.graph_process = None\r\n self.predictor_process = None \r\n tk.Tk.__init__(self)\r\n self.resizable(False,False)\r\n self.title(\"Load Predictor\")\r\n num_biomass = 3\r\n num_biogas = 2\r\n num_solar = 2\r\n biomass_pv = 12.35\r\n biogas_pv = 14\r\n # Try to load previous configuration\r\n try:\r\n config_path = os.path.join(current_dir,\"config.pickle\")\r\n with open(config_path,\"rb\") as r:\r\n config = pickle.load(r)\r\n num_biomass = config[\"num_biomass\"]\r\n num_biogas = config[\"num_biogas\"]\r\n num_solar = config[\"num_solar\"]\r\n biomass_pv = config[\"biomass_pv\"]\r\n biogas_pv = config[\"biogas_pv\"]\r\n \r\n except Exception as err:\r\n print(err)\r\n \r\n \r\n self.options = {\r\n \"num_biomass\":tk.IntVar(self,value=num_biomass),\r\n \"num_biogas\":tk.IntVar(self,value=num_biogas),\r\n \"num_solar\":tk.IntVar(self,value=num_solar),\r\n \"biomass_pv\":tk.DoubleVar(self,value=biomass_pv),\r\n \"biogas_pv\":tk.DoubleVar(self,value=biogas_pv),\r\n \"use_gpu\":tk.IntVar(self,value=0),\r\n \"scatter\":tk.IntVar(self,value=0),\r\n \"fill\":tk.IntVar(self,value=1)\r\n }\r\n self.mainframe = tk.Frame(self,bg=GUI.PRIMARY_COLOR)\r\n self.mainframe.config(width=300,height=250)\r\n self.mainframe.pack()\r\n self.add_option() # plot option frame\r\n self.add_calendar() # add calendar \r\n self.add_generator_option() # add generator option frame\r\n self.add_control_button() # add start prediction button\r\n def on_close():\r\n try:\r\n subprocess.Popen.kill(self.graph_process)\r\n except:\r\n pass\r\n try:\r\n self.stop_prediction()\r\n except:\r\n pass\r\n self.destroy() \r\n self.protocol(\"WM_DELETE_WINDOW\", on_close)\r\n self.mainloop()\r\n \r\n def power_error_callback(self,title,message): # handle power failure event\r\n if not self.ignore_warning:\r\n \"\"\"\r\n Yes -> True\r\n No -> False\r\n Cancel -> None\r\n \"\"\" \r\n value = messagebox.askokcancel(title,message)\r\n if value == True:\r\n self.pred.should_terminate = True # force to terminate \r\n try:\r\n subprocess.Popen.kill(self.graph_process)\r\n except:\r\n pass\r\n \r\n def add_option(self):\r\n self.option_frame = tk.Frame(self.mainframe,bg=GUI.PRIMARY_COLOR)\r\n self.option_frame.grid(row=2,column =0,sticky=\"nwe\")\r\n \r\n \r\n tk.Label(self.option_frame,text=\" TF Option\",bg=GUI.PRIMARY_COLOR).pack(anchor=\"nw\")\r\n tk.Checkbutton(self.option_frame, \r\n text=\"Use GPU\",\r\n variable=self.options[\"use_gpu\"],\r\n bg=GUI.PRIMARY_COLOR).pack(anchor=\"nw\")\r\n \r\n tk.Label(self.option_frame,text=\" Plot Option\",bg=GUI.PRIMARY_COLOR).pack(anchor=\"nw\")\r\n tk.Checkbutton(self.option_frame, \r\n text=\"Scatter Plot\",\r\n variable=self.options[\"scatter\"],\r\n # command=lambda : toggle(\"scatter\") ,\r\n bg=GUI.PRIMARY_COLOR).pack(anchor=\"nw\")\r\n\r\n tk.Checkbutton(self.option_frame, \r\n text=\"Fill Plot\",\r\n variable=self.options[\"fill\"],\r\n # command=lambda : toggle(\"scatter\") ,\r\n bg=GUI.PRIMARY_COLOR).pack(anchor=\"nw\")\r\n tk.Label(self.option_frame,text=\" Predict Interval (ms)\",bg=GUI.PRIMARY_COLOR).pack(anchor=\"w\")\r\n self.interval_scaler = tk.Scale(self.option_frame, from_=0, to=2500, orient=tk.HORIZONTAL,bg=GUI.PRIMARY_COLOR)\r\n self.interval_scaler.pack()\r\n self.interval_scaler.set(250) # set default value\r\n \r\n\r\n def add_calendar(self):\r\n tk.Label(self.mainframe,text=\"Select Prediction Date\",bg=GUI.PRIMARY_COLOR).grid(row=1,column=1,columnspan=2,sticky=\"W\")\r\n self.calendar_frame = 
tk.Frame(self.mainframe,bg=GUI.SECONDARY_COLOR)\r\n self.calendar_frame.grid(row=2,column=1)\r\n self.calendar = Calendar(self.calendar_frame,\r\n font=\"Arial 10\", selectmode='day',\r\n date_pattern=\"y-mm-dd\",\r\n year=GUI.YEAR, month=GUI.MONTH, day=GUI.DAY)\r\n self.calendar.pack(fill=\"both\", expand=True)\r\n \r\n def add_generator_option(self):\r\n self.gen_option_frame = tk.Frame(self.mainframe,bg=GUI.PRIMARY_COLOR)\r\n self.gen_option_frame.grid(row=3,column=1)\r\n # ADD SCALERS COLUMN 0\r\n tk.Label(self.gen_option_frame,text=\"Biomass Generator\",bg=GUI.PRIMARY_COLOR).grid(row=1,column=0)\r\n tk.Scale(self.gen_option_frame, from_=0, to=15, orient=tk.HORIZONTAL,variable=self.options[\"num_biomass\"],bg=GUI.PRIMARY_COLOR).grid(row=2,column=0)\r\n \r\n tk.Label(self.gen_option_frame,text=\"Biogas Generator\",bg=GUI.PRIMARY_COLOR).grid(row=3,column=0)\r\n tk.Scale(self.gen_option_frame, from_=0, to=15, orient=tk.HORIZONTAL,variable=self.options[\"num_biogas\"],bg=GUI.PRIMARY_COLOR).grid(row=4,column=0)\r\n \r\n tk.Label(self.gen_option_frame,text=\"Solar Cell Generator\",bg=GUI.PRIMARY_COLOR).grid(row=5,column=0)\r\n tk.Scale(self.gen_option_frame, from_=0, to=15, orient=tk.HORIZONTAL,variable=self.options[\"num_solar\"],bg=GUI.PRIMARY_COLOR).grid(row=6,column=0)\r\n \r\n \r\n # ADD SCALERS COLUMN 1\r\n tk.Label(self.gen_option_frame,text=\"Biomass Gen. Power (kWH)\",bg=GUI.PRIMARY_COLOR).grid(row=1,column=1)\r\n tk.Scale(self.gen_option_frame, from_=0, to=100,digits=4,resolution = 0.01, orient=tk.HORIZONTAL,variable=self.options[\"biomass_pv\"],bg=GUI.PRIMARY_COLOR).grid(row=2,column=1)\r\n \r\n tk.Label(self.gen_option_frame,text=\"Biogas Gen. Power (kWH)\",bg=GUI.PRIMARY_COLOR).grid(row=3,column=1)\r\n tk.Scale(self.gen_option_frame, from_=0, to=100,digits=4,resolution = 0.01, orient=tk.HORIZONTAL,variable=self.options[\"biogas_pv\"],bg=GUI.PRIMARY_COLOR).grid(row=4,column=1)\r\n \r\n \r\n def stop_prediction(self):\r\n try:\r\n self.pred.should_terminate = True\r\n except:\r\n pass\r\n try:\r\n subprocess.Popen.kill(self.graph_process)\r\n except:\r\n pass\r\n \r\n \r\n def save_config(self):\r\n config = {\r\n \"num_biomass\": self.options[\"num_biomass\"].get(),\r\n \"num_biogas\":self.options[\"num_biogas\"].get(),\r\n \"num_solar\":self.options[\"num_solar\"].get(),\r\n \"biomass_pv\":self.options[\"biomass_pv\"].get(),\r\n \"biogas_pv\":self.options[\"biogas_pv\"].get()\r\n }\r\n with open('config.pickle', 'wb') as f:\r\n pickle.dump(config, f)\r\n messagebox.showinfo(\"Save\",\"Save Configuration Succeeded!\")\r\n \r\n\r\n def add_control_button(self):\r\n self.control_button_frame = tk.Frame(self.mainframe,bg=GUI.PRIMARY_COLOR)\r\n self.control_button_frame.grid(row=2,column=2,sticky=\"NS\")\r\n \r\n self.start_button = tk.Button(self.control_button_frame,text=\"Start Predicting\")\r\n self.start_button.config(command=self.predict)\r\n self.start_button.grid(row=0,column=0,sticky='nesw')\r\n \r\n self.stop_button = tk.Button(self.control_button_frame,text=\"Stop Predicting\")\r\n self.stop_button.config(command=self.stop_prediction)\r\n self.stop_button.grid(row=1,column=0,sticky='nesw')\r\n \r\n self.save_config_button = tk.Button(self.control_button_frame,text=\"Save\\nConfiguration\")\r\n self.save_config_button.config(command=self.save_config)\r\n self.save_config_button.grid(row=2,column=0,sticky='nesw')\r\n \r\n \r\n self.control_button_frame.grid_columnconfigure(0, weight=1, uniform=\"group1\")\r\n # self.control_button_frame.grid_columnconfigure(1, weight=1, 
uniform=\"group1\")\r\n self.control_button_frame.grid_rowconfigure(0, weight=1)\r\n self.control_button_frame.grid_rowconfigure(1, weight=1)\r\n self.control_button_frame.grid_rowconfigure(2, weight=1)\r\n \r\n def predict(self):\r\n self.ignore_warning = False\r\n try:\r\n self.pred.should_terminate = True\r\n print(\"Successfully terminated predicting thread !\")\r\n except Exception as err:\r\n print(err)\r\n \r\n # KILL EXISTING PROCESS\r\n try:\r\n subprocess.Popen.kill(self.graph_process)\r\n except Exception as err:\r\n print(err)\r\n \r\n date = (self.calendar.get_date())\r\n date = datetime.strptime(date,\"%Y-%m-%d\")\r\n import threading as th\r\n import multiprocessing as mp\r\n daydelta = 0\r\n fill_plot=bool(self.options[\"fill\"].get()) # fill plot\r\n scatter_plot = bool(self.options[\"scatter\"].get()) # use scatter plot\r\n print(\"plot options\",fill_plot,scatter_plot)\r\n use_gpu = bool(self.options[\"use_gpu\"].get()) # use gpu for tensorflow \r\n num_biomass = self.options[\"num_biomass\"].get()\r\n num_biogas = self.options[\"num_biogas\"].get()\r\n num_solar = self.options[\"num_solar\"].get()\r\n biomass_pv = self.options[\"biomass_pv\"].get()\r\n biogas_pv = self.options[\"biogas_pv\"].get()\r\n period = 15\r\n self.pred = predictor(dt=date,model_path=self.model,use_gpu=use_gpu,message_callback=self.power_error_callback) # creat new instance of predictor\r\n self.pred.iteration_delay = self.interval_scaler.get()/1000 # self.predictor loop delay\r\n self.pred.BIOMASS_PV = biomass_pv\r\n self.pred.BIOGAS_PV = biogas_pv\r\n self.pred.num_biomass = num_biomass\r\n self.pred.num_biogas = num_biogas\r\n self.pred.num_solar = num_solar\r\n pred_thread = th.Thread(target=self.pred.run)\r\n pred_thread.start()\r\n self.graph_process = subprocess.Popen([\r\n \"python\",\r\n \"plot_load.py\",\r\n \"--date\",str(date.date()),\r\n \"--scatter-plot\", str(scatter_plot),\r\n \"--fill-plot\", str(fill_plot)\r\n ])\r\n \r\nif __name__ == \"__main__\":\r\n GUI()", "repo_name": "phakawatTER/ee-load-prediction", "sub_path": "gui.py", "file_name": "gui.py", "file_ext": "py", "file_size_in_byte": 11065, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tkinter.Tk", "line_number": 15, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tkinter.Tk.__init__", "line_number": 29, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 41, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 53, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 54, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 55, "usage_type": "call"}, {"api_name": "tkinter.DoubleVar", "line_number": 56, "usage_type": "call"}, {"api_name": "tkinter.DoubleVar", "line_number": 57, "usage_type": "call"}, {"api_name": "tkinter.IntVar", 
"line_number": 58, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 59, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 60, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 62, "usage_type": "call"}, {"api_name": "subprocess.Popen.kill", "line_number": 71, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 71, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.askokcancel", "line_number": 89, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 89, "usage_type": "name"}, {"api_name": "subprocess.Popen.kill", "line_number": 93, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 93, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 98, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 102, "usage_type": "call"}, {"api_name": "tkinter.Checkbutton", "line_number": 103, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 108, "usage_type": "call"}, {"api_name": "tkinter.Checkbutton", "line_number": 109, "usage_type": "call"}, {"api_name": "tkinter.Checkbutton", "line_number": 115, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 120, "usage_type": "call"}, {"api_name": "tkinter.Scale", "line_number": 121, "usage_type": "call"}, {"api_name": "tkinter.HORIZONTAL", "line_number": 121, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 127, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 128, "usage_type": "call"}, {"api_name": "tkcalendar.Calendar", "line_number": 130, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 137, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 140, "usage_type": "call"}, {"api_name": "tkinter.Scale", "line_number": 141, "usage_type": "call"}, {"api_name": "tkinter.HORIZONTAL", "line_number": 141, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 143, "usage_type": "call"}, {"api_name": "tkinter.Scale", "line_number": 144, "usage_type": "call"}, {"api_name": "tkinter.HORIZONTAL", "line_number": 144, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 146, "usage_type": "call"}, {"api_name": "tkinter.Scale", "line_number": 147, "usage_type": "call"}, {"api_name": "tkinter.HORIZONTAL", "line_number": 147, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 151, "usage_type": "call"}, {"api_name": "tkinter.Scale", "line_number": 152, "usage_type": "call"}, {"api_name": "tkinter.HORIZONTAL", "line_number": 152, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 154, "usage_type": "call"}, {"api_name": "tkinter.Scale", "line_number": 155, "usage_type": "call"}, {"api_name": "tkinter.HORIZONTAL", "line_number": 155, "usage_type": "attribute"}, {"api_name": "subprocess.Popen.kill", "line_number": 164, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 164, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 178, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 179, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 179, "usage_type": "name"}, {"api_name": "tkinter.Frame", "line_number": 183, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 186, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 190, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 194, 
"usage_type": "call"}, {"api_name": "subprocess.Popen.kill", "line_number": 215, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 215, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 220, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 220, "usage_type": "name"}, {"api_name": "predict.predictor", "line_number": 234, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 241, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 243, "usage_type": "call"}]} +{"seq_id": "13960847271", "text": "from openpyxl import Workbook\n\nfrom data.tables.author import Author\nfrom data.workbooks.works_workbook import WorkTypes\nfrom utilities.global_setup import DATA_PATH\n\nALL_AUTHORS_WOS_FILE_NAME = DATA_PATH + r\"\\people\\authors_all_wos.xlsx\"\nALL_AUTHORS_SCOPUS_FILE_NAME = DATA_PATH + r\"\\people\\authors_all_scopus.xlsx\"\nALL_AUTHORS_SHEET = \"Svi\"\n\nALL_AUTHORS_FILE_NAMES = [ALL_AUTHORS_WOS_FILE_NAME, ALL_AUTHORS_SCOPUS_FILE_NAME]\n\n\nclass AuthorsAllWorkBook:\n def __init__(self, work_book_type: WorkTypes):\n self.work_book = Workbook()\n self.work_book.remove(self.work_book.active)\n self.sheet = self.work_book.create_sheet(ALL_AUTHORS_SHEET)\n self.file_name = ALL_AUTHORS_FILE_NAMES[work_book_type]\n Author.write_headers_to_sheet(self.sheet)\n self.row = 2\n\n def save_author(self, author: Author):\n author.write_to_sheet(self.sheet, self.row)\n self.row += 1\n\n def save(self):\n self.work_book.save(self.file_name)\n", "repo_name": "popina1994/university-of-belgrade-faculty-comparison", "sub_path": "data/workbooks/authors_all_workbook.py", "file_name": "authors_all_workbook.py", "file_ext": "py", "file_size_in_byte": 981, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "utilities.global_setup.DATA_PATH", "line_number": 7, "usage_type": "name"}, {"api_name": "utilities.global_setup.DATA_PATH", "line_number": 8, "usage_type": "name"}, {"api_name": "data.workbooks.works_workbook.WorkTypes", "line_number": 15, "usage_type": "name"}, {"api_name": "openpyxl.Workbook", "line_number": 16, "usage_type": "call"}, {"api_name": "data.tables.author.Author.write_headers_to_sheet", "line_number": 20, "usage_type": "call"}, {"api_name": "data.tables.author.Author", "line_number": 20, "usage_type": "name"}, {"api_name": "data.tables.author.Author", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "22377297658", "text": "import gmsh\nimport sys\nimport os\nimport numpy as np\nimport math\n\nclass Point(object):\n def __init__(self, tag, x, y, z):\n self.tag = tag\n self.X = x\n self.Y = y\n self.Z = z\n\n def get_tag(self):\n return self.tag\n \n def get_coords(self):\n return [self.X, self.Y, self.Z]\n\n def getX(self):\n return self.X\n\n def getY(self):\n return self.Y\n\n def getZ(self):\n return self.Z\n\n def distance(self, other):\n dx = self.X - other.X\n dy = self.Y - other.Y\n dz = self.Z - other.Z\n return math.sqrt(dx**2 + dy**2 + dz**2)\n\n def move(self, dx, dy, dz):\n self.X = self.X + dx\n self.Y = self.Y + dy\n self.Z = self.Z + dz\n\n def __str__(self):\n str1 = \"Point \" + str(self.tag) + \" has coords: \" + str(self.X) + \", \" + str(self.Y) + \", \" + str(self.Z) + \"\\n\"\n return str1\n\nclass Triangle:\n def __init__(self, tag, nodes_list):\n self.tag = tag\n self.nodes_list = nodes_list\n\n self.vertex_1 = points[int(nodes_list[0]) - 1]\n self.vertex_2 = 
points[int(nodes_list[1]) - 1]\n        self.vertex_3 = points[int(nodes_list[2]) - 1]\n\n        self.nodes_coords = Point.get_coords(self.vertex_1) + Point.get_coords(self.vertex_2) + Point.get_coords(self.vertex_3)\n        self.center = (list(map(lambda x, y, z: (x + y + z)/3, Point.get_coords(self.vertex_1) , Point.get_coords(self.vertex_2), Point.get_coords(self.vertex_3))))\n\n    def add_neighbours(self):\n        neighb = []\n        for t in triangles:\n            nodes = Triangle.get_nodes(t)\n            k = 0\n            for i in nodes:\n                if i in self.nodes_list:\n                    k = k+1\n            \n            if (k == 2):\n                neighb.append(Triangle.get_tag(t))\n\n        self.neighbours = tuple(neighb)\n\n    def get_nodes(self):\n        return self.nodes_list\n\n    def get_tag(self):\n        return self.tag\n\n    def get_neighbours(self):\n        return self.neighbours\n\n    def __str__(self):\n        nodeCoords_1 =Point.get_coords(self.vertex_1)\n        nodeCoords_2 = Point.get_coords(self.vertex_2)\n        nodeCoords_3 = Point.get_coords(self.vertex_3)\n        str1 = \"Triangle \" + str(self.tag) + \" has nodes: \" + str(self.nodes_list[0]) + \", \" + str(self.nodes_list[1]) + \", \" + str(self.nodes_list[2]) + \"\\n\"\n        str2 = \"Nodes coords are:\" + \"\\n\" + str(nodeCoords_1) + \"\\n\" + str(nodeCoords_2) +\"\\n\" + str(nodeCoords_3) + \"\\n\"\n        str3 = \"Center coords are: \" + str(self.center) + \"\\n\"\n        str4 = \"Neighbours of this triangle are: \" + str(self.neighbours) + \"\\n\"\n        return str1 + str2 + str3 + str4 + \"\\n\"\n\ngmsh.initialize()\n\npath = os.path.dirname(os.path.abspath(__file__))\ngmsh.initialize()\n\ngmsh.model.add(\"Square\")\n\n# Build a square surface:\nlc = 0.7\np1 = gmsh.model.geo.addPoint(0, 0, 0, lc)\np2 = gmsh.model.geo.addPoint(1, 0, 0, lc)\np3 = gmsh.model.geo.addPoint(1, 1, 0, lc)\np4 = gmsh.model.geo.addPoint(0, 1, 0, lc)\n\nl1 = gmsh.model.geo.addLine(p1, p2)\nl2 = gmsh.model.geo.addLine(p2, p3)\nl3 = gmsh.model.geo.addLine(p3, p4)\nl4 = gmsh.model.geo.addLine(p4, p1)\n\ncl1 = gmsh.model.geo.addCurveLoop([l1, l2, l3, l4])\n\npl1 = gmsh.model.geo.addPlaneSurface([cl1])\n\ngmsh.model.geo.synchronize()\n\n# Generate mesh:\ngmsh.model.mesh.generate(2)\ngmsh.option.setNumber(\"Mesh.Format\", 1)\ngmsh.option.setNumber(\"Mesh.NodeLabels\", 1)\n\n# Save mesh:\n# gmsh.write(os.path.join(path, os.curdir, \"Simple_Square.msh\"))\n# gmsh.write(os.path.join(path, os.curdir, \"Simple_Square.geo_unrolled\"))\n\n# Access mesh data:\nelementTags, elementNodeTags = gmsh.model.mesh.getElementsByType(2)\nelemNodeTags = np.array(elementNodeTags) \nNodeTags = np.unique(elemNodeTags) # list of all mesh nodes\n\nN_tetr = len(elementTags)\nN_nodes = len(NodeTags)\n\n# Print data about every triangle:\nprint(\"Model has\", N_tetr, \"triangles\")\nprint(\"Model has\", N_nodes, \"points\")\nprint(\"Number of the first triangle:\", elementTags[0])\n\npoints = []\nfor i in range(1, N_nodes+1):\n    nodeCoords = gmsh.model.mesh.getNode(i)[0]\n    p = Point(NodeTags[i-1], nodeCoords[0], nodeCoords[1], nodeCoords[2])\n    points.append(p)\n\ntriangles = []\nfor i in range(N_tetr):\n    p_tetr = int(i)\n    p_nodes = 3*p_tetr\n    \n    tag = elementTags[p_tetr] \n    nodes_list = (elementNodeTags[p_nodes], elementNodeTags[p_nodes+1], elementNodeTags[p_nodes+2]) # a tuple, since it must be immutable\n\n    t = Triangle(tag, nodes_list)\n    triangles.append(t)\n\n\nwith open(os.path.join(path, os.curdir, \"out_2D.txt\"), \"w\") as file:\n    for t in triangles:\n        Triangle.add_neighbours(t)\n        file.write(Triangle.__str__(t))\n\nif \"-nopopup\" not in sys.argv:\n    gmsh.fltk.initialize()\n    while gmsh.fltk.isAvailable():\n        gmsh.fltk.wait()\n\n# We can use this to 
clear all the model data:\ngmsh.clear()\n\ngmsh.finalize()\n", "repo_name": "alex-pann/IT_4sem", "sub_path": "project/test/Square.py", "file_name": "Square.py", "file_ext": "py", "file_size_in_byte": 4943, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "math.sqrt", "line_number": 33, "usage_type": "call"}, {"api_name": "gmsh.initialize", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 91, "usage_type": "call"}, {"api_name": "gmsh.initialize", "line_number": 92, "usage_type": "call"}, {"api_name": "gmsh.model.add", "line_number": 94, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 94, "usage_type": "attribute"}, {"api_name": "gmsh.model.geo.addPoint", "line_number": 98, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 98, "usage_type": "attribute"}, {"api_name": "gmsh.model.geo.addPoint", "line_number": 99, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 99, "usage_type": "attribute"}, {"api_name": "gmsh.model.geo.addPoint", "line_number": 100, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 100, "usage_type": "attribute"}, {"api_name": "gmsh.model.geo.addPoint", "line_number": 101, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 101, "usage_type": "attribute"}, {"api_name": "gmsh.model.geo.addLine", "line_number": 103, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 103, "usage_type": "attribute"}, {"api_name": "gmsh.model.geo.addLine", "line_number": 104, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 104, "usage_type": "attribute"}, {"api_name": "gmsh.model.geo.addLine", "line_number": 105, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 105, "usage_type": "attribute"}, {"api_name": "gmsh.model.geo.addLine", "line_number": 106, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 106, "usage_type": "attribute"}, {"api_name": "gmsh.model.geo.addCurveLoop", "line_number": 108, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 108, "usage_type": "attribute"}, {"api_name": "gmsh.model.geo.addPlaneSurface", "line_number": 110, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 110, "usage_type": "attribute"}, {"api_name": "gmsh.model.geo.synchronize", "line_number": 112, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 112, "usage_type": "attribute"}, {"api_name": "gmsh.model.mesh.generate", "line_number": 115, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 115, "usage_type": "attribute"}, {"api_name": "gmsh.option.setNumber", "line_number": 116, "usage_type": "call"}, {"api_name": "gmsh.option", "line_number": 116, "usage_type": "attribute"}, {"api_name": "gmsh.option.setNumber", "line_number": 117, "usage_type": "call"}, {"api_name": "gmsh.option", "line_number": 117, "usage_type": "attribute"}, {"api_name": "gmsh.model.mesh.getElementsByType", "line_number": 124, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 124, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 126, "usage_type": "call"}, {"api_name": "gmsh.model.mesh.getNode", "line_number": 138, "usage_type": "call"}, {"api_name": "gmsh.model", 
"line_number": 138, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}, {"api_name": "os.curdir", "line_number": 154, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 159, "usage_type": "attribute"}, {"api_name": "gmsh.fltk.initialize", "line_number": 160, "usage_type": "call"}, {"api_name": "gmsh.fltk", "line_number": 160, "usage_type": "attribute"}, {"api_name": "gmsh.fltk.isAvailable", "line_number": 161, "usage_type": "call"}, {"api_name": "gmsh.fltk", "line_number": 161, "usage_type": "attribute"}, {"api_name": "gmsh.fltk.wait", "line_number": 162, "usage_type": "call"}, {"api_name": "gmsh.fltk", "line_number": 162, "usage_type": "attribute"}, {"api_name": "gmsh.clear", "line_number": 165, "usage_type": "call"}, {"api_name": "gmsh.finalize", "line_number": 167, "usage_type": "call"}]} +{"seq_id": "2378202230", "text": "#!/bin/env python\n\n'''Demonstrate the logging module.'''\n\nimport logging\nimport logging.config\nimport logging.handlers\n\nlogging.config.fileConfig('loggingDemo.conf')\n\nlogger = logging.getLogger('demo')\n\ndef doWork():\n 'Log some messages.'\n \n logger.debug('A debug message')\n logger.info('An info message')\n logger.warning('A warning message')\n logger.error('An error message')\n logger.critical('A critical message')\n\nif __name__ == '__main__':\n \n doWork()\n \n", "repo_name": "PrincetonPy/TenThingsIUseAllTheTime", "sub_path": "loggingDemo.py", "file_name": "loggingDemo.py", "file_ext": "py", "file_size_in_byte": 488, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "logging.config.fileConfig", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.config", "line_number": 9, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "42606081524", "text": "'''\n不要自己给自己加戏好吗?递归的都没写就去写迭代版本的。没有看到有题解是用迭代实现的。\n最初写了一个迭代的。不符合题意。理解错了。后面又想不出来。\n\nclass Solution:\n def lexicalOrder(self, n: int) -> List[int]:\n if n < 1:\n return []\n res = []\n\n def dfs(cur):\n if cur > n:\n return\n res.append(cur)\n for j in range(10):# 遍历0 ~ 9\n dfs(cur * 10 + j)\n\n for i in range(1, 10):# 遍历1 ~ 9\n dfs(i)\n return res\n看了递归版本的发现你居然不想用栈,来写非递归。哦,简直离谱。\n这题看了下,速度快的提交是用sort做的。这就是个nt题。\n'''\nfrom typing import List\n\n\nclass Solution:\n def lexicalOrder(self, n: int) -> List[int]:\n ans = [1] * n\n fir, x, idx = 1, 1, 0\n if n < 10:\n return [i + 1 for i in range(n)]\n while idx < n:\n while x*10 <= n:\n ans[idx] = x\n idx += 1\n x *= 10\n\n for i in range(x, min(x+11, n+1)):\n ans[idx] = i\n idx += 1\n x += 1\n while x % 10 == 0:\n x //= 10\n return ret\n\nmt = [34, 121]\nbug = [100]\nfor i in mt+bug:\n print('input:', i)\n print(Solution().lexicalOrder(i))\n\n", "repo_name": "z472/ProblemLeecode", "sub_path": "350-399/386. 字典序排数.py", "file_name": "386. 
字典序排数.py", "file_ext": "py", "file_size_in_byte": 1413, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "12", "api": [{"api_name": "typing.List", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "7656557800", "text": "from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score, roc_auc_score\r\nfrom sklearn.model_selection import cross_val_score\r\nimport pandas as pd\r\nimport plot\r\n\r\n\r\ndef get_clf_eval(y_test, pred):\r\n confusion = confusion_matrix(y_test, pred)\r\n accuracy = accuracy_score(y_test, pred)\r\n precision = precision_score(y_test, pred)\r\n recall = recall_score(y_test, pred)\r\n f1 = f1_score(y_test, pred)\r\n roc_score = roc_auc_score(y_test, pred)\r\n\r\n print(\"오차행렬\")\r\n print(confusion)\r\n print(\r\n \"정확도: {0:.4f}\\n정밀도: {1:.4f}\\n재현율: {2:.4f}\\nF1: {3:.4f}\\nROC AUC 값 :{4:.4f}\".format(accuracy, precision, recall,\r\n f1, roc_score))\r\n\r\n\r\ndef scoring(model, x_val, y_val):\r\n pred = model.predict(x_val)\r\n pred_prob = model.predict_proba(x_val)[:, 1]\r\n\r\n get_clf_eval(y_val, pred)\r\n plot.make_important_plot(model)\r\n plot.roc_plot(y_val, pred_prob)\r\n\r\n\r\ndef train_scoring(model, x_train, x_val, y_train, y_val):\r\n score = cross_val_score(model, x_train, y_train, scoring='accuracy', cv=5)\r\n print(\"cross_val_score: {0:.4f}\".format(score.mean()))\r\n\r\n scoring(model, x_val, y_val)\r\n\r\n\r\ndef search_param(model):\r\n score_df = pd.DataFrame(model.cv_results_)\r\n print(score_df[['params', 'mean_test_score', 'rank_test_score']])\r\n print(model.best_params_)\r\n", "repo_name": "kjyju3955/titanic", "sub_path": "clf_eval.py", "file_name": "clf_eval.py", "file_ext": "py", "file_size_in_byte": 1461, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "sklearn.metrics.confusion_matrix", "line_number": 8, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 9, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 10, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 11, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 13, "usage_type": "call"}, {"api_name": "plot.make_important_plot", "line_number": 27, "usage_type": "call"}, {"api_name": "plot.roc_plot", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 32, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "18830263837", "text": "import os\nimport modules.file_utils as file_utils\nfrom ..base_service import BaseService\n\nclass GiabEvaluator(BaseService):\n def transcriptome_regions_path(self, alignment_path, parameters):\n transcriptome_regions_path = alignment_path + \"aligned_coverage_regions.bed\"\n if not os.path.exists(transcriptome_regions_path):\n bam_path = alignment_path + \"Out.bam\"\n coverage_path = alignment_path + \"Out.base_coverage\"\n min_coverage = 2\n\n # Create coverage file\n command = \"bedtools genomecov -d -ibam /{}\".format(bam_path)\n output_parameters = {\n \"log_is_output\": True,\n \"out_file_path\": coverage_path,\n \"log_file_path\": parameters[\"destination\"] + \"Coverage.log\"\n }\n self.run_docker(command, parameters, output_parameters)\n 
file_utils.validate_file_content(coverage_path)\n\n # Create BED from coverage file\n command = \"python base_coverage_to_bed.py /{} {} /{}\".format(\n coverage_path,\n str(min_coverage),\n transcriptome_regions_path\n )\n self.run_docker(command, parameters, log_file_name=\"CoverageToBed.log\")\n file_utils.validate_file_content(transcriptome_regions_path)\n\n return transcriptome_regions_path\n\n def bedtools(self, function, a_file_path, b_file_path, out_file_path, parameters, options=\"\"):\n destination = parameters[\"destination\"]\n log_file_path = destination + function.capitalize() + \".log\"\n command = \"bedtools {} \" \\\n \"-a /{} \" \\\n \"-b /{} {}\".format(function, a_file_path, b_file_path, options)\n output_parameters = {\n \"log_is_output\": True,\n \"out_file_path\": out_file_path,\n \"log_file_path\": log_file_path\n }\n self.run_docker(command, parameters, output_parameters)\n\n def run(self, parameters):\n experiment = parameters[\"experiment\"]\n reference_id = experiment.get(\"reference\")\n destination = parameters[\"destination\"]\n vcf_file_path = destination + \"Out.vcf\"\n alignment_path = experiment.get(\"pipeline\")[\"alignment\"][\"directory\"]\n confidence_regions_path = alignment_path + \"confidence_calls.bed\".format(reference_id)\n\n # Intersect confidence regions with transcriptome regions if not already done\n if not os.path.exists(confidence_regions_path):\n confidence_genome_regions_path = \"data/giab/{}/confidence_calls.bed\".format(reference_id)\n transcriptome_regions_path = self.transcriptome_regions_path(alignment_path, parameters)\n self.bedtools(\n \"intersect\",\n confidence_genome_regions_path,\n transcriptome_regions_path,\n confidence_regions_path,\n parameters\n )\n file_utils.validate_file_content(confidence_regions_path)\n\n\n # Filter data if necessary\n action_handler = parameters[\"action_handler\"]\n additional_commands = \"\"\n if hasattr(action_handler, \"chromosomes\"):\n # Escape spaces for bash\n space_escape = \"%%\"\n additional_commands = \"--location{}{}\".format(\n space_escape,\n \",\".join(action_handler.chromosomes)\n )\n\n command = \"./hap.py /data/giab/{0}/confidence_calls.vcf /{1}Out.vcf \" \\\n \"-f /{2} \" \\\n \"-o /{1}Evaluation \" \\\n \"-r /data/references/{0}.fa \" \\\n \"--location {3}\".format(\n reference_id,\n destination,\n confidence_regions_path,\n additional_commands\n )\n output_parameters = { \"log_file_path\": destination + \"Evaluation.log\" }\n self.run_docker(command, parameters, output_parameters)\n\n for file_name in os.listdir(destination):\n if file_name.startswith(\"Evaluation\"):\n file_path = destination + file_name\n if not file_utils.file_has_content(file_path):\n file_utils.delete(file_path)\n", "repo_name": "tamslo/koala", "sub_path": "services/giab/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 4176, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "base_service.BaseService", "line_number": 5, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "modules.file_utils.validate_file_content", "line_number": 21, "usage_type": "call"}, {"api_name": "modules.file_utils", "line_number": 21, "usage_type": "name"}, {"api_name": "modules.file_utils.validate_file_content", "line_number": 30, "usage_type": "call"}, {"api_name": "modules.file_utils", 
"line_number": 30, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "modules.file_utils.validate_file_content", "line_number": 66, "usage_type": "call"}, {"api_name": "modules.file_utils", "line_number": 66, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 93, "usage_type": "call"}, {"api_name": "modules.file_utils.file_has_content", "line_number": 96, "usage_type": "call"}, {"api_name": "modules.file_utils", "line_number": 96, "usage_type": "name"}, {"api_name": "modules.file_utils.delete", "line_number": 97, "usage_type": "call"}, {"api_name": "modules.file_utils", "line_number": 97, "usage_type": "name"}]} +{"seq_id": "24748091678", "text": "\"\"\"\nViews to manage tasks and task categories and submitted solutions.\n\n\"\"\"\n\nimport re\nfrom os.path import join\nfrom urllib.parse import unquote\n\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404, HttpRequest, HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.template.response import TemplateResponse\n\nfrom constance import config\n\nfrom inloop.common.sendfile import sendfile\nfrom inloop.tasks.models import Category, Task\n\n\n@login_required\ndef index(request: HttpRequest) -> HttpResponse:\n exam_category_slug = config.EXAM_CATEGORY_SLUG\n if exam_category_slug:\n return category(request, exam_category_slug)\n return TemplateResponse(\n request,\n \"tasks/index.html\",\n {\n \"categories\": Category.objects.order_by(\"display_order\", \"name\"),\n },\n )\n\n\n@login_required\ndef category(request: HttpRequest, slug: str) -> HttpResponse:\n category = get_object_or_404(Category, slug=slug)\n tasks = category.task_set.visible_by(user=request.user).completed_by_values(\n request.user, order_by=\"pubdate\"\n )\n have_deadlines = any(task.deadline for task in tasks)\n return TemplateResponse(\n request,\n \"tasks/category.html\",\n {\n \"category\": category,\n \"tasks\": tasks,\n \"have_deadlines\": have_deadlines,\n \"show_progress\": config.IMMEDIATE_FEEDBACK,\n },\n )\n\n\n@login_required\ndef serve_attachment(request: HttpRequest, slug: str, path: str) -> HttpResponse:\n \"\"\"\n Serve static files from a task subdirectory, but only for published tasks\n and for tasks the user has permission to view. 
Otherwise, return status 404.\n\n Access is granted exclusively to whitelisted subdirectories.\n \"\"\"\n if re.search(\"^(images|attachments)/\", path) is None:\n raise Http404\n\n if \"..\" in unquote(path):\n raise Http404\n\n task = get_object_or_404(Task.objects.published().visible_by(user=request.user), slug=slug)\n filesystem_path = join(task.system_name, path)\n\n return sendfile(request, filesystem_path, settings.REPOSITORY_ROOT)\n", "repo_name": "st-tu-dresden/inloop", "sub_path": "inloop/tasks/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2180, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 28, "dataset": "github-code", "pt": "12", "api": [{"api_name": "django.http.HttpRequest", "line_number": 23, "usage_type": "name"}, {"api_name": "constance.config.EXAM_CATEGORY_SLUG", "line_number": 24, "usage_type": "attribute"}, {"api_name": "constance.config", "line_number": 24, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 27, "usage_type": "call"}, {"api_name": "inloop.tasks.models.Category.objects.order_by", "line_number": 31, "usage_type": "call"}, {"api_name": "inloop.tasks.models.Category.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "inloop.tasks.models.Category", "line_number": 31, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 22, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 23, "usage_type": "name"}, {"api_name": "django.http.HttpRequest", "line_number": 37, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 38, "usage_type": "call"}, {"api_name": "inloop.tasks.models.Category", "line_number": 38, "usage_type": "argument"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 43, "usage_type": "call"}, {"api_name": "constance.config.IMMEDIATE_FEEDBACK", "line_number": 50, "usage_type": "attribute"}, {"api_name": "constance.config", "line_number": 50, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 36, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 37, "usage_type": "name"}, {"api_name": "django.http.HttpRequest", "line_number": 56, "usage_type": "name"}, {"api_name": "re.search", "line_number": 63, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 64, "usage_type": "name"}, {"api_name": "urllib.parse.unquote", "line_number": 66, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 67, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 69, "usage_type": "call"}, {"api_name": "inloop.tasks.models.Task.objects.published", "line_number": 69, "usage_type": "call"}, {"api_name": "inloop.tasks.models.Task.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "inloop.tasks.models.Task", "line_number": 69, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "inloop.common.sendfile.sendfile", "line_number": 72, "usage_type": "call"}, {"api_name": "django.conf.settings.REPOSITORY_ROOT", "line_number": 72, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 72, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 55, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 56, "usage_type": 
"name"}]} +{"seq_id": "15848772574", "text": "import datetime\nimport hashlib\n\nfrom PIL import ImageFont,ImageDraw,Image\nfrom random import randint\n\nclass VerifyCode:\n\tdef __init__(self,width=100,height=40,size=4):\n\t\t\"\"\"\n\n\t\t:param width: 验证码的宽度\n\t\t:param height: 验证码的高度\n\t\t:param size: 验证码的长度\n\t\t\"\"\"\n\t\tself.width = width if width > 0 else 100\n\t\tself.height = height if height > 0 else 40\n\t\tself.size = size if size > 0 else 4\n\t\tself.pen = None # 画笔\n\t\tself.code = \"\" # 保存验证码字符串\n\n\t# @property\n\t# def code(self):\n\t# \treturn self.__code\n\t# @code.setter\n\t# def code(self,code):\n\t# \tself.__code = code\n\n\tdef generate(self):\n\t\t# 1.生成画布 # 越靠近255的颜色越浅\n\t\tim = Image.new(\"RGB\",(self.width,self.height),self.randColor(160,255))\n\t\t# 2.生成画笔\n\t\tself.pen = ImageDraw.Draw(im)\n\t\t# 3.生成随机字符串\n\t\tself.randString()\n\t\t# 4.画字符串\n\t\tself.__drawCode()\n\t\t# 5.画干扰点\n\t\tself.__drawPoint()\n\t\t# 6.画干扰线\n\t\tself.__drawLine()\n\t\t# 7.保存图片\n\t\tim.save(\"vc.jpg\")\n\tdef __drawLine(self):\n\t\t\"\"\"\n\t\t画干扰线\n\t\t:return:\n\t\t\"\"\"\n\t\tfor i in range(6):\n\t\t\tstart = (randint(1,self.width-1),randint(1,self.height-1))\n\t\t\tend = (randint(1,self.width-1),randint(1,self.height-1))\n\t\t\tself.pen.line([start,end],fill=self.randColor(50,150),width = 1)\n\n\tdef __drawPoint(self):\n\t\t\"\"\"\n\t\t画干扰点\n\t\t:return:\n\t\t\"\"\"\n\t\tfor i in range(200):\n\t\t\tx = randint(1,self.width-1)\n\t\t\ty = randint(1,self.height-1)\n\t\t\tself.pen.point((x,y),fill= self.randColor(30,100))\n\tdef __drawCode(self):\n\t\t\"\"\"\n\t\t画字符串\n\t\t:return:\n\t\t\"\"\"\n\t\tmyFont = ImageFont.truetype(\"MSYH.TTF\",size=20,encoding=\"UTF-8\")\n\t\tfor i in range(self.size):\n\t\t\tx = 15 + i*(self.width - 20)/self.size # 为每个字符均匀分配位置\n\t\t\ty = randint(5,10) # 随机高度\n\t\t\tself.pen.text((x,y),self.code[i],fill = self.randColor(0,60),font = myFont)\n\n\tdef randString(self):\n\t\t\"\"\"\n\t\t产生随机整数字符串\n\t\t:return:\n\t\t\"\"\"\n\t\tresult = \"\"\n\t\tfor i in range(self.size):\n\t\t\tresult += str(randint(0,9))\n\t\tself.code = result\n\n\tdef randColor(self,low,high): # 随机背景颜色\n\t\treturn randint(low,high),randint(low,high),randint(low,high)\n\n# class StrCode(VerifyCode):\n# \tdef randString(self):\n# \t\ts1 =hashlib.md5(b\"2314\").hexdigest()\n# \t\tprint(s1)\n# \t\tself.code = s1[:self.size]\nif __name__ == \"__main__\":\n\tvc = VerifyCode()\n\t# vc = StrCode()\n\tvc.generate()\n\tprint(vc.code)\n", "repo_name": "zaoyuaner/Learning-materials", "sub_path": "python1812/python_1/17_测试_收发邮件_二维码/代码/04_验证码生成器.py", "file_name": "04_验证码生成器.py", "file_ext": "py", "file_size_in_byte": 2363, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "12", "api": [{"api_name": "PIL.Image.new", "line_number": 30, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 30, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 32, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 32, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 49, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 50, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 59, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 60, "usage_type": "call"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 67, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 67, "usage_type": "name"}, {"api_name": "random.randint", 
"line_number": 70, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 80, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "71969897622", "text": "import os\nimport importlib\nimport sys\nimport gettext\nimport enum\nimport time\nimport contextlib\nimport io\nimport six\nimport pydoc\nimport collections\n\nfrom freenas.utils.permissions import get_unix_permissions, string_to_int\nfrom freenas.cli import config\nfrom freenas.utils import first_or_default\nfrom freenas.dispatcher import Password\nfrom threading import Lock, Thread\n\n\noutput_lock = Lock()\nt = gettext.translation('freenas-cli', fallback=True)\n_ = t.gettext\n\n\nclass ValueType(enum.Enum):\n STRING = 1\n TEXT_FILE = 2\n NUMBER = 3\n HEXNUMBER = 4\n OCTNUMBER = 5\n BOOLEAN = 6\n SIZE = 7\n TIME = 8\n SET = 9\n DICT = 10\n PERMISSIONS = 11\n ARRAY = 12\n PASSWORD = 13\n DATE = 14\n\n\nclass Object(list):\n class Item(object):\n def __init__(self, descr, name, value, vt=ValueType.STRING, editable=None):\n self.descr = descr\n self.name = name\n self.value = value\n self.vt = vt\n self.editable = editable\n\n def __getstate__(self):\n return {\n 'descr': self.descr,\n 'name': self.name,\n 'value': list(self.value) if hasattr(self.value, '__next__') else self.value,\n 'vt': self.vt.name,\n 'editable': self.editable\n }\n\n def append(self, p_object):\n if not isinstance(p_object, self.Item):\n raise ValueError('Can only add Object.Item instances')\n\n super(Object, self).append(p_object)\n\n def __getitem__(self, item):\n i = first_or_default(lambda x: x.name == item, self)\n if i:\n return i.value\n\n raise KeyError(item)\n\n def __setitem__(self, key, value):\n if not isinstance(value, self.Item):\n raise ValueError('Can only add Object.Item instances')\n\n super(Object, self).__setitem__(key, value)\n\n def __getstate__(self):\n return {\n 'type': self.__class__.__name__,\n 'data': [i.__getstate__() for i in self]\n }\n\n def __init__(self, *args):\n for i in args:\n self.append(i)\n\n\nclass Table(object):\n class Column(object):\n def __init__(self, label, accessor, vt=ValueType.STRING, width=None, name=None):\n self.name = name\n self.label = label\n self.accessor = accessor\n self.vt = vt\n self.width = width\n\n if not self.name and isinstance(accessor, str):\n self.name = accessor\n\n if not self.name:\n self.name = label\n\n def __getstate__(self):\n return {\n 'label': self.label,\n 'vt': self.vt.name,\n 'width': self.width\n }\n\n def __init__(self, data, columns):\n self.data = data\n self.columns = columns\n\n def __len__(self):\n return len(self.data)\n\n def __iter__(self):\n for i in self.data:\n yield {c.name: resolve_cell(i, c.accessor) for c in self.columns}\n\n def __getitem__(self, item):\n return {c.name: resolve_cell(self.data[item], c.accessor) for c in self.columns}\n\n def __getstate__(self):\n return {\n 'type': self.__class__.__name__,\n 'columns': [i.__getstate__() for i in self.columns],\n 'data': [\n [resolve_cell(i, c.accessor) for c in self.columns] for i in self.data\n ]\n }\n\n def pop(self, pop_index):\n return self.data.pop(pop_index)\n\n\nclass Sequence(list):\n def __init__(self, *items):\n super(Sequence, self).__init__(items)\n\n def unwind(self, force=False):\n return self if len(self) > 1 or force else self[0]\n\n def append_flat(self, item):\n if isinstance(item, Sequence):\n self.extend(item)\n return\n\n self.append(item)\n\n def __getstate__(self):\n return {\n 'type': self.__class__.__name__,\n 'data': 
list(self)\n }\n\n\nclass ProgressBar(object):\n def __init__(self):\n self.message = None\n self.percentage = 0\n self.draw_t = Thread(target=self.draw) if sys.stdout.isatty() else Thread(target=self.draw_static)\n self.finished = False\n sys.stdout.write('\\n')\n self.draw_t.daemon = True\n self.draw_t.start()\n\n def draw(self):\n progress_width = 40\n none_fill = ''.join('#' if i < 8 else '_' for i in range(progress_width))\n\n def get_none_fill(f):\n asc = True\n while True:\n yield f\n if asc:\n f = f[-1] + f[:-1]\n if f[-1] == '#':\n asc = False\n else:\n f = f[1:] + f[0]\n if f[0] == '#':\n asc = True\n\n generator = get_none_fill(none_fill)\n while True:\n if self.percentage is None:\n none_fill = next(generator)\n fill = none_fill\n else:\n filled_width = int(self.percentage * progress_width)\n fill = '#' * filled_width + '_' * (progress_width - filled_width)\n\n sys.stdout.write('\\033[2K\\033[A\\033[2K\\r')\n sys.stdout.write('Status: {}\\n'.format(self.message))\n sys.stdout.write('Total task progress: [{}] '.format(fill) +\n ('' if self.percentage is None else '{:.2%}'.format(self.percentage)))\n\n sys.stdout.flush()\n if self.finished:\n break\n time.sleep(0.5)\n\n def draw_static(self):\n old_message = ''\n\n while True:\n status = ''\n\n if self.percentage is not None:\n if self.message:\n status = 'Status {}. '.format(self.message)\n status += 'Progress {:.2%}\\n'.format(self.percentage)\n elif old_message != self.message:\n old_message = self.message\n status = 'Status {}\\n'.format(self.message)\n\n if status:\n sys.stdout.write(status)\n sys.stdout.flush()\n\n if self.finished:\n break\n\n time.sleep(1)\n\n def update(self, percentage=None, message=None):\n self.percentage = None if percentage is None else float(percentage / 100.0)\n\n if message:\n self.message = message\n\n def finish(self):\n self.percentage = 1\n\n def end(self):\n self.finished = True\n self.draw_t.join()\n sys.stdout.write('\\n')\n\n\ndef get_terminal_size(fd=1):\n \"\"\"\n Returns height and width of current terminal. First tries to get\n size via termios.TIOCGWINSZ, then from environment. 
Defaults to 25\n lines x 80 columns if both methods fail.\n\n :param fd: file descriptor (default: 1=stdout)\n \"\"\"\n try:\n import fcntl, termios, struct\n hw = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))\n except:\n try:\n hw = (os.environ['LINES'], os.environ['COLUMNS'])\n except:\n hw = (25, 80)\n\n if hw[0] == 0 or hw[1] == 0:\n hw = (25, 80)\n\n return hw\n\n\ndef resolve_cell(row, spec):\n if type(spec) == str:\n return row.get(spec)\n\n if isinstance(spec, collections.Callable):\n return spec(row)\n\n return ''\n\n\ndef read_value(value, tv=ValueType.STRING):\n if value is None:\n if tv == ValueType.ARRAY:\n return []\n\n if tv == ValueType.DICT:\n return {}\n\n if tv == ValueType.SET:\n return set()\n\n if tv == ValueType.BOOLEAN:\n return False\n\n return value\n\n if tv in (ValueType.STRING, ValueType.TEXT_FILE):\n return str(value)\n\n if tv in (ValueType.NUMBER, ValueType.SIZE):\n return int(value)\n\n if tv == ValueType.BOOLEAN:\n if type(value) is bool:\n return value\n\n if str(value).lower() in ('true', 'yes', 'on', '1'):\n return True\n\n if str(value).lower() in ('false', 'no', 'off', '0'):\n return False\n\n if tv == ValueType.SET:\n if type(value) is list:\n return set(value)\n else:\n return {value}\n\n if tv == ValueType.ARRAY:\n if type(value) is list:\n return value\n else:\n return [value]\n\n if tv == ValueType.DICT:\n if type(value) is dict:\n return value\n\n if tv == ValueType.OCTNUMBER:\n return int(value)\n\n if tv == ValueType.PERMISSIONS:\n if isinstance(value, str):\n value = string_to_int(value)\n else:\n if value > 0o777:\n raise ValueError('Invalid permissions format - use octal notation with maximum value of 0o777')\n\n return get_unix_permissions(value)\n\n if tv == ValueType.PASSWORD:\n return Password(str(value))\n\n raise ValueError(_(\"Invalid value '{0}', expected {1} value\".format(value, str(tv).split('ValueType.')[-1].lower())))\n\n\ndef format_value(value, vt=ValueType.STRING, fmt=None):\n fmt = fmt or config.instance.variables.get('output_format')\n return get_formatter(fmt).format_value(value, vt)\n\n\ndef output_value(value, fmt=None, **kwargs):\n fmt = fmt or config.instance.variables.get('output_format')\n return get_formatter(fmt).output_value(value, **kwargs)\n\n\ndef output_list(data, label=_(\"Items\"), fmt=None, **kwargs):\n fmt = fmt or config.instance.variables.get('output_format')\n return get_formatter(fmt).output_list(data, label, **kwargs)\n\n\ndef output_dict(data, key_label=_(\"Key\"), value_label=_(\"Value\"), fmt=None, **kwargs):\n fmt = fmt or config.instance.variables.get('output_format')\n return get_formatter(fmt).output_dict(data, key_label, value_label)\n\n\ndef output_table(table, fmt=None, **kwargs):\n fmt = fmt or config.instance.variables.get('output_format')\n return get_formatter(fmt).output_table(table, **kwargs)\n\n\ndef output_object(item, **kwargs):\n fmt = kwargs.pop('fmt', None)\n fmt = fmt or config.instance.variables.get('output_format')\n return get_formatter(fmt).output_object(item, **kwargs)\n\n\ndef output_tree(tree, children, label, fmt=None, **kwargs):\n fmt = fmt or config.instance.variables.get('output_format')\n return get_formatter(fmt).output_tree(tree, children, label, **kwargs)\n\n\ndef get_formatter(name):\n module = importlib.import_module('freenas.cli.output.' 
+ name)\n return module._formatter()\n\n\ndef output_msg(message, fmt=None, **kwargs):\n fmt = fmt or config.instance.variables.get('output_format')\n return get_formatter(fmt).output_msg(message, **kwargs)\n\n\ndef output_is_ascii():\n return config.instance.variables.get('output_format') == 'ascii'\n\n\n# The following solution to implement `LESS(1)` style output is a combination\n# of snippets taken from the following stackoverflow answers:\n# 1. http://stackoverflow.com/questions/14197009/how-can-i-redirect-print-output-of-a-function-in-python#answer-14197079\n# 2. http://stackoverflow.com/questions/6728661/paging-output-from-python#answer-18234081\n@contextlib.contextmanager\ndef stdout_redirect(where):\n sys.stdout = where\n try:\n yield where\n finally:\n sys.stdout = sys.__stdout__\n\n\nclass StringIO(io.StringIO):\n \"\"\"\n Decode inputs so we can make it work in py2 and py3.\n In py2 the print function automatically encode inputs.\n \"\"\"\n def write(self, value, *args, **kwargs):\n if six.PY2 and isinstance(value, str):\n value = value.decode('utf8')\n return super(StringIO, self).write(value, *args, **kwargs)\n\n\ndef output_less(output_call_list):\n # First check if its either a list or a func (if not then raise TypeError)\n if hasattr(output_call_list, '__call__'):\n # It is a single func so just wrap it in a list and the below code\n # will DTRT\n output_call_list = [output_call_list]\n elif type(output_call_list) is list:\n for x in output_call_list:\n if not hasattr(x, '__call__'):\n raise TypeError('One of the items provided in the ' +\n 'output_call_list was not a function')\n else:\n raise TypeError('Input to `output_less` must either be a function or' +\n ' a list of functions. Instead the following type ' +\n 'was received: {0}'.format(type(output_call_list)))\n\n with stdout_redirect(StringIO()) as new_stdout:\n for output_func_call in output_call_list:\n output_func_call(new_stdout)\n\n new_stdout.seek(0)\n pydoc.pager(new_stdout.read())\n\n\ndef format_output(object, **kwargs):\n if isinstance(object, Object):\n output_object(object, **kwargs)\n\n elif isinstance(object, Table):\n output_table(object, **kwargs)\n\n elif isinstance(object, dict):\n output_dict(object, **kwargs)\n\n elif isinstance(object, Sequence):\n for i in object:\n format_output(i, **kwargs)\n\n elif isinstance(object, list):\n output_list(object, **kwargs)\n\n else:\n output_msg(object, **kwargs)\n\n\ndef refresh_prompt():\n if not config.instance.variables.get('tasks_blocking'):\n config.instance.ml.blank_readline()\n config.instance.ml.restore_readline()\n\n\ndef output_msg_locked(msg):\n output_lock.acquire()\n config.instance.ml.blank_readline()\n output_msg(msg)\n sys.stdout.flush()\n config.instance.ml.restore_readline()\n output_lock.release()\n\n\ndef get_humanized_size(value):\n value = int(value)\n suffixes = [\n 'iB',\n 'KiB',\n 'MiB',\n 'GiB'\n ]\n\n for suffix in suffixes:\n next_step = value / 1024\n if not int(next_step):\n return str(round(value, 2)) + ' ' + suffix\n value = next_step\n\n return str(round(value, 2)) + ' ' + 'TiB'\n", "repo_name": "freenas/cli", "sub_path": "freenas/cli/output/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 13856, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 32, "dataset": "github-code", "pt": "12", "api": [{"api_name": "threading.Lock", "line_number": 20, "usage_type": "call"}, {"api_name": "gettext.translation", "line_number": 21, "usage_type": "call"}, {"api_name": "enum.Enum", 
"line_number": 25, "usage_type": "attribute"}, {"api_name": "freenas.utils.first_or_default", "line_number": 67, "usage_type": "call"}, {"api_name": "sys.stdout.isatty", "line_number": 164, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 164, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 164, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 166, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 166, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 196, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 196, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 197, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 197, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 198, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 198, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 201, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 201, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 204, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 221, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 221, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 222, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 222, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 227, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 241, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 241, "usage_type": "attribute"}, {"api_name": "struct.unpack", "line_number": 254, "usage_type": "call"}, {"api_name": "fcntl.ioctl", "line_number": 254, "usage_type": "call"}, {"api_name": "termios.TIOCGWINSZ", "line_number": 254, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 257, "usage_type": "attribute"}, {"api_name": "collections.Callable", "line_number": 271, "usage_type": "attribute"}, {"api_name": "freenas.utils.permissions.string_to_int", "line_number": 330, "usage_type": "call"}, {"api_name": "freenas.utils.permissions.get_unix_permissions", "line_number": 335, "usage_type": "call"}, {"api_name": "freenas.dispatcher.Password", "line_number": 338, "usage_type": "call"}, {"api_name": "freenas.cli.config.instance.variables.get", "line_number": 344, "usage_type": "call"}, {"api_name": "freenas.cli.config.instance", "line_number": 344, "usage_type": "attribute"}, {"api_name": "freenas.cli.config", "line_number": 344, "usage_type": "name"}, {"api_name": "freenas.cli.config.instance.variables.get", "line_number": 349, "usage_type": "call"}, {"api_name": "freenas.cli.config.instance", "line_number": 349, "usage_type": "attribute"}, {"api_name": "freenas.cli.config", "line_number": 349, "usage_type": "name"}, {"api_name": "freenas.cli.config.instance.variables.get", "line_number": 354, "usage_type": "call"}, {"api_name": "freenas.cli.config.instance", "line_number": 354, "usage_type": "attribute"}, {"api_name": "freenas.cli.config", "line_number": 354, "usage_type": "name"}, {"api_name": "freenas.cli.config.instance.variables.get", "line_number": 359, "usage_type": "call"}, {"api_name": "freenas.cli.config.instance", "line_number": 359, "usage_type": "attribute"}, {"api_name": "freenas.cli.config", "line_number": 359, "usage_type": "name"}, {"api_name": "freenas.cli.config.instance.variables.get", "line_number": 364, 
"usage_type": "call"}, {"api_name": "freenas.cli.config.instance", "line_number": 364, "usage_type": "attribute"}, {"api_name": "freenas.cli.config", "line_number": 364, "usage_type": "name"}, {"api_name": "freenas.cli.config.instance.variables.get", "line_number": 370, "usage_type": "call"}, {"api_name": "freenas.cli.config.instance", "line_number": 370, "usage_type": "attribute"}, {"api_name": "freenas.cli.config", "line_number": 370, "usage_type": "name"}, {"api_name": "freenas.cli.config.instance.variables.get", "line_number": 375, "usage_type": "call"}, {"api_name": "freenas.cli.config.instance", "line_number": 375, "usage_type": "attribute"}, {"api_name": "freenas.cli.config", "line_number": 375, "usage_type": "name"}, {"api_name": "importlib.import_module", "line_number": 380, "usage_type": "call"}, {"api_name": "freenas.cli.config.instance.variables.get", "line_number": 385, "usage_type": "call"}, {"api_name": "freenas.cli.config.instance", "line_number": 385, "usage_type": "attribute"}, {"api_name": "freenas.cli.config", "line_number": 385, "usage_type": "name"}, {"api_name": "freenas.cli.config.instance.variables.get", "line_number": 390, "usage_type": "call"}, {"api_name": "freenas.cli.config.instance", "line_number": 390, "usage_type": "attribute"}, {"api_name": "freenas.cli.config", "line_number": 390, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 399, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 403, "usage_type": "attribute"}, {"api_name": "sys.__stdout__", "line_number": 403, "usage_type": "attribute"}, {"api_name": "contextlib.contextmanager", "line_number": 397, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 406, "usage_type": "attribute"}, {"api_name": "six.PY2", "line_number": 412, "usage_type": "attribute"}, {"api_name": "pydoc.pager", "line_number": 438, "usage_type": "call"}, {"api_name": "freenas.cli.config.instance.variables.get", "line_number": 463, "usage_type": "call"}, {"api_name": "freenas.cli.config.instance", "line_number": 463, "usage_type": "attribute"}, {"api_name": "freenas.cli.config", "line_number": 463, "usage_type": "name"}, {"api_name": "freenas.cli.config.instance.ml.blank_readline", "line_number": 464, "usage_type": "call"}, {"api_name": "freenas.cli.config.instance", "line_number": 464, "usage_type": "attribute"}, {"api_name": "freenas.cli.config", "line_number": 464, "usage_type": "name"}, {"api_name": "freenas.cli.config.instance.ml.restore_readline", "line_number": 465, "usage_type": "call"}, {"api_name": "freenas.cli.config.instance", "line_number": 465, "usage_type": "attribute"}, {"api_name": "freenas.cli.config", "line_number": 465, "usage_type": "name"}, {"api_name": "freenas.cli.config.instance.ml.blank_readline", "line_number": 470, "usage_type": "call"}, {"api_name": "freenas.cli.config.instance", "line_number": 470, "usage_type": "attribute"}, {"api_name": "freenas.cli.config", "line_number": 470, "usage_type": "name"}, {"api_name": "sys.stdout.flush", "line_number": 472, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 472, "usage_type": "attribute"}, {"api_name": "freenas.cli.config.instance.ml.restore_readline", "line_number": 473, "usage_type": "call"}, {"api_name": "freenas.cli.config.instance", "line_number": 473, "usage_type": "attribute"}, {"api_name": "freenas.cli.config", "line_number": 473, "usage_type": "name"}]} +{"seq_id": "32977441804", "text": "import PyPDF2 as p2\nimport nltk\nimport fitz \nnltk.download('punkt')\nfrom pprint import 
pprint\n\nclass word_extract():\n    def __init__(self) -> None:\n        # Keep the extracted words per instance; a class-level list would be\n        # shared between all instances.\n        self.content = []\n    \n    def extract_from_pdf(self, file):\n        pdfread = p2.PdfFileReader(file)\n\n        # Encrypted PDFs cannot be read without a password.\n        if pdfread.getIsEncrypted():\n            return False\n        \n        # Extract entire pdf\n        \n        for i in range(0, pdfread.getNumPages()):\n            pageinfo = pdfread.getPage(i)\n            self.content.extend(pageinfo.extractText().split())\n        return self.content\n    \n    def preprocess(self, content):\n        def extractDigits(lst):\n            return [[el.strip('\"\"')] for el in lst]\n        a_list = nltk.tokenize.sent_tokenize(content)\n        tmp = []\n        list_list = extractDigits(a_list)\n        \n        for i in list_list:\n            a = nltk.word_tokenize(i[0])\n            tmp.append(a)\n\n        # Re-join e-mail addresses that the tokenizer split around '@'.\n        for i in tmp:\n            for n in i:\n                if \"@\" in n:\n                    sentencenum = tmp.index(i)\n                    x = i.index(n)\n                    tmp[sentencenum][x-1 : x+2] = [''.join(tmp[sentencenum][x-1 : x+2])]\n        return tmp\n    \n    def pdf_to_dict(self, file_bytes):\n        text = \"\"\n        with fitz.Document(stream=file_bytes, filetype='pdf') as doc:\n            \n            for page in doc:\n                text += page.get_text()\n        # print(res)\n        list_list = self.preprocess(text)\n        return list_list\n    \n    def extract_from_txt(self, file_bytes):\n        text = file_bytes.rstrip()\n        # print(text)\n        list_list = self.preprocess(text)\n        # print(res)\n        return list_list\n", "repo_name": "PII-detection/server", "sub_path": "upload/utils/word_extract.py", "file_name": "word_extract.py", "file_ext": "py", "file_size_in_byte": 1726, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "nltk.download", "line_number": 4, "usage_type": "call"}, {"api_name": "PyPDF2.PdfFileReader", "line_number": 13, "usage_type": "call"}, {"api_name": "nltk.tokenize.sent_tokenize", "line_number": 29, "usage_type": "call"}, {"api_name": "nltk.tokenize", "line_number": 29, "usage_type": "attribute"}, {"api_name": "nltk.word_tokenize", "line_number": 34, "usage_type": "call"}, {"api_name": "fitz.Document", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "21236210505", "text": "from tkinter import *\nfrom PIL import ImageTk, Image\nclass Hero():\n    def __init__(self, root, canvas):\n        self.root = root\n        # spawn in random position??\n        self.x = 0\n        self.y = 0\n        self.inplay = True\n        \n        self.speed = 5\n        self.canvas = canvas\n\n        self.PILimg1 = Image.open(\"media/cat.png\")\n        self.PILimg1 = self.PILimg1.resize((32, 28))\n        self.tkimg = ImageTk.PhotoImage(self.PILimg1)\n\n        self.heroimg = self.canvas.create_image((25,25),image=self.tkimg)\n\n        self.width = int(self.canvas.cget(\"width\"))\n        self.height = int(self.canvas.cget(\"height\"))\n        self.movement()\n\n    \n    def movement(self):\n        # super hardcoding here for current board size\n        if self.inplay:\n            coords = self.canvas.coords(self.heroimg)\n            if coords[0] < 25 and self.x < 0:\n                self.x = 0\n            elif coords[0] > (self.width - 25) and self.x > 0:\n                self.x = 0\n            if coords[1] < 25 and self.y < 0:\n                self.y = 0\n            elif coords[1] > (self.height - 30) and self.y > 0:\n                self.y = 0\n\n            self.canvas.move(self.heroimg, self.x, self.y)\n        self.canvas.after(5, self.movement)\n\n    def left(self, event):\n        self.x = -5\n        self.y = 0\n    \n    # for motion in positive x direction\n    def right(self, event):\n        self.x = 5\n        self.y = 0\n    \n    # for motion in negative y direction (canvas y grows downward)\n    def up(self, event):\n        self.x = 0\n        self.y = -5\n    \n    # for motion in positive y direction\n    def down(self, event):\n        self.x = 0\n        self.y = 5\n\n    # this feature has been removed to make game more challenging\n    def stop(self, event):\n        self.x = 0\n        self.y = 0\n\n    def 
getSprite(self):\n        return self.heroimg\n\n    def setInPlay(self, inplay):\n        self.inplay = inplay\n\n", "repo_name": "bsande6/halloween-game-project", "sub_path": "hero.py", "file_name": "hero.py", "file_ext": "py", "file_size_in_byte": 1912, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "PIL.Image.open", "line_number": 14, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 14, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 16, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "1779467489", "text": "import shlex\nfrom typing import Callable, List, Union, cast\n\nimport click\nfrom click import Group\nfrom click.decorators import F\nfrom click.shell_completion import _resolve_context\nfrom prompt_toolkit.document import Document\n\n\nclass CompleterContext:\n    def __init__(\n        self,\n        cli: Callable[[F], Group],\n        click_ctx: click.Context,\n        tokens: List[str],\n        used_options: List[str],\n        last_option: Union[str, None],\n        incomplete: str,\n    ) -> None:\n        self.cli = cli\n        self.click_ctx = click_ctx\n        self.tokens = tokens\n        self.used_options = used_options\n        self.last_option = last_option\n        self.incomplete = incomplete\n\n\nclass CommandParser:\n    def __init__(self, cli: Callable[[F], Group]) -> None:\n        self.cli = cast(Group, cli)\n\n    def parse(self, document: Document) -> Union[CompleterContext, None]:\n        tokens = document.text.split(\" \")\n        used_options = [p for p in tokens if p.startswith(\"-\")]\n        last_option = tokens[-2] if len(tokens) > 2 and tokens[-2].startswith(\"-\") else None\n\n        try:\n            args = shlex.split(document.text_before_cursor)\n        except ValueError:\n            # Invalid command, perhaps caused by missing closing quotation.\n            return None\n\n        cursor_within_command = document.text_before_cursor.rstrip() == document.text_before_cursor\n\n        if args and cursor_within_command:\n            # We've entered some text and no space, give completions for the\n            # current word.\n            incomplete = args.pop()\n        else:\n            # We've not entered anything, either at all or for the current\n            # command, so give all relevant completions for this context.\n            incomplete = \"\"\n        ctx = _resolve_context(self.cli, {}, \"\", args)\n\n        return CompleterContext(self.cli, ctx, tokens, used_options, last_option, incomplete)\n", "repo_name": "investoreight/i8-terminal", "sub_path": "i8_terminal/types/command_parser.py", "file_name": "command_parser.py", "file_ext": "py", "file_size_in_byte": 1917, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 41, "dataset": "github-code", "pt": "14", "api": [{"api_name": "typing.Callable", "line_number": 14, "usage_type": "name"}, {"api_name": "click.decorators.F", "line_number": 14, "usage_type": "name"}, {"api_name": "click.Group", "line_number": 14, "usage_type": "name"}, {"api_name": "click.Context", "line_number": 15, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 30, "usage_type": "name"}, {"api_name": "click.decorators.F", "line_number": 30, "usage_type": "name"}, {"api_name": "click.Group", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 31, "usage_type": "call"}, {"api_name": "click.Group", "line_number": 31, "usage_type": 
"argument"}, {"api_name": "prompt_toolkit.document.Document", "line_number": 33, "usage_type": "name"}, {"api_name": "shlex.split", "line_number": 39, "usage_type": "call"}, {"api_name": "click.shell_completion._resolve_context", "line_number": 54, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "23239620367", "text": "import torch\nimport numpy as np\nimport torch.optim as optim\nimport torch.nn as nn\nfrom torch.nn import init\nfrom model.Linear import Linear\nfrom dataset.Linear_dataset import getdata\n\n\nnum_input = 2\nnet = nn.Sequential()\n# linear = Linear(num_input)\n# net.add_module(\"linear\", linear)\nnet.add_module(\"linear2\", nn.Linear(num_input, 1))\n\ndataset = getdata()\n\n#初始化模型参数\nprint(net)\ninit.normal_(net[0].weight, mean=0, std=0.01)\ninit.constant_(net[0].bias, val=0)\n# print(net[0])\n\n#初始化损失函数\nloss = nn.MSELoss()\n\n#定义优化器\noptimizer = optim.SGD(net.parameters(), lr=0.03)\nprint(optimizer)\n\n#开始训练\nepoch_num = 3\nfor epoch in range(epoch_num):\n for X, y in dataset:\n output = net(X)\n l = loss(output, y.view(-1,1))\n optimizer.zero_grad()\n l.backward()\n optimizer.step()\n print('epoch %d, loss: %f' % (epoch, l.item()))\n\ntrue_w = [2, -3.4]\ntrue_b = 4.2\ndense = net[0]\n\nprint(true_w, dense.weight)\nprint(true_b, dense.bias)\n\n", "repo_name": "yohoochen/pratice", "sub_path": "linear_train_2.py", "file_name": "linear_train_2.py", "file_ext": "py", "file_size_in_byte": 997, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "14", "api": [{"api_name": "torch.nn.Sequential", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "dataset.Linear_dataset", "line_number": 16, "usage_type": "name"}, {"api_name": "dataset.Linear_dataset.getdata", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn.init.normal_", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.MSELoss", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 28, "usage_type": "name"}, {"api_name": "dataset.Linear_dataset", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "35407612007", "text": "#!/usr/bin/python\nimport serial\nimport time\nimport json\nimport os\nfrom flask import Flask, request, jsonify\n\nPORT_NUMBER = 8080\n\napp = Flask(__name__)\n\n@app.route('/temp.json', methods=['GET'])\ndef getData():\n ser.write(b'g')\n\n retdata = ser.readline().rstrip()\n print(retdata)\n retdata = retdata.split(b\",\")\n\n if (len(retdata) != 8):\n return (\"malformed data from serial port: \" + repr(retdata), 400)\n\n\n acceltijd = retdata[0]\n acx = retdata[1]\n acy = retdata[2]\n acz = retdata[3]\n gyx = retdata[4]\n gyy = retdata[5]\n gyz = retdata[6]\n acceltemp = retdata[7]\n\n data = {'AccelTijd':acceltijd, 'Acx':acx, 'Acy':acy, 'Acz':acz, 'Gyx':gyx, 'Gyy':gyy, 'Gyz':gyz, 'AccelTemp':acceltemp}\n\n return jsonify(data)\n\n\ndevice = \"\"\nfor i in 
range(0,9):\n device_path = \"/dev/ttyUSB%d\" % i\n if os.path.exists(device_path):\n device = device_path\n break\nif device == \"\":\n print(\"No ttyUSB device found; is the Pim sensor board connected?\")\n exit(1)\n\n\nser = serial.Serial(device, baudrate=115200, timeout=0)\ntime.sleep(2)\nprint(ser.readline())\nprint(ser.readline())\n\napp.run(host=\"0.0.0.0\", port=PORT_NUMBER)\nser.close()\n", "repo_name": "pixelbar/pixelbar-pySerialThermometer", "sub_path": "serialThermometer.py", "file_name": "serialThermometer.py", "file_ext": "py", "file_size_in_byte": 1181, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "14", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "serial.Serial", "line_number": 49, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "19832896604", "text": "import tkinter as tk\nfrom tkinter.filedialog import askopenfilename, asksaveasfilename\nfrom tkinter import messagebox\nfrom PIL import ImageTk, Image\n\n#### Globals\nNRM = \"normal\"\nVRT = \"vertical\"\nHOR = \"horizontal\"\n\nclass Playground:\n ##### Playground class, duhh\n \n def __init__(self, master) -> None:\n ##### Init\n \n ##### Save the reference to master\n self.master = master\n\n ##### Ah, yes, this too\n self.isPressed = False\n self.mode = NRM\n\n ##### Make and Pack the import button\n self.btn_import = tk.Button(master, text=\"Import\", command=self.import_)\n self.btn_import.configure(font=(\"Arial\", 15), width=17, bg=\"#1f1f1f\", fg=\"#f0f0f0\")\n self.btn_import.pack()\n \n def resize(self, img):\n #### This function resizes the original object which is passed!\n\n screen_width = self.master.winfo_screenwidth()\n screen_height = self.master.winfo_screenheight()\n width, height = img.size\n\n if width>height and width > 0.8*screen_width:\n new_width = 0.8*screen_width\n new_height = new_width * height / width\n elif height > 0.8*screen_height:\n new_height = 0.8*screen_height\n new_width = new_height * width / height\n else:\n new_width = width\n new_height = height\n self.resizefactor = new_width/width\n return height, width, img\n \n self.resizefactor = new_width/width\n #### I think ANTIALIAS keeps the quality of the image. otherwise performance. 
not sure\n            img = img.resize((int(new_width), int(new_height)), Image.ANTIALIAS)\n\n        # check if resize is necessary and return resized PIL Image object\n        return new_height, new_width, img\n    \n    \n    def import_(self):\n        ##### Get filepath\n        filepath = askopenfilename(\n            filetypes=[(\"Image Files\", \"*.png\"),\n                       (\"Image Files\", \"*.jpg\"),\n                       (\"Image Files\", \"*.jpeg\"),\n                       (\"All Files\", \"*.*\")]\n        )\n        if not filepath:\n            return\n        ##### save reference to filepath\n        self.img_path = filepath\n\n        ##### Get rid of old import button.\n        self.btn_import.destroy()\n        self.master.configure(padx=5, pady=5)\n\n        ##### Make tk_img object with resized size and keep its reference\n        self.resized_height, self.resized_width, resized_img = self.resize(Image.open(filepath))\n        self.tk_img = ImageTk.PhotoImage(resized_img)\n\n        ##### Make a canvas with same dimensions as the resized image and draw image on it\n        self.canvas = tk.Canvas(self.master, width=self.resized_width, height=self.resized_height)\n        self.canvas.bind(\"<Motion>\", self.motion)\n        self.canvas.bind(\"<Button-1>\", self.buttonPressed)\n        self.canvas.bind(\"<ButtonRelease-1>\", self.buttonReleased)\n        self.canvas.create_image((0, 0), image=self.tk_img, anchor='nw')\n        self.canvas.pack()\n        ##### End of function\n\n    #### Takes an image and exports it\n    def export(self, cropped):\n        filepath = asksaveasfilename(\n            defaultextension=\"png\",\n            filetypes=[(\"Image Files\", \"*.png\"), (\"All Files\", \"*.*\")],\n        )\n        if not filepath:\n            return\n        cropped.save(filepath)\n        return filepath\n\n    #### Takes a box and updates the canvas\n    def updatecanvas(self, x0, y0, x, y):\n        #### https://stackoverflow.com/questions/54637795/how-to-make-a-tkinter-canvas-rectangle-transparent/54645103\n\n        #### Set canvas background to the image\n        self.canvas.create_image((0, 0), image=self.tk_img, anchor='nw')\n        \n        ##### Depending on state\n        #### Set color\n        #### Draw line\n        #### Make and draw a box\n        if self.mode == NRM:\n            line_color = \"#2fff00\"\n            self.canvas.create_line(x0, y0, x0, y, x, y, x, y0, x0, y0, fill=line_color, width=1)\n            temp = Image.new(\"RGBA\", (abs(x-x0), abs(y-y0)), line_color)\n            temp.putalpha(50)\n            self.temp_img = ImageTk.PhotoImage(temp)\n            self.canvas.create_image((min(x,x0), min(y,y0)), image=self.temp_img, anchor='nw')\n        else:\n            line_color = \"red\"\n            \n            if self.mode == VRT:\n                self.canvas.create_line(x0, 1, x0, self.resized_height-1, x, self.resized_height-1, x, 1, x0, 1, fill=line_color, width=1)\n                temp = Image.new(\"RGBA\", (abs(x-x0), int(self.resized_height)), line_color)\n                temp.putalpha(50)\n                self.temp_img = ImageTk.PhotoImage(temp)\n                self.canvas.create_image((min(x,x0), 0), image=self.temp_img, anchor='nw')\n            elif self.mode == HOR:\n                self.canvas.create_line(1, y0, self.resized_width-1, y0, self.resized_width-1, y, 1, y, 1, y0, fill=line_color, width=1)\n                temp = Image.new(\"RGBA\", (int(self.resized_width), abs(y-y0)), line_color)\n                temp.putalpha(50)\n                self.temp_img = ImageTk.PhotoImage(temp)\n                self.canvas.create_image((0, min(y,y0)), image=self.temp_img, anchor='nw')\n\n    #### Motion of mouse event handler\n    def motion(self, event):\n        if self.isPressed:\n            self.updatecanvas(self.buttonpressedeventinfo[0], self.buttonpressedeventinfo[1], event.x, event.y)\n\n    #### Mouse pressed event handler\n    def buttonPressed(self, event):\n        self.buttonpressedeventinfo = [event.x, event.y]\n        self.isPressed = True\n        self.updatecanvas(self.buttonpressedeventinfo[0], self.buttonpressedeventinfo[1], event.x, event.y)\n\n    #### Mouse released event handler\n    def buttonReleased(self, event):\n        self.isPressed = 
False\n        left = min(self.buttonpressedeventinfo[0], event.x)\n        right = max(self.buttonpressedeventinfo[0], event.x)\n        top = min(self.buttonpressedeventinfo[1], event.y)\n        bottom = max(self.buttonpressedeventinfo[1], event.y)\n        \n        self.box = (left / self.resizefactor, top / self.resizefactor, right / self.resizefactor, bottom / self.resizefactor)\n        # Convert the box coordinates to integers.\n        self.box = tuple(map(int, self.box))\n        del self.buttonpressedeventinfo\n\n    #### Enter key event handler\n    def keyPressed(self, event):\n        try:\n            a, b, c, d = self.box\n            if a == c or b == d:\n                return\n        except Exception:\n            return\n        \n        #### The original, non-resized image\n        img = Image.open(self.img_path)\n\n\n        #### Normal cropping\n        if self.mode == NRM:\n            cropped = img.crop((self.box[0], self.box[1], self.box[2], self.box[3]))\n\n        \n        #### Vertical cropping\n        if self.mode == VRT:\n            im1 = img.crop((0, 0, self.box[0], img.height))\n            im2 = img.crop((self.box[2], 0, img.width, img.height))\n            cropped = Image.new(\"RGB\", (im1.width + im2.width, img.height))\n            cropped.paste(im1, (0, 0))\n            cropped.paste(im2, (im1.width, 0))\n\n        \n        #### Horizontal cropping\n        if self.mode == HOR:\n            im1 = img.crop((0, 0, img.width, self.box[1]))\n            im2 = img.crop((0, self.box[3], img.width, img.height))\n            cropped = Image.new(\"RGB\", (img.width, im1.height + im2.height))\n            cropped.paste(im1, (0, 0))\n            cropped.paste(im2, (0, im1.height))\n\n        #### Export the cropped image\n        filepath = self.export(cropped)\n        if filepath is not None:\n            messagebox.showinfo(\"Success\", \"Your image is successfully exported to \" + filepath)\n\n    #### Called when mode is changed from Toolbar\n    def reset(self):\n        try:\n            del self.box\n            self.updatecanvas(0,0,0,0)\n        except Exception:\n            pass", "repo_name": "aditi567/Advance-Image_cropping", "sub_path": "playground.py", "file_name": "playground.py", "file_ext": "py", "file_size_in_byte": 7749, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "tkinter.Button", "line_number": 25, "usage_type": "call"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 50, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 50, "usage_type": "name"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 58, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 74, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 74, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 75, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 75, "usage_type": "name"}, {"api_name": "tkinter.Canvas", "line_number": 78, "usage_type": "call"}, {"api_name": "tkinter.filedialog.asksaveasfilename", "line_number": 88, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 111, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 111, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 113, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 113, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 120, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 120, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 122, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 122, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 126, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 126, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 128, "usage_type": "call"}, {"api_name": 
"PIL.ImageTk", "line_number": 128, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 164, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 164, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 176, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 176, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 185, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 185, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 192, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 192, "usage_type": "name"}]} +{"seq_id": "72127506894", "text": "import math\nimport re\nfrom collections import Counter\n\n# pattern = re.compile('\\w*,*\\w+') this includes numbers\n# pattern = re.compile('[a-zA-Z]+')\ndocs_path = ['./Docs/doc1.txt', './Docs/doc2.txt', './Docs/doc3.txt']\n\nN = len(docs_path)\ndf = {}\ntfs = []\n\nfor doc in docs_path:\n words = re.findall('[a-zA-Z]+', open(doc, 'r', encoding='utf8').read().lower())\n tf = Counter(words)\n tfs.append(tf)\n\nfor tfv in tfs:\n for ts in tfv:\n if ts not in df:\n df[ts] = 1\n else:\n df[ts] += 1\n\nfor idx, tf in enumerate(tfs):\n weight = []\n for term in tf:\n w = tf[term]*math.log10(N/df[term])\n weight.append((term, w))\n print(\"{0} Document TF-IDF\".format(idx+1))\n for wgt in sorted(weight, key = lambda x: -x[1])[0:5]:\n print(\"{0} {1:5f}\".format(wgt[0], wgt[1]))\n print()\n\n\n\n", "repo_name": "fregataa/Algorithm-Python", "sub_path": "Baekjoon/TF-IDF.py", "file_name": "TF-IDF.py", "file_ext": "py", "file_size_in_byte": 846, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "14", "api": [{"api_name": "re.findall", "line_number": 14, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 15, "usage_type": "call"}, {"api_name": "math.log10", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "38686493361", "text": "from flask import Flask\nfrom flask import request\nfrom followers import filejson\nimport telepot\nimport datetime\n\n\napp = Flask(__name__)\n\n\n@app.route('/instagram/username=')\ndef follow(username):\n \n now = datetime.datetime.now()\n waktu=now.strftime(\"%Y-%m-%d %H:%M:%S\")\n sistem = request.headers.get('User-Agent')\n if sistem is None:\n sistem = 'kosong'\n \n ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n token = '5519356568:AAEFj6No6sTcE-ma_i60rBGmTVIjruC4e70'\n penerimaid = 1769420825\n jsonapi = filejson(username)\n bot = telepot.Bot(token)\n pesan = 'API insta-api-id '+username+' IP: '+ip+' '+'Sistem: '+sistem+' '+waktu\n bot.sendMessage(penerimaid, pesan)\n return jsonapi\n\nif __name__ == '__main__':\n app.run()\n ", "repo_name": "raufendro-dev/API-Instagram-Count", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 795, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "14", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request.headers.get", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.request.environ.get", 
"line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.environ", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.request.remote_addr", "line_number": 20, "usage_type": "attribute"}, {"api_name": "followers.filejson", "line_number": 23, "usage_type": "call"}, {"api_name": "telepot.Bot", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "34413293786", "text": "from odoo import api, fields, models, _\nfrom odoo.tools import pycompat, float_repr\nfrom odoo.exceptions import ValidationError\nfrom odoo.tools.sql import column_exists, create_column\n\nfrom datetime import datetime\nfrom collections import namedtuple\nimport tempfile\nimport zipfile\nimport time\nimport io\nimport re\nimport os\n\nBalanceKey = namedtuple('BalanceKey', ['from_code', 'to_code', 'partner_id', 'tax_id'])\n\n\nclass AccountDatevCompany(models.Model):\n _inherit = 'res.company'\n\n # Adding the fields as company_dependent does not break stable policy\n l10n_de_datev_consultant_number = fields.Char(company_dependent=True)\n l10n_de_datev_client_number = fields.Char(company_dependent=True)\n\n\nclass ResPartner(models.Model):\n _inherit = 'res.partner'\n\n l10n_de_datev_identifier = fields.Integer(string='Datev Identifier',\n copy=False, tracking=True,\n help=\"The Datev identifier is a unique identifier for exchange with the government. \"\n \"If you had previous exports with another identifier, you can put it here. \"\n \"If it is 0, then it will take the database id + the value in the system parameter \"\n \"l10n_de.datev_start_count. \")\n\n @api.constrains('l10n_de_datev_identifier')\n def _check_datev_identifier(self):\n self.flush_model(['l10n_de_datev_identifier'])\n self.env.cr.execute(\"\"\"\n SELECT COUNT(id), l10n_de_datev_identifier FROM res_partner\n WHERE l10n_de_datev_identifier != 0\n GROUP BY l10n_de_datev_identifier\n HAVING COUNT(id) > 1\n \"\"\")\n\n if self.env.cr.dictfetchone():\n raise ValidationError(_('You have already defined a partner with the same Datev identifier. 
'))\n\n\nclass AccountMoveL10NDe(models.Model):\n _inherit = 'account.move'\n\n l10n_de_datev_main_account_id = fields.Many2one('account.account', compute='_get_datev_account', store=True)\n\n def _auto_init(self):\n if column_exists(self.env.cr, \"account_move\", \"l10n_de_datev_main_account_id\"):\n return super()._auto_init()\n\n cr = self.env.cr\n create_column(cr, \"account_move\", \"l10n_de_datev_main_account_id\", \"int4\")\n # If move has an invoice, return invoice's account_id\n cr.execute(\n \"\"\"\n UPDATE account_move\n SET l10n_de_datev_main_account_id = r.aid\n FROM (\n SELECT l.move_id mid,\n FIRST_VALUE(l.account_id) OVER(PARTITION BY l.move_id ORDER BY l.id DESC) aid\n FROM account_move_line l\n JOIN account_move m\n ON m.id = l.move_id\n JOIN account_account a\n ON a.id = l.account_id\n WHERE m.move_type in ('out_invoice', 'out_refund', 'in_refund', 'in_invoice', 'out_receipt', 'in_receipt')\n AND a.account_type in ('asset_receivable', 'liability_payable')\n ) r\n WHERE id = r.mid\n \"\"\")\n\n # If move belongs to a bank journal, return the journal's account (debit/credit should normally be the same)\n cr.execute(\n \"\"\"\n UPDATE account_move\n SET l10n_de_datev_main_account_id = r.aid\n FROM (\n SELECT m.id mid,\n j.default_account_id aid\n FROM account_move m\n JOIN account_journal j\n ON m.journal_id = j.id\n WHERE j.type = 'bank'\n AND j.default_account_id IS NOT NULL\n ) r\n WHERE id = r.mid\n AND l10n_de_datev_main_account_id IS NULL\n \"\"\")\n\n # If the move is an automatic exchange rate entry, take the gain/loss account set on the exchange journal\n cr.execute(\"\"\"\n UPDATE account_move m\n SET l10n_de_datev_main_account_id = r.aid\n FROM (\n SELECT l.move_id AS mid,\n l.account_id AS aid\n FROM account_move_line l\n JOIN account_move m\n ON l.move_id = m.id\n JOIN account_journal j\n ON m.journal_id = j.id\n JOIN res_company c\n ON c.currency_exchange_journal_id = j.id\n WHERE j.type='general'\n AND l.account_id = j.default_account_id\n GROUP BY l.move_id,\n l.account_id\n HAVING count(*)=1\n ) r\n WHERE id = r.mid\n AND l10n_de_datev_main_account_id IS NULL\n \"\"\")\n\n # Look for an account used a single time in the move, that has no originator tax\n query = \"\"\"\n UPDATE account_move m\n SET l10n_de_datev_main_account_id = r.aid\n FROM (\n SELECT l.move_id AS mid,\n min(l.account_id) AS aid\n FROM account_move_line l\n WHERE {}\n GROUP BY move_id\n HAVING count(*)=1\n ) r\n WHERE id = r.mid\n AND m.l10n_de_datev_main_account_id IS NULL\n \"\"\"\n cr.execute(query.format(\"l.debit > 0\"))\n cr.execute(query.format(\"l.credit > 0\"))\n cr.execute(query.format(\"l.debit > 0 AND l.tax_line_id IS NULL\"))\n cr.execute(query.format(\"l.credit > 0 AND l.tax_line_id IS NULL\"))\n\n return super()._auto_init()\n\n @api.depends('journal_id', 'line_ids', 'journal_id.default_account_id')\n def _get_datev_account(self):\n for move in self:\n move.l10n_de_datev_main_account_id = value = False\n # If move has an invoice, return invoice's account_id\n if move.is_invoice(include_receipts=True):\n payment_term_lines = move.line_ids.filtered(\n lambda line: line.account_id.account_type in ('asset_receivable', 'liability_payable'))\n if payment_term_lines:\n move.l10n_de_datev_main_account_id = payment_term_lines[0].account_id\n continue\n # If move belongs to a bank journal, return the journal's account (debit/credit should normally be the same)\n if move.journal_id.type == 'bank' and move.journal_id.default_account_id:\n move.l10n_de_datev_main_account_id = 
move.journal_id.default_account_id\n continue\n # If the move is an automatic exchange rate entry, take the gain/loss account set on the exchange journal\n elif move.journal_id.type == 'general' and move.journal_id == self.env.company.currency_exchange_journal_id:\n lines = move.line_ids.filtered(lambda r: r.account_id == move.journal_id.default_account_id)\n\n if len(lines) == 1:\n move.l10n_de_datev_main_account_id = lines.account_id\n continue\n\n # Look for an account used a single time in the move, that has no originator tax\n aml_debit = self.env['account.move.line']\n aml_credit = self.env['account.move.line']\n for aml in move.line_ids:\n if aml.debit > 0:\n aml_debit += aml\n if aml.credit > 0:\n aml_credit += aml\n if len(aml_debit) == 1:\n value = aml_debit[0].account_id\n elif len(aml_credit) == 1:\n value = aml_credit[0].account_id\n else:\n aml_debit_wo_tax = [a for a in aml_debit if not a.tax_line_id]\n aml_credit_wo_tax = [a for a in aml_credit if not a.tax_line_id]\n if len(aml_debit_wo_tax) == 1:\n value = aml_debit_wo_tax[0].account_id\n elif len(aml_credit_wo_tax) == 1:\n value = aml_credit_wo_tax[0].account_id\n move.l10n_de_datev_main_account_id = value\n\n\nclass GeneralLedgerCustomHandler(models.AbstractModel):\n _inherit = 'account.general.ledger.report.handler'\n\n def _custom_options_initializer(self, report, options, previous_options=None):\n \"\"\"\n Add the invoice lines search domain that common for all countries.\n :param dict options: Report options\n :param dict previous_options: Previous report options\n \"\"\"\n super()._custom_options_initializer(report, options, previous_options)\n if self.env.company.country_code == 'DE':\n options.setdefault('buttons', []).extend((\n {\n 'name': _('Datev (zip)'),\n 'sequence': 30,\n 'action': 'export_file',\n 'action_param': 'l10n_de_datev_export_to_zip',\n 'file_export_type': _('Datev zip'),\n },\n {\n 'name': _('Datev + ATCH (zip)'),\n 'sequence': 40,\n 'action': 'export_file',\n 'action_param': 'l10_de_datev_export_to_zip_and_attach',\n 'file_export_type': _('Datev + batch zip'),\n },\n ))\n\n def l10_de_datev_export_to_zip_and_attach(self, options):\n options['add_attachments'] = True\n return self.l10n_de_datev_export_to_zip(options)\n\n def l10n_de_datev_export_to_zip(self, options):\n \"\"\"\n Check ir_attachment for method _get_path\n create a sha and replace 2 first letters by something not hexadecimal\n Return full_path as 2nd args, use it as name for Zipfile\n Don't need to unlink as it will be done automatically by garbage collector\n of attachment cron\n \"\"\"\n report = self.env['account.report'].browse(options['report_id'])\n with tempfile.NamedTemporaryFile(mode='w+b', delete=True) as buf:\n with zipfile.ZipFile(buf, mode=\"w\", compression=zipfile.ZIP_DEFLATED, allowZip64=False) as zf:\n move_line_ids = []\n for line in report._get_lines({**options, 'unfold_all': True}):\n model, model_id = report._get_model_info_from_id(line['id'])\n if model == 'account.move.line':\n move_line_ids.append(model_id)\n\n domain = [\n ('line_ids', 'in', move_line_ids),\n ('company_id', 'in', report.get_report_company_ids(options)),\n ]\n if options.get('all_entries'):\n domain += [('state', '!=', 'cancel')]\n else:\n domain += [('state', '=', 'posted')]\n if options.get('date'):\n domain += [('date', '<=', options['date']['date_to'])]\n # cannot set date_from on move as domain depends on the move line account if \"strict_range\" is False\n domain += report._get_options_journals_domain(options)\n moves = 
self.env['account.move'].search(domain)\n zf.writestr('EXTF_accounting_entries.csv', self._l10n_de_datev_get_csv(options, moves))\n zf.writestr('EXTF_customer_accounts.csv', self._l10n_de_datev_get_partner_list(options, customer=True))\n zf.writestr('EXTF_vendor_accounts.csv', self._l10n_de_datev_get_partner_list(options, customer=False))\n if options.get('add_attachments'):\n # add all moves attachments in zip file, this is not part of DATEV specs\n slash_re = re.compile('[\\\\/]')\n for move in moves:\n # rename files by move name + sequence number (if more than 1 file)\n # '\\' is not allowed in file name, replace by '-'\n base_name = slash_re.sub('-', move.name)\n if len(move.attachment_ids) > 1:\n name_pattern = f'%(base)s-%(index)0.{len(str(len(move.attachment_ids)))}d%(extension)s'\n else:\n name_pattern = '%(base)s%(extension)s'\n for i, attachment in enumerate(move.attachment_ids.sorted('id'), 1):\n extension = os.path.splitext(attachment.name)[1]\n name = name_pattern % {'base': base_name, 'index': i, 'extension': extension}\n zf.writestr(name, attachment.raw)\n buf.seek(0)\n content = buf.read()\n return {\n 'file_name': report.get_default_report_filename('ZIP'),\n 'file_content': content,\n 'file_type': 'zip'\n }\n\n def _l10n_de_datev_get_client_number(self):\n consultant_number = self.env.company.l10n_de_datev_consultant_number\n client_number = self.env.company.l10n_de_datev_client_number\n if not consultant_number:\n consultant_number = 99999\n if not client_number:\n client_number = 999\n return [consultant_number, client_number]\n\n def _l10n_de_datev_get_partner_list(self, options, customer=True):\n date_to = fields.Date.from_string(options.get('date').get('date_to'))\n fy = self.env.company.compute_fiscalyear_dates(date_to)\n\n fy = datetime.strftime(fy.get('date_from'), '%Y%m%d')\n datev_info = self._l10n_de_datev_get_client_number()\n account_length = self._l10n_de_datev_get_account_length()\n\n output = io.BytesIO()\n writer = pycompat.csv_writer(output, delimiter=';', quotechar='\"', quoting=2)\n preheader = ['EXTF', 510, 16, 'Debitoren/Kreditoren', 4, None, None, '', '', '', datev_info[0], datev_info[1], fy, account_length,\n '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']\n header = ['Konto', 'Name (AdressatentypUnternehmen)', 'Name (Adressatentypnatürl. 
Person)', '', '', '', 'Adressatentyp']\n        lines = [preheader, header]\n\n        move_line_ids = set()\n        report = self.env['account.report'].browse(options['report_id'])\n        for line in report._get_lines({**options, 'unfold_all': True}):\n            model, model_id = report._parse_line_id(line['id'])[-1][-2:]\n            if model == 'account.move.line':\n                move_line_ids.add(str(model_id))\n\n        if len(move_line_ids):\n            if customer:\n                move_types = ('out_refund', 'out_invoice', 'out_receipt')\n            else:\n                move_types = ('in_refund', 'in_invoice', 'in_receipt')\n            select = \"\"\"SELECT distinct(aml.partner_id)\n                FROM account_move_line aml\n                LEFT JOIN account_move m\n                ON aml.move_id = m.id\n                WHERE aml.id IN %s\n                AND aml.tax_line_id IS NULL\n                AND aml.debit != aml.credit\n                AND m.move_type IN %s\n                AND aml.account_id != m.l10n_de_datev_main_account_id\"\"\"\n            self.env.cr.execute(select, (tuple(move_line_ids), move_types))\n            partners = self.env['res.partner'].browse([p.get('partner_id') for p in self.env.cr.dictfetchall()])\n            for partner in partners:\n                if customer:\n                    code = self._l10n_de_datev_find_partner_account(partner.property_account_receivable_id, partner)\n                else:\n                    code = self._l10n_de_datev_find_partner_account(partner.property_account_payable_id, partner)\n                line_value = {\n                    'code': code,\n                    'company_name': partner.name if partner.is_company else '',\n                    'person_name': '' if partner.is_company else partner.name,\n                    'natural': partner.is_company and '2' or '1'\n                }\n                # Idiotic program needs to have a line with 243 elements ordered in a given fashion as it\n                # does not take into account the header and non mandatory fields\n                array = ['' for x in range(243)]\n                array[0] = line_value.get('code')\n                array[1] = line_value.get('company_name')\n                array[2] = line_value.get('person_name')\n                array[6] = line_value.get('natural')\n                lines.append(array)\n        writer.writerows(lines)\n        return output.getvalue()\n\n    def _l10n_de_datev_get_account_length(self):\n        param_start = self.env['ir.config_parameter'].sudo().get_param('l10n_de.datev_start_count', \"100000000\")[:9]\n        param_start_vendors = self.env['ir.config_parameter'].sudo().get_param('l10n_de.datev_start_count_vendors', \"700000000\")[:9]\n\n        # The gegenkonto should be 1 digit longer than the account length, so we have to subtract 1 from the params' length\n        return max(param_start.isdigit() and len(param_start) or 9, param_start_vendors.isdigit() and len(param_start_vendors) or 9, 5) - 1\n\n    def _l10n_de_datev_find_partner_account(self, account, partner):\n        len_param = self._l10n_de_datev_get_account_length() + 1\n        if (account.account_type in ('asset_receivable', 'liability_payable') and partner):\n            # Check if we have a property as receivable/payable on the partner\n            # We use the property because in Datev and in Germany, a partner can be of 2 types:\n            # an important partner, which has a specific account number, or a virtual partner,\n            # which has only a number. 
To differentiate between the two, if a partner in Odoo\n            # explicitly has a receivable/payable account set, we use that account, otherwise\n            # we assume it is not an important partner and its Datev virtual id will be the\n            # l10n_de_datev_identifier set or the id + the start count parameter.\n            account = partner.property_account_receivable_id if account.account_type == 'asset_receivable' else partner.property_account_payable_id\n            fname = \"property_account_receivable_id\" if account.account_type == 'asset_receivable' else \"property_account_payable_id\"\n            prop = self.env['ir.property']._get(fname, \"res.partner\", partner.id)\n            if prop == account:\n                return str(account.code).ljust(len_param - 1, '0') if account else ''\n            return self._l10n_de_datev_get_account_identifier(account, partner)\n        return str(account.code).ljust(len_param - 1, '0') if account else ''\n\n    def _l10n_de_datev_get_account_identifier(self, account, partner):\n        len_param = self._l10n_de_datev_get_account_length() + 1\n        if account.account_type == 'asset_receivable':\n            param_start = self.env['ir.config_parameter'].sudo().get_param('l10n_de.datev_start_count', \"100000000\")[:9]\n            start_count = param_start.isdigit() and int(param_start) or 100000000\n        else:\n            param_start_vendors = self.env['ir.config_parameter'].sudo().get_param('l10n_de.datev_start_count_vendors', \"700000000\")[:9]\n            start_count = param_start_vendors.isdigit() and int(param_start_vendors) or 700000000\n        start_count = int(str(start_count).ljust(len_param, '0'))\n        return partner.l10n_de_datev_identifier or start_count + partner.id\n\n    # Source: http://www.datev.de/dnlexom/client/app/index.html#/document/1036228/D103622800029\n    def _l10n_de_datev_get_csv(self, options, moves):\n        # last 2 elements of the preheader should be filled with \"consultant number\" and \"client number\"\n        date_from = fields.Date.from_string(options.get('date').get('date_from'))\n        date_to = fields.Date.from_string(options.get('date').get('date_to'))\n        fy = self.env.company.compute_fiscalyear_dates(date_to)\n\n        date_from = datetime.strftime(date_from, '%Y%m%d')\n        date_to = datetime.strftime(date_to, '%Y%m%d')\n        fy = datetime.strftime(fy.get('date_from'), '%Y%m%d')\n        datev_info = self._l10n_de_datev_get_client_number()\n        account_length = self._l10n_de_datev_get_account_length()\n\n        output = io.BytesIO()\n        writer = pycompat.csv_writer(output, delimiter=';', quotechar='\"', quoting=2)\n        preheader = ['EXTF', 510, 21, 'Buchungsstapel', 7, '', '', '', '', '', datev_info[0], datev_info[1], fy, account_length,\n                     date_from, date_to, '', '', '', '', 0, 'EUR', '', '', '', '', '', '', '', '', '']\n        header = ['Umsatz (ohne Soll/Haben-Kz)', 'Soll/Haben-Kennzeichen', 'WKZ Umsatz', 'Kurs', 'Basis-Umsatz', 'WKZ Basis-Umsatz', 'Konto', 'Gegenkonto (ohne BU-Schlüssel)', 'BU-Schlüssel', 'Belegdatum', 'Belegfeld 1', 'Belegfeld 2', 'Skonto', 'Buchungstext']\n\n        # if we do _get_lines with some unfolded lines, only those will be returned, but we want all of them\n        move_line_ids = []\n        report = self.env['account.report'].browse(options['report_id'])\n        for line in report._get_lines({**options, 'unfold_all': True}):\n            model, model_id = report._parse_line_id(line['id'])[-1][-2:]\n            if model == 'account.move.line':\n                move_line_ids.append(int(model_id))\n\n        lines = [preheader, header]\n\n        for m in moves:\n            line_values = {}  # key: BalanceKey\n            move_currencies = {}\n            payment_account = 0  # Used for non-reconciled payments\n\n            for aml in m.line_ids:\n                if aml.debit == aml.credit:\n                    # Ignore debit = credit = 0\n                    continue\n                # If 
both account and counteraccount are the same, ignore the line\n if aml.account_id == aml.move_id.l10n_de_datev_main_account_id:\n continue\n # If line is a tax ignore it as datev requires single line with gross amount and deduct tax itself based\n # on account or on the control key code\n if aml.tax_line_id:\n continue\n\n aml_taxes = aml.tax_ids.compute_all(aml.balance, aml.company_id.currency_id, partner=aml.partner_id, handle_price_include=False)\n line_amount = aml_taxes['total_included']\n\n code_correction = ''\n if aml.tax_ids:\n codes = set(aml.tax_ids.mapped('l10n_de_datev_code'))\n if len(codes) == 1:\n # there should only be one max, else skip code\n code_correction = codes.pop() or ''\n\n # account and counterpart account\n to_account_code = str(self._l10n_de_datev_find_partner_account(aml.move_id.l10n_de_datev_main_account_id, aml.partner_id))\n account_code = u'{code}'.format(code=self._l10n_de_datev_find_partner_account(aml.account_id, aml.partner_id))\n\n # We don't want to have lines with our outstanding payment/receipt as they don't represent real moves\n # So if payment skip one move line to write, while keeping the account\n # and replace bank account for outstanding payment/receipt for the other line\n\n if aml.payment_id:\n if payment_account == 0:\n payment_account = account_code\n continue\n else:\n to_account_code = payment_account\n\n # group lines by account, to_account & partner\n match_key = BalanceKey(from_code=account_code, to_code=to_account_code, partner_id=aml.partner_id,\n tax_id=code_correction)\n\n if match_key in line_values:\n # values already in line_values\n line_values[match_key]['line_amount'] += line_amount\n line_values[match_key]['line_base_amount'] += aml.price_total\n move_currencies[match_key].add(aml.currency_id)\n continue\n\n # reference\n receipt1 = aml.move_id.name\n if aml.move_id.journal_id.type == 'purchase' and aml.move_id.ref:\n receipt1 = aml.move_id.ref\n\n # on receivable/payable aml of sales/purchases\n receipt2 = ''\n if to_account_code == account_code and aml.date_maturity:\n receipt2 = aml.date\n\n move_currencies[match_key] = set([aml.currency_id])\n currency = aml.company_id.currency_id\n line_values[match_key] = {\n 'waehrung': currency.name,\n 'line_base_amount': aml.price_total,\n 'line_base_currency': aml.currency_id.name,\n 'buschluessel': code_correction,\n 'gegenkonto': to_account_code,\n 'belegfeld1': receipt1[-36:],\n 'belegfeld2': receipt2,\n 'datum': datetime.strftime(aml.move_id.date, '%-d%m'),\n 'konto': account_code,\n 'kurs': str(aml.currency_id.rate).replace('.', ','),\n 'buchungstext': receipt1,\n 'line_amount': line_amount\n }\n\n for match_key, line_value in line_values.items():\n # For DateV, we can't have negative amount on a line, so we need to inverse the amount and inverse the\n # credit/debit symbol.\n line_value['sollhaben'] = 'h' if line_value['line_amount'] < 0 else 's'\n line_value['line_amount'] = abs(line_value['line_amount'])\n # Idiotic program needs to have a line with 116 elements ordered in a given fashion as it\n # does not take into account the header and non mandatory fields\n array = ['' for x in range(116)]\n array[0] = float_repr(line_value['line_amount'], aml.company_id.currency_id.decimal_places).replace('.', ',')\n array[1] = line_value.get('sollhaben')\n array[2] = line_value.get('waehrung')\n if (len(move_currencies[match_key]) == 1) and line_value.get('line_base_currency') != line_value.get('waehrung'):\n array[3] = line_value.get('kurs')\n array[4] = 
float_repr(line_value['line_base_amount'], aml.currency_id.decimal_places).replace('.', ',')\n array[5] = line_value.get('line_base_currency')\n array[6] = line_value.get('konto')\n array[7] = line_value.get('gegenkonto')\n array[8] = line_value.get('buschluessel')\n array[9] = line_value.get('datum')\n array[10] = line_value.get('belegfeld1')\n array[11] = line_value.get('belegfeld2')\n array[13] = line_value.get('buchungstext')\n lines.append(array)\n\n writer.writerows(lines)\n return output.getvalue()\n", "repo_name": "dinar-it/odoo_16_enter", "sub_path": "l10n_de_reports/models/datev_export_csv.py", "file_name": "datev_export_csv.py", "file_ext": "py", "file_size_in_byte": 26940, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "12", "api": [{"api_name": "collections.namedtuple", "line_number": 15, "usage_type": "call"}, {"api_name": "odoo.models.Model", "line_number": 18, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 18, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 22, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 22, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 23, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 23, "usage_type": "name"}, {"api_name": "odoo.models.Model", "line_number": 26, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 26, "usage_type": "name"}, {"api_name": "odoo.fields.Integer", "line_number": 29, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 29, "usage_type": "name"}, {"api_name": "odoo.exceptions.ValidationError", "line_number": 47, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 47, "usage_type": "call"}, {"api_name": "odoo.api.constrains", "line_number": 36, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 36, "usage_type": "name"}, {"api_name": "odoo.models.Model", "line_number": 50, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 50, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 53, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 53, "usage_type": "name"}, {"api_name": "odoo.tools.sql.column_exists", "line_number": 56, "usage_type": "call"}, {"api_name": "odoo.tools.sql.create_column", "line_number": 60, "usage_type": "call"}, {"api_name": "odoo.api.depends", "line_number": 144, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 144, "usage_type": "name"}, {"api_name": "odoo.models.AbstractModel", "line_number": 189, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 189, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 202, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 206, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 209, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 213, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 230, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 231, "usage_type": "call"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 231, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 256, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 266, "usage_type": "call"}, {"api_name": "os.path", "line_number": 266, "usage_type": "attribute"}, {"api_name": "odoo.fields.Date.from_string", "line_number": 287, "usage_type": "call"}, {"api_name": 
"odoo.fields.Date", "line_number": 287, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 287, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 290, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 290, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 294, "usage_type": "call"}, {"api_name": "odoo.tools.pycompat.csv_writer", "line_number": 295, "usage_type": "call"}, {"api_name": "odoo.tools.pycompat", "line_number": 295, "usage_type": "name"}, {"api_name": "odoo.fields.Date.from_string", "line_number": 385, "usage_type": "call"}, {"api_name": "odoo.fields.Date", "line_number": 385, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 385, "usage_type": "name"}, {"api_name": "odoo.fields.Date.from_string", "line_number": 386, "usage_type": "call"}, {"api_name": "odoo.fields.Date", "line_number": 386, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 386, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 389, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 389, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 390, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 390, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 391, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 391, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 395, "usage_type": "call"}, {"api_name": "odoo.tools.pycompat.csv_writer", "line_number": 396, "usage_type": "call"}, {"api_name": "odoo.tools.pycompat", "line_number": 396, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 484, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 484, "usage_type": "name"}, {"api_name": "odoo.tools.float_repr", "line_number": 499, "usage_type": "call"}, {"api_name": "odoo.tools.float_repr", "line_number": 504, "usage_type": "call"}]} +{"seq_id": "23674760053", "text": "import argparse\n\nfrom bigdl.orca import init_orca_context, stop_orca_context\nfrom bigdl.orca.learn.mxnet import Estimator, create_config\n\n\ndef get_train_data_iter(config, kv):\n from mxnet.test_utils import get_mnist_iterator\n from filelock import FileLock\n with FileLock(\"data.lock\"):\n iters = get_mnist_iterator(config[\"batch_size\"], (1, 28, 28),\n num_parts=kv.num_workers, part_index=kv.rank)\n return iters[0]\n\n\ndef get_test_data_iter(config, kv):\n from mxnet.test_utils import get_mnist_iterator\n from filelock import FileLock\n with FileLock(\"data.lock\"):\n iters = get_mnist_iterator(config[\"batch_size\"], (1, 28, 28),\n num_parts=kv.num_workers, part_index=kv.rank)\n return iters[1]\n\n\ndef get_model(config):\n import mxnet as mx\n from mxnet import gluon\n from mxnet.gluon import nn\n import mxnet.ndarray as F\n\n class LeNet(gluon.Block):\n def __init__(self, **kwargs):\n super(LeNet, self).__init__(**kwargs)\n with self.name_scope():\n # layers created in name_scope will inherit name space\n # from parent layer.\n self.conv1 = nn.Conv2D(20, kernel_size=(5, 5))\n self.pool1 = nn.MaxPool2D(pool_size=(2, 2), strides=(2, 2))\n self.conv2 = nn.Conv2D(50, kernel_size=(5, 5))\n self.pool2 = nn.MaxPool2D(pool_size=(2, 2), strides=(2, 2))\n self.fc1 = nn.Dense(500)\n self.fc2 = nn.Dense(10)\n\n def forward(self, x):\n x = self.pool1(F.tanh(self.conv1(x)))\n x = self.pool2(F.tanh(self.conv2(x)))\n # 0 
means copy over size from corresponding dimension.\n # -1 means infer size from the rest of dimensions.\n x = x.reshape((0, -1))\n x = F.tanh(self.fc1(x))\n x = F.tanh(self.fc2(x))\n return x\n\n net = LeNet()\n net.initialize(mx.init.Xavier(magnitude=2.24), ctx=[mx.cpu()])\n return net\n\n\ndef get_loss(config):\n from mxnet import gluon\n return gluon.loss.SoftmaxCrossEntropyLoss()\n\n\ndef get_metrics(config):\n import mxnet as mx\n return mx.metric.Accuracy()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Train a LeNet model for handwritten digit recognition.')\n parser.add_argument('--cluster_mode', type=str, default=\"local\",\n help='The mode for the Spark cluster.')\n parser.add_argument('--cores', type=int, default=4,\n help='The number of cores you want to use on each node.')\n parser.add_argument('-n', '--num_workers', type=int, default=2,\n help='The number of MXNet workers to be launched.')\n parser.add_argument('-s', '--num_servers', type=int,\n help='The number of MXNet servers to be launched. If not specified, '\n 'default to be equal to the number of workers.')\n parser.add_argument('-b', '--batch_size', type=int, default=100,\n help='The number of samples per gradient update for each worker.')\n parser.add_argument('-e', '--epochs', type=int, default=10,\n help='The number of epochs to train the model.')\n parser.add_argument('-l', '--learning_rate', type=float, default=0.02,\n help='Learning rate for the LeNet model.')\n parser.add_argument('--log_interval', type=int, default=20,\n help='The number of batches to wait before logging throughput and '\n 'metrics information during the training process.')\n opt = parser.parse_args()\n\n num_nodes = 1 if opt.cluster_mode == \"local\" else opt.num_workers\n init_orca_context(cluster_mode=opt.cluster_mode, cores=opt.cores, num_nodes=num_nodes)\n\n config = create_config(optimizer=\"sgd\",\n optimizer_params={'learning_rate': opt.learning_rate},\n log_interval=opt.log_interval, seed=42)\n estimator = Estimator.from_mxnet(config=config, model_creator=get_model,\n loss_creator=get_loss, validation_metrics_creator=get_metrics,\n num_workers=opt.num_workers, num_servers=opt.num_servers,\n eval_metrics_creator=get_metrics)\n estimator.fit(data=get_train_data_iter, validation_data=get_test_data_iter,\n epochs=opt.epochs, batch_size=opt.batch_size)\n estimator.shutdown()\n stop_orca_context()\n", "repo_name": "intel-analytics/BigDL", "sub_path": "python/orca/example/learn/mxnet/lenet_mnist.py", "file_name": "lenet_mnist.py", "file_ext": "py", "file_size_in_byte": 4613, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4540, "dataset": "github-code", "pt": "14", "api": [{"api_name": "filelock.FileLock", "line_number": 10, "usage_type": "call"}, {"api_name": "mxnet.test_utils.get_mnist_iterator", "line_number": 11, "usage_type": "call"}, {"api_name": "filelock.FileLock", "line_number": 19, "usage_type": "call"}, {"api_name": "mxnet.test_utils.get_mnist_iterator", "line_number": 20, "usage_type": "call"}, {"api_name": "mxnet.gluon.Block", "line_number": 31, "usage_type": "attribute"}, {"api_name": "mxnet.gluon", "line_number": 31, "usage_type": "name"}, {"api_name": "mxnet.gluon.nn.Conv2D", "line_number": 37, "usage_type": "call"}, {"api_name": "mxnet.gluon.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "mxnet.gluon.nn.MaxPool2D", "line_number": 38, "usage_type": "call"}, {"api_name": "mxnet.gluon.nn", "line_number": 38, "usage_type": "name"}, {"api_name": 
"mxnet.gluon.nn.Conv2D", "line_number": 39, "usage_type": "call"}, {"api_name": "mxnet.gluon.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "mxnet.gluon.nn.MaxPool2D", "line_number": 40, "usage_type": "call"}, {"api_name": "mxnet.gluon.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "mxnet.gluon.nn.Dense", "line_number": 41, "usage_type": "call"}, {"api_name": "mxnet.gluon.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "mxnet.gluon.nn.Dense", "line_number": 42, "usage_type": "call"}, {"api_name": "mxnet.gluon.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "mxnet.ndarray.tanh", "line_number": 45, "usage_type": "call"}, {"api_name": "mxnet.ndarray", "line_number": 45, "usage_type": "name"}, {"api_name": "mxnet.ndarray.tanh", "line_number": 46, "usage_type": "call"}, {"api_name": "mxnet.ndarray", "line_number": 46, "usage_type": "name"}, {"api_name": "mxnet.ndarray.tanh", "line_number": 50, "usage_type": "call"}, {"api_name": "mxnet.ndarray", "line_number": 50, "usage_type": "name"}, {"api_name": "mxnet.ndarray.tanh", "line_number": 51, "usage_type": "call"}, {"api_name": "mxnet.ndarray", "line_number": 51, "usage_type": "name"}, {"api_name": "mxnet.init.Xavier", "line_number": 55, "usage_type": "call"}, {"api_name": "mxnet.init", "line_number": 55, "usage_type": "attribute"}, {"api_name": "mxnet.cpu", "line_number": 55, "usage_type": "call"}, {"api_name": "mxnet.gluon.loss.SoftmaxCrossEntropyLoss", "line_number": 61, "usage_type": "call"}, {"api_name": "mxnet.gluon.loss", "line_number": 61, "usage_type": "attribute"}, {"api_name": "mxnet.gluon", "line_number": 61, "usage_type": "name"}, {"api_name": "mxnet.metric.Accuracy", "line_number": 66, "usage_type": "call"}, {"api_name": "mxnet.metric", "line_number": 66, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 70, "usage_type": "call"}, {"api_name": "bigdl.orca.init_orca_context", "line_number": 93, "usage_type": "call"}, {"api_name": "bigdl.orca.learn.mxnet.create_config", "line_number": 95, "usage_type": "call"}, {"api_name": "bigdl.orca.learn.mxnet.Estimator.from_mxnet", "line_number": 98, "usage_type": "call"}, {"api_name": "bigdl.orca.learn.mxnet.Estimator", "line_number": 98, "usage_type": "name"}, {"api_name": "bigdl.orca.stop_orca_context", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "35530434807", "text": "# Calculate the accuracy of a baseline that simply predicts \"London\" for every\n# example in the dev set.\n# Hint: Make use of existing code.\n# Your solution here should only be a few lines.\nimport utils\n\nwith open(\"birth_dev.tsv\") as f:\n data = f.readlines()\n l = len(data)\n\npredictions = [\"London\"] * l\ntotal, correct = utils.evaluate_places(\"birth_dev.tsv\", predictions)\nprint('Correct: {} out of {}: {}%'.format(correct, total, correct/total*100))", "repo_name": "lhoorie/NaturalLanguageProcessing-iust", "sub_path": "Assignments/A5/student_2023/src/london_baseline.py", "file_name": "london_baseline.py", "file_ext": "py", "file_size_in_byte": 457, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "14", "api": [{"api_name": "utils.evaluate_places", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "14324024259", "text": "from keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, SpatialDropout1D, Conv1D, MaxPooling1D, Activation, Embedding, Flatten, GlobalMaxPooling1D, LSTM\r\nfrom keras import regularizers, callbacks, optimizers\r\nfrom 
keras.models import load_model\r\nfrom keras.utils import plot_model\r\nimport argparse\r\nimport os\r\nimport logging\r\nfrom data_loaders import TextsLoader, TokenizerLoader, WordVectorsLoader, TextSequencesLoader\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn import metrics\r\nimport tensorflow as tf\r\n\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\nconv_version = 5\r\nlstm_version = 1\r\nconv_lstm_version = 1\r\n\r\nsem_eval_path = ''\r\nseq_len = 800 # 5000 # 2500 # Inferred from checking the sequences length distributions\r\nwords_count = 1207#438\r\nembedding_mode = 0\r\ncrowdsourced = False\r\nalgorithm = 0\r\nfinal_model_name = ''\r\n\r\nimport pandas as pd\r\n\r\ndef load_embedding_layer(tokenizer):\r\n    # Get vocabulary size\r\n    vocab_size = len(tokenizer.word_index) + 1\r\n    logging.info('Vocab size: {}'.format(vocab_size))\r\n    logging.info('tokenizer word_index: {}'.format(tokenizer.word_index))\r\n\r\n    # Load word vectors\r\n    word_vectors_loader = WordVectorsLoader(sem_eval_path, crowdsourced, embedding_mode)\r\n    word_vectors_loader.load()\r\n    weights_matrix = word_vectors_loader.create_embedding_weights_matrix(tokenizer.word_index)\r\n    \r\n    return Embedding(input_dim=vocab_size, \r\n            output_dim=weights_matrix.shape[1], \r\n            weights=[weights_matrix],\r\n            input_length=seq_len,\r\n            trainable=False\r\n        )\r\n\r\ndef define_conv_model(tokenizer, filters=64, kernel_size=4, hidden_dims=256):\r\n    model = Sequential()\r\n\r\n    embedding_layer = load_embedding_layer(tokenizer)\r\n    # embedding_layer = Embedding(words_count, embedding_size=100, input_length=seq_len)\r\n    model.add(embedding_layer)\r\n    model.add(SpatialDropout1D(0.6))\r\n\r\n    model.add(Conv1D(filters,\r\n                     kernel_size,\r\n                     activation='relu'))\r\n    model.add(Dropout(0.9))\r\n    \r\n    model.add(MaxPooling1D(pool_size=4))\r\n\r\n    # model.add(Conv1D(filters,\r\n    #                 kernel_size,\r\n    #                 activation='relu'))\r\n    # model.add(Dropout(0.5))\r\n    # model.add(MaxPooling1D(pool_size=4))\r\n\r\n    model.add(GlobalMaxPooling1D())\r\n    # model.add(Flatten())\r\n\r\n    model.add(Dense(hidden_dims, \r\n                    activation='relu', \r\n                    kernel_regularizer=regularizers.l2(0.1)\r\n                ))\r\n    model.add(Dropout(0.9))\r\n\r\n    model.add(Dense(1, activation='sigmoid'))\r\n\r\n    return model\r\n\r\ndef define_lstm_model(tokenizer, units=128, embedding_size=128):\r\n    model = Sequential()\r\n\r\n    logging.info('Building LSTM v2...')\r\n    logging.info('words_count: {}'.format(words_count))\r\n    logging.info('seq_len: {}'.format(seq_len))\r\n    logging.info('embedding_size: {}'.format(embedding_size))\r\n\r\n    model.add(load_embedding_layer(tokenizer))\r\n    # model.add(Embedding(words_count, embedding_size, input_length=seq_len))\r\n    model.add(SpatialDropout1D(0.2))\r\n\r\n    model.add(LSTM(units, dropout=0.2, recurrent_dropout=0.2))\r\n    model.add(Dense(1, activation='sigmoid'))\r\n\r\n    return model\r\n\r\ndef define_conv_lstm_model(tokenizer, units=128, filters=64, kernel_size=4): # unlike the others, this takes the sentences input\r\n    model = Sequential()\r\n\r\n    embedding_layer = load_embedding_layer(tokenizer)\r\n    model.add(embedding_layer)\r\n\r\n    model.add(Conv1D(filters,\r\n                     kernel_size,\r\n                     activation='relu'))\r\n    model.add(Dropout(0.3))\r\n    model.add(MaxPooling1D(pool_size=4))\r\n\r\n    model.add(Conv1D(filters,\r\n                     kernel_size,\r\n                     activation='relu'))\r\n    model.add(Dropout(0.3))\r\n    model.add(MaxPooling1D(pool_size=2))\r\n    \r\n    model.add(LSTM(units, dropout=0.2, recurrent_dropout=0.2))\r\n\r\n    model.add(Dense(1, activation='sigmoid'))\r\n\r\n    return model\r\n\r\ndef 
generate_new_model_name():\r\n    alg = ''\r\n    version = 1\r\n    if algorithm == 0:\r\n        alg = 'conv'\r\n        version = conv_version\r\n    elif algorithm == 1:\r\n        alg = 'conv_lstm'\r\n        version = conv_lstm_version\r\n    elif algorithm == 2:\r\n        alg = 'lstm'\r\n        version = lstm_version\r\n    else:\r\n        raise Exception('Unknown algorithm')\r\n    return 'words_{}_model_w{}_v{}'.format(alg, embedding_mode, version)\r\n\r\ndef load_pretrained(model, model_name, model_weights_location):\r\n    model_file = os.path.join(sem_eval_path, 'models', \"{}.h5\".format(model_name))#\"/homedtic/hkavas/SemEval/models/words_conv_lstm_model_w1_v1.h5\"#\r\n    print(\"model location:\", model_file)\r\n    if os.path.isfile(model_file) and os.path.isfile(model_weights_location):\r\n        model_file_time = os.path.getmtime(model_file)\r\n        weights_file_time = os.path.getmtime(model_weights_location)\r\n        if weights_file_time > model_file_time:\r\n            logging.info('Loading the weights (latest modified).')\r\n            model.load_weights(model_weights_location)\r\n        else:\r\n            model = load_model(model_file)\r\n            logging.info('Loading the model (latest modified)')\r\n    elif os.path.isfile(model_weights_location):\r\n        model.load_weights(model_weights_location)\r\n        logging.info('Loading the weights')\r\n    elif os.path.isfile(model_file):\r\n        model = load_model(model_file)\r\n        logging.info('Loading the model')\r\n    else:\r\n        raise Exception(\"Neither model nor weights file exists\")\r\n    return model\r\n\r\ndef plot_model_history(history, model_name):\r\n    #plt.plot(history.history['val_accuracy'])\r\n    #plt.plot(history.history['val_loss'])\r\n    plt.title('validation accuracy and loss')\r\n    plt.ylabel('accuracy')\r\n    plt.xlabel('epoch')\r\n    plt.savefig(os.path.join(sem_eval_path, 'models', '{}_history.png'.format(model_name)))\r\n\r\ndef evaluate_model(model, X_val, y_val):\r\n    y_predict = (np.asarray(model.predict(X_val))).round()\r\n\r\n    acc = metrics.accuracy_score(y_val, y_predict)\r\n    logging.info('Accuracy: {}'.format(acc))\r\n    print('Accuracy: {}'.format(acc))\r\n    # let's see\r\n    logging.info('y_val: {}'.format(y_val))\r\n    logging.info('y_predict: {}'.format(y_predict))\r\n    \r\n    conf_matrix = metrics.confusion_matrix(y_val, y_predict)\r\n    logging.info('Confusion matrix: {}'.format(conf_matrix))\r\n\r\n    precision = metrics.precision_score(y_val, y_predict)\r\n    logging.info('Precision score: {}'.format(precision))\r\n\r\n    recall = metrics.recall_score(y_val, y_predict)\r\n    logging.info('Recall score: {}'.format(recall))\r\n\r\n    val_f1 = metrics.f1_score(y_val, y_predict)\r\n    logging.info('F1 score: {}'.format(val_f1))\r\n\r\n    model_plot_file = os.path.join(sem_eval_path, 'models', '{}.png'.format(final_model_name))\r\n    plot_model(model, to_file=model_plot_file, show_shapes=True, show_layer_names=True)\r\n    \r\n\r\ndef transferLearning(model):\r\n    print(\"Transfer learning is on!\")\r\n    model.add(Dense(1, activation='sigmoid'))\r\n    \r\n    return model\r\n    \r\n\r\ndef main(): \r\n    tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(log_device_placement=True))\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument(\"--path\",'-p', default=\"/home/agon/Files/SemEval\",\r\n                        help=\"Use this argument to change the SemEval directory path (the default path is: '/home/ashwath/Files/SemEval')\")\r\n    parser.add_argument(\"--crowdsourced\", '-c', action='store_true', default=\"False\",\r\n                        help=\"Use this argument to work with the crowdsourced file\")\r\n    parser.add_argument(\"--model\", '-m', default=\"\", #\"words_conv_lstm_model_w1_v1\"\r\n                        help=\"Use this argument to 
continue training a stored model\")\r\n    parser.add_argument(\"--word_vectors\", '-w', default=\"0\", # 2 for BERT\r\n                        help=\"Use this argument to set the word vectors to use: 0: Google's Word2vec, 1: GloVe, 2: Fasttext, 3: Custom pretrained word2vec, 4: Custom pretrained Fasttext, 5: Custom pretrained news word2vec. Default: 0\")\r\n    parser.add_argument(\"--algorithm\", '-a', default=\"0\", # 1 used!\r\n                        help=\"Use this argument to set the algorithm to use: 0: CNN, 1: CNN + LSTM, 2: LSTM. Default: 0\")\r\n    parser.add_argument(\"--learning_rate\", '-l', default=\"0.001\",\r\n                        help=\"Use this argument to set the learning rate to use. Default: 0.001\")\r\n    parser.add_argument(\"--evaluate\", '-e', action='store_true', default=\"False\", # True\r\n                        help=\"Use this argument to set run on evaluation mode\")\r\n    args = parser.parse_args()\r\n    \r\n    global sem_eval_path\r\n    sem_eval_path = args.path\r\n\r\n    global embedding_mode\r\n    embedding_mode = int(args.word_vectors)\r\n\r\n    global algorithm\r\n    algorithm = int(args.algorithm)\r\n\r\n    evaluate_mode = args.evaluate\r\n\r\n    global seq_len\r\n    sentences = False\r\n    if algorithm == 0:\r\n        seq_len = 500 #700 #5000\r\n    elif algorithm == 1:\r\n        seq_len = 800 #2064\r\n    elif algorithm == 2:\r\n        seq_len = 800#100\r\n        sentences = True\r\n    else:\r\n        raise Exception('Unknown algorithm')\r\n\r\n    model_name = args.model\r\n    model_dir = os.path.join(sem_eval_path, 'models')\r\n    new_model_name = generate_new_model_name()\r\n    model_location = os.path.join(model_dir, '{}.h5'.format(new_model_name))\r\n    model_weights_location = os.path.join(model_dir, '{}_weights.h5'.format(new_model_name))\r\n    print(\"location:\", model_location)\r\n\r\n    # ---LOGS---\r\n    logs_path = os.path.join(sem_eval_path, 'logs_new', '{}_log.log'.format(model_name if model_name else new_model_name))\r\n    logging.basicConfig(filename=logs_path, filemode='w', \r\n                        format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\r\n    logging.info('model_location: {}'.format(model_location))\r\n    \r\n    global crowdsourced\r\n    crowdsourced = args.crowdsourced\r\n\r\n    learning_rate = float(args.learning_rate)\r\n    batch_size = 16#32 # default\r\n\r\n    # Get data (lines 252-270 changed)\r\n    texts_loader = TextsLoader(sem_eval_path, crowdsourced, logs_path)\r\n    train_texts, y_train = texts_loader.load(sentences=sentences)\r\n    \r\n    logging.info('Train shape: {}'.format(train_texts.shape))\r\n    logging.info('Number of biased samples: {}'.format(len(y_train[y_train == 1])))\r\n    logging.info('Number of non-biased samples: {}'.format(len(y_train[y_train == 0])))\r\n    \r\n    \r\n    # Get Test data\r\n    #df = pd.read_csv('allsides_train.csv', engine='python')\r\n    #print(\"1 is well\", df.head())\r\n    #df_allsides = df[~df.text.str.isnumeric()]\r\n    #print(\"all is well\", df_allsides.head())\r\n    \r\n    #train_texts_tl = df_allsides['text']\r\n    #y_train_tl = df_allsides['predicted_hyperpartisan']\r\n    \r\n    # Get Tweets data\r\n    df_ = pd.read_csv(\"84k_pol.csv\", encoding= 'unicode_escape')\r\n    df_['Tweet'] = df_['Tweet'].astype('str')\r\n    df_t = df_[(~df_.Tweet.fillna('').str.isnumeric())]\r\n    \r\n    train_texts_tl = df_t['Tweet']\r\n    y_train_tl = df_t['Party']\r\n    \r\n    \r\n    logging.info('Train shape(TL): {}'.format(train_texts_tl.shape))\r\n    logging.info('Number of biased samples(TL): {}'.format(len(y_train_tl[y_train_tl == 1])))\r\n    logging.info('Number of non-biased samples(TL): {}'.format(len(y_train_tl[y_train_tl == 0])))\r\n\r\n    val_texts, y_val = texts_loader.load(sentences=sentences, validation=True)\r\n    
logging.info('Validation shape: {}'.format(val_texts.shape))\r\n    logging.info('Number of biased samples (very important!!! called from main via texts_loader.load(sentences[]): {}'.format(len(y_val[y_val == 1])))\r\n    logging.info('Number of non-biased samples: {}'.format(len(y_val[y_val == 0])))\r\n    logging.info(train_texts[:20])\r\n    logging.info('-----------------------------------------------------------------------------------------------------')\r\n\r\n    tokenizer = TokenizerLoader(train_texts, sem_eval_path, logs_path, most_common_count=words_count).load()\r\n\r\n    sequences_loader = TextSequencesLoader(tokenizer, seq_len, sem_eval_path=sem_eval_path)\r\n    X_train = sequences_loader.load(train_texts, truncate_sequences=(algorithm == 2))\r\n    X_train_tl = sequences_loader.load(train_texts_tl, truncate_sequences=(algorithm == 2))\r\n    \r\n    zeroes = []\r\n    for seq in X_train:\r\n        seq_zeroes = 0\r\n        for item in seq:\r\n            if item == 0:\r\n                seq_zeroes += 1\r\n        zeroes.append(seq_zeroes)\r\n    zeroes = np.array(zeroes)\r\n    logging.info('Min. number of zeroes: {}'.format(zeroes.min()))\r\n    logging.info('Avg. number of zeroes: {}'.format(zeroes.mean()))\r\n    logging.info('Std. number of zeroes: {}'.format(zeroes.std()))\r\n    logging.info('Max. number of zeroes: {}'.format(zeroes.max()))\r\n    logging.info('Training sequences: ')\r\n    logging.info(X_train[:20])\r\n    logging.info('-----------------------------------------------------------------------------------------------------')\r\n    \r\n    if len(sequences_loader.indices_to_remove) > 0:\r\n        logging.info('Removing train {} sequences'.format(len(sequences_loader.indices_to_remove)))\r\n        logging.info('X_train pre shape: {}'.format(X_train.shape))\r\n        X_train = np.delete(X_train, sequences_loader.indices_to_remove, axis=0)\r\n        logging.info('X_train post shape: {}'.format(X_train.shape))\r\n        logging.info('y_train pre shape: {}'.format(y_train.shape))\r\n        y_train.drop(y_train.index[sequences_loader.indices_to_remove], inplace=True)\r\n        logging.info('y_train post shape: {}'.format(y_train.shape))\r\n\r\n    # sequences_loader.indices_to_remove\r\n    \r\n    X_val = sequences_loader.load(val_texts)\r\n    if len(sequences_loader.indices_to_remove) > 0:\r\n        logging.info('Removing validation {} sequences'.format(len(sequences_loader.indices_to_remove)))\r\n        logging.info('X_val pre shape: {}'.format(X_val.shape))\r\n        X_val = np.delete(X_val, sequences_loader.indices_to_remove, axis=0)\r\n        logging.info('X_val post shape: {}'.format(X_val.shape))\r\n        logging.info('y_val pre shape: {}'.format(y_val.shape))\r\n        y_val.drop(y_val.index[sequences_loader.indices_to_remove], inplace=True)\r\n        logging.info('y_val post shape: {}'.format(y_val.shape))\r\n    \r\n    #seq_len = sequences_loader.seq_len\r\n\r\n    if algorithm == 0:\r\n        model = define_conv_model(tokenizer)\r\n    elif algorithm == 1:\r\n        model = define_conv_lstm_model(tokenizer)\r\n    elif algorithm == 2:\r\n        model = define_lstm_model(tokenizer)\r\n    else:\r\n        raise Exception('Unknown algorithm')\r\n\r\n    if model_name:\r\n        model = load_pretrained(model, model_name, model_weights_location)\r\n\r\n    global final_model_name\r\n    final_model_name = model_name if model_name else new_model_name\r\n    \r\n    logging.info(model.summary())\r\n\r\n    if evaluate_mode is True: # MAKE EVALUATE MODE ON\r\n        evaluate_model(model, X_val, y_val)\r\n    else:\r\n        # Implement Early Stopping\r\n        \r\n        \r\n        \r\n        early_stopping_callback = callbacks.EarlyStopping(monitor='val_loss',\r\n                              min_delta=0,\r\n                              patience=5,\r\n                              verbose=1)\r\n                              # restore_best_weights=True)\r\n        
save_best_model = callbacks.ModelCheckpoint(model_weights_location, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')\r\n \r\n \r\n \r\n adam = optimizers.Adam(lr=learning_rate)\r\n model.compile(loss='binary_crossentropy',\r\n optimizer=adam,\r\n metrics=['accuracy'])\r\n\r\n history = model.fit(X_train, y_train,\r\n batch_size=batch_size,\r\n epochs=20,\r\n verbose=2,\r\n validation_data=(X_val, y_val),\r\n callbacks=[early_stopping_callback, save_best_model])\r\n \r\n #reload best weights\r\n model.load_weights(model_weights_location)\r\n \r\n model = transferLearning(model)\r\n print(\"SUCCESS!\")\r\n \r\n history = model.fit(X_train_tl, np.array(y_train_tl),\r\n batch_size=batch_size,\r\n epochs=10,\r\n verbose=2,\r\n validation_data=(X_val, y_val),\r\n callbacks=[early_stopping_callback])\r\n \r\n \r\n \r\n \r\n\r\n plot_model_history(history, final_model_name)\r\n\r\n logging.info('Model trained. Storing model on disk.')\r\n model.save(\"/homedtic/hkavas/SemEval/models_new/allsidesTL-3.h5\")\r\n #model.save(model_location) \r\n logging.info('Model stored on disk.')\r\n\r\n \r\nif __name__ == \"__main__\":\r\n main()", "repo_name": "hmtkvs/NLP-Political-Bias-Detection", "sub_path": "Train/train_words_dl_model.py", "file_name": "train_words_dl_model.py", "file_ext": "py", "file_size_in_byte": 16893, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "12", "api": [{"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 34, "usage_type": "call"}, {"api_name": "data_loaders.WordVectorsLoader", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.layers.Embedding", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.layers.SpatialDropout1D", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling1D", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.layers.GlobalMaxPooling1D", "line_number": 69, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 72, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 74, "usage_type": "call"}, {"api_name": "keras.regularizers", "line_number": 74, "usage_type": "name"}, {"api_name": "keras.layers.Dropout", "line_number": 76, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 78, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 83, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 85, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 86, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 87, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.layers.SpatialDropout1D", "line_number": 92, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 94, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 95, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 100, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 105, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", 
"line_number": 108, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling1D", "line_number": 109, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 111, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 114, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling1D", "line_number": 115, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 117, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "os.path.getmtime", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "os.path.getmtime", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path", "line_number": 144, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 146, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 149, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 151, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}, {"api_name": "keras.models.load_model", "line_number": 155, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path", "line_number": 167, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 170, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 172, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 172, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 173, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 176, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 177, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 179, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 179, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 180, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 182, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 182, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 183, 
"usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 185, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 185, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 186, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 188, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 188, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 189, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path", "line_number": 191, "usage_type": "attribute"}, {"api_name": "keras.utils.plot_model", "line_number": 192, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 197, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.Session", "line_number": 203, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 203, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.ConfigProto", "line_number": 203, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 245, "usage_type": "call"}, {"api_name": "os.path", "line_number": 245, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 247, "usage_type": "call"}, {"api_name": "os.path", "line_number": 247, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 248, "usage_type": "call"}, {"api_name": "os.path", "line_number": 248, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 252, "usage_type": "call"}, {"api_name": "os.path", "line_number": 252, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 253, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 254, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 255, "usage_type": "call"}, {"api_name": "data_loaders.TextsLoader", "line_number": 264, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 267, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 268, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 269, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 282, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 290, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 291, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 292, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 295, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 296, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 297, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 298, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 299, "usage_type": "call"}, {"api_name": "data_loaders.TokenizerLoader", "line_number": 301, "usage_type": "call"}, {"api_name": "data_loaders.TextSequencesLoader", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 314, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 315, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 316, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 317, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 318, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 319, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 320, 
"usage_type": "call"}, {"api_name": "logging.info", "line_number": 321, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 324, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 325, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 326, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 327, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 328, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 330, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 336, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 337, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 338, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 339, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 340, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 342, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 361, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 370, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 370, "usage_type": "name"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 375, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 375, "usage_type": "name"}, {"api_name": "keras.optimizers.Adam", "line_number": 379, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 379, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 397, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 410, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 413, "usage_type": "call"}]} +{"seq_id": "72621225613", "text": "# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n\"\"\"\nCreate a zip file of all the images posted or shared from \nan account, zip into a file on S3, and email notification to them.\n\"\"\"\nimport models\nimport sys\nfrom lib.s3 import S3Bucket\nfrom boto.s3.key import Key\nfrom tornado.options import options\nimport json\nimport os\nimport subprocess\nimport postmark\n\nNAME = \"make-zip-of-images\"\n\ndef main():\n names = sys.argv[2:]\n for name in names:\n make_zip_file(name)\n \n results = {\n 'last_name': name, \n 'command' : 'make-zip-of-images'\n }\n return json.dumps(results)\n\ndef percent_cb(complete, total):\n sys.stdout.write('.')\n sys.stdout.flush()\n\ndef make_zip_file(for_user=None):\n \"\"\"\n get all shared files, pull to /mnt, zip them into a file and then email the\n user in their user account.\n \"\"\"\n if not for_user:\n sys.exit()\n\n s3_bucket = S3Bucket()\n\n user = models.User.get(\"name='{0}'\".format(for_user))\n if not user:\n return json.dumps({'status':'error', 'message':'user not found'})\n\n os.mkdir(\"/mnt/backups/users/{0}\".format(user.name))\n\n \n sfs = models.Sharedfile.where(\"user_id = %s and deleted=0 order by id\", user.id)\n\n if sfs:\n print(len(sfs))\n for sf in sfs:\n source = sf.sourcefile()\n if source.type == 'link':\n sys.stdout.write('x')\n sys.stdout.flush()\n continue\n else:\n sys.stdout.write('.')\n sys.stdout.flush()\n file_object = s3_bucket.get_key(\"originals/{0}\".format(source.file_key))\n extension = \"\"\n if sf.content_type == 'image/gif':\n extension = \"gif\"\n elif sf.content_type == 'image/jpg' or sf.content_type == 'image/jpeg':\n extension = \"jpg\"\n elif sf.content_type == 'image/png':\n extension = \"png\"\n\n if extension == \"\":\n print(sf.content_type)\n print(\"extension 
blank\")\n sys.exit()\n\n file_object.get_contents_to_filename(\"/mnt/backups/users/{0}/{1}.{2}\".format(user.name, sf.share_key, extension))\n\n #zip contents of directory and save to /users/id-name.zip\n subprocess.call([\"zip\", \"-r\", \"/mnt/backups/users/{0}.zip\".format(user.name), \"/mnt/backups/users/{0}/\".format(user.name)])\n\n #upload to s3 as /bucket-name/account/id/images.zip\n k = Key(s3_bucket)\n k.key = \"account/{0}/images.zip\".format(user.id)\n k.set_contents_from_filename(\"/mnt/backups/users/{0}.zip\".format(user.name), cb=percent_cb, num_cb=10)\n\n happy_url = k.generate_url(expires_in=72000)\n #email link to user email 8 hours\n pm = postmark.PMMail(api_key=options.postmark_api_key,\n sender=\"hello@mltshp.com\", to=user.email,\n subject=\"[mltshp] Your Images Are Ready!\",\n text_body=\"Hi, you requested to receive all of your images in a .zip file.\\n\" + \\\n \"Here they are! This link is good for the next TWENTY hours starting…now.\\n\\n\" + \\\n \"{0}\\n\\n\".format(happy_url) + \\\n \"Thanks for making MLTSHP so much fun. :D\\n\" + \\\n \"- MLTSHP\")\n pm.send()\n\n", "repo_name": "MLTSHP/mltshp", "sub_path": "scripts/make-zip-of-images.py", "file_name": "make-zip-of-images.py", "file_ext": "py", "file_size_in_byte": 3261, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 49, "dataset": "github-code", "pt": "14", "api": [{"api_name": "sys.argv", "line_number": 20, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 28, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 31, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 32, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 40, "usage_type": "call"}, {"api_name": "lib.s3.S3Bucket", "line_number": 42, "usage_type": "call"}, {"api_name": "models.User.get", "line_number": 44, "usage_type": "call"}, {"api_name": "models.User", "line_number": 44, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 46, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 48, "usage_type": "call"}, {"api_name": "models.Sharedfile.where", "line_number": 51, "usage_type": "call"}, {"api_name": "models.Sharedfile", "line_number": 51, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 58, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 58, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 59, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 59, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 62, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 62, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 63, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 63, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 76, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 81, "usage_type": "call"}, {"api_name": "boto.s3.key.Key", "line_number": 84, "usage_type": "call"}, {"api_name": "postmark.PMMail", "line_number": 90, "usage_type": "call"}, {"api_name": "tornado.options.options.postmark_api_key", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 90, "usage_type": "name"}]} +{"seq_id": "74562512973", "text": "import Orange\ndata = 
Orange.data.Table(\"wine\")\n\neps = 0.5\n\ndef G(wineClass, S, w):\n    k = 0\n    for d in data:\n        if d[len(d)-1] == wineClass:\n            if abs(w[S]-d[S]) < eps:\n                k = k+1\n    return k\n\nn1 = [15, 2, 2.7, 18.6, 110, 2.60, 2.8, 1.31, 1.5, 5, 1.1, 3.8, 1300]\nn2 = [13, 1.7, 1.5, 24, 100, 2.74, 3.8, 0.4, 1.8, 5, 0.79, 2.9, 400]\nn3 = [14, 4, 2.6, 25.4, 95, 1.4, 0.4, 0.72, 1.25, 6.9, 0.85, 1.75, 550]\nn = 0\nprint(\"1)\", n1, \"\\n2)\", n2, \"\\n3)\", n3)\nprint(\"Enter the number of the pattern you would like to recognize, or press any other key if you want to enter a pattern yourself.\")\nv = input()\nif v == \"1\": n = n1\nelse: \n    if v == \"2\": n = n2\n    else : \n        if v == \"3\": n = n3\n        else :\n            print(\"Alcohol: \", end=''); \n            alcohol = float(input())\n            print(\"Malic acid: \", end='')\n            malicAcid = float(input())\n            print(\"Ash: \", end='')\n            ash = float(input())\n            print(\"Alcalinity of ash: \", end='')\n            alcalinityOfAsh = float(input())\n            print(\"Magnesium: \", end='')\n            magnesium = float(input())\n            print(\"Total phenols: \", end='')\n            totalPhenols = float(input())\n            print(\"Flavanoids: \", end='')\n            flavanoids = float(input())\n            print(\"Nonflavanoid phenols: \", end='')\n            nonflavanoidsPhenols = float(input())\n            print(\"Proanthocyanins: \", end='')\n            proanthocyanins = float(input())\n            print(\"Color intensity: \", end='')\n            colorIntensity = float(input())\n            print(\"Hue: \", end='')\n            hue = float(input())\n            print(\"OD280: \", end='')\n            OD280 = float(input())\n            print(\"Proline: \", end='')\n            proline = float(input())\n            n = [alcohol, malicAcid, ash, alcalinityOfAsh, magnesium, totalPhenols, flavanoids, nonflavanoidsPhenols, proanthocyanins, colorIntensity, hue, OD280, proline]\nprint (\"\\nAll criteria are independent, so we take S1 = x1, S2 = x2, ..., S14 = x14\")\nprint (\"\\nPattern to recognize: \", n)\nminED = float(\"inf\"); minHD = float(\"inf\"); minDB = float(\"inf\")\nwineED = 0; wineHD = 0; wineDB = 0\nstED = []; stHD = []; stDB = []\n\nmax = -1\nres = '0'\nfor cl in \"123\":\n    sumG = 0\n    for S in range(0, len(data[0])-1):\n        sumG = sumG + G(cl, S, n)\n    print(\"G(w',\",cl,\") = \",sumG)\n    if sumG > max:\n        max = sumG\n        res = cl\nprint (\"Wine class: \", res)\n\n", "repo_name": "izimin/pattern-recognition-labs", "sub_path": "Алгоритм голосования/HW2/HW2/HW2.py", "file_name": "HW2.py", "file_ext": "py", "file_size_in_byte": 2780, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "14", "api": [{"api_name": "Orange.data.Table", "line_number": 2, "usage_type": "call"}, {"api_name": "Orange.data", "line_number": 2, "usage_type": "attribute"}]} +{"seq_id": "17153729209", "text": "# Timer core\n\nfrom rich.console import Console\nfrom time import sleep\nfrom cherryCore.cherry import CherrySession\n\nfrom datetime import datetime, timedelta\n\nimport sys\nimport tty, termios\nimport select\n\n\n_DEV_ = 1\n_DEV_ = 100\n\n\ndef aread_key():\n    inp, _, _ = select.select([sys.stdin], [], [], 0)\n    if inp:\n        key = sys.stdin.read(1)\n        return key\n    else:\n        return None\n\n\nclass Timer():\n    def __init__(self, timer_settings=None, currentMode=None):\n        self.old_terminal_settings = termios.tcgetattr(sys.stdin)\n        self.cherrySession = CherrySession(timer_settings)\n        self.finished = False\n        self.running = False\n        self.bufTime = {\n            key: None for key in [ \n                'startTime', \n                'totalTime', \n                'endTime', \n                'currentTime', \n                'formattedTime',\n            ]\n        }\n        self.currentMode = currentMode\n\n        # temp\n        self.console = Console()\n\n    # @staticmethod\n    def 
raw_mode_on_and_off(func):\n def wrapper(self, *args, **kwargs):\n tty.setraw(sys.stdin.fileno())\n result = func(self, *args, **kwargs)\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.old_terminal_settings)\n return result\n return wrapper\n # @staticmethod\n def raw_mode_off_and_on(func):\n def wrapper(self, *args, **kwargs):\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.old_terminal_settings)\n result = func(self, *args, **kwargs)\n tty.setraw(sys.stdin.fileno())\n return result\n return wrapper\n\n\n @raw_mode_on_and_off\n def run(self):\n self.running = True\n self.activateMode()\n # required for storing data later\n self.bufTime['startTime'] = datetime.now()\n\n \n for r in range(self.cherrySession.rounds):\n currentTime = timedelta(minutes=self.cherrySession.focusTime)\n self.displayAnything(f'Round {r+1}')\n while currentTime.total_seconds() > 0:\n \n # region Reading Keystrokes\n key = aread_key()\n if key == 'q':\n self.displayAnything('Timer has been stopped.\\nSee you later)\\n')\n # self.finished = True\n return\n elif key == ' ':\n self.running = not self.running\n elif key == '\\x03':\n try:\n raise KeyboardInterrupt\n except KeyboardInterrupt:\n self.displayAnything(f'Oh, you are leaving...\\nWell, bye-bye then\\n')\n # self.finished = True\n return\n else:\n # endregion\n if self.running:\n self.displayTime(currentTime)\n currentTime -= timedelta(milliseconds=100*_DEV_)\n\n \n sleep(0.1)\n else:\n self.displayAnything(f'Our work here is over... Well Done!)')\n\n @raw_mode_off_and_on\n def displayTime(self, currentTime):\n dummyTime = datetime(1, 1, 1) + currentTime\n formattedTime = dummyTime.strftime('%M:%S')\n self.console.print(f'_ {formattedTime}', end='\\r')\n\n @raw_mode_off_and_on\n def displayAnything(self, anything):\n self.console.print(f'{anything} ')\n\n def activateMode(self):\n self.console.print()\n\n\ndef main():\n a = Timer(Console())\n a.run()\n\n\nif __name__ == '__main__':\n main()\n\n\n", "repo_name": "gedfalk/cherry", "sub_path": "cherryCore/timer.py", "file_name": "timer.py", "file_ext": "py", "file_size_in_byte": 3620, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "14", "api": [{"api_name": "select.select", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.stdin.read", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 21, "usage_type": "attribute"}, {"api_name": "termios.tcgetattr", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cherryCore.cherry.CherrySession", "line_number": 30, "usage_type": "call"}, {"api_name": "rich.console.Console", "line_number": 45, "usage_type": "call"}, {"api_name": "tty.setraw", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.stdin.fileno", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 50, "usage_type": "attribute"}, {"api_name": "termios.tcsetattr", "line_number": 52, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 52, "usage_type": "attribute"}, {"api_name": "termios.TCSADRAIN", "line_number": 52, "usage_type": "attribute"}, {"api_name": "termios.tcsetattr", "line_number": 58, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 58, "usage_type": "attribute"}, {"api_name": "termios.TCSADRAIN", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tty.setraw", 
"line_number": 60, "usage_type": "call"}, {"api_name": "sys.stdin.fileno", "line_number": 60, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 60, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 70, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 97, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 106, "usage_type": "call"}, {"api_name": "rich.console.Console", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "38131710459", "text": "import numpy as np # Numerical library\nfrom std_msgs.msg import Float32MultiArray # Message type\nfrom ROSwrapper.nodecontrol import NodeControl # ROS2 controller\nfrom Problem3_2a import line1 # Line generator\nfrom iknode import IkNode # Derived RosNode\nfrom iknode2 import IkNode2 # Derived RosNode\nimport matplotlib.pyplot as plt # To plot data points\n\n\nclass twolink():\n \"\"\" This class is meant for fk and ik operations around a 2-link\n manipulator. This was updated from problem 10 to allow for \n starting theta values.\n \"\"\"\n\n def __init__(self, length1, length2, path, rate):\n \"\"\" Class initialization \"\"\"\n self.a1 = length1\n self.a2 = length2\n self.x = path[0]\n self.y = path[1]\n self.index = 0\n self.pts = zip(path[0], path[1])\n self.theta = (0.0, 0.0)\n self.plot_data_ik_x = []\n self.plot_data_ik_y = []\n self.plot_data_fk_x = []\n self.plot_data_fk_y = []\n self.showing_plot = False\n self.s_plot = plt.figure()\n\n # ROS init\n self.nc = NodeControl()\n self.nc.addnode(IkNode(name='node_xy',\n obj=self,\n pub_data_type=Float32MultiArray,\n pub_chan='/physData',\n pub_rate=5,\n pub_data=self.pts))\n self.nc.addnode(IkNode(name='node_theta_magic',\n obj=self,\n sub_data_type=Float32MultiArray,\n sub_chan='/physData',\n pub_data_type=Float32MultiArray,\n pub_chan='/thetaData',\n pub_data=self.theta))\n self.nc.addnode(IkNode2(name='node_dual_sub',\n obj=self,\n sub_data_type=Float32MultiArray,\n sub_chan=('/physData', '/thetaData')))\n\n self.nc.run()\n\n def getik(self, xy):\n \"\"\" Calculates the inverse kinematics to determine the theta1\n & theta2 values\n \"\"\"\n x = xy[0]\n y = xy[1]\n theta1 = 0.0\n theta2 = 0.0\n D = (x * x + y * y - self.a1 * self.a1 - self.a2 * self.a2)\\\n / (2 * self.a1 * self.a2)\n theta2 = np.arctan2(np.sqrt(1 - D * D), D)\n gamma = np.arctan2((self.a2 * np.sin(theta2)),\n (self.a1 + self.a2 * np.cos(theta2)))\n theta1 = np.arctan2(y, x) - gamma\n\n return theta1, theta2\n\n def getfk(self, thetas):\n \"\"\" Calculate the forward kinematics to determine the x & y\n values\n \"\"\"\n theta1 = thetas[0]\n theta2 = thetas[1]\n x = self.a2 * np.cos(theta1 + theta2) + \\\n self.a1 * np.cos(theta1)\n y = self.a2 * np.sin(theta1 + theta2) + \\\n self.a1 * np.sin(theta1)\n return x, y\n\n def append_plot_data_ik(self, data):\n if len(self.plot_data_ik_x) < 100:\n self.plot_data_ik_x.append(data[0])\n self.plot_data_ik_y.append(data[1])\n\n def append_plot_data_fk(self, data):\n if len(self.plot_data_fk_x) < 100:\n self.plot_data_fk_x.append(data[0])\n self.plot_data_fk_y.append(data[1])\n elif not self.showing_plot:\n plt.scatter(self.plot_data_ik_x,\n self.plot_data_ik_y,\n c='g',\n label='Workspace Points')\n plt.scatter(self.plot_data_fk_x,\n self.plot_data_fk_y,\n c='b',\n 
label='Computed Workspace Points')\n self.showing_plot = True\n plt.title('Verifying Workspace Points')\n plt.legend()\n plt.show()\n self.s_plot.savefig('Problem3_2c.pdf',\n format='pdf',\n dpi=1200)\n print('Press \\\"ctrl\\\" + \\\"c\\\" to exit')\n\n\ndef main():\n twolink(10, 10, path=line1(0, 10, 100), rate=5)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "k-macmillan/RoboticsHW", "sub_path": "HW2/Problem3_2c.py", "file_name": "Problem3_2c.py", "file_ext": "py", "file_size_in_byte": 4266, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "ROSwrapper.nodecontrol.NodeControl", "line_number": 33, "usage_type": "call"}, {"api_name": "iknode.IkNode", "line_number": 34, "usage_type": "call"}, {"api_name": "std_msgs.msg.Float32MultiArray", "line_number": 36, "usage_type": "name"}, {"api_name": "iknode.IkNode", "line_number": 40, "usage_type": "call"}, {"api_name": "std_msgs.msg.Float32MultiArray", "line_number": 42, "usage_type": "name"}, {"api_name": "std_msgs.msg.Float32MultiArray", "line_number": 44, "usage_type": "name"}, {"api_name": "iknode2.IkNode2", "line_number": 47, "usage_type": "call"}, {"api_name": "std_msgs.msg.Float32MultiArray", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.arctan2", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "Problem3_2a.line1", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "36291134341", "text": "#Produces MLD (average and maximum)\n#Rowan Brown\n#17 May 2023\n\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport os\n\ndef MLD(run,mask_choice,movie=False):\n\n #== creating directory if doesn't already exist ==#\n dir = run + '_MLD/'\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n #== masks ==#\n with xr.open_dataset('masks/ANHA4_mesh_mask.nc') as DS: #mask for land, bathymetry, etc. and horiz. 
grid dimensions\n tmask = DS.tmask[0,:,:,:].rename({'z': 'deptht', 'y': 'y_grid_T', 'x': 'x_grid_T'}) #DataArray with dims (t: 1, z: 50, y: 800, x: 544)\n e1t = DS.e1t[0,:,:].rename({'y': 'y_grid_T', 'x': 'x_grid_T'})\n e2t = DS.e2t[0,:,:].rename({'y': 'y_grid_T', 'x': 'x_grid_T'})\n if mask_choice == 'LS2k': #mask for 2000m depth interior area\n mask = xr.open_dataarray('masks/mask_LS_2k.nc').astype(int)\n elif mask_choice == 'LS': #mask for entire LS region\n mask = xr.open_dataarray('masks/mask_LS.nc').astype(int)\n elif mask_choice == 'LSCR': #mask for LS convection region\n mask = xr.open_dataset('masks/ARGOProfiles_mask.nc').tmask.astype(int).rename({'x':'x_grid_T','y':'y_grid_T'})\n else:\n print(\"Y'all didn't choose a mask\")\n quit()\n\n #== opening model output ==# \n gridT_txt = run + '_filepaths/' + run + '_gridT_filepaths.txt' #text file of paths to non-empty model output\n with open(gridT_txt) as f: lines = f.readlines() #open the text files\n filepaths_gridT = [line.strip() for line in lines] #get lists of the .nc output filepaths\n num_files = len(filepaths_gridT)\n preprocess_gridT = lambda ds: ds[['e3t','somxl010']] #specify veriables to retrieve \n DS = xr.open_mfdataset(filepaths_gridT,preprocess=preprocess_gridT) #open the files (and look at e3t and sohmld)\n\n #== applying masks ==#\n DS[['e1t','e2t']] = e1t,e2t #add T cell dimensions as variables\n DS = DS.where(tmask == 1) #apply tmask (ie masking bathy)\n if mask_choice == 'LSCR' or mask_choice == 'LS2k' or mask_choice == 'LS': #apply mask\n DS.coords['mask'] = mask\n DS = DS.where(DS.mask == 1, drop=True)\n DS = DS.drop_vars(['mask','time_centered'])\n\n #== selecting only one depth slice (since MLD is constant throughout the water column) ==#\n MLD = DS.somxl010.isel(deptht = 0)\n\n ##masking shelves\n ##NOTE: bathy is masked to avoid skewed understandings/results from the on-shelf values this section could be commented out if needed \n #bottom_slice = DS_d.vosaline.isel(deptht = -1).isel(time_counter = 0)\n #bottom_slice_bool = bottom_slice.notnull()\n #shelf_mask, temp = xr.broadcast(bottom_slice_bool, DS_d.vosaline.isel(time_counter=0))\n #DS_d = DS_d.where(shelf_mask)\n\n #== movie ==#\n if movie==True:\n dir2 = run + '_MLD/movie_NCs'\n if not os.path.exists(dir2):\n os.makedirs(dir2)\n for i in range(num_files):\n date = str(MLD.time_counter[i].to_numpy())[0:10]\n MLD.isel(time_counter=i).to_netcdf(dir2 + '/' + run + 'MLD_map_' + mask_choice + '_' + date + '.nc')\n return\n\n #== non-movie plots ==#\n if movie==False:\n \n #max MLD\n maxMLD_col = MLD.max(dim=['time_counter'], skipna=True) #max MLD in each column during the whole period (i.e., for mapping reasons)\n maxMLD_region = MLD.max(dim=['y_grid_T','x_grid_T'], skipna=True) #max MLD in the masked region for each time-step (i.e., for time-plotting reasons)\n\n #average MLD\n areas = DS.e1t*DS.e2t\n areas = areas.isel(deptht = 0)\n avgArea = areas.mean(dim=['y_grid_T','x_grid_T'])\n weights = areas/avgArea #CHECK THAT THIS IS RIGHT!!!!!!!!!!!!!!!!!!!!!!!!!!\n weights = weights.fillna(0)\n MLD = MLD.weighted(weights)\n avgMLD_col = MLD.mean(dim='time_counter',skipna=True) #average MLD in each column during the whole period\n avgMLD_region = MLD.mean(dim=['y_grid_T','x_grid_T'],skipna=True) #average MLD in the masked region for each time-step \n\n #saving\n maxMLD_col.to_netcdf(run + '_MLD/' + run + '_max_MLD_map_' + mask_choice + '.nc')\n maxMLD_region.to_netcdf(run + '_MLD/' + run + '_max_MLD_time_plot_' + mask_choice + '.nc')\n avgMLD_col.to_netcdf(run 
+ '_MLD/' + run + '_avg_MLD_map_' + mask_choice + '.nc')\n avgMLD_region.to_netcdf(run + '_MLD/' + run + '_avg_MLD_time_plot_' + mask_choice + '.nc')\n \n print('test')\n\nif __name__ == '__main__':\n MLD(run='EPM158',mask_choice='LS',movie=True)\n \n\n\n", "repo_name": "rjb641/NEMO-analysis-Graham", "sub_path": "MLD.py", "file_name": "MLD.py", "file_ext": "py", "file_size_in_byte": 4512, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "14", "api": [{"api_name": "os.path.exists", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 15, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 18, "usage_type": "call"}, {"api_name": "xarray.open_dataarray", "line_number": 23, "usage_type": "call"}, {"api_name": "xarray.open_dataarray", "line_number": 25, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 27, "usage_type": "call"}, {"api_name": "xarray.open_mfdataset", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "10746012122", "text": "import json\nimport sys\nimport os\nimport mne\n\nfrom scripts.data.constants import PIPE_NAME, INTERM, FINAL\n\n\ndef read_dict_to_json(dict_array, file, datatype, root):\n if dict_array is None:\n print(\"Invalid dictionary array\", file=sys.stderr)\n sys.exit(1)\n\n # get file metadata\n subj, ses, task, run = file.subject, file.session, file.task, file.run\n\n # Creates the directory if it does not exist\n dir_path = '{}/derivatives/pipeline_{}/{}/sub-{}/ses-{}/{}/'.format(\n root, PIPE_NAME, PIPE_NAME + FINAL, subj, ses, datatype)\n\n temp = \"\"\n for sec in dir_path.split(\"/\"):\n temp += sec + \"/\"\n # checks that the directory path doesn't already exist\n if not os.path.isdir(temp):\n os.chmod(temp, 0o644) # set temp to be writable by user\n os.mkdir(temp) # creates the directory path\n\n bids_format = 'output_preproc_sub-{}_ses-{}_task-{}_run-{}_{}.json'.format(\n subj, ses, task, run, datatype)\n\n with open(dir_path + bids_format, 'w') as file:\n str = json.dumps(dict_array, indent=4)\n file.seek(0)\n file.write(str)\n\n\ndef write_eeg_data(obj, func, file, datatype, final, root):\n \"\"\"Used to store the modified raw file after each processing step\n Parameters:\n -----------\n obj: mne.io.Raw | mne.Epochs\n EEG Object generated from pipeline\n func: String\n name of the function\n subject: String\n name of the subject\n session: String\n session number\n task: String\n name of the task\n datatype: String\n type of data(e.g EEG, MEG, etc )\n final: boolean\n boolean that determines if eeg object written is the final\n root: String\n directory from where the data was loaded\n \"\"\"\n # get file metadata\n subj, ses, task, run = file.subject, file.session, file.task, file.run\n\n # determine file extension based on object type\n obj_type = \"_epo.fif\" if isinstance(obj, mne.Epochs) else \".fif\"\n\n # determine directory child based on feature position\n child_dir = PIPE_NAME + FINAL if final else PIPE_NAME + INTERM\n\n # Un-standardize function names for close-to-BIDS standard\n func = PIPE_NAME if final else func.replace(\"_\", \"\")\n\n # puts together the path to be created\n dir_path = 
'{}/derivatives/pipeline_{}/{}/sub-{}/ses-{}/{}/'.format(\n root, PIPE_NAME, child_dir, subj, ses, datatype)\n\n dir_section = dir_path.split(\"/\")\n\n # creates the directory path\n temp = \"\"\n for sec in dir_section:\n temp += sec + \"/\"\n # checks that the directory path doesn't already exist\n if not os.path.isdir(temp):\n os.mkdir(temp) # creates the directory path\n\n # saves the raw file in the directory\n raw_savePath = dir_path + 'sub-{}_ses-{}_task-{}_run-{}_proc-{}_{}'.format(\n subj, ses, task, run, func, datatype) + obj_type\n\n obj.save(raw_savePath, overwrite=True)\n\n\ndef write_template_params(root, subjects=None, tasks=None,\n e_subj=None, e_task=None, e_run=None, to_file=None):\n \"\"\"Function to write out default user_params.json file\n Parameters:\n -----------\n root: string\n string of path to data root\n subjects: list | None\n a list of subjects for subject selection. None is default\n tasks: list | None\n a list of tasks for task selection. None is default\n e_subj, e_task, e_run: list(s) | None\n list to compose cartesian product of exceptions\n None if default\n to_file: string | None\n path to write user_params to. None if no writing required.\n\n Returns:\n ----------\n A dictionary of the default user_params\n \"\"\"\n user_params = {}\n\n # Create default values of exceptions\n exceptions = {\n \"subjects\": \"\" if e_subj is None else e_subj,\n \"tasks\": \"\" if e_task is None else e_task,\n \"runs\": \"\" if e_run is None else e_run\n }\n\n # set up default load_data params\n user_params[\"load_data\"] = {\n \"root\": root,\n \"subjects\": [\"*\"] if subjects is None else subjects,\n \"tasks\": [\"*\"] if tasks is None else tasks,\n \"exceptions\": exceptions,\n \"channel-type\": \"eeg\"\n }\n\n # set up default preprocess params\n user_params[\"preprocess\"] = {\n \"filter_data\": {\n \"l_freq\": 0.3,\n \"h_freq\": 40\n },\n \"identify_badchans_raw\": {\n },\n \"ica_raw\": {\n \"montage\": \"standard_1020\"\n },\n \"segment_data\": {\n \"tmin\": -0.2,\n \"tmax\": 0.5,\n \"baseline\": None,\n \"picks\": None,\n \"reject_tmin\": None,\n \"reject_tmax\": None,\n \"decim\": 1,\n \"verbose\": False,\n \"preload\": None\n },\n \"final_reject_epoch\": {\n },\n \"interpolate_data\": {\n \"mode\": \"accurate\",\n \"method\": None,\n \"reset_bads\": None\n },\n \"reref_raw\": {\n }\n }\n\n # set up postprocess params Pipeline has not yet been implemented!\n user_params[\"postprocess\"] = {}\n\n # set up write_data params\n user_params[\"output_data\"] = {\n \"root\": \"CMI\"\n }\n\n if to_file is not None:\n path_to_file = os.path.join(to_file, \"user_params.json\")\n with open(path_to_file, 'w') as file:\n str = json.dumps(user_params, indent=4)\n file.seek(0)\n file.write(str)\n\n return user_params\n", "repo_name": "NDCLab/pepper-pipeline", "sub_path": "scripts/data/write.py", "file_name": "write.py", "file_ext": "py", "file_size_in_byte": 5597, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "14", "api": [{"api_name": "sys.stderr", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 12, "usage_type": "call"}, {"api_name": "scripts.data.constants.PIPE_NAME", "line_number": 19, "usage_type": "argument"}, {"api_name": "scripts.data.constants.FINAL", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.chmod", 
"line_number": 26, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 27, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 33, "usage_type": "call"}, {"api_name": "mne.Epochs", "line_number": 63, "usage_type": "attribute"}, {"api_name": "scripts.data.constants.PIPE_NAME", "line_number": 66, "usage_type": "name"}, {"api_name": "scripts.data.constants.FINAL", "line_number": 66, "usage_type": "name"}, {"api_name": "scripts.data.constants.INTERM", "line_number": 66, "usage_type": "name"}, {"api_name": "scripts.data.constants.PIPE_NAME", "line_number": 69, "usage_type": "name"}, {"api_name": "scripts.data.constants.PIPE_NAME", "line_number": 73, "usage_type": "argument"}, {"api_name": "os.path.isdir", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "20612803099", "text": "import os\nimport abc\nimport typing\nimport datetime\nimport dataclasses\n\nfrom selenium import webdriver\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom controllers.core.recaptcha import ReCaptcha\nfrom controllers.functionalities.tools import selecionar_contas\n\ndef custom_to_float(_str: typing.AnyStr) -> float:\n _ = _str.replace('.', '')\n _ = _.replace(',', '.')\n\n try: \n return float(_)\n\n except ValueError:\n return float()\n\n@dataclasses.dataclass\nclass GrabOnPage(abc.ABC):\n driver: webdriver.Chrome = dataclasses.field(repr=False)\n\n def grab_text(self, by: typing.AnyStr, value: typing.AnyStr, default = None) -> WebElement:\n try:\n text = self.driver.find_element(by, value).text\n\n return text if text else default\n\n except NoSuchElementException:\n return default\n\n def has_elmt(self, by: typing.AnyStr, value: typing.AnyStr) -> bool:\n return bool(len(self.driver.find_elements(by, value)))\n\n def extract_float_from_table(self, elmt: WebElement) -> typing.Dict:\n _pre_dict = {}\n\n def _(m: WebElement) -> typing.Dict:\n td = m.find_elements(By.TAG_NAME, \"td\")\n _r = {}\n\n _t = td[4].text.replace('.', '')\n _t = _t.replace(',', '.')\n\n try:\n _r[td[0].text] = float(_t)\n except ValueError:\n _r[td[0].text] = 0.00\n\n return _r\n\n try:\n for i in elmt.find_elements(By.TAG_NAME, \"tr\"):\n \n _pre_dict = {**_pre_dict, **_(i)}\n except:\n pass\n\n return _pre_dict\n\n def extract_float_from_table_ipva(self, elmt: WebElement) -> typing.Dict:\n _pre_dict = {}\n\n def _(m: WebElement) -> typing.Dict:\n td = m.find_elements(By.TAG_NAME, \"td\")\n _r = {}\n\n try:\n\n _t = td[4].text.replace('.', '')\n _t = _t.replace(',', '.')\n \n except:\n _t = 0\n\n\n try:\n _r[td[0].text] = float(_t)\n except ValueError:\n _r[td[0].text] = 0.00\n\n return _r\n\n try:\n for i in elmt.find_elements(By.TAG_NAME, \"tr\"):\n _pre_dict = {**_pre_dict, **_(i)}\n except Exception as err:\n print(err, \"<- erro Ignorado\")\n\n return _pre_dict\n\n@dataclasses.dataclass\nclass Licenciamento(GrabOnPage):\n total_licenciamento: float = 
dataclasses.field(default_factory=float)\n\n def __post_init__(self):\n year_actual = datetime.date.today().year\n year_prev = datetime.date.today().year + 1\n\n _ = self.extract_float_from_table(self.driver.find_element(By.ID, \"conteudoPaginaPlaceHolder_tbTaxasDetalhe\"))\n keys = _.keys()\n\n if f'Licenciamento {year_prev}' in keys:\n for i in keys:\n if \"Licenciamento\" in i:\n if str(year_prev) in i:\n continue\n\n self.total_licenciamento += _[i]\n else:\n for i in keys:\n if \"Licenciamento\" in i:\n if str(year_actual) in i:\n continue\n\n self.total_licenciamento += _[i]\n\n@dataclasses.dataclass\nclass Ipva(GrabOnPage):\n\n ipva: float = dataclasses.field(default_factory=float)\n\n def __post_init__(self):\n default = 0.00\n year_actual = datetime.date.today().year\n\n _ = self.extract_float_from_table_ipva(self.driver.find_element(By.ID, \"conteudoPaginaPlaceHolder_tbIpvaPend\"))\n\n for y in range(2000, year_actual + 1):\n self.ipva += round(_.get(str(y), default), ndigits=2)\n\n@dataclasses.dataclass\nclass Multas(GrabOnPage):\n\n detran: float = dataclasses.field(default_factory=float)\n renainf: float = dataclasses.field(default_factory=float)\n outras_multas: float = dataclasses.field(default_factory=float)\n\n\n def __post_init__(self):\n default = 0.00\n _ = self.extract_float_from_table(self.driver.find_element(By.ID, \"conteudoPaginaPlaceHolder_tbMultaResumo\"))\n \n self.detran = _.get('DETRAN', default)\n self.renainf= _.get('RENAINF', default)\n \n municipal = _.get('MUNICIPAL', default)\n convenio = _.get('CONVENIO', default)\n der = _.get('D.E.R.', default)\n\n self.outras_multas = round(municipal + convenio + der, ndigits=2)\n\n@dataclasses.dataclass\nclass MultasDetalhadas(GrabOnPage):\n\n\n detran: float = dataclasses.field(default_factory=float)\n renainf: float = dataclasses.field(default_factory=float)\n outras_multas: float = dataclasses.field(default_factory=float)\n detalhamento: typing.List[typing.Dict] = dataclasses.field(default_factory=list)\n\n\n def __post_init__(self):\n default = 0.00\n _ = self.extract_float_from_table(self.driver.find_element(By.ID, \"conteudoPaginaPlaceHolder_tbMultaResumo\"))\n \n self.detran = _.get('DETRAN', default)\n self.renainf= _.get('RENAINF', default)\n \n municipal = _.get('MUNICIPAL', default)\n convenio = _.get('CONVENIO', default)\n der = _.get('D.E.R.', default)\n\n self.outras_multas = round(municipal + convenio + der, ndigits=2)\n\n if self.has_elmt(By.ID, \"conteudoPaginaPlaceHolder_btnDetalharMultas\"):\n\n # Go to the detailed fines section\n self.driver.find_element(By.ID, \"conteudoPaginaPlaceHolder_btnDetalharMultas\").click()\n\n # Capture the fines one by one\n self.detalhamento = self.grab_details_debt()\n\n # Return to the main data page\n self.driver.back()\n\n else: self.detalhamento = []\n\n \n\n def grab_details_debt(self) -> typing.List[typing.Dict]:\n _final = []\n\n target_child = self.driver.find_element(By.ID, \"conteudoPaginaPlaceHolder_trMultaCab\")\n target = target_child.find_element(By.XPATH, \"..\")\n\n rows = target.find_elements(By.TAG_NAME, \"tr\")\n del rows[-1]\n\n for i in range(0, len(rows), 5):\n\n second_tr = rows[i+2]\n third_tr = rows[i+3]\n fourth_tr = rows[i+4]\n\n value = custom_to_float(\n third_tr.find_elements(By.TAG_NAME, \"td\")[5].text.replace('R$','')\n )\n\n _final.append(\n {\n 'name': fourth_tr.find_elements(By.TAG_NAME, \"td\")[3].text,\n 'guia': third_tr.find_elements(By.TAG_NAME, \"td\")[3].text,\n 'ait': second_tr.find_elements(By.TAG_NAME, 
\"td\")[3].text,\n 'value': value\n }\n )\n \n return _final\n\n\n@dataclasses.dataclass\nclass SFPDividas(GrabOnPage):\n anti_captcha_key: str\n\n balance: float\n lote: typing.AnyStr\n\n is_valid: bool = False\n\n renavam: typing.AnyStr = None # OK\n placa: typing.AnyStr = None # Ok\n ipva: typing.AnyStr = None # OK\n divida_ativa: typing.AnyStr = None # OK\n multas_renainf: typing.AnyStr = None # OK\n multas_detran: typing.AnyStr = None # Ok\n outras_multas_sp: typing.AnyStr = None # OK\n dpvat: typing.AnyStr = None # OK\n taxa_licenciamento: typing.AnyStr = None # OK\n\n data: typing.List[dict] = dataclasses.field(default_factory=list)\n\n multas: Multas = None\n\n def __post_init__(self):\n\n self.is_valid = self.has_elmt(By.ID, \"tituloPaginaPlaceHolder_txtDataConsulta\")\n\n if self.is_valid:\n self.placa = self.grab_text(By.ID, \"conteudoPaginaPlaceHolder_txtPlaca\")\n self.renavam = self.grab_text(By.ID, \"conteudoPaginaPlaceHolder_txtRenavam\")\n self.dpvat = self.grab_text(By.XPATH, '/html/body/form/table[3]/tbody/tr/td[2]/table/tbody/tr/td[2]/div/table[22]/tbody/tr/td/table/tbody/tr[2]/td[5]', default=0.00)\n\n # = Ipva ===\n ipva = \\\n Ipva(self.driver)\n\n self.ipva = ipva.ipva\n\n # = Licenciamento ===\n licenciamento = \\\n Licenciamento(self.driver)\n\n self.taxa_licenciamento = licenciamento.total_licenciamento\n\n # = Multas ===\n self.multas = \\\n MultasDetalhadas(self.driver)\n \n self.multas_detran = self.multas.detran\n self.outras_multas_sp = self.multas.outras_multas\n self.multas_renainf = self.multas.renainf\n\n # = Divida ativa ===\n divida_ativa = \\\n DividaAtiva(self.driver, self.renavam, anti_captcha_key=self.anti_captcha_key)\n \n self.divida_ativa = divida_ativa.total\n\n self.process_debt()\n\n def process_debt(self) -> typing.NoReturn:\n order = [\n # { 'name' : 'outras_multas', 'value' : self.outras_multas_sp },\n { 'name' : 'ipva', 'value' : self.ipva },\n { 'name' : 'divida_ativa', 'value' : self.divida_ativa },\n\n { 'name' : 'multas_detran', 'value' : self.multas_detran },\n { 'name' : 'renainf', 'value' : self.multas_renainf },\n { 'name' : 'outras_multas', 'value' : self.outras_multas_sp },\n { 'name' : 'taxa_licenciamento', 'value' : self.taxa_licenciamento },\n ]\n\n\n for index, seq in enumerate(order):\n ait = \"RENAVAM\"\n guia = \"RENAVAM\"\n pay_type = seq['name'].upper()\n\n\n # se a divida estiver zerada pula o mesmo\n if seq['value'] <= 0:\n continue \n\n if (self.balance - seq['value']) < 0:\n \n # Paga tudo ou nada\n if seq['name'] in ('renainf', 'ipva', 'taxa_licenciamento'):\n break\n \n elif seq['name'] in ('divida_ativa'):\n \n # subtrai o saldo\n self.balance -= seq['value']\n pay_type = \"DIVIDA ATIVA\"\n\n # Paga com total ou parcial\n seq['value'] += self.balance\n\n # break\n\n elif(seq['name'] == \"multas_detran\" and seq['value'] > self.balance):\n\n detran = selecionar_contas(self.balance, \\\n list(filter(lambda x: x['name']==\"DETRAN\", self.multas.detalhamento))\n )\n \n for d in detran:\n self.balance -= d['value']\n\n # self.data.append([\n # self.lote,\n # self.placa,\n # self.renavam,\n # d['value'],\n # d['ait'],\n # d['guia'],\n # \"MULTA DETRAN\",\n # ])\n\n self.data.append({\n \"lote\": self.lote,\n \"placa\": self.placa,\n \"renavam\": self.renavam,\n \"valor\": d['value'],\n \"ait\": d['ait'],\n \"guia\": d['guia'],\n \"tipo_debito\": \"MULTA DETRAN\",\n })\n \n\n\n # break\n\n elif(seq['name'] == \"outras_multas\" and seq['value'] > self.balance):\n\n der = selecionar_contas(self.balance, \\\n 
list(filter(lambda x: x['name']==\"D.E.R.\", self.multas.detalhamento))\n )\n\n for d in der:\n self.balance -= d['value']\n\n # self.data.append([\n # self.lote,\n # self.placa,\n # self.renavam,\n # d['value'],\n # d['ait'],\n # d['guia'],\n # \"D.E.R.\",\n # ])\n\n \n self.data.append({\n \"lote\": self.lote,\n \"placa\": self.placa,\n \"renavam\": self.renavam,\n \"valor\": d['value'],\n \"ait\": d['ait'],\n \"guia\": d['guia'],\n \"tipo_debito\": \"D.E.R.\",\n })\n\n municipal = selecionar_contas(self.balance, \\\n list(filter(lambda x: x['name']==\"MUNICIPAL\", self.multas.detalhamento))\n )\n \n for m in municipal:\n self.balance -= m['value']\n\n # self.data.append([\n # self.lote,\n # self.placa,\n # self.renavam,\n # m['value'],\n # m['ait'],\n # m['guia'],\n # \"MUNICIPAL\",\n # ])\n\n self.data.append({\n \"lote\": self.lote,\n \"placa\": self.placa,\n \"renavam\": self.renavam,\n \"valor\": m['value'],\n \"ait\": m['ait'],\n \"guia\": m['guia'],\n \"tipo_debito\": \"MUNICIPAL\",\n })\n\n \n\n convenio = selecionar_contas(self.balance, \\\n list(filter(lambda x: x['name']==\"CONVENIO\", self.multas.detalhamento))\n )\n\n for c in convenio:\n self.balance -= c['value']\n\n # self.data.append([\n # self.lote,\n # self.placa,\n # self.renavam,\n # c['value'],\n # c['ait'],\n # c['guia'],\n # \"CONVENIO\",\n # ])\n\n self.data.append({\n \"lote\": self.lote,\n \"placa\": self.placa,\n \"renavam\": self.renavam,\n \"valor\": c['value'],\n \"ait\": c['ait'],\n \"guia\": c['guia'],\n \"tipo_debito\": \"CONVENIO\",\n })\n \n\n\n # break\n\n # caso nao tenha saldo pula a sequencia\n if not seq['name'] in ('divida_ativa'):\n # self.data.append([\n # self.lote,\n # self.placa,\n # self.renavam,\n # '-',\n # '-',\n # '-',\n # '-1',\n # ])\n\n self.data.append({\n \"lote\": self.lote,\n \"placa\": self.placa,\n \"renavam\": self.renavam,\n \"valor\": '0.00',\n \"ait\": '-',\n \"guia\": '-',\n \"tipo_debito\": '-1',\n })\n continue\n else:\n self.balance-=seq['value']\n\n # renomeando\n if seq['name'] == 'divida_ativa':\n ait = \"BOLETO\"\n guia = \"BOLETO\"\n\n elif seq['name'] == 'multas_detran':\n pay_type = \"MULTA DETRAN (PAGAR TUDO)\"\n\n elif seq['name'] == 'outras_multas':\n pay_type = \"OUTRAS MULTAS SP (PAGAR TUDO)\"\n \n elif seq['name'] == 'taxa_licenciamento':\n pay_type = \"TAXA DE LICENCIAMENTO\"\n\n # self.data.append([\n # self.lote,\n # self.placa,\n # self.renavam,\n # seq['value'],\n # ait,\n # guia,\n # pay_type,\n # ])\n\n self.data.append({\n \"lote\": self.lote,\n \"placa\": self.placa,\n \"renavam\": self.renavam,\n \"valor\": seq['value'],\n \"ait\": ait,\n \"guia\": guia,\n \"tipo_debito\": pay_type,\n })\n \n def __iter__(self) -> typing.List:\n \n return iter(self.data)\n\n@dataclasses.dataclass\nclass SFP(GrabOnPage):\n anti_captcha_key: str\n\n is_valid: bool = False\n\n renavam: typing.AnyStr = None # OK\n placa: typing.AnyStr = None # Ok\n ipva: typing.AnyStr = None # OK\n divida_ativa: typing.AnyStr = None # OK\n multas_renainf: typing.AnyStr = None # OK\n multas_detran: typing.AnyStr = None # Ok\n outras_multas_sp: typing.AnyStr = None # OK\n dpvat: typing.AnyStr = None # OK\n taxa_licenciamento: typing.AnyStr = None # OK\n\n\n def __post_init__(self):\n\n self.is_valid = self.has_elmt(By.ID, \"tituloPaginaPlaceHolder_txtDataConsulta\")\n\n if self.is_valid:\n self.placa = self.grab_text(By.ID, \"conteudoPaginaPlaceHolder_txtPlaca\")\n self.renavam = self.grab_text(By.ID, \"conteudoPaginaPlaceHolder_txtRenavam\")\n self.dpvat = self.grab_text(By.XPATH, 
'/html/body/form/table[3]/tbody/tr/td[2]/table/tbody/tr/td[2]/div/table[22]/tbody/tr/td/table/tbody/tr[2]/td[5]', default=0.00)\n\n # = Ipva ===\n ipva = \\\n Ipva(self.driver)\n\n self.ipva = ipva.ipva\n\n # = Multas ===\n multas = \\\n Multas(self.driver)\n \n self.multas_detran = multas.detran\n self.outras_multas_sp = multas.outras_multas\n self.multas_renainf = multas.renainf\n\n # = Licenciamento ===\n licenciamento = \\\n Licenciamento(self.driver)\n\n self.taxa_licenciamento = licenciamento.total_licenciamento\n\n # = Divida ativa ===\n divida_ativa = \\\n DividaAtiva(self.driver, self.renavam, anti_captcha_key=self.anti_captcha_key)\n \n self.divida_ativa = divida_ativa.total\n\n@dataclasses.dataclass\nclass DividaAtiva(GrabOnPage):\n renavam: typing.AnyStr\n anti_captcha_key: typing.AnyStr\n\n is_valid: bool = False\n total: float = dataclasses.field(default_factory=float)\n\n def __post_init__(self):\n\n ignore_divida_ativa = False\n key = self.anti_captcha_key\n\n if \"NADA CONSTA\" not in self.grab_text(By.ID, \"conteudoPaginaPlaceHolder_txtExisteDividaAtiva\"):\n\n if not ignore_divida_ativa:\n self.driver.get(\"https://www.dividaativa.pge.sp.gov.br/sc/pages/consultas/consultarDebito.jsf\")\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located((By.ID, \"consultaDebitoForm:consulta\"))\n )\n self.is_valid = self.has_elmt(By.ID, \"consultaDebitoForm:consulta\")\n \n else:\n self.total = \"COM\"\n\n if self.is_valid:\n\n \n \n # ------ Ugly code\n \n sl = WebDriverWait(self.driver, 20).until(\n EC.presence_of_element_located((By.ID, \"consultaDebitoForm:decLblTipoConsulta:opcoesPesquisa\"))\n )\n \n sl = Select(sl)\n\n # sl = Select(self.driver.find_element(By.ID,\"consultaDebitoForm:decLblTipoConsulta:opcoesPesquisa\"))\n sl.select_by_value('RENAVAM')\n \n element = WebDriverWait(self.driver, 20).until(\n EC.presence_of_element_located((By.ID, \"consultaDebitoForm:decTxtTipoConsulta:cdaEtiqueta\"))\n )\n \n element.send_keys(self.renavam)\n \n \n for i in range(1):\n if ReCaptcha(self.driver, key, recaptcha_data_site_key_ID=\"recaptcha\").solve(recaptcha_res=\"g-recaptcha-response\"):\n break\n \n \n\n self.driver.find_element(By.NAME, \"consultaDebitoForm:j_id104\").click()\n \n # ------- end of ugly code\n \n \n sl = WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located((By.ID, \"consultaDebitoForm:decLblTipoConsulta:opcoesPesquisa\"))\n )\n \n \n # sl = Select(sl)\n # sl.select_by_value('RENAVAM')\n \n element = WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located((By.ID, \"consultaDebitoForm:decTxtTipoConsulta:renavam\"))\n )\n\n element.send_keys(self.renavam)\n \n for i in range(1):\n if ReCaptcha(self.driver, key, recaptcha_data_site_key_ID=\"recaptcha\").solve(recaptcha_res=\"g-recaptcha-response\"):\n break\n \n self.driver.find_element(By.NAME, \"consultaDebitoForm:j_id104\").click()\n\n if self.has_elmt(By.ID, \"consultaDebitoForm:dataTable:j_id164\"):\n _ = self.grab_text(By.ID, \"consultaDebitoForm:dataTable:j_id164\")\n\n self.total = custom_to_float(_)\n", "repo_name": "codexfast/sysma", "sub_path": "sysma/controllers/core/grabonpage.py", "file_name": "grabonpage.py", "file_ext": "py", "file_size_in_byte": 21910, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "typing.AnyStr", "line_number": 18, "usage_type": "attribute"}, {"api_name": "abc.ABC", "line_number": 29, "usage_type": "attribute"}, {"api_name": 
"selenium.webdriver.Chrome", "line_number": 30, "usage_type": "attribute"}, {"api_name": "selenium.webdriver", "line_number": 30, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 30, "usage_type": "call"}, {"api_name": "typing.AnyStr", "line_number": 32, "usage_type": "attribute"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 38, "usage_type": "name"}, {"api_name": "selenium.webdriver.remote.webelement.WebElement", "line_number": 32, "usage_type": "name"}, {"api_name": "typing.AnyStr", "line_number": 41, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.remote.webelement.WebElement", "line_number": 44, "usage_type": "name"}, {"api_name": "selenium.webdriver.remote.webelement.WebElement", "line_number": 47, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 48, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 47, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 62, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 62, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 44, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.remote.webelement.WebElement", "line_number": 70, "usage_type": "name"}, {"api_name": "selenium.webdriver.remote.webelement.WebElement", "line_number": 73, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 74, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 74, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 73, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 94, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 94, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 70, "usage_type": "attribute"}, {"api_name": "dataclasses.dataclass", "line_number": 28, "usage_type": "attribute"}, {"api_name": "dataclasses.field", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 106, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 107, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 109, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 109, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 101, "usage_type": "attribute"}, {"api_name": "dataclasses.field", "line_number": 130, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 134, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 136, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 136, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 127, "usage_type": "attribute"}, {"api_name": "dataclasses.field", "line_number": 144, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 145, "usage_type": "call"}, {"api_name": "dataclasses.field", 
"line_number": 146, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 151, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 151, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 141, "usage_type": "attribute"}, {"api_name": "dataclasses.field", "line_number": 166, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 167, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 168, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 169, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 169, "usage_type": "attribute"}, {"api_name": "dataclasses.field", "line_number": 169, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 174, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 174, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 185, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 185, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 188, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 188, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 203, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 203, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 204, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 204, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 206, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 206, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 216, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 216, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 221, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 221, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 222, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 222, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 223, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 223, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 200, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 200, "usage_type": "attribute"}, {"api_name": "dataclasses.dataclass", "line_number": 162, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 236, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 240, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 241, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 242, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 243, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 244, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 245, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 246, 
"usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 247, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 248, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 250, "usage_type": "attribute"}, {"api_name": "dataclasses.field", "line_number": 250, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 256, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 256, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 259, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 259, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 260, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 260, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 261, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 261, "usage_type": "name"}, {"api_name": "controllers.functionalities.tools.selecionar_contas", "line_number": 333, "usage_type": "call"}, {"api_name": "controllers.functionalities.tools.selecionar_contas", "line_number": 366, "usage_type": "call"}, {"api_name": "controllers.functionalities.tools.selecionar_contas", "line_number": 394, "usage_type": "call"}, {"api_name": "controllers.functionalities.tools.selecionar_contas", "line_number": 423, "usage_type": "call"}, {"api_name": "typing.NoReturn", "line_number": 291, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 513, "usage_type": "attribute"}, {"api_name": "dataclasses.dataclass", "line_number": 231, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 523, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 524, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 525, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 526, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 527, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 528, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 529, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 530, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 531, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 536, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 536, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 539, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 539, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 540, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 540, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 541, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 541, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 517, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 571, "usage_type": "attribute"}, {"api_name": "typing.AnyStr", "line_number": 572, "usage_type": "attribute"}, {"api_name": "dataclasses.field", "line_number": 575, 
"usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 582, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 582, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.wait.WebDriverWait", "line_number": 586, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 587, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 587, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 587, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 587, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 589, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 589, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.wait.WebDriverWait", "line_number": 600, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 601, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 601, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 601, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 601, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 604, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.wait.WebDriverWait", "line_number": 609, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 610, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 610, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 610, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 610, "usage_type": "name"}, {"api_name": "controllers.core.recaptcha.ReCaptcha", "line_number": 617, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.NAME", "line_number": 622, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 622, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.wait.WebDriverWait", "line_number": 627, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 628, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 628, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 628, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 628, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.wait.WebDriverWait", "line_number": 635, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 636, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 636, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 636, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 636, "usage_type": "name"}, {"api_name": "controllers.core.recaptcha.ReCaptcha", "line_number": 642, "usage_type": 
"call"}, {"api_name": "selenium.webdriver.common.by.By.NAME", "line_number": 645, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 645, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 647, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 647, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 648, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 648, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 569, "usage_type": "attribute"}]} +{"seq_id": "16533934476", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport pandas as pd\nfrom nltk import sent_tokenize\nfrom transformers import RobertaTokenizer, RobertaModel\nfrom sentence_transformers import SentenceTransformer\nimport torch\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom tqdm import trange\nimport argparse\n\nparser = argparse.ArgumentParser(description='call tnlg')\nparser.add_argument(\"--src\", default=\"surprise\",\n choices=[\"surprise\", \"story_cloze\"])\nargs = parser.parse_args()\n\nsrc = args.src\n\nmode = \"sbert\" # \"roberta\"\n\nif src == \"story_cloze\":\n filename = \"cloze_test_val_winter2018_features_combined_tnlg_sentence.csv\"\n output_filename = \"story_cloze_sentence_cosine_similarity_updated_sbert.csv\"\nelse:\n filename = \"hippocorpus_paragraph_type_and_surprise_annotation_by_sentence_tnlg_sentence.csv\"\n output_filename = \"sentence_cosine_similarity_updated_sbert.csv\"\n\ndf = pd.read_csv(filename)\n\ntnlg = list(df['tnlg_generated_following_sentence'])\n\nsentences = list(df['sentence'])\nprior_sentences = list(df['prior_sentences_in_parapgraph'])\n\nuseful_indices = [i-1 for i in range(len(prior_sentences)) if isinstance(prior_sentences[i], str)]\n\ndef filter_indices(original, indices):\n return [original[i] for i in indices]\ndef get_first_sentence(text):\n if len(sent_tokenize(text)) > 0:\n return sent_tokenize(text)[0]\n return text\n\nprior_sentences = filter_indices(sentences, useful_indices)\nground_sentences = filter_indices(sentences, [i+1 for i in useful_indices])\n\ntnlg = filter_indices(tnlg, useful_indices)\ntnlg = [get_first_sentence(text) for text in tnlg]\n\nif mode == \"roberta\":\n tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n model = RobertaModel.from_pretrained('roberta-base')\n model.eval()\n\n def get_sentence_embedding(sentence):\n input_ids = torch.tensor(tokenizer.encode(sentence, add_special_tokens=True)).unsqueeze(0) # Batch size 1\n with torch.no_grad():\n outputs = model(input_ids)\n last_hidden_states = outputs[0].squeeze(0) #(batch_size, input_len, embedding_size) But I need single vector for each sentence\n sentence_vector = torch.mean(last_hidden_states, axis=0)\n return sentence_vector.numpy()\n\nelif mode == \"sbert\":\n model = SentenceTransformer('sentence-transformers/paraphrase-mpnet-base-v2')\n model.eval()\n \n def get_sentence_embedding(sentence):\n embeddings = model.encode(sentence)\n return embeddings\n\ndef get_cosine_similarity(sentence0, sentence1):\n a = get_sentence_embedding(sentence0)\n b = get_sentence_embedding(sentence1)\n c = cosine_similarity(a.reshape(1, -1), Y=b.reshape(1, -1))\n return c.item()\n\n\ncosine_similarities = [get_cosine_similarity(ground_sentences[i], tnlg[i]) for i in trange(len(tnlg))]\n\nnew_df = pd.DataFrame.from_dict({\n 'prior_sentence':prior_sentences,\n 
'ground_sentence':ground_sentences,\n 'tnlg_generated_next_sentence':tnlg,\n 'cosine_similarities_sentence':cosine_similarities\n })\n\nnew_df.to_csv(output_filename)\n", "repo_name": "Zhilin123/story_events", "sub_path": "feature_extraction/get_cosine_similarity_tnlg_and_ground_sentence.py", "file_name": "get_cosine_similarity_tnlg_and_ground_sentence.py", "file_ext": "py", "file_size_in_byte": 3127, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "14", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 40, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 41, "usage_type": "call"}, {"api_name": "transformers.RobertaTokenizer.from_pretrained", "line_number": 51, "usage_type": "call"}, {"api_name": "transformers.RobertaTokenizer", "line_number": 51, "usage_type": "name"}, {"api_name": "transformers.RobertaModel.from_pretrained", "line_number": 52, "usage_type": "call"}, {"api_name": "transformers.RobertaModel", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 60, "usage_type": "call"}, {"api_name": "sentence_transformers.SentenceTransformer", "line_number": 64, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 74, "usage_type": "call"}, {"api_name": "tqdm.trange", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 80, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 80, "usage_type": "attribute"}]} +{"seq_id": "9516391601", "text": "\"\"\"\n Description: Create DataLoader for train, val, test\n\"\"\"\nimport math\nimport os\nfrom datetime import datetime\nfrom operator import itemgetter\n\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom PIL import ImageFile\nfrom torch.utils.data import Dataset\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n\ndef resize_pad_images(img_h, img_w, images, keep_ratio_with_pad, vertical_lettering):\n # print([image.size for image in images])\n img_h_max = max(([image.size for image in images]), key=itemgetter(1))[1]\n img_w_max = max(([image.size for image in images]), key=itemgetter(0))[0]\n\n img_w_max = max(img_w_max, img_w)\n img_h_max = max(img_h_max, img_h)\n\n if keep_ratio_with_pad:\n input_channel = 3 if images[0].mode == 'RGB' else 1\n\n if vertical_lettering:\n # print(\"vertical_lettering\", vertical_lettering)\n transform = NormalizePAD((input_channel, img_h_max, img_w), vertical_lettering)\n\n resized_images = []\n for image in images:\n w, h = image.size\n ratio = h / float(w)\n if math.ceil(img_w * ratio) > img_h_max:\n resized_h = img_h_max\n else:\n resized_h = math.ceil(img_w * ratio)\n\n resized_image = image.resize((img_w, resized_h), Image.BICUBIC)\n resized_images.append(transform(resized_image))\n else:\n # same concept with 'Rosetta' paper\n\n resized_max_w = img_w_max\n transform = NormalizePAD((input_channel, img_h, resized_max_w), vertical_lettering)\n\n resized_images = []\n for image in images:\n w, h = image.size\n ratio = w / float(h)\n if math.ceil(img_h * ratio) > img_w_max:\n resized_w = img_w_max\n else:\n resized_w = math.ceil(img_h * 
ratio)\n\n resized_image = image.resize((resized_w, img_h), Image.BICUBIC)\n resized_images.append(transform(resized_image))\n\n image_tensors = torch.cat([t.unsqueeze(0) for t in resized_images], 0)\n\n else:\n transform = ResizeNormalize((img_w_max, img_h))\n image_tensors = [transform(image) for image in images]\n image_tensors = torch.cat([t.unsqueeze(0) for t in image_tensors], 0)\n\n return image_tensors\n\n\ndef log_error(exp_name, e, image_name=\"\"):\n print(e)\n if not os.path.isfile(f'./saved_models/{exp_name}/log_errors.txt'):\n log = open(f'./saved_models/{exp_name}/log_errors.txt', \"w\")\n else:\n log = open(f'./saved_models/{exp_name}/log_errors.txt', \"a\")\n log.write(f\"{datetime.now()}:{e}\\t{image_name}\\n\")\n log.close()\n\n\nclass AlignCollate(object):\n def __init__(self, img_h=64, img_w=1000, keep_ratio_with_pad=False, vertical_lettering=False):\n self.imgH = img_h\n self.imgW = img_w\n self.keep_ratio_with_pad = keep_ratio_with_pad\n self.vertical_lettering = vertical_lettering\n\n def __call__(self, batch):\n batch = filter(lambda x: x is not None, batch)\n images, labels = zip(*batch)\n\n image_tensors = resize_pad_images(self.imgH, self.imgW, images, self.keep_ratio_with_pad,\n self.vertical_lettering)\n\n return image_tensors, labels\n\n\nclass ListDataset(Dataset):\n def __init__(self, list_img, opt):\n self.opt = opt\n self.list_img = list_img\n self.nSamples = len(self.list_img)\n self.list_hard_img = []\n\n def __len__(self):\n return self.nSamples\n\n def __getitem__(self, index):\n if self.opt.rgb:\n img = Image.fromarray(np.uint8(self.list_img[index])).convert('RGB')\n else:\n img = Image.fromarray(np.uint8(self.list_img[index])).convert('L')\n return img, f\"{index}\"\n\n\nclass RawDataset(Dataset):\n def __init__(self, opt, image_folder, image_path_list):\n # store the options and image locations that __getitem__ relies on\n self.opt = opt\n self.image_folder = image_folder\n self.image_path_list = image_path_list\n\n def __len__(self):\n return len(self.image_path_list)\n\n def get_gt(self, image_name):\n return \"\"\n\n def __getitem__(self, index):\n dir_name = os.path.dirname(os.path.realpath(__file__))\n try:\n if self.opt.rgb:\n # for color image\n img = Image.open(f\"{dir_name}/{self.image_folder}/{self.image_path_list[index]}\").convert('RGB')\n else:\n img = Image.open(f\"{dir_name}/{self.image_folder}/{self.image_path_list[index]}\").convert('L')\n\n except IOError:\n print(f'Corrupted image for {index}')\n # make dummy image and dummy label for corrupted image.\n if self.opt.rgb:\n img = Image.new('RGB', (self.opt.imgW, self.opt.imgH))\n else:\n img = Image.new('L', (self.opt.imgW, self.opt.imgH))\n\n return img, self.image_path_list[index]\n\n\nclass ResizeNormalize(object):\n def __init__(self, size, interpolation=Image.BICUBIC):\n self.size = size\n self.interpolation = interpolation\n self.toTensor = transforms.ToTensor()\n\n def __call__(self, img):\n img = img.resize(self.size, self.interpolation)\n img = self.toTensor(img)\n img.sub_(0.5).div_(0.5)\n return img\n\n\nclass NormalizePAD(object):\n def __init__(self, max_size, vertical_lettering, pad_type='right'):\n self.toTensor = transforms.ToTensor()\n self.max_size = max_size\n self.max_width_half = math.floor(max_size[2] / 2)\n self.PAD_type = pad_type\n self.vertical_lettering = vertical_lettering\n\n def __call__(self, img):\n img = self.toTensor(img)\n img.sub_(0.5).div_(0.5)\n c, h, w = img.size()\n pad_img = torch.FloatTensor(*self.max_size).fill_(0)\n if self.vertical_lettering:\n pad_img[:, :h, :] = img # under pad\n if self.max_size[1] != h: # add border Pad\n pad_img[:, h:, :] = img[:, h - 1, :].unsqueeze(1).expand(c, self.max_size[1] - h, w)\n else:\n pad_img[:, :, 
:w] = img # right pad\n if self.max_size[2] != w: # add border Pad\n pad_img[:, :, w:] = img[:, :, w - 1].unsqueeze(2).expand(c, h, self.max_size[2] - w)\n\n return pad_img\n\n\ndef tensor2im(image_tensor, img_type=np.uint8):\n image_numpy = image_tensor.cpu().float().numpy()\n if image_numpy.shape[0] == 1:\n image_numpy = np.tile(image_numpy, (3, 1, 1))\n image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0\n return image_numpy.astype(img_type)\n\n\ndef save_image(image_numpy, image_path):\n image_pil = Image.fromarray(image_numpy)\n image_pil.save(image_path)\n", "repo_name": "tuanvxatgem/ocr-label", "sub_path": "crnn/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 6684, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "14", "api": [{"api_name": "PIL.ImageFile.LOAD_TRUNCATED_IMAGES", "line_number": 16, "usage_type": "attribute"}, {"api_name": "PIL.ImageFile", "line_number": 16, "usage_type": "name"}, {"api_name": "operator.itemgetter", "line_number": 21, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 22, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 38, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 41, "usage_type": "call"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 43, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 43, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 55, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 58, "usage_type": "call"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 60, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 100, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 112, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 112, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 112, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 115, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 115, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 119, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 130, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 134, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 134, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 136, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 136, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 142, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 142, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 144, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 
144, "usage_type": "name"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 150, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 150, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 153, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 153, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 164, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 164, "usage_type": "name"}, {"api_name": "math.floor", "line_number": 166, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 187, "usage_type": "attribute"}, {"api_name": "numpy.tile", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 191, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 196, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 196, "usage_type": "name"}]} +{"seq_id": "42068517382", "text": "from torchvision import transforms\nfrom torchvision.datasets import ImageFolder\nfrom torchvision.transforms.functional import Image\n\nTRAIN_TRANSFORMS = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n\nTEST_TRANSFORMS = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n\n\ndef load_image(file):\n image = Image.open(file)\n return TEST_TRANSFORMS(image)\n\n\ndef load_dataset(data_folder, split='train'):\n if split == 'train':\n transform = TRAIN_TRANSFORMS\n else:\n transform = TEST_TRANSFORMS\n\n return ImageFolder(data_folder, transform=transform)\n", "repo_name": "cumason123/bhacks2019", "sub_path": "classifier/data.py", "file_name": "data.py", "file_ext": "py", "file_size_in_byte": 854, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "12", "api": [{"api_name": "torchvision.transforms.Compose", "line_number": 5, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 5, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 6, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 6, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 7, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 7, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 8, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 8, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 9, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 9, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 12, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 12, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 13, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 13, "usage_type": "name"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 14, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 14, "usage_type": "name"}, 
{"api_name": "torchvision.transforms.ToTensor", "line_number": 15, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 15, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 16, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 16, "usage_type": "name"}, {"api_name": "torchvision.transforms.functional.Image.open", "line_number": 21, "usage_type": "call"}, {"api_name": "torchvision.transforms.functional.Image", "line_number": 21, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "31502794570", "text": "\n#Check Contract Details\n\nimport json\nimport requests\nimport pandas as pd\nfrom web3 import Web3\n\nTokenAddress = '0xCBd8aECe0c920eEF3F215ad4e7319052Bd8eaa74'\nBurnWallet = '0x000000000000000000000000000000000000dead'\nAirdropWallet = '0xc148b9e8da1fd3d87b5f870c61b8cbfc5f57e7fa'\nLPWallet = '0x3c3af41a28beacd86c2e46c5a54c71fb43ef0d12'\nRWallet = '0xd8f262fd1c4d0e48a8b11fceb2bdd7d2c23b763b'\nTWallet = '0xcbd8aece0c920eef3f215ad4e7319052bd8eaa74'\n\n#- Get ABI from BSCscan\nbsc = 'https://bsc-dataseed.binance.org/'\nweb3 = Web3(Web3.HTTPProvider(bsc))\n\nurl_eth = 'https://api.bscscan.com/api'\ncontract_address = web3.toChecksumAddress(TokenAddress)\nprint(\"contract_address\",contract_address)\nAPI_ENDPOINT = url_eth+'?module=contract&action=getabi&address='+str(contract_address)\n\nr = requests.get(url = API_ENDPOINT)\nresponse = r.json()\n#response_df = pd.DataFrame([response])\n#response_df.to_csv(\"response.csv\")\n#print (response)\n#response_df.head()\nabi=json.loads(response['result'])\n\n#- Call contract\ncontract = web3.eth.contract(address=contract_address, abi=abi)\ntotalSupply = contract.functions.totalSupply().call()\nprint(\"Total Supply:\",\"{:,}\".format(totalSupply))\nprint(\"Contract Name:\",contract.functions.name().call())\nprint(\"Contract Symbol:\",contract.functions.symbol().call())\n#Burnwallet Count\nBurnWalletaddress = web3.toChecksumAddress(BurnWallet)\nburnbalance=contract.functions.balanceOf(BurnWalletaddress).call()\nprint('Burn Wallet in ether:',web3.fromWei(burnbalance, 'ether'))\nprint('BurnWallet Balance:',\"{:,}\".format(burnbalance))\n#Airdrop wallet balance\nAirdropWalletaddress = web3.toChecksumAddress(AirdropWallet)\nAirdropWalletbalance=contract.functions.balanceOf(AirdropWalletaddress).call()\nprint('Airdrop wallet Balance:',\"{:,}\".format(AirdropWalletbalance))\n#LP wallet balance\nLPWalletaddress = web3.toChecksumAddress(LPWallet)\nLPWalletbalance=contract.functions.balanceOf(LPWalletaddress).call()\nprint('LP wallet Balance:',\"{:,}\".format(LPWalletbalance))\n#Rewards wallet balance\nRWalletaddress = web3.toChecksumAddress(RWallet)\nRWalletbalance=contract.functions.balanceOf(RWalletaddress).call()\nprint('Rewards wallet Balance:',\"{:,}\".format(RWalletbalance))\n#Token wallet balance\nTWalletaddress = web3.toChecksumAddress(TWallet)\nTWalletbalance=contract.functions.balanceOf(TWalletaddress).call()\nprint('Token wallet Balance:',\"{:,}\".format(TWalletbalance))\n\nCirculatingSupply = totalSupply - burnbalance - AirdropWalletbalance #- LPWalletbalance - RWalletbalance - TWalletbalance\nprint('Circulating Supply:',\"{:,}\".format(CirculatingSupply))\n\nrewardToken = contract.functions.rewardToken().call()\n\n#Contract Info\nprint(\"***********Contract Info*****\")\nprint (\"rewardToken\",rewardToken)\nprint 
(\"autoBuybackAccumulator\",contract.functions.autoBuybackAccumulator().call())\nprint (\"liquidityFee\",contract.functions.liquidityFee().call())\nprint (\"buybackFee\",contract.functions.buybackFee().call())\nprint (\"reflectionFee\",contract.functions.reflectionFee().call())\nprint (\"marketingFee\",contract.functions.marketingFee().call())\nprint (\"feeDenominator\",contract.functions.feeDenominator().call())\n\n#result = web3.eth.get_transaction('0xc5e6539ae242209fee009069d7563ce92727c5ba5f096e758434cc0a03b336fa')\nresult = web3.eth.get_transaction_count('0xB7ccCC09863Cc97801955B2f1760eeCB5D4c34fD')\nprint(\"Transaction_count for wallet\",result)\n#walletlist = web3.eth.accounts(contract_address)\n#print(walletlist)\n\n", "repo_name": "JayakrishnanGnair/BSCConnection", "sub_path": "ContractDetails.py", "file_name": "ContractDetails.py", "file_ext": "py", "file_size_in_byte": 3420, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "web3.Web3", "line_number": 18, "usage_type": "call"}, {"api_name": "web3.Web3.HTTPProvider", "line_number": 18, "usage_type": "call"}, {"api_name": "web3.toChecksumAddress", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 25, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 31, "usage_type": "call"}, {"api_name": "web3.eth.contract", "line_number": 34, "usage_type": "call"}, {"api_name": "web3.eth", "line_number": 34, "usage_type": "attribute"}, {"api_name": "web3.toChecksumAddress", "line_number": 40, "usage_type": "call"}, {"api_name": "web3.fromWei", "line_number": 42, "usage_type": "call"}, {"api_name": "web3.toChecksumAddress", "line_number": 45, "usage_type": "call"}, {"api_name": "web3.toChecksumAddress", "line_number": 49, "usage_type": "call"}, {"api_name": "web3.toChecksumAddress", "line_number": 53, "usage_type": "call"}, {"api_name": "web3.toChecksumAddress", "line_number": 57, "usage_type": "call"}, {"api_name": "web3.eth.get_transaction_count", "line_number": 77, "usage_type": "call"}, {"api_name": "web3.eth", "line_number": 77, "usage_type": "attribute"}]} +{"seq_id": "29591038462", "text": "import numpy as np\nimport pyqtgraph as pg\nfrom PySide6.QtWidgets import (\n QCheckBox,\n QFormLayout,\n QHBoxLayout,\n QLineEdit,\n QProgressBar,\n QPushButton,\n QSpinBox,\n QVBoxLayout,\n QWidget,\n)\n\nfrom .gui_widgets import ListView\n\n\nclass LFPWidget(QWidget):\n def __init__(self):\n super().__init__()\n self.initUi()\n\n def initUi(self):\n self.main_layout = QHBoxLayout()\n self.setLayout(self.main_layout)\n self.load_layout = QVBoxLayout()\n self.main_layout.addLayout(self.load_layout)\n self.load_widget = ListView()\n self.load_widget.setMaximumWidth(300)\n self.load_widget.clicked.connect(self.set_acq_spinbox)\n self.load_layout.addWidget(self.load_widget)\n self.exp_manager = {}\n self.load_widget.setData(self.exp_manager)\n\n self.del_sel_button = QPushButton(\"Delete selection\")\n self.del_sel_button.setMaximumWidth(300)\n self.load_layout.addWidget(self.del_sel_button)\n self.del_sel_button.clicked.connect(self.delSelection)\n\n self.pbar = QProgressBar(self)\n self.pbar.setMaximumWidth(300)\n self.load_layout.addWidget(self.pbar)\n\n self.plot_check_list = QFormLayout()\n self.main_layout.addLayout(self.plot_check_list)\n\n self.plot_spinbox = QSpinBox()\n self.plot_spinbox.valueChanged.connect(self.plot_acq)\n self.plot_check_list.addRow(\"Acquisition\", self.plot_spinbox)\n\n self.plot_bursts = 
QCheckBox()\n self.plot_check_list.addRow(\"Plot bursts\", self.plot_bursts)\n\n self.channel_map = QCheckBox()\n self.plot_check_list.addRow(\"Map channels\", self.channel_map)\n\n self.cmr = QCheckBox()\n self.plot_check_list.addRow(\"CMR\", self.cmr)\n\n self.cmr_probe = QLineEdit()\n self.plot_check_list.addRow(\"CMR probe\", self.cmr_probe)\n\n self.plot_layout = QVBoxLayout()\n self.main_layout.addLayout(self.plot_layout)\n self.main_plot = pg.PlotWidget(useOpenGl=True)\n self.plot_layout.addWidget(self.main_plot)\n self.ste_plot = pg.PlotWidget(useOpenGl=True)\n self.plot_layout.addWidget(self.ste_plot)\n self.access_plot = pg.PlotWidget(useOpenGl=True)\n self.access_plot.setMaximumHeight(200)\n self.access_plot.plotItem.setMouseEnabled(x=False)\n self.access_plot.plotItem.setMouseEnabled(y=False)\n self.plot_layout.addWidget(self.access_plot)\n\n self.region = pg.LinearRegionItem()\n self.region.sigRegionChanged.connect(self.update)\n self.main_plot.sigRangeChanged.connect(self.updateRegion)\n self.ste_plot.sigRangeChanged.connect(self.updateRegion)\n\n # Set the initial bounds of the region and its layer\n # position.\n self.region.setRegion([0, 30])\n self.region.setZValue(10)\n\n def update(self):\n \"\"\"\n This functions is used for the draggable region.\n See PyQtGraphs documentation for more information.\n \"\"\"\n self.region.setZValue(10)\n minX, maxX = self.region.getRegion()\n self.main_plot.setXRange(minX, maxX, padding=0)\n self.ste_plot.setXRange(minX, maxX, padding=0)\n\n def updateRegion(self, window, viewRange):\n \"\"\"\n This functions is used for the draggable region.\n See PyQtGraphs documentation for more information\n \"\"\"\n rgn = viewRange[0]\n self.region.setRegion(rgn)\n\n def updateProgress(self, value):\n if isinstance(value, (int, float)):\n self.pbar.setValue(value)\n elif isinstance(value, str):\n self.pbar.setFormat(value)\n\n def delSelection(self):\n self.load_widget.clearData()\n self.exp_manager = {}\n self.load_widget.setData(self.exp_manager)\n self.main_plot.clear()\n self.access_plot.clear()\n self.ste_plot.clear()\n\n def set_acq_spinbox(self):\n id = self.load_widget.getAcqID()\n channels = self.exp_manager[id].n_chans\n self.plot_spinbox.setRange(1, channels)\n\n def plot_acq(self, num):\n self.main_plot.clear()\n self.access_plot.clear()\n self.ste_plot.clear()\n id = self.load_widget.getAcqID()\n acq = self.exp_manager[id].acq(\n num - 1,\n \"lfp\",\n map_channel=self.channel_map.isChecked(),\n cmr_probe=self.cmr_probe.text(),\n cmr=self.cmr.isChecked(),\n )\n x = np.arange(acq.size) / 1000\n self.main_plot.plot(x=x, y=acq, name=\"main\")\n self.access_plot.plot(x=x, y=acq, name=\"access\")\n self.access_plot.addItem(self.region, ignoreBounds=True)\n fs = self.exp_manager[id].get_grp_attr(\"lfp\", \"sample_rate\")\n wlen = self.exp_manager[id].get_grp_attr(\"lfp_bursts\", \"wlen\")\n window = self.exp_manager[id].get_grp_attr(\"lfp_bursts\", \"window\")\n ste = self.exp_manager[id].get_short_time_energy(\n acq,\n wlen=wlen,\n window=window,\n fs=fs,\n )\n baseline = self.exp_manager[id].get_ste_baseline(ste)\n self.ste_plot.plot(x=x, y=ste)\n self.ste_plot.plot(x=x, y=baseline, pen=\"r\")\n if self.plot_bursts.isChecked():\n b = self.exp_manager[id].get_lfp_burst_indexes(\n num - 1, map_channel=self.channel_map.isChecked()\n )\n for i in range(b.shape[0]):\n self.main_plot.plot(\n x=x[int(b[i, 0]) : int(b[i, 1])],\n y=acq[int(b[i, 0]) : int(b[i, 1])],\n name=i,\n pen=\"r\",\n )\n self.exp_manager[id].close()\n", "repo_name": 
"LarsHenrikNelson/InVivoSuite", "sub_path": "invivosuite/gui/lfp_window.py", "file_name": "lfp_window.py", "file_ext": "py", "file_size_in_byte": 5659, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "14", "api": [{"api_name": "PySide6.QtWidgets.QWidget", "line_number": 18, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QHBoxLayout", "line_number": 24, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QVBoxLayout", "line_number": 26, "usage_type": "call"}, {"api_name": "gui_widgets.ListView", "line_number": 28, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 35, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QProgressBar", "line_number": 40, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QFormLayout", "line_number": 44, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QSpinBox", "line_number": 47, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QCheckBox", "line_number": 51, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QCheckBox", "line_number": 54, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QCheckBox", "line_number": 57, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QLineEdit", "line_number": 60, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QVBoxLayout", "line_number": 63, "usage_type": "call"}, {"api_name": "pyqtgraph.PlotWidget", "line_number": 65, "usage_type": "call"}, {"api_name": "pyqtgraph.PlotWidget", "line_number": 67, "usage_type": "call"}, {"api_name": "pyqtgraph.PlotWidget", "line_number": 69, "usage_type": "call"}, {"api_name": "pyqtgraph.LinearRegionItem", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 134, "usage_type": "call"}]} +{"seq_id": "24254473988", "text": "from collections import defaultdict, deque\nfrom operator import itemgetter\nfrom pprint import pprint\nimport sqlite3\nfrom flask import (\n Blueprint, flash, g, redirect, render_template, request, url_for, jsonify\n)\nimport sys\nimport json\nimport datetime\n\nfrom werkzeug.exceptions import abort\n\nfrom server_side.f_db import get_db\nfrom server_side.add_mover import add_new_mover\nfrom server_side.workout_writer import workout_writer\nfrom server_side.workout_recorder import workout_recorder\nfrom server_side.mover_info_dict import mover_info_dict\n# these give a \"double-ended queue\" of values that can be rotated by step!\nfrom server_side.db_ref_vals import syn_zone_deque\nfrom server_side.db_ref_vals import ses_zone_deque\nfrom server_side.db_ref_vals import spine_zone_deque\n\n\nbp = Blueprint('tissues', __name__)\n\n\n@bp.route('/')\ndef index(mover_id):\n # print(f\"Got this far (to {index})\", file=sys.stderr)\n db = get_db()\n bouts = db.execute(\n 'SELECT * FROM bout_log'\n ).fetchall()\n\n return 'Done', 201\n\n\n@bp.route('/coaches_list')\ndef get_coaches():\n # print(datetime.datetime.now())\n db = get_db()\n coach_rows = db.execute('SELECT * FROM coaches').fetchall()\n res = {}\n for m in coach_rows:\n res[m[\"id\"]] = {\"first_name\": m[\"first_name\"],\n \"last_name\": m[\"last_name\"]}\n\n return jsonify(res), 201\n\n\n@bp.route('/movers_list/')\ndef get_movers(coach_id):\n # print(datetime.datetime.now())\n db = get_db()\n mover_rows = db.execute(\n 'SELECT * FROM movers WHERE coach_id = (?)', (coach_id, )).fetchall()\n res = {}\n for m in mover_rows:\n res[m[\"id\"]] = [i for i in m]\n print(res)\n\n return jsonify(res), 201\n\n\n@bp.route('/add_mover', methods=('POST',))\ndef 
add_mover_to_db():\n\n db = get_db()\n req = request.get_json()[0]\n fname = req['firstName']\n lname = req['lastName']\n coach_id = req[\"coachID\"]\n bw = req['bodyweight']\n mover_id = add_new_mover(db, fname, lname, coach_id, bw)\n\n return f\"{fname} {lname} is added to the DB! ID: {mover_id}\", 201\n\n\n@bp.route('/write_workout', methods=('POST',))\ndef write_workout():\n db = get_db()\n req = request.get_json()\n with open('fake_workout_seeds.json', 'a') as json_seeds:\n json.dump(req, json_seeds)\n\n wkt_id = workout_writer(db, req)\n\n return f\"Workout ID: {wkt_id} is written!\", 201\n\n\n@bp.route('/record_bout', methods=('POST',))\ndef record_bout():\n db = get_db()\n req = request.get_json()\n # ACTIVATE some bout harvesting as needed\n with open('fake_record_seeds.json', 'a') as json_seeds:\n json.dump(req, json_seeds)\n # pprint(req)\n workout_recorder(db, req)\n\n return f\"Workout/results recorded!\", 201\n\n\n@bp.route('/record_workout', methods=('POST',))\ndef record_workout():\n db = get_db()\n req = request.get_json()\n with open('fake_rwkout_seeds.json', 'a') as json_seeds:\n json.dump(req, json_seeds)\n print(\"Recording workout as done\")\n pprint(req)\n\n date_done, workout_id, moverid = req\n\n curs = db.cursor()\n\n curs.execute(\n '''UPDATE workouts \n SET last_done = (?) \n WHERE id = (?) \n AND moverid = (?)''',\n (date_done, workout_id, moverid))\n db.commit()\n\n return f\"Workout/results recorded!\", 201\n\n\n@bp.route('/delete_workout', methods=('POST',))\ndef delete_workout():\n db = get_db()\n curs = db.cursor()\n req = request.get_json()\n mover_id, id_to_delete = req\n\n pprint(req)\n curs.execute('''DELETE FROM programmed_drills\n WHERE moverid = (?)\n AND workout_id = (?)\n ''', (mover_id, id_to_delete))\n db.commit()\n curs.execute('''DELETE FROM workouts\n WHERE workouts.moverid = (?)\n AND workouts.id = (?)\n ''', (mover_id, id_to_delete))\n db.commit()\n to_return = get_workouts(mover_id)\n return to_return\n\n\n@ bp.route('/workouts/<int:mover_id>')\ndef get_workouts(mover_id):\n if mover_id == 0:\n return json.dumps([]), 200\n db = get_db()\n curs = db.cursor()\n workout_rows = curs.execute('''SELECT\n workouts.id,\n date_init,\n last_done,\n workout_title,\n workouts.moverid,\n workouts.comments\n FROM workouts\n WHERE workouts.moverid = (?)\n ''', (mover_id,)).fetchall()\n\n wkouts = {}\n\n def schema_factory():\n return {\"circuit\": [], \"iterations\": 0}\n\n for row in workout_rows:\n wkout = {k: row[k] for k in row.keys()}\n wkout[\"inputs\"] = []\n wkout[\"schema\"] = defaultdict(schema_factory)\n wkouts[wkout['id']] = wkout\n\n # pprint(wkouts)\n\n for workout_id in wkouts.keys():\n curr_wkout = wkouts[workout_id]\n input_rows = curs.execute('''SELECT\n programmed_drills.id,\n programmed_drills.moverid,\n\n input_sequence,\n circuit_iterations,\n ref_zones_id_a,\n ref_zones_id_b,\n fixed_side_anchor_id,\n rotational_value,\n start_coord,\n end_coord,\n drill_name,\n rails,\n reps_array,\n multijoint,\n duration,\n passive_duration,\n rpe,\n external_load,\n comments,\n joints.side,\n ref_joints.rowid,\n ref_joints.joint_name,\n ref_joints.joint_type,\n ref_joints.joint_name\n FROM programmed_drills\n LEFT JOIN joints\n ON joints.id = joint_id\n LEFT JOIN ref_joints\n ON ref_joints.rowid = joints.ref_joints_id\n WHERE programmed_drills.moverid = (?) 
AND workout_id = (?)''', (mover_id, workout_id))\n for row in input_rows:\n input = {k: row[k] for k in row.keys() if k != \"moverid\"}\n\n input_sequence = input.pop(\"input_sequence\")\n circuit_iterations = input.pop(\"circuit_iterations\")\n\n input[\"ref_joint_name\"] = input.pop(\"joint_name\")\n input[\"ref_joint_type\"] = input.pop(\"joint_type\")\n input[\"ref_joint_id\"] = input.pop(\"rowid\")\n input[\"reps_array\"] = [int(i)\n for i in input[\"reps_array\"].split(',')]\n\n if input[\"rails\"] == \"1\":\n input[\"rails\"] = True\n elif input[\"rails\"] == \"0\":\n input[\"rails\"] = False\n\n curr_wkout[\"schema\"][input_sequence[0]][\"circuit\"].append(\n (input_sequence[1], input[\"id\"]))\n curr_wkout[\"schema\"][input_sequence[0]\n ][\"iterations\"] = circuit_iterations\n\n curr_wkout[\"inputs\"].append(input)\n\n for circ in curr_wkout[\"schema\"]:\n # sort the \"circuit\" array based on passed in ordering\n curr_wkout[\"schema\"][circ][\"circuit\"].sort(key=lambda inp: inp[0])\n # list-comprehension to only snag the id part of each tuple\n new_circuit = [inp[1]\n for inp in curr_wkout[\"schema\"][circ][\"circuit\"]]\n # replace old \"circuit\" with streamlined array!\n curr_wkout[\"schema\"][circ][\"circuit\"] = new_circuit\n curr_wkout[\"schema\"] = [i for i in curr_wkout[\"schema\"].values()]\n\n array_to_send = [value for (key, value) in wkouts.items()]\n return json.dumps(array_to_send), 200\n\n# inputs is not getting used for anything right now\n\n\n@ bp.route('/inputs')\ndef get_inputs():\n with open('/Users/williamhbelew/Hacking/MSWN/server_side/fakeInputData.json') as w:\n inputs = json.load(w)\n return jsonify(inputs), 200\n\n\n@ bp.route('/drill_ref')\ndef drill_ref():\n drills_to_send = {\n \"CARs\": {},\n \"capsule CAR\": {\"zones\": []},\n \"PRH\": {\"zones\": [], \"bias\": [], \"position\": [0], \"rotation\": []},\n \"Muscular Scan\": {\"zones\": [], \"bias\": [], \"position\": [0], \"rotation\": []},\n \"IC1\": {\"zones\": [], \"bias\": [-100, 100], \"rails\": [], \"position\": [100], \"rotation\": [-100, 100], \"passive duration\": []},\n \"IC2\": {\"zones\": [], \"bias\": [], \"rails\": [], \"position\": [100], \"rotation\": [], \"passive duration\": []},\n \"IC3\": {\"zones\": [], \"bias\": [], \"position\": [], \"rotation\": [], \"position B\": []}\n }\n\n return jsonify(drills_to_send), 200\n\n\n@ bp.route('/joint_ref')\ndef joint_ref():\n db = get_db()\n joint_ref = defaultdict(list)\n joint_ref_final = []\n\n # BELOW returns a list of sqlite3.Row objects (with index, and keys), but is NOT a real dict\n zone_ref_rows = db.execute('''SELECT\n ref_zones.id,\n ref_zones.side,\n ref_zones.zone_name,\n ref_joints.joint_type,\n ref_joints.joint_name,\n ref_joints.rowid\n FROM ref_zones\n INNER JOIN ref_joints\n ON ref_zones.ref_joints_id=ref_joints.rowid''').fetchall()\n for row in zone_ref_rows:\n zone = {k: row[k] for k in row.keys()}\n # only want one side and spine for reference\n if row[\"side\"] != \"mid\":\n joint_ref[row[\"side\"]+\" \" + row[\"joint_name\"]].append(zone)\n else:\n joint_ref[row[\"joint_name\"]].append(zone)\n # cleaning up nested object to send to react:\n for joint in joint_ref.keys():\n j = joint_ref[joint]\n joint_obj = {\"name\": joint, \"id\": j[0][\"rowid\"], \"zones\": j}\n joint_ref_final.append(joint_obj)\n\n return jsonify(joint_ref_final), 200\n\n\n# can't quite get my selects working right! 
so just hard-coding in adjacency table...\nprox_neighbor_joints = {'l-ankle': {'proximal_joint_id': 15,\n 'proximal_joint_selector': 'l-knee',\n 'ref_joint_id': 17},\n 'l-elbow': {'proximal_joint_id': 7,\n 'proximal_joint_selector': 'l-gh',\n 'ref_joint_id': 9},\n 'l-gh': {'proximal_joint_id': 5,\n 'proximal_joint_selector': 'l-scapular-thoracic',\n 'ref_joint_id': 7},\n 'l-hallux': {'proximal_joint_id': 17,\n 'proximal_joint_selector': 'l-ankle',\n 'ref_joint_id': 21},\n 'l-hip': {'proximal_joint_id': 3,\n 'proximal_joint_selector': 'lt',\n 'ref_joint_id': 13},\n 'l-knee': {'proximal_joint_id': 13,\n 'proximal_joint_selector': 'l-hip',\n 'ref_joint_id': 15},\n 'l-scapular-thoracic': {'proximal_joint_id': 2,\n 'proximal_joint_selector': 'tc',\n 'ref_joint_id': 5},\n 'l-toes': {'proximal_joint_id': 17,\n 'proximal_joint_selector': 'l-ankle',\n 'ref_joint_id': 20},\n 'l-wrist': {'proximal_joint_id': 9,\n 'proximal_joint_selector': 'l-elbow',\n 'ref_joint_id': 11},\n 'ao': {'proximal_joint_id': None, 'ref_joint_id': 1},\n 'lt': {'proximal_joint_id': 2,\n 'proximal_joint_selector': 'tc',\n 'ref_joint_id': 3},\n 'tc': {'proximal_joint_id': 1,\n 'proximal_joint_selector': 'ao',\n 'ref_joint_id': 2},\n 'r-ankle': {'proximal_joint_id': 14,\n 'proximal_joint_selector': 'r-knee',\n 'ref_joint_id': 16},\n 'r-elbow': {'proximal_joint_id': 6,\n 'proximal_joint_selector': 'r-gh',\n 'ref_joint_id': 8},\n 'r-gh': {'proximal_joint_id': 4,\n 'proximal_joint_selector': 'r-scapular-thoracic',\n 'ref_joint_id': 6},\n 'r-hallux': {'proximal_joint_id': 16,\n 'proximal_joint_selector': 'r-ankle',\n 'ref_joint_id': 19},\n 'r-hip': {'proximal_joint_id': 3,\n 'proximal_joint_selector': 'lt',\n 'ref_joint_id': 12},\n 'r-knee': {'proximal_joint_id': 12,\n 'proximal_joint_selector': 'r-hip',\n 'ref_joint_id': 14},\n 'r-scapular-thoracic': {'proximal_joint_id': 2,\n 'proximal_joint_selector': 'tc',\n 'ref_joint_id': 4},\n 'r-toes': {'proximal_joint_id': 16,\n 'proximal_joint_selector': 'r-ankle',\n 'ref_joint_id': 18},\n 'r-wrist': {'proximal_joint_id': 8,\n 'proximal_joint_selector': 'r-elbow',\n 'ref_joint_id': 10}}\n\n\ndef failed_proximal_joint_lookups():\n db = get_db()\n curs = db.cursor()\n # start by building an adjacency table (neighboring joints)\n bone_lookups = {}\n ref_joints_rows = curs.execute('''SELECT \n ref_bone_end.id,\n ref_bone_end.bone_name,\n ref_bone_end.side, \n ref_bone_end.end,\n ref_joints.rowid AS ref_joint_id,\n ref_joints.joint_name \n FROM ref_bone_end\n LEFT JOIN ref_joints\n ON ref_bone_end.id = ref_joints.bone_end_id_b''').fetchall()\n for row in ref_joints_rows:\n if row[\"end\"] == 1:\n print(\"this is a bone-end of '1'...\")\n pprint({k: row[k] for k in row.keys()})\n continue\n # these are bone-ends that are the bone_end_id_a for a joint!!!! thus, None in this SELECT\n # print(row)\n else:\n raw_ref = {k: row[k] for k in row.keys()}\n bone_lookups[(raw_ref[\"bone_name\"], raw_ref[\"side\"])\n ] = raw_ref\n\n print(\"here are the bone lookups...\")\n pprint(bone_lookups)\n\n neighbor_joints = {}\n\n for k, v in bone_lookups.items():\n res = curs.execute('''SELECT ref_joints.rowid AS ref_joint_id,\n ref_joints.joint_name,\n ref_joints.side, \n ref_joints.joint_type,\n ref_joints.bone_end_id_a,\n ref_joints.bone_end_id_b,\n ref_bone_end.bone_name AS proximal_bone, \n ref_bone_end.side AS bone_side\n FROM ref_joints\n LEFT JOIN ref_bone_end\n ON ref_bone_end.id = ref_joints.bone_end_id_a''').fetchall()\n for row in res:\n # NEED TO TROUBLE SHOOT THESE... 
proximal_joint_id is NONE for toes, scapular-thoracic, hallux\n # possibly related to my 'ends' filter used in the initial select (on bone-ends)\n raw_row = {\n k: row[k] for k in row.keys()}\n # pprint(raw_row)\n jname_str = f'{row[\"side\"]}-{row[\"joint_name\"]}'\n\n prox_joint_id = bone_lookups[(\n row[\"proximal_bone\"], row[\"bone_side\"])][\"ref_joint_id\"]\n neighbor_joints[jname_str.lower()] = {\n \"ref_joint_id\": row[\"ref_joint_id\"], \"proximal_joint_id\": prox_joint_id}\n\n pprint(neighbor_joints)\n\n\n@ bp.route('/status/<int:mover_id>')\ndef status(mover_id):\n # print(f\"Got this far (to {index})\", file=sys.stderr)\n db = get_db()\n curs = db.cursor()\n # start by building an adjacency table (neighboring joints)\n\n tissue_status = []\n # BELOW returns a list of sqlite3.Row objects (with index, and keys), but is NOT a real dict\n tissue_status_rows = db.execute(\n '''SELECT bout_log.joint_id,\n bout_log.id,\n bout_log.date, \n bout_log.duration, \n bout_log.rpe, \n bout_log.tissue_type,\n bout_log.rotational_value,\n joints.ref_joints_id,\n ref_joints.joint_name,\n ref_joints.side\n FROM bout_log \n LEFT JOIN joints\n ON bout_log.joint_id = joints.id\n LEFT JOIN ref_joints\n ON joints.ref_joints_id = ref_joints.rowid\n WHERE bout_log.moverid = (?) \n ''', (mover_id,)\n ).fetchall()\n\n for row in tissue_status_rows:\n row_goods = {k: row[k] for k in row.keys()}\n if row_goods[\"side\"] != 'mid':\n svg_zone_selector = f'{row_goods[\"side\"].lower()}-{row_goods.pop(\"joint_name\").lower()}'\n else:\n svg_zone_selector = f'{row_goods.pop(\"joint_name\").lower()}'\n if svg_zone_selector == \"ao\":\n proximal_joint_selector = \"occiput\"\n else:\n proximal_joint_selector = prox_neighbor_joints[svg_zone_selector]['proximal_joint_selector']\n row_goods['svg_zone_selector'] = svg_zone_selector\n row_goods['rotational_value'] = int(\n row_goods['rotational_value']) if row_goods['rotational_value'] is not None else \"\"\n row_goods['joint_name_selector'] = f'jt-{svg_zone_selector}'\n row_goods['proximal_joint_selector'] = f'jt-{proximal_joint_selector}'\n\n tissue_status.append(row_goods)\n return jsonify({\"status\": tissue_status}), 200\n\n\n@ bp.route('/training_log/<int:mover_id>')\ndef training_log(mover_id):\n\n db = get_db()\n training_log = []\n # BELOW returns a list of sqlite3.Row objects (with index, and keys),\n # but is NOT a real dict\n\n # START HERE ... 
I now need this\n # to select from bout_log ONLY\n # unique workouts\n # (left join on programmed_drills)\n # so that I can list them in the timeline\n training_log_rows = db.execute(\n '''SELECT DISTINCT workouts.workout_title, \n workouts.id,\n bout_log.date \n FROM bout_log \n LEFT JOIN programmed_drills \n ON bout_log.programmed_drills_id = programmed_drills.id\n LEFT JOIN workouts \n ON programmed_drills.workout_id = workouts.id \n WHERE bout_log.moverid = (?)''', (\n mover_id,)\n ).fetchall()\n # this converts all rows returned into a dictionary that is added to the training_log list\n for row in training_log_rows:\n training_log.append({k: row[k] for k in row.keys()})\n # sort on way to react into DESCENDING order from most recent (by ['date'])\n training_log_final = sorted(\n training_log, key=itemgetter('date'), reverse=True)\n return jsonify({\"training_log\": training_log_final}), 200\n\n\n@ bp.route('/add_bout/<int:moverid>', methods=('POST',))\ndef add_bout(moverid):\n req = request.get_json()\n print(req, file=sys.stderr)\n return \"Oh ya!\", 201\n\n db = get_db()\n curs = db.cursor()\n bouts_to_input = []\n print(req)\n\n for b in req:\n bundle = []\n field_names = []\n qmarks = []\n for key in list(b.keys()):\n # exec() function runs a string as python, so I can use an f-string to dynamically create a variable from a string\n exec(f\"{key}_field = b[key]\")\n exec(f\"print({key}_field)\")\n # then, I add the field_name (string) to an array -- THIS MUST MATCH THE DB!\n field_names.append(f\"{key}\")\n # then, I add the value of that variable to the bundle\n exec(f\"bundle.append({key}_field)\")\n # and finally a question mark for each field\n qmarks += '?'\n # once I've gone through ALL keys included in the request bout, I will have a tuple that has...\n bundle.insert(1, moverid)\n field_names.insert(1, \"moverid\")\n qmarks.insert(1, \"?\")\n bouts_to_input.append([field_names, qmarks, bundle])\n\n for bout in bouts_to_input:\n field_names = \",\".join(bout[0])\n qmarks = \",\".join(bout[1])\n curs.execute(\n f'INSERT INTO bout_log ({field_names}) VALUES ({qmarks})', (bout[2]))\n db.commit()\n\n return f\"{len(bouts_to_input)} bout(s) logged!\", 201\n\n\nif __name__ == \"__main__\":\n \"\"\"db = sqlite3.connect('/Users/williamhbelew/Hacking/MSWN/instance/mswnapp.sqlite')\n mover_1_id = 1\n mover_2_id = 2\n mover_1_info = mover_info_dict(db, mover_1_id)\n mover_2_info = mover_info_dict(db, mover_2_id)\n print(\"hello\")\n\n get_movers()\n \"\"\"\n\n\n\"\"\"\n@bp.route('/delete_bouts')\n\n@bp.route('/read_bouts') \"\"\"\n", "repo_name": "welew204/MSWN", "sub_path": "server_side/crud_bp.py", "file_name": "crud_bp.py", "file_ext": "py", "file_size_in_byte": 22551, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "12", "api": [{"api_name": "flask.Blueprint", "line_number": 25, "usage_type": "call"}, {"api_name": "server_side.f_db.get_db", "line_number": 31, "usage_type": "call"}, {"api_name": "server_side.f_db.get_db", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 49, "usage_type": "call"}, {"api_name": "server_side.f_db.get_db", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 63, "usage_type": "call"}, {"api_name": "server_side.f_db.get_db", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 70, "usage_type": "name"}, {"api_name": 
"server_side.add_mover.add_new_mover", "line_number": 75, "usage_type": "call"}, {"api_name": "server_side.f_db.get_db", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 83, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 85, "usage_type": "call"}, {"api_name": "server_side.workout_writer.workout_writer", "line_number": 87, "usage_type": "call"}, {"api_name": "server_side.f_db.get_db", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 95, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 98, "usage_type": "call"}, {"api_name": "server_side.workout_recorder.workout_recorder", "line_number": 100, "usage_type": "call"}, {"api_name": "server_side.f_db.get_db", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 108, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 108, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 110, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 112, "usage_type": "call"}, {"api_name": "server_side.f_db.get_db", "line_number": 131, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 133, "usage_type": "name"}, {"api_name": "pprint.pprint", "line_number": 136, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 154, "usage_type": "call"}, {"api_name": "server_side.f_db.get_db", "line_number": 155, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 176, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 250, "usage_type": "call"}, {"api_name": "json.load", "line_number": 258, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 259, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 274, "usage_type": "call"}, {"api_name": "server_side.f_db.get_db", "line_number": 279, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 280, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 307, "usage_type": "call"}, {"api_name": "server_side.f_db.get_db", "line_number": 375, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 392, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 402, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 431, "usage_type": "call"}, {"api_name": "server_side.f_db.get_db", "line_number": 437, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 480, "usage_type": "call"}, {"api_name": "server_side.f_db.get_db", "line_number": 486, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 513, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 514, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 519, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 519, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 520, "usage_type": "attribute"}, {"api_name": "server_side.f_db.get_db", "line_number": 523, "usage_type": "call"}]} +{"seq_id": "21938994443", "text": "import hashlib\nimport itertools\nimport json\nimport logging\nimport pickle\nimport urllib2\nimport time\nimport os\nfrom collections import namedtuple\n\nfrom 
flask_restful import marshal\nimport httplib  # needed by HTTPSClientAuthHandler.getConnection below\n\nfrom lewas.exceptions import ConfigError\n\nlogger = logging.getLogger(__name__)\n\ndef mkey(m):\n return (m.station, m.instrument)\n\nauth_attrs = ['password', 'sslcrt', 'sslkey']\n\nAuth = namedtuple('Auth', ' '.join(auth_attrs))\nclass RESTStore():\n def __init__(self, **kwargs):\n self.host = kwargs.get('host') \n self.endpoint = kwargs.get('endpoint')\n self.fields = kwargs.get('fields', None)\n self.saveOnFail = kwargs.get('saveOnFail', True)\n self.storage = kwargs.get('storage', None) \n self.auth = Auth( *[ kwargs.get(label, None) for label in auth_attrs ] )\n #try\n if self.saveOnFail:\n try:\n fn = save_request({ 'test': 'to check for write permission' }, self.storage)\n os.remove(fn)\n except AttributeError:\n raise ConfigError('saveOnFail is set but could not find storage information')\n except IOError:\n raise ConfigError('{}: could not write to storage directory, does it even exist?'.format(self.storage))\n\n def post(self, measurements, **kwargs):\n for g,k in itertools.groupby(sorted(measurements, key=mkey), mkey):\n site_id, instrument_name = g\n url = self.host \\\n + urllib2.quote(self.endpoint.format(site_id=site_id, instrument_name=instrument_name))\n \n logger.log(logging.DEBUG, 'posting to {}'.format(url))\n # marshal measurements into request data\n #for m in k:\n # logger.log(logging.DEBUG, 'm: {}'.format(m))\n d = [ marshal(m, self.fields) for m in k ]\n try:\n request = urllib2.Request(url, json.dumps(d),\n {'Content-Type': 'application/json'})\n logger.log(logging.INFO, 'request of {} measurements\\n'.format(len(d)))\n except TypeError as e:\n print(e)\n logger.log(logging.ERROR, 'message: {}\\nobject: {}'.format(e,d))\n else:\n submitRequest(request, self.auth, storage=self.storage, **kwargs)\n\ndef submitRequest(request, auth, saveOnFail=True, **kwargs):\n #config is ONLY used for authentication\n storage = kwargs.get('storage') if saveOnFail else None\n if auth.password:\n d = json.loads(request.data)\n for m in d:\n m['magicsecret'] = auth.password\n request.data = json.dumps(d)\n\n response = None\n if auth.sslkey and auth.sslcrt:\n opener = urllib2.build_opener(HTTPSClientAuthHandler(\n auth.sslkey, auth.sslcrt)).open\n else:\n opener = urllib2.urlopen\n success = False\n try:\n response = opener(request)\n success = True\n except urllib2.HTTPError as e:\n # import traceback; traceback.print_exc()\n logger.log(logging.ERROR, \"{}\\n\\trequest: {}\".format(e, request.data))\n logger.log(logging.ERROR, \"\\tresponse: {}\".format(e.read()));\n except urllib2.URLError as e:\n logger.log(logging.ERROR, \"{}\\n\\turl: {}\\n\\trequest: {}\".format(e, request.get_full_url(), request.data))\n else:\n logger.log(logging.INFO, \"{}\\t{}\\n\\trequest: {}\".format(\n response.getcode(), request.get_full_url(), request.data))\n\t#TODO: would it be more clear to have the\n\t#if saveOnFail # here?\n finally:\n if response is not None:\n logger.log(logging.INFO, \"\\tresponse: {}\".format(response.read()))\n if saveOnFail and not success:\n save_request(request, storage)\n return success\n\ndef save_request(request, storage):\n p = pickle.dumps(request)\n h = hashlib.sha256()\n h.update(p)\n fn = str(int(time.mktime(time.gmtime())))+h.hexdigest() #todo: include instrument\n fn = os.path.join(storage, h.hexdigest())\n with open(fn, 'w') as f:\n f.write(p)\n return fn\n\nclass HTTPSClientAuthHandler(urllib2.HTTPSHandler):\n def __init__(self, key, cert):\n urllib2.HTTPSHandler.__init__(self)\n self.key = key\n self.cert = cert\n\n def 
https_open(self, req):\n # Rather than pass in a reference to a connection class, we pass in\n # a reference to a function which, for all intents and purposes,\n # will behave as a constructor\n return self.do_open(self.getConnection, req)\n\n def getConnection(self, host, timeout=300):\n return httplib.HTTPSConnection(host, key_file=self.key, cert_file=self.cert, strict=True)\n", "repo_name": "lewas-lab/lewas", "sub_path": "lewas/stores/reststore.py", "file_name": "reststore.py", "file_ext": "py", "file_size_in_byte": 4644, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "12", "api": [{"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 22, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 35, "usage_type": "call"}, {"api_name": "lewas.exceptions.ConfigError", "line_number": 37, "usage_type": "call"}, {"api_name": "lewas.exceptions.ConfigError", "line_number": 39, "usage_type": "call"}, {"api_name": "itertools.groupby", "line_number": 42, "usage_type": "call"}, {"api_name": "urllib2.quote", "line_number": 45, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask_restful.marshal", "line_number": 51, "usage_type": "call"}, {"api_name": "urllib2.Request", "line_number": 53, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 53, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 55, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 58, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 66, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 69, "usage_type": "call"}, {"api_name": "urllib2.build_opener", "line_number": 73, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 76, "usage_type": "attribute"}, {"api_name": "urllib2.HTTPError", "line_number": 81, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 83, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 84, "usage_type": "attribute"}, {"api_name": "urllib2.URLError", "line_number": 85, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 86, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 88, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pickle.dumps", "line_number": 100, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 101, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 103, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "urllib2.HTTPSHandler", "line_number": 109, "usage_type": "attribute"}, {"api_name": "urllib2.HTTPSHandler.__init__", "line_number": 111, "usage_type": "call"}, {"api_name": "urllib2.HTTPSHandler", "line_number": 111, "usage_type": "attribute"}]} +{"seq_id": "30585294746", "text": "import requests,json\nconfig = json.loads(open('config.json', 'r').read())\n\nUSER_AGENT = 'YetAnotherContentThief by /u/%s' % config['reddit_username']\n\ndef get_posts(subreddit='shitposting', time='week', limit=10, after=None):\n url = 'https://www.reddit.com/r/%s/top.json?t=%s&limit=%s&after=%s' % (subreddit, time, limit, after)\n 
r = requests.get(url, headers={'User-Agent': USER_AGENT})\n if r.status_code == 200:\n # filter out stickied posts and nsfw posts\n data = r.json()\n data['data']['children'] = [post for post in data['data']['children'] if not post['data']['stickied'] and not post['data']['over_18']]\n return data\n else:\n return None\n\ndef download_images(posts, skip=0):\n for post in posts['data']['children'][skip:]:\n url = post['data']['url']\n filename = url.split('/')[-1]\n # Ensure that this is an image\n if filename.split('.')[-1] not in ['jpg', 'jpeg', 'png', 'gif', 'gifv', 'webp']:\n continue\n\n r = requests.get(url, headers={'User-Agent': USER_AGENT})\n if r.status_code == 200:\n with open('to_upload/%s' % filename, 'wb') as f:\n for chunk in r:\n f.write(chunk)\n", "repo_name": "edde746/tiktok-uploader", "sub_path": "api/reddit.py", "file_name": "reddit.py", "file_ext": "py", "file_size_in_byte": 1225, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 26, "dataset": "github-code", "pt": "12", "api": [{"api_name": "json.loads", "line_number": 2, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "36481390076", "text": "import unittest\n\nimport ai_flow as af\nfrom ai_flow.api.ai_flow_context import ENGINE_NAME\nfrom ai_flow.common import path_util, json_utils\nfrom ai_flow.graph.graph import _default_ai_graph\nfrom ai_flow.test import test_util\nfrom ai_flow.workflow.workflow_config import WorkFlowConfig\n\n\nclass ContextTests(unittest.TestCase):\n\n def test_context(self):\n global_config = af.BaseJobConfig(platform='a', engine='b', properties={'c': 'c'})\n job_config = af.BaseJobConfig(platform='aa', engine='bb', properties={'cc': 'cc'})\n with af.global_config(global_config):\n with af.config(job_config):\n af.user_define_operation(executor=None)\n node_list = list(_default_ai_graph.nodes.values())\n self.assertEqual('bb', node_list[0].properties[ENGINE_NAME])\n self.assertEqual('cc', node_list[0].config.properties[\"cc\"])\n self.assertEqual('c', node_list[0].config.properties[\"c\"])\n self.assertEqual('bb', node_list[0].config.engine)\n self.assertEqual('aa', node_list[0].config.platform)\n\n def test_context_with_file(self):\n config_file = path_util.get_file_dir(__file__) + \"/workflow_config.json\"\n\n def generate_workflow_config():\n workflow_config = WorkFlowConfig()\n workflow_config.add_job_config(config_key=\"global_config_key\",\n job_config=af.BaseJobConfig(platform=\"local\", engine=\"python\",\n properties={\"common_key\": \"common_value\"}))\n workflow_config.add_job_config(config_key=\"test_job\",\n job_config=af.BaseJobConfig(platform=None, engine=None,\n properties={\"job_key\": \"job_value\"}))\n workflow_config.add_job_config(config_key=\"test_job_1\",\n job_config=af.BaseJobConfig(platform='kubernetes', engine='flink',\n properties={\"job_key_1\": \"job_value_1\"}))\n with open(config_file, 'w') as f:\n f.write(json_utils.dumps(workflow_config))\n\n generate_workflow_config()\n\n with af.global_config_file(config_path=config_file):\n with af.config(config=\"test_job\") as cc:\n cc.properties['aa'] = 'aa'\n af.user_define_operation(executor=None)\n node_list = list(_default_ai_graph.nodes.values())\n self.assertEqual('python', node_list[len(node_list) - 1].properties[ENGINE_NAME])\n self.assertEqual('common_value', node_list[len(node_list) - 1].config.properties[\"common_key\"])\n self.assertEqual('job_value', 
node_list[len(node_list) - 1].config.properties[\"job_key\"])\n self.assertEqual('aa', node_list[len(node_list) - 1].config.properties[\"aa\"])\n\n self.assertEqual('python', node_list[len(node_list) - 1].config.engine)\n self.assertEqual('local', node_list[len(node_list) - 1].config.platform)\n with af.config(config=\"test_job_1\"):\n af.user_define_operation(executor=None)\n node_list = list(_default_ai_graph.nodes.values())\n self.assertEqual('flink', node_list[len(node_list) - 1].properties[ENGINE_NAME])\n self.assertEqual('common_value', node_list[len(node_list) - 1].config.properties[\"common_key\"])\n self.assertEqual('job_value_1', node_list[len(node_list) - 1].config.properties[\"job_key_1\"])\n self.assertEqual('flink', node_list[len(node_list) - 1].config.engine)\n self.assertEqual('kubernetes', node_list[len(node_list) - 1].config.platform)\n\n\nif __name__ == '__main__':\n test_util.set_project_config(__file__)\n unittest.main()\n", "repo_name": "jxxmskulong/flink-ai-extended", "sub_path": "flink-ai-flow/ai_flow/test/api/test_af_context.py", "file_name": "test_af_context.py", "file_ext": "py", "file_size_in_byte": 3852, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "12", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "ai_flow.BaseJobConfig", "line_number": 14, "usage_type": "call"}, {"api_name": "ai_flow.BaseJobConfig", "line_number": 15, "usage_type": "call"}, {"api_name": "ai_flow.global_config", "line_number": 16, "usage_type": "call"}, {"api_name": "ai_flow.config", "line_number": 17, "usage_type": "call"}, {"api_name": "ai_flow.user_define_operation", "line_number": 18, "usage_type": "call"}, {"api_name": "ai_flow.graph.graph._default_ai_graph.nodes.values", "line_number": 19, "usage_type": "call"}, {"api_name": "ai_flow.graph.graph._default_ai_graph.nodes", "line_number": 19, "usage_type": "attribute"}, {"api_name": "ai_flow.graph.graph._default_ai_graph", "line_number": 19, "usage_type": "name"}, {"api_name": "ai_flow.api.ai_flow_context.ENGINE_NAME", "line_number": 20, "usage_type": "name"}, {"api_name": "ai_flow.common.path_util.get_file_dir", "line_number": 27, "usage_type": "call"}, {"api_name": "ai_flow.common.path_util", "line_number": 27, "usage_type": "name"}, {"api_name": "ai_flow.workflow.workflow_config.WorkFlowConfig", "line_number": 30, "usage_type": "call"}, {"api_name": "ai_flow.BaseJobConfig", "line_number": 32, "usage_type": "call"}, {"api_name": "ai_flow.BaseJobConfig", "line_number": 35, "usage_type": "call"}, {"api_name": "ai_flow.BaseJobConfig", "line_number": 38, "usage_type": "call"}, {"api_name": "ai_flow.common.json_utils.dumps", "line_number": 41, "usage_type": "call"}, {"api_name": "ai_flow.common.json_utils", "line_number": 41, "usage_type": "name"}, {"api_name": "ai_flow.global_config_file", "line_number": 45, "usage_type": "call"}, {"api_name": "ai_flow.config", "line_number": 46, "usage_type": "call"}, {"api_name": "ai_flow.user_define_operation", "line_number": 48, "usage_type": "call"}, {"api_name": "ai_flow.graph.graph._default_ai_graph.nodes.values", "line_number": 49, "usage_type": "call"}, {"api_name": "ai_flow.graph.graph._default_ai_graph.nodes", "line_number": 49, "usage_type": "attribute"}, {"api_name": "ai_flow.graph.graph._default_ai_graph", "line_number": 49, "usage_type": "name"}, {"api_name": "ai_flow.api.ai_flow_context.ENGINE_NAME", "line_number": 50, "usage_type": "name"}, {"api_name": "ai_flow.config", "line_number": 57, 
"usage_type": "call"}, {"api_name": "ai_flow.user_define_operation", "line_number": 58, "usage_type": "call"}, {"api_name": "ai_flow.graph.graph._default_ai_graph.nodes.values", "line_number": 59, "usage_type": "call"}, {"api_name": "ai_flow.graph.graph._default_ai_graph.nodes", "line_number": 59, "usage_type": "attribute"}, {"api_name": "ai_flow.graph.graph._default_ai_graph", "line_number": 59, "usage_type": "name"}, {"api_name": "ai_flow.api.ai_flow_context.ENGINE_NAME", "line_number": 60, "usage_type": "name"}, {"api_name": "ai_flow.test.test_util.set_project_config", "line_number": 68, "usage_type": "call"}, {"api_name": "ai_flow.test.test_util", "line_number": 68, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "26522964261", "text": "import streamlit as st\nimport pandas as pd\nimport numpy as np\nst.title(\"hey Guys find out your BMI !!\")\ndf=pd.read_csv(\"/content/500_Person_Gender_Height_Weight_Index.csv\")\nx=df.iloc[:,[1,2]].values\ndf=df.replace({'Index' : { 0 : \"Extremely_Weak\", 1 : \"Weak\", 2 : \"Normal\",3:\"Overweight\",4:\"Obesity\",5:\"Extreme_Obesity\" }})\ny=df.iloc[:,-1].values\nfrom sklearn.neighbors import KNeighborsClassifier\nmodel=KNeighborsClassifier(n_neighbors=23,metric=\"euclidean\")\nmodel.fit(x,y)\nxmin=np.min(x,axis=0)\nxmax=np.max(x,axis=0)\nheight=st.slider(\"Height\",float(xmin[0]),float(xmax[0]))\n\nweight=st.slider(\"Weight\",float(xmin[1]),float(xmax[1]))\n\ny_pred=model.predict([[height,weight]])\nprint (\"Your Body mass Index is :\")\n\nop=[\"Extremely_Weak\",\"Weak\",\"Normal\",\"Overweight\",\"Obesity\",\"Extreme_Obesity\"]\nst.title(op[y_pred[0]])\n\n", "repo_name": "GitbuddySharry/st_bmi_calc", "sub_path": "bmi.py", "file_name": "bmi.py", "file_ext": "py", "file_size_in_byte": 816, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "streamlit.title", "line_number": 4, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 13, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 14, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 16, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "39923570461", "text": "import os\nimport sys\nimport traceback\nimport subprocess\nimport platform\nimport imp\nfrom pathlib import Path\nfrom PrismUtils.Decorators import err_catcher_plugin as err_catcher\n\ntry:\n from PySide2.QtCore import *\n from PySide2.QtGui import *\n from PySide2.QtWidgets import *\n\n psVersion = 2\nexcept Exception:\n from PySide.QtCore import *\n from PySide.QtGui import *\n\n psVersion = 1\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"UserInterfaces\"))\nif psVersion == 1:\n import FtrackPublish_ui\nelse:\n import FtrackPublish_ui_ps2 as FtrackPublish_ui\n\ntry:\n import CreateItem\n\nexcept Exception:\n modPath = imp.query_module(\"CreateItem\")[1]\n if modPath.endswith(\".pyc\") and os.path.exists(modPath[:-1]):\n os.remove(modPath)\n# 20221216 - change by Danko\n# path = r'D:\\dev\\GitHub\\Prism-CXPlugin\\Scripts'\npath = r'C:\\Prism\\Plugins\\Custom\\CXPlugin\\Scripts'\n\nsys.path.append(path)\nimport 
Prism_CXPlugin_Functions\n\n\nclass ftrackPublish(QDialog, FtrackPublish_ui.Ui_dlg_ftrackPublish):\n def __init__(self, core, origin, ptype, shotName, task, version, sources, startFrame, endFrame):\n QDialog.__init__(self)\n self.setupUi(self)\n\n self.core = core\n self.core.parentWindow(self)\n self.ptype = ptype\n self.shotName = shotName\n self.taskVersion = version\n self.fileSources = sources\n self.startFrame = startFrame\n self.endFrame = endFrame\n self.shotList = {}\n self.task = task\n\n ftrackData = origin.connectToFtrack()\n\n if ftrackData[0] is None or ftrackData[1] is None:\n return\n\n self.session, self.ftrackProjectName, self.ftrackUser = ftrackData\n\n self.core.appPlugin.ftrackPublish_startup(self)\n\n # for i in range(7):\n # self.cb_playlist.addItem(\n # \"DAILIES_%s\" % (datetime.date.today() + datetime.timedelta(days=i))\n # )\n\n if self.ptype == \"Asset Build\":\n self.rb_asset.setChecked(True)\n else:\n self.rb_shot.setChecked(True)\n\n self.updateShots()\n try:\n self.navigateToCurrent(self.shotName, self.task)\n except Exception:\n return\n\n if self.core.appPlugin.pluginName == \"Houdini\" and hasattr(\n self.core.appPlugin, \"fixStyleSheet\"\n ):\n self.core.appPlugin.fixStyleSheet(self.gb_playlist)\n\n self.connectEvents()\n\n @err_catcher(name=__name__)\n def connectEvents(self):\n self.rb_asset.pressed.connect(self.updateShots)\n self.rb_shot.pressed.connect(self.updateShots)\n # self.b_addTask.clicked.connect(self.createTask)\n self.b_addTask.setVisible(False)\n self.cb_shot.activated.connect(self.updateTasks)\n self.cb_task.activated.connect(self.updateTasks)\n self.b_ftrackPublish.clicked.connect(self.publish)\n\n @err_catcher(name=__name__)\n def updateShots(self):\n if self.rb_asset.isDown():\n self.ptype = 'Asset Build'\n elif self.rb_shot.isDown():\n self.ptype = 'Shot'\n\n ftrackTasks, self.ftrackDict = Prism_CXPlugin_Functions.Prism_CXPlugin_Functions.getFtrackEntityData(self, self.ptype)\n\n self.cb_shot.clear()\n\n for x in self.ftrackDict.keys():\n if self.ptype == 'Shot':\n name = \"%s%s%s\" % (\n x['parent']['name'],\n self.core.sequenceSeparator,\n x['name']\n )\n self.shotList[name] = x['name']\n else:\n name = x['name']\n localHierarchy = os.path.join(x['_link'][2:][0]['name'], name)\n self.shotList[name] = localHierarchy\n\n self.cb_shot.addItems(sorted(self.shotList.keys(), key=lambda s: s.lower()))\n self.updateTasks()\n\n @err_catcher(name=__name__)\n def updateTasks(self, idx=None):\n self.cb_task.clear()\n self.ftrackTasks = []\n # shotName is also assetName, seqName is also parentName\n shotName, seqName = self.core.entities.splitShotname(self.shotName)\n\n for i in self.ftrackDict:\n if i['name'] == shotName and (i['parent']['name'] == seqName or seqName == 'no sequence'):\n self.ftrackTasks = self.ftrackDict[i]\n self.curShot = i\n\n success = False\n for x in self.ftrackTasks:\n if x['name'] == self.task:\n self.curTask = x\n success = True\n\n if success is False:\n self.curTask = None\n QMessageBox.warning(self.core.messageParent, \"Ftrack Publish\", \"That %s has not been assigned to you.\" % self.ptype,)\n return\n\n if len(self.ftrackTasks) == 0:\n QMessageBox.warning(self.core.messageParent, \"Ftrack Publish\", \"That %s has not been assigned to you.\" % self.ptype,)\n return\n\n ftrackTaskNames = [x['name'] for x in self.ftrackTasks]\n ftrackTaskNames = list(set(ftrackTaskNames))\n\n self.cb_task.addItems(ftrackTaskNames)\n\n checklist = ['Animation', 'Compositing']\n\n if self.curTask['type']['name'] in checklist:\n 
self.chb_proxyVid.setChecked(True)\n else:\n self.chb_proxyVid.setChecked(False)\n\n @err_catcher(name=__name__)\n def navigateToCurrent(self, shotName, task):\n idx = self.cb_shot.findText(shotName)\n if idx != -1:\n self.cb_shot.setCurrentIndex(idx)\n\n self.updateTasks()\n\n idx = self.cb_task.findText(task)\n if idx != -1:\n self.cb_task.setCurrentIndex(idx)\n\n @err_catcher(name=__name__)\n def enterEvent(self, event):\n QApplication.restoreOverrideCursor()\n\n @err_catcher(name=__name__)\n def publish(self):\n if self.cb_shot.currentText() == \"\":\n QMessageBox.warning(\n self.core.messageParent,\n \"Ftrack Publish\",\n \"No %s exists in the Ftrack project. Publish canceled\" % self.ptype,\n )\n return\n\n if self.cb_task.currentText() == \"\":\n QMessageBox.warning(\n self.core.messageParent,\n \"Ftrack Publish\",\n \"No task is selected. Publish canceled.\",\n )\n return\n\n curShot = self.curShot\n curTask = self.curTask\n\n def frames_to_TC(frames):\n h = int(frames / 180000)\n m = int(frames / 3000) % 60\n s = (frames % 3000) / 50\n return (\"%02d:%02d:%2.1f\" % (h, m, s))\n\n pubVersions = []\n for source in self.fileSources:\n versionInfoPath = os.path.join(os.path.dirname(source[0]), \"versioninfo.yml\")\n if not os.path.exists(versionInfoPath):\n versionInfoPath = os.path.join(\n os.path.dirname(os.path.dirname(source[0])), \"versioninfo.yml\"\n )\n\n if not os.path.exists(versionInfoPath):\n QMessageBox.warning(self.core.messageParent, \"Error\", 'Could not find the versionInfo file.')\n return\n\n localScenefile = self.core.getConfig(\"information\", \"Source scene\", configPath=versionInfoPath)\n scenefile = str(source[0].rpartition('03_Workflow')[0]) + '03_Workflow' + str(localScenefile.rpartition('03_Workflow')[2])\n scenefile = self.core.fixPath(scenefile)\n\n versionName = \"%s_%s_%s\" % (\n self.cb_shot.currentText(),\n self.cb_task.currentText(),\n self.taskVersion,\n )\n\n if len(self.fileSources) > 1:\n versionName += \"_%s\" % os.path.splitext(os.path.basename(source[0]))[0]\n\n baseName, extension = os.path.splitext(source[0])\n videoInput = extension in [\".mp4\", \".mov\"]\n\n if videoInput:\n sequenceName = source[0]\n else:\n try:\n sequenceName = baseName[:-self.core.framePadding] + \"#\" * self.core.framePadding + extension\n except Exception:\n sequenceName = source[0]\n\n tmpFiles = []\n\n ffmpegIsInstalled = False\n if platform.system() == \"Windows\":\n ffmpegPath = os.path.join(\n self.core.prismLibs, \"Tools\", \"FFmpeg\", \"bin\", \"ffmpeg.exe\"\n )\n if os.path.exists(ffmpegPath):\n ffmpegIsInstalled = True\n elif platform.system() == \"Linux\":\n ffmpegPath = \"ffmpeg\"\n try:\n subprocess.Popen([ffmpegPath])\n ffmpegIsInstalled = True\n except Exception:\n pass\n elif platform.system() == \"Darwin\":\n ffmpegPath = os.path.join(self.core.prismLibs, \"Tools\", \"ffmpeg\")\n if os.path.exists(ffmpegPath):\n ffmpegIsInstalled = True\n\n imgPath = source[0]\n\n if extension in [\".exr\", \".mp4\", \".mov\"]:\n inputpath = self.core.fixPath(source[0])\n outputpath = os.path.splitext(inputpath)[0] + \".jpg\"\n\n if ffmpegIsInstalled:\n if videoInput:\n nProc = subprocess.Popen(\n [\n ffmpegPath,\n \"-apply_trc\",\n \"iec61966_2_1\",\n \"-i\",\n inputpath,\n \"-pix_fmt\",\n \"yuv420p\",\n \"-vf\",\n \"select=gte(n\\,%s)\" % source[1],\n \"-frames\",\n \"1\",\n outputpath,\n \"-y\",\n ]\n )\n else:\n nProc = subprocess.Popen(\n [\n ffmpegPath,\n \"-apply_trc\",\n \"iec61966_2_1\",\n \"-i\",\n inputpath,\n \"-pix_fmt\",\n \"yuv420p\",\n outputpath,\n 
\"-y\",\n ]\n )\n result = nProc.communicate()\n imgPath = outputpath\n tmpFiles.append(imgPath)\n else:\n QMessageBox.warning(self.core.messageParent, \"FFmpeg Error\", 'No FFmpeg Instalation found!')\n\n asset_parent = curShot\n asset_name = curTask['name']\n asset = self.session.query('Asset where name is \"{0}\" and parent.id is \"{1}\"'.format(asset_name, curShot['id'])).first()\n asset_type = self.session.query('AssetType where name is \"{0}\"'.format('Upload')).one() # Undedingt Ändern!!!\n # status = self.session.query('Status where name is \"{0}\"'.format('Awaiting Approval CX')).one()\n status = self.session.query('Status where name is \"{0}\"'.format('Awaiting Client Approval')).one()\n version = self.taskVersion[1:5]\n local_location = self.session.query('Location where name is \"ftrack.unmanaged\"').one()\n server_location = self.session.query('Location where name is \"ftrack.server\"').one()\n\n data = {}\n\n if asset is None:\n data = {\n 'name': asset_name,\n 'type': asset_type,\n 'parent': asset_parent\n }\n try:\n asset = self.session.create('Asset', data)\n self.session.commit()\n\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n erStr = \"ERROR:\\n%s\" % traceback.format_exc()\n QMessageBox.warning(self.core.messageParent, \"Ftrack Publish\", erStr)\n return\n\n # QMessageBox.warning(self.core.messageParent, \"asset parent\", asset['parent']['parent']['name'] + '-' + asset['parent']['name'])\n # QMessageBox.warning(self.core.messageParent, \"curTask parent\", curTask['parent']['parent']['name'] + '-' + curTask['parent']['name'])\n\n data = {}\n data = {\n 'comment': self.te_description.toPlainText(),\n 'asset': asset,\n 'task': curTask,\n 'version': version,\n 'is_published': False\n }\n\n try:\n createdVersion = self.session.create(\"AssetVersion\", data)\n self.session.commit()\n\n user = self.session.query('User where username is \"{0}\"'.format(self.ftrackUser)).first()\n note = self.session.create('Note', {\n 'content': self.te_description.toPlainText(),\n 'author': user\n })\n createdVersion['notes'].append(note)\n curTask['status'] = status\n # self.session.commit()\n\n if self.chb_proxyVid.isChecked() and ffmpegIsInstalled:\n createdVersion['custom_attributes']['clientReview'] = True\n # self.session.commit()\n\n ftrackPrj = self.session.query('Project where name is \"{0}\"'.format(self.ftrackProjectName)).first()\n pre = ftrackPrj['root']\n project = self.core.getConfig('globals', 'project_name', configPath=self.core.prismIni)\n sequenceName = os.path.normpath(pre + sequenceName.rpartition(project)[2])\n scenefile = os.path.normpath(pre + scenefile.rpartition(project)[2])\n\n createdVersion.create_component(sequenceName, {'name': 'Global SequencePath'}, location=local_location)\n createdVersion.create_component(scenefile, {'name': 'Global SceneFilePath'}, location=local_location)\n\n # exportFilePath = scenefile.split('.')[0] + 'versionInfo.yml'\n # exportFile = self.core.getConfig(\"information\", \"export-path\", configPath=exportFilePath)\n\n # if exportFile is None:\n # QMessageBox.warning(self.core.messageParent, \"Warning\", 'No Exportfile has been created with this Version.')\n # else:\n # exportFileList = exportFile.split(', ')\n # exportFileList.pop()\n # exportNewFileList = []\n\n # for i in exportFileList:\n # exportNewFileList.append(os.path.normpath(pre + i.rpartition(project)[2]))\n\n # exportFile = ', '.join(exportNewFileList)\n # # exportFile = os.path.normpath(pre + exportFile.rpartition(project)[2])\n # # 
createdVersion.create_component(exportFile, {'name': 'Global ExportFilePath'}, location=local_location)\n\n if os.path.exists(imgPath):\n thumbnail_component = self.session.create_component(imgPath, dict(name='thumbnail'), location=server_location)\n createdVersion['thumbnail'] = thumbnail_component\n\n createdVersion['is_published'] = True\n self.session.commit()\n\n except Exception:\n QMessageBox.warning(self.core.messageParent, \"Debug\", 'Version already published',)\n for i in tmpFiles:\n os.remove(i)\n exc_type, exc_obj, exc_tb = sys.exc_info()\n erStr = \"ERROR:\\n%s\" % traceback.format_exc()\n QMessageBox.warning(self.core.messageParent, \"Ftrack Publish\", erStr)\n return\n\n if self.chb_proxyVid.isChecked() and ffmpegIsInstalled:\n proxyPath = \"\"\n inputpath = self.core.fixPath(source[0])\n soundfilePath = os.path.normpath(str(Path(os.path.dirname(inputpath)).parents[2]) + os.path.sep + \"Incoming\" + os.path.sep + \"03_VR-Storyboard\")\n soundfilePath = self.core.convertPath(soundfilePath, 'global')\n try:\n with open(os.path.join(soundfilePath, 'AudioStart.txt')) as f:\n lines = int(f.readlines()[0])\n delay = frames_to_TC(self.startFrame - lines)\n except Exception:\n delay = frames_to_TC(self.startFrame - 1000)\n \n fullAudioFilePath = ''\n\n if os.path.exists(soundfilePath):\n for file in os.listdir(soundfilePath):\n if file.endswith(\".mp4\"):\n filename = file.split('.')[0] + '.mp4'\n fullAudioFilePath = os.path.join(soundfilePath, filename)\n\n mp4File = (\n os.path.join(\n os.path.dirname(inputpath) + \"(mp4)\",\n os.path.basename(inputpath),\n )[:-9] + \".mp4\"\n )\n\n pwidth = 0\n pheight = 0\n\n if os.path.exists(mp4File):\n proxyPath = mp4File\n else:\n isSequence = False\n\n if len(os.listdir(os.path.dirname(inputpath))) > 2:\n if not videoInput:\n isSequence = True\n else:\n pass\n\n if os.path.splitext(inputpath)[1] in [\n \".jpg\",\n \".jpeg\",\n \".JPG\",\n \".png\",\n \".tif\",\n \".tiff\",\n ]:\n size = QImage(inputpath).size()\n pwidth = size.width()\n pheight = size.height()\n elif os.path.splitext(inputpath)[1] in [\".exr\"]:\n oiio = self.core.media.getOIIO()\n\n if oiio:\n imgSpecs = oiio.ImageBuf(str(inputpath)).spec()\n pwidth = imgSpecs.full_width\n pheight = imgSpecs.full_height\n\n elif os.path.splitext(inputpath)[1] in [\".mp4\", \".mov\"]:\n try:\n import imageio\n except Exception:\n pass\n vidReader = imageio.get_reader(inputpath, \"ffmpeg\")\n\n pwidth = vidReader._meta[\"size\"][0]\n pheight = vidReader._meta[\"size\"][1]\n\n if int(pwidth) % 2 == 1 or int(pheight) % 2 == 1:\n QMessageBox.warning(\n self.core.messageParent,\n \"Media conversion\",\n \"Media with odd resolution can't be converted to mp4. 
No proxy video could be generated.\",\n )\n else:\n if isSequence or videoInput:\n if isSequence:\n inputpath = os.path.splitext(inputpath)[0][:-(self.core.framePadding)] + \"%04d\".replace(\"4\", str(self.core.framePadding)) + os.path.splitext(inputpath)[1]\n outputpath = os.path.splitext(inputpath)[0][:-(self.core.framePadding + 1)] + \".mp4\"\n\n if platform.system() == \"Windows\":\n overlay = \"\"\"[in]\n drawbox=y=ih-24:color=black@0.4:width=iw:height=24:t=fill,\n drawbox=y=0:color=black@0.4:width=iw:height=24:t=fill,\n drawtext='fontfile=c\\:/Windows/Fonts/l_10646.ttf:text=Modul\\: xModul Task\\: xTaskName':start_number=1:x=(w-tw)/2: y=(lh/2):fontcolor=white:fontsize=15:,\n drawtext='fontfile=c\\:/Windows/Fonts/l_10646.ttf:text=MayaFrame\\: %{frame_num}':start_number=xSnum:x=(w-tw-300)/2:y=h-(lh+lh/2-2):fontcolor=white:fontsize=15:,\n drawtext='fontfile=c\\:/Windows/Fonts/l_10646.ttf:text=VideoFrame\\: %{eif\\:n\\:d\\:4} / xFanz':start_number=1:x=(w-tw+300)/2:y=h-(lh+lh/2):fontcolor=white:fontsize=15: \n [OUT]\"\"\"\n elif platform.system() == \"Linux\":\n overlay = \"\"\"[in]\n drawbox=y=ih-24:color=black@0.4:width=iw:height=24:t=fill,\n drawbox=y=0:color=black@0.4:width=iw:height=24:t=fill,\n drawtext='font=adobe-source-code-pro:text=Modul\\: xModul Task\\: xTaskName':start_number=1:x=(w-tw)/2: y=(lh/2):fontcolor=white:fontsize=15:,\n drawtext='font=adobe-source-code-pro:text=MayaFrame\\: %{frame_num}':start_number=xSnum:x=(w-tw-300)/2:y=h-(lh+lh/2-2):fontcolor=white:fontsize=15:,\n drawtext='font=adobe-source-code-pro:text=VideoFrame\\: %{eif\\:n\\:d\\:4} / xFanz':start_number=1:x=(w-tw+300)/2:y=h-(lh+lh/2):fontcolor=white:fontsize=15: \n [OUT]\"\"\"\n elif platform.system() == \"Darwin\":\n overlay = \"\"\"[in]\n drawbox=y=ih-24:color=black@0.4:width=iw:height=24:t=fill,\n drawbox=y=0:color=black@0.4:width=iw:height=24:t=fill,\n drawtext='font=adobe-source-code-pro:text=Modul\\: xModul Task\\: xTaskName':start_number=1:x=(w-tw)/2: y=(lh/2):fontcolor=white:fontsize=15:,\n drawtext='font=adobe-source-code-pro:text=MayaFrame\\: %{frame_num}':start_number=xSnum:x=(w-tw-300)/2:y=h-(lh+lh/2-2):fontcolor=white:fontsize=15:,\n drawtext='font=adobe-source-code-pro:text=VideoFrame\\: %{eif\\:n\\:d\\:4} / xFanz':start_number=1:x=(w-tw+300)/2:y=h-(lh+lh/2):fontcolor=white:fontsize=15: \n [OUT]\"\"\"\n\n overlay = overlay.replace('xModul', self.cb_shot.currentText())\n overlay = overlay.replace('xTaskName', curTask['name'])\n overlay = overlay.replace('xSnum', str(self.startFrame))\n overlay = overlay.replace('xFanz', str(self.endFrame - self.startFrame).zfill(4))\n\n fnameData = self.core.getScenefileData(scenefile)\n step = fnameData[\"step\"]\n\n if step == 'srf':\n fps = str(12)\n else:\n fps = str(curShot['custom_attributes']['fps'])\n\n # QMessageBox.information(self.core.messageParent, 'Debug', str(delay))\n # QMessageBox.information(self.core.messageParent, 'Debug', fullAudioFilePath)\n # QMessageBox.information(self.core.messageParent, 'startFrame', str(self.startFrame))\n # QMessageBox.information(self.core.messageParent, 'fps', fps)\n # QMessageBox.information(self.core.messageParent, 'inputpath', inputpath)\n # # QMessageBox.information(self.core.messageParent, 'Debug', overlay)\n # QMessageBox.information(self.core.messageParent, 'outputpath', outputpath)\n\n if step == 'anm':\n nProc = subprocess.Popen(\n [\n ffmpegPath,\n \"-ss\",\n str(delay),\n \"-i\",\n fullAudioFilePath,\n \"-start_number\",\n str(self.startFrame),\n \"-framerate\",\n fps,\n \"-apply_trc\",\n 
\"iec61966_2_1\",\n \"-i\",\n inputpath,\n \"-map\",\n \"0:a\",\n \"-map\",\n \"1:v\",\n \"-vf\",\n overlay,\n \"-pix_fmt\",\n \"yuv420p\",\n \"-start_number\",\n str(self.startFrame),\n \"-shortest\",\n outputpath,\n \"-y\",\n ]\n )\n else:\n nProc = subprocess.Popen(\n [\n ffmpegPath,\n \"-start_number\",\n str(self.startFrame),\n \"-framerate\",\n fps,\n \"-apply_trc\",\n \"iec61966_2_1\",\n \"-i\",\n inputpath,\n \"-pix_fmt\",\n \"yuv420p\",\n \"-start_number\",\n str(self.startFrame),\n outputpath,\n \"-y\",\n ]\n )\n\n else:\n outputpath = os.path.splitext(inputpath)[0][:-(self.core.framePadding + 1)] + \"(proxy).mp4\"\n\n nProc = subprocess.Popen(\n [\n ffmpegPath,\n \"-apply_trc\",\n \"iec61966_2_1\",\n \"-i\",\n inputpath,\n \"-pix_fmt\",\n \"yuv420p\",\n \"-start_number\",\n str(self.startFrame),\n outputpath,\n \"-y\",\n ]\n )\n mp4Result = nProc.communicate()\n proxyPath = outputpath\n tmpFiles.append(proxyPath)\n\n else:\n try:\n import json\n component = createdVersion.create_component(\n path=inputpath,\n data={\n 'name': 'ftrackreview-image'\n },\n location=server_location\n )\n\n # Meta data needs to contain *format*.\n component['metadata']['ftr_meta'] = json.dumps({\n 'format': 'image',\n })\n\n component.session.commit()\n\n except Exception as e:\n QMessageBox.warning(\n self.core.messageParent,\n \"Warning\",\n \"Uploading image failed:\\n\\n%s\" % str(e),\n )\n\n if (proxyPath != \"\" and os.path.exists(proxyPath) and os.stat(proxyPath).st_size != 0):\n try:\n # Retrieve or create version.\n import json\n component = createdVersion.create_component(\n path=proxyPath,\n data={\n 'name': 'ftrackreview-mp4'\n },\n location=server_location\n )\n\n component['metadata']['ftr_meta'] = json.dumps({\n 'frameIn': self.startFrame,\n 'frameOut': self.endFrame,\n 'frameRate': curShot['custom_attributes']['fps'],\n 'height': pheight,\n 'width': pwidth\n })\n component.session.commit()\n\n except Exception as e:\n QMessageBox.warning(\n self.core.messageParent,\n \"Warning\",\n \"Uploading proxy failed:\\n\\n%s\" % str(e),\n )\n\n pubVersions.append(versionName)\n\n for i in tmpFiles:\n os.remove(i)\n\n ftrackSite = self.core.getConfig(\"ftrack\", \"site\", configPath=self.core.prismIni)\n ftrackPrj = self.session.query('Project where name is \"{0}\"'.format(self.ftrackProjectName)).first()\n ftrackPrjId = ftrackPrj['id']\n user_security_roles = self.session.query('UserSecurityRole where user.username is \"{0}\"'.format(self.session.api_user)).all()\n\n for i in user_security_roles:\n userRole = i['security_role']['type']\n\n if userRole == 'PROJECT':\n ftrackSite += \"/#slideEntityId=\" + str(createdVersion[\"id\"]) + \"&slideEntityType=assetversion&view=tasks&itemId=projects&entityId=\" + str(ftrackPrjId) + \"&entityType=show\"\n elif userRole == 'ASSIGNED':\n ftrackSite += '/#slideEntityId=' + str(createdVersion[\"id\"]) + '&slideEntityType=assetversion&itemId=home'\n\n versionInfoPath = os.path.join(os.path.dirname(source[0]), \"versioninfo.yml\")\n if not os.path.exists(versionInfoPath):\n versionInfoPath = os.path.join(os.path.dirname(os.path.dirname(source[0])), \"versioninfo.yml\")\n\n self.core.setConfig(\"information\", \"ftrack-url\", ftrackSite, configPath=versionInfoPath)\n\n msgStr = \"Successfully published:\"\n for i in pubVersions:\n msgStr += \"\\n%s\" % i\n\n msg = QMessageBox(QMessageBox.Information, \"Ftrack Publish\", msgStr, parent=self.core.messageParent,)\n msg.addButton(\"Open version in Ftrack\", QMessageBox.YesRole)\n msg.addButton(\"Close\", 
QMessageBox.YesRole)\n msg.setFocus()\n action = msg.exec_()\n\n if action == 0:\n import webbrowser\n\n webbrowser.open(ftrackSite)\n\n self.accept()\n", "repo_name": "Cine-Chromatix/Prism", "sub_path": "Prism/Plugins/ProjectManagers/Ftrack/Scripts/FtrackPublish.py", "file_name": "FtrackPublish.py", "file_ext": "py", "file_size_in_byte": 32104, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "14", "api": [{"api_name": "sys.path.append", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 22, "usage_type": "call"}, {"api_name": "imp.query_module", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 39, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "FtrackPublish_ui_ps2.Ui_dlg_ftrackPublish", "line_number": 43, "usage_type": "attribute"}, {"api_name": "PrismUtils.Decorators.err_catcher_plugin", "line_number": 91, "usage_type": "call"}, {"api_name": "Prism_CXPlugin_Functions.Prism_CXPlugin_Functions.getFtrackEntityData", "line_number": 108, "usage_type": "call"}, {"api_name": "Prism_CXPlugin_Functions.Prism_CXPlugin_Functions", "line_number": 108, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "PrismUtils.Decorators.err_catcher_plugin", "line_number": 101, "usage_type": "call"}, {"api_name": "PrismUtils.Decorators.err_catcher_plugin", "line_number": 128, "usage_type": "call"}, {"api_name": "PrismUtils.Decorators.err_catcher_plugin", "line_number": 167, "usage_type": "call"}, {"api_name": "PrismUtils.Decorators.err_catcher_plugin", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 212, "usage_type": "call"}, {"api_name": "os.path", "line_number": 212, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 212, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 213, "usage_type": "call"}, {"api_name": "os.path", "line_number": 213, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 214, "usage_type": "call"}, {"api_name": "os.path", "line_number": 214, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 215, "usage_type": "call"}, {"api_name": "os.path", "line_number": 215, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 218, "usage_type": "call"}, {"api_name": "os.path", "line_number": 218, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 233, "usage_type": "call"}, {"api_name": "os.path", "line_number": 233, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 233, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 235, "usage_type": "call"}, {"api_name": "os.path", "line_number": 235, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 249, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 250, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 250, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 253, "usage_type": "call"}, {"api_name": "os.path", "line_number": 253, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 255, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 258, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 262, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 263, "usage_type": "call"}, {"api_name": "os.path", "line_number": 263, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 264, "usage_type": "call"}, {"api_name": "os.path", "line_number": 264, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 271, "usage_type": "call"}, {"api_name": "os.path", "line_number": 271, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 275, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 293, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 335, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 336, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 372, "usage_type": "call"}, {"api_name": "os.path", "line_number": 372, "usage_type": "attribute"}, {"api_name": "os.path.normpath", "line_number": 373, "usage_type": "call"}, {"api_name": "os.path", "line_number": 373, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 395, "usage_type": "call"}, {"api_name": "os.path", "line_number": 395, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 405, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 406, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 407, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 414, "usage_type": "call"}, {"api_name": "os.path", "line_number": 414, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 414, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 414, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 417, "usage_type": "call"}, {"api_name": "os.path", "line_number": 417, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 425, "usage_type": "call"}, {"api_name": "os.path", "line_number": 425, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 426, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 429, "usage_type": "call"}, {"api_name": "os.path", "line_number": 429, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 432, "usage_type": "call"}, {"api_name": "os.path", "line_number": 432, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 433, "usage_type": "call"}, {"api_name": "os.path", "line_number": 433, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 434, "usage_type": "call"}, {"api_name": "os.path", "line_number": 434, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 441, "usage_type": "call"}, {"api_name": "os.path", "line_number": 441, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 446, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 446, "usage_type": "call"}, {"api_name": "os.path", "line_number": 446, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 452, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 452, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 463, "usage_type": "call"}, {"api_name": "os.path", "line_number": 463, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 471, "usage_type": "call"}, {"api_name": "os.path", "line_number": 471, "usage_type": "attribute"}, {"api_name": "imageio.get_reader", "line_number": 476, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 490, "usage_type": "call"}, {"api_name": "os.path", "line_number": 490, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 491, "usage_type": "call"}, {"api_name": "os.path", "line_number": 491, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 493, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 501, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 509, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 540, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 571, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 592, "usage_type": "call"}, {"api_name": "os.path", "line_number": 592, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 594, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 625, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 638, "usage_type": "call"}, {"api_name": "os.path", "line_number": 638, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 638, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 650, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 669, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 684, "usage_type": "call"}, {"api_name": "os.path", "line_number": 684, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 684, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 685, "usage_type": "call"}, {"api_name": "os.path", "line_number": 685, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 686, "usage_type": "call"}, {"api_name": "os.path", "line_number": 686, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 686, "usage_type": "call"}, {"api_name": "webbrowser.open", "line_number": 703, "usage_type": "call"}, {"api_name": "PrismUtils.Decorators.err_catcher_plugin", "line_number": 183, "usage_type": "call"}]} +{"seq_id": "14172429135", "text": "import copy\nimport random\nfrom collections import Counter\n\n'''\n\nTOMBS - Python Version\n\nInput example: [0,1,5] or 0,1,5\nPlaces a Blade at (0,1)\n\nActivating Thorn example: [0,1,2,1,2] or 0,1,2,1,2\nPlaces a Thorn at (0,1) while removing target at (1,2)\n\nDiscard input example: 3\nDiscards an Ombra from hand\n\nQuit input: [0]\n\nPlaying board (x,y):\n\n 00 | 10 | 20\n--------------\n 01 | 11 | 21\n--------------\n 02 | 12 | 22\n\nTile types:\n0 - Empty\n1 - Tombstone\n2 - Thorn (old name: priest)\n3 - Ombra (old name: rogue)\n4 - Magus (old name: mage)\n5 - Blade (old name: warrior)\n\nTile owner:\n0 - neutral (for empty tiles and tombstones)\n1-n: Player 1-n\n\nWin conditions:\n1. Field-lock\n2. Kill-count\n3. 
Unit-count\n\nTo do: GUI\n\n'''\n\n\nclass Tile:\n\n # ex: Tile(0, 1, 5, 1): Player 1's Blade at position (x,y) = (0,1)\n\n def __init__(self, x, y, type, owner):\n self.x = x\n self.y = y\n self.type = type\n self.owner = owner\n\n def __str__(self):\n return \"(\" + str(self.type) + \" \" + str(self.owner) + \")\"\n\n\nclass Error(Exception):\n pass\n\n\nclass Deck:\n\n # nb: number of cards -> evenly distributed between the 4 character types\n\n def __init__(self, nb):\n self.nb = nb\n self.cards = []\n for x in range(nb//4):\n self.cards += [2, 3, 4, 5]\n\n def __str__(self):\n return str(self.cards)\n\n def shuffle(self):\n random.shuffle(self.cards)\n\n # take1: returns first card of deck, which is then removed from it\n\n def take1(self):\n drawn = self.cards[0]\n self.cards = self.cards[1:]\n return drawn\n\n\nclass Player:\n\n # index: player's identification\n # d: deck\n\n def __init__(self, index, d):\n self.index = index\n self.hand = []\n self.draw(d)\n self.draw(d)\n\n def draw(self, d):\n self.hand += [d.take1()]\n\n # undraw: in case of error in input -> rollback\n\n def undraw(self, d):\n d.cards = [self.hand[-1]] + d.cards\n self.hand = self.hand[0:-1]\n\n # play1: removes a card of unit u from hand\n\n def play1(self, u):\n if u in self.hand:\n self.hand.remove(u)\n else:\n raise Error(\"Card unavailable.\")\n\n\nclass Game:\n\n def __init__(self, numP):\n\n self.numP = numP # number of players\n self.currentP = 1 # current player\n self.board = [[Tile(i, j, 0, 0) for j in range(3)] for i in range(3)]\n # initialize an empty board\n\n '''\n\n For testing purposes\n\n self.board = [[Tile(0, 0, 3, 1), Tile(0, 1, 3, 2), Tile(0, 2, 3, 3)],\n [Tile(1, 0, 3, 4), Tile(1, 1, 3, 1), Tile(1, 2, 3, 2)],\n [Tile(2, 0, 0, 0), Tile(2, 1, 0, 0), Tile(2, 2, 0, 0)]]\n\n '''\n \n self.turnCount = 1\n self.stuckCount = 0 # to check for Field-lock win condition\n\n try:\n if numP == 2:\n\n self.score = [0, 0]\n self.safeTurns = 3 # no violence during first 3 turns\n self.killWin = 8 # need 8 kills to win\n self.deck = Deck(32) # deck of 32 cards\n self.lastDraw = self.deck.nb - 2*numP # turn on which the last card is drawn\n self.deck.shuffle()\n self.players = [Player(1, self.deck), Player(2, self.deck)] # initialize 2 players\n\n elif numP == 3:\n\n self.score = [0, 0, 0]\n self.safeTurns = 2\n self.killWin = 7\n self.deck = Deck(36)\n self.lastDraw = self.deck.nb - 2*numP\n self.deck.shuffle()\n self.players = [Player(1, self.deck), Player(2, self.deck), Player(3, self.deck)]\n\n elif numP == 4:\n\n self.score = [0, 0, 0, 0]\n self.safeTurns = 3\n self.killWin = 6\n self.deck = Deck(40)\n self.lastDraw = self.deck.nb - 2*numP\n self.deck.shuffle()\n self.players = [Player(1, self.deck), Player(2, self.deck),\n Player(3, self.deck), Player(4, self.deck)]\n\n else:\n\n raise Error(\"Game is unsupported for \" + str(numP) + \" players.\")\n\n except Error as msg:\n print(msg)\n raise SystemExit\n\n # printBoard: prints the board state and other info\n\n def printBoard(self, b):\n\n for j in range(3):\n print(\"\\n\")\n for i in range(3):\n print(b[int(i)][int(j)], end=\" \")\n print(\"\\n-----------------\")\n\n print(\"Score:\", self.score, \"\\n\")\n\n '''\n\n # for testing purposes\n\n for x in self.players:\n print(x.hand)\n\n print(self.deck.cards, len(self.deck.cards))\n\n '''\n\n # btile: board x-coord y-coord -> list of tiles\n # returns tiles in the threat zones of a Blade at (x,y)\n\n @staticmethod\n def btile(b, x, y):\n lst = []\n if y < 2:\n lst = [b[x][y+1]] + lst # down\n if 
x < 2:\n lst = [b[x+1][y]] + lst # right\n if x > 0:\n lst = [b[x-1][y]] + lst # left\n if y > 0:\n lst = [b[x][y-1]] + lst # up\n return lst\n\n # mtile1: board x-coord y-coord -> list of tiles\n # returns adjacent diagonal tiles to a character at (x,y)\n\n @staticmethod\n def mtile1(b, x, y):\n lst = []\n if x < 2 and y < 2:\n lst = [b[x+1][y+1]] + lst # dr\n if x > 0 and y < 2:\n lst = [b[x-1][y+1]] + lst # dl\n if x < 2 and y > 0:\n lst = [b[x+1][y-1]] + lst # tr\n if x > 0 and y > 0:\n lst = [b[x-1][y-1]] + lst # tl\n return lst\n\n # mtile: board x-coord y-coord -> list of tiles\n # returns tiles in the threat zones of a Magus at (x,y)\n\n @staticmethod\n def mtile(b, x, y):\n lst = Game.mtile1(b, x, y)\n if x == 0 and y == 0:\n lst += [b[2][2]]\n elif x == 2 and y == 0:\n lst += [b[0][2]]\n elif x == 0 and y == 2:\n lst = [b[2][0]] + lst\n elif x == 2 and y == 2:\n lst = [b[0][0]] + lst\n return lst\n\n # ttile: board x-coord y-coord -> list of tiles\n # returns tiles in the threat zones of a Thorn at (x,y)\n\n @staticmethod\n def ttile(b, x, y):\n return Game.btile(b, x, y) + Game.mtile1(b, x, y)\n\n # enemy: tile list-of-tiles unit -> boolean\n # returns whether there is a specific enemy unit to the given tile in the list of tiles\n\n @staticmethod\n def enemy(t, lst, u):\n truth = False\n for x in lst:\n if x.type == u and x.owner != t.owner:\n truth = True\n return truth\n\n # target: tile list-of-tiles -> boolean\n # returns whether there is an enemy to the given tile in the list of tiles\n\n @staticmethod\n def target(t, lst):\n truth = False\n for x in lst:\n if not (x.owner == t.owner or x.owner == 0):\n truth = True\n return truth\n\n # singletarget: tile1 tile2 -> boolean\n # returns whether tile2 is an enemy to tile1\n\n @staticmethod\n def singletarget(t1, t2):\n if not (t2.owner == t1.owner or t2.owner == 0):\n truth = True\n else:\n truth = False\n return truth\n\n '''\n\n Checking process:\n\n 1. check1\n 2. place (maybe thornkill)\n 3. check2\n 4. 
movef (maybe killupdate)\n\n '''\n\n # check1: board tile -> boolean\n # checks if the tile can be placed* on the board\n\n def check1(self, b, t):\n\n if self.turnCount == 1 and t.x == 1 and t.y == 1:\n truth = False # turn 1: cannot place in middle tile\n\n elif self.turnCount <= self.safeTurns \\\n and ((t.type == 5 and Game.target(t, Game.btile(b, t.x, t.y)))\n or (t.type == 4 and Game.target(t, Game.mtile(b, t.x, t.y)))):\n truth = False # no violence turns\n\n elif t.type == 2 and b[t.x][t.y].type == 1 and Game.target(t, Game.ttile(b, t.x, t.y)) \\\n and self.turnCount > self.safeTurns and not (Game.enemy(t, Game.ttile(b, t.x, t.y), 2)):\n truth = True # activated Thorn on tombstone with target to kill and no adjacent enemy Thorn\n\n elif t.type == 3 and Game.target(t, [b[t.x][t.y]]) and b[t.x][t.y].type != 3 \\\n and self.turnCount > self.safeTurns:\n truth = True # Ombra assassinating target\n\n elif b[t.x][t.y].type == 0:\n truth = True # normal placement on empty space\n\n else:\n truth = False\n\n return truth\n\n # thornkill: board tile x-coord y-coord -> board\n # updates the board by removing Thorn's target at (i, j)\n\n @staticmethod\n def thornkill(b, t, i, j):\n\n new = []\n\n for e in Game.ttile(b, t.x, t.y):\n if (e.x == i and e.y == j) and (not (e.owner == t.owner or e.owner == 0)):\n new = copy.deepcopy(b)\n new[i][j] = Tile(i, j, 0, 0)\n new[t.x][t.y] = t\n\n if not new:\n raise Error(\"Invalid move.\")\n\n return new\n\n # check2: board tile -> boolean\n # checks if the tile would be threatened by something after it is placed on the board\n\n @staticmethod\n def check2(b, t):\n if Game.enemy(t, Game.btile(b, t.x, t.y), 5):\n truth = False # threatened by enemy Blade\n elif Game.enemy(t, Game.mtile(b, t.x, t.y), 4):\n truth = False # threatened by enemy Magus\n else:\n truth = True\n return truth\n\n # maketomb: tile list-of-tiles -> list-of-tiles\n # checks for the tile's kills in the input list and returns a list of tombstone tiles\n\n @staticmethod\n def maketomb(t, lst):\n lst1 = []\n for e in lst:\n if Game.singletarget(t, e):\n lst1 = [Tile(e.x, e.y, 1, 0)] + lst1\n return lst1\n\n # update: board list-of-tiles -> board\n # returns board updated with tiles from the list\n\n @staticmethod\n def update(b, lst):\n new = copy.deepcopy(b)\n for e in lst:\n new[e.x][e.y] = e\n return new\n\n # killupdate: board tile -> board\n # returns board after checking for tombstones created\n\n @staticmethod\n def killupdate(b, t):\n if t.type == 5:\n new = Game.update(b, Game.maketomb(t, Game.btile(b, t.x, t.y)))\n elif t.type == 4:\n new = Game.update(b, Game.maketomb(t, Game.mtile(b, t.x, t.y)))\n else:\n new = copy.deepcopy(b)\n return new\n\n # place: board tile x-coord y-coord -> board\n # returns board with the tile placed on it, and maybe call thornkill\n # (i,j) only matters when activating a Thorn\n # otherwise: (1,1) by default\n\n def place(self, b, t, i, j):\n if (t.type == 2 and b[t.x][t.y].type == 1) and self.check1(b, t):\n new = Game.thornkill(b, t, i, j)\n elif self.check1(b, t):\n new = copy.deepcopy(b)\n new[t.x][t.y] = t\n else:\n raise Error(\"Cannot place here.\")\n return new\n\n # movef: board tile x-coord y-coord -> board\n # purely functional one-turn action\n\n def movef(self, b, t, i, j):\n if Game.check2(self.place(b, t, i, j), t):\n new = Game.killupdate(self.place(b, t, i, j), t)\n else:\n raise Error(\"Zone is threatened.\")\n return new\n\n # nodupe: list -> list\n # removes duplicates from the list\n\n @staticmethod\n def nodupe(lst):\n seen = 
set()\n seen_add = seen.add\n return [x for x in lst if not (x in seen or seen_add(x))]\n\n # choices: board player -> list-of-moves\n # returns all legal moves the player can make\n\n def choices(self, b, p):\n\n sol = []\n for u in Game.nodupe(p.hand):\n for x in range(3):\n for y in range(3):\n\n if u == 2 and b[int(x)][int(y)].type == 1:\n\n for i in range(3):\n for j in range(3):\n\n try:\n self.movef(b, Tile(x, y, u, p.index), i, j)\n except:\n pass\n else: # moves involving activating Thorn\n sol += [[x, y, u, i, j]]\n\n else:\n\n try:\n self.movef(b, Tile(x, y, u, p.index), 1, 1)\n except:\n pass\n else: # normal moves\n sol += [[x, y, u]]\n\n return sol\n\n # move: tile x-coord y-coord\n # non-functional version of movef, changes the board\n # example: move(Tile(0, 1, 2, 4), 1, 1)\n\n def move(self, t, i, j):\n\n temp = copy.deepcopy(self.board)\n\n self.board = self.movef(self.board, t, i, j)\n\n # score incrementation\n\n if t.type == 5:\n self.score[t.owner-1] += len(Game.maketomb(t, Game.btile(temp, t.x, t.y)))\n elif t.type == 4:\n self.score[t.owner-1] += len(Game.maketomb(t, Game.mtile(temp, t.x, t.y)))\n\n # hire: x-coord y-coord owner type\n # shortcut for move: normal character at (x,y)\n\n def hire(self, x, y, o, t):\n self.move(Tile(x, y, t, o), 1, 1)\n\n # drain: x-coord y-coord owner x-coord y-coord\n # shortcut for move: activating Thorn at (x,y) with target at (i,j)\n\n def drain(self, x, y, o, i, j):\n self.move(Tile(x, y, 2, o), i, j)\n\n # countUnits: board -> list-of-owners\n # returns list of owners who have the most units on the board\n\n @staticmethod\n def countUnits(b):\n\n lst = []\n for i in range(3):\n for j in range(3):\n lst += [b[i][j].owner]\n\n lst = list(filter((0).__ne__, lst))\n count = Counter(lst)\n freq = count.values()\n total = list(freq).count(max(freq))\n\n return [elem[0] for elem in count.most_common(total)]\n\n # compare: list-of-owners -> owner or list-of-owners\n # takes list from countUnits and compares the owners' scores if their unit count ties\n # returns winner(s)\n\n def compare(self, lst):\n\n if len(lst) == 1:\n winner = lst[0]\n\n else:\n kills = []\n for i in lst:\n for x in range(self.score[i-1]):\n kills += [i]\n\n count = Counter(kills)\n freq = count.values()\n total = list(freq).count(max(freq))\n top = [elem[0] for elem in count.most_common(total)]\n\n if len(top) == 1:\n winner = top[0]\n else:\n winner = top\n\n return winner\n\n # main code for one turn's actions\n\n def turn(self):\n\n # (for testing purposes)\n # print(\"Possible moves: \", self.choices(self.board, self.players[self.currentP-1]))\n\n self.printBoard(self.board)\n\n if not self.choices(self.board, self.players[self.currentP - 1]):\n\n discard = eval(input(\"No possible moves for Player \" + str(self.currentP) +\n \", please discard a card.\\nYour hand is: \" +\n str(self.players[self.currentP-1].hand) + \"\\n\"))\n\n self.players[self.currentP-1].play1(discard)\n self.stuckCount += 1\n\n else:\n\n try:\n command = list(eval(input(\"Show me your move, Player \" +\n str(self.currentP) + \"!\\nYour hand is: \" +\n str(self.players[self.currentP-1].hand) + \"\\n\")))\n except:\n raise Error(\"Unknown command.\")\n\n if len(command) == 1 and command[0] == 0:\n raise SystemExit\n\n elif len(command) == 3:\n self.hire(command[0], command[1], self.currentP, command[2])\n self.players[self.currentP-1].play1(command[2])\n\n elif len(command) == 5 and command[2] == 2:\n self.drain(command[0], command[1], self.currentP, command[3], command[4])\n 
self.players[self.currentP-1].play1(command[2])\n\n else:\n raise Error(\"Unknown command.\")\n\n self.stuckCount = 0\n\n # main code for the game's execution\n # includes the 3 win conditions\n # recursion of turns until a win condition is met (or an error occurs)\n\n def play(self):\n\n try:\n if self.turnCount <= self.lastDraw:\n self.players[self.currentP-1].draw(self.deck)\n\n self.turn()\n\n # win by kills\n\n if self.score[self.currentP-1] >= self.killWin:\n print(\"Player \" + str(self.currentP) + \" wins!\")\n raise SystemExit\n\n # win by units\n\n if self.turnCount == self.deck.nb:\n win = self.compare(Game.countUnits(self.board))\n\n if isinstance(win, list):\n print(\"It's a tie between Players \" + str(win) + \"!\")\n else:\n print(\"Player \" + str(win) + \" wins!\")\n raise SystemExit\n\n self.currentP += 1\n self.turnCount += 1\n\n if self.currentP > self.numP:\n self.currentP = 1\n\n except Error as msg:\n if self.turnCount <= self.lastDraw:\n self.players[self.currentP-1].undraw(self.deck)\n print(msg)\n\n # win by field lock\n\n if self.stuckCount == self.numP - 1:\n print(\"Player \" + str(self.currentP) + \" wins!\")\n raise SystemExit\n\n self.play()\n\nstart = eval(input(\"Number of players:\\n\"))\nGame(start).play()\n", "repo_name": "Waznop/Tombs", "sub_path": "tombs.py", "file_name": "tombs.py", "file_ext": "py", "file_size_in_byte": 17761, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "random.shuffle", "line_number": 82, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 344, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 382, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 397, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 409, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 473, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 508, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 529, "usage_type": "call"}]} +{"seq_id": "27962768", "text": "import time\r\nimport cv2\r\nimport numpy\r\n\r\nvideo = cv2.VideoCapture('cp2077.mp4')\r\nflash = False\r\nspace_begin = 0\r\nspace_end = 0\r\nstart = None\r\nend = None\r\nresult = ''\r\n\r\nwhile video.isOpened():\r\n ret, frame = video.read()\r\n if ret:\r\n\r\n roi = frame[310:350, 510:620]\r\n avg_color_per_row = numpy.average(roi, axis=0)\r\n avg_color = numpy.average(avg_color_per_row, axis=0)\r\n num = int(avg_color[0]) - (int(avg_color[0]) % 10)\r\n\r\n if num >= 100:\r\n\r\n if flash == 0: \r\n \r\n start = time.time()\r\n flash = True\r\n space_end = time.time()\r\n diff = space_end - space_begin\r\n if round(diff, 2) >= 0.14: result += ' '\r\n\r\n if flash and num < 100:\r\n\r\n end = time.time()\r\n flash_length = end - start\r\n if round(flash_length, 1) == 0.0: result += '.' 
\r\n    elif round(flash_length, 1) == 0.1: result += '-'\r\n    \r\n    flash = False\r\n    start = None\r\n    space_begin = time.time()\r\n\r\n    else: break\r\n\r\n    cv2.waitKey(1)\r\n\r\nprint(result.strip())", "repo_name": "obrgit/cp2077morze", "sub_path": "morze.py", "file_name": "morze.py", "file_ext": "py", "file_size_in_byte": 1148, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "cv2.VideoCapture", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 19, "usage_type": "call"}, {"api_name": "time.time", "line_number": 26, "usage_type": "call"}, {"api_name": "time.time", "line_number": 28, "usage_type": "call"}, {"api_name": "time.time", "line_number": 34, "usage_type": "call"}, {"api_name": "time.time", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "25275584365", "text": "from django.urls import path\n\nfrom .views import index, register, user_login, user_logout, ShowPromo, ShowContacts\n\n\nurlpatterns = [\n    path('', index, name='home'),\n    path('promo/', ShowPromo.as_view(), name='promo'),\n    path('contacts/', ShowContacts.as_view(), name='contacts'),\n    path('main_app/register/', register, name='register'),\n    path('main_app/login/', user_login, name='login'),\n    path('main_app/logout/', user_logout, name='logout'),\n]\n", "repo_name": "Ensin1031/it_company_card", "sub_path": "it_company_card/apps/main_app/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 470, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "views.index", "line_number": 7, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "views.ShowPromo.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "views.ShowPromo", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.ShowContacts.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "views.ShowContacts", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "views.register", "line_number": 10, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.user_login", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "views.user_logout", "line_number": 12, "usage_type": "argument"}]} +{"seq_id": "70298282262", "text": "import shlex\nfrom unidecode import unidecode\nfrom util.loghandler import *\nfrom bottypes.invalid_command import *\n\n\nclass HandlerFactory():\n    \"\"\"\n    Every handler should initialize the `commands` dictionary with the commands\n    it can handle and the corresponding command class\n\n    The handler factory will then check if the handler can process a command, resolve it and execute it\n    \"\"\"\n    handlers = {}\n\n    def register(handler_name, handler):\n        log.info(\"Registering new handler: %s (%s)\" %\n                 (handler_name, handler.__class__.__name__))\n\n        HandlerFactory.handlers[handler_name] = handler\n        handler.handler_name = handler_name\n\n    def 
initialize(slack_client, bot_id):\n        \"\"\"\n        Initializes all handlers with common information.\n\n        Might remove bot_id from here later on?\n        \"\"\"\n        for handler in HandlerFactory.handlers:\n            HandlerFactory.handlers[handler].init(slack_client, bot_id)\n\n    def process(slack_client, botserver, msg, channel, user):\n        log.debug(\"Processing message: %s from %s (%s)\" % (msg, channel, user))\n\n        try:\n            command_line = unidecode(msg.lower())\n            args = shlex.split(command_line)\n        except:\n            message = \"Command failed: Malformed input.\"\n            slack_client.api_call(\"chat.postMessage\",\n                                  channel=channel, text=message, as_user=True)\n            return\n\n        try:\n            handler_name = args[0]\n\n            processed = False\n\n            usage_msg = \"\"\n\n            # Call a specific handler with this command\n            handler = HandlerFactory.handlers.get(handler_name)\n            if handler:\n                if (len(args) < 2) or (args[1] == \"help\"):\n                    # Generic help handling\n                    usage_msg += handler.usage\n                    processed = True\n                else:\n                    command = args[1]\n\n                    if handler.can_handle(command):\n                        handler.process(slack_client, command,\n                                        args[2:], channel, user)\n                        processed = True\n            else:\n                # Pass the command to every available handler\n                command = args[0]\n\n                for handler_name in HandlerFactory.handlers:\n                    handler = HandlerFactory.handlers[handler_name]\n\n                    if command == \"help\":\n                        usage_msg += handler.usage\n                        processed = True\n                    elif handler.can_handle(command):\n                        handler.process(slack_client, command,\n                                        args[1:], channel, user)\n                        processed = True\n\n            if not processed:\n                msg = \"Unknown handler or command: `%s`\" % msg\n                slack_client.api_call(\"chat.postMessage\",\n                                      channel=channel, text=msg, as_user=True)\n\n            if usage_msg:\n                slack_client.api_call(\"chat.postMessage\",\n                                      channel=user if botserver.get_config_option(\"send_help_as_dm\")==\"1\" else channel, text=usage_msg, as_user=True)\n\n        except InvalidCommand as e:\n            slack_client.api_call(\n                \"chat.postMessage\", channel=channel, text=e.message, as_user=True)\n        except Exception as ex:\n            log.exception(\"An error has occurred while processing a command\")\n", "repo_name": "Kileak/Kileak-Slack-Base-Bot", "sub_path": "handlers/handler_factory.py", "file_name": "handler_factory.py", "file_ext": "py", "file_size_in_byte": 3498, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "unidecode.unidecode", "line_number": 36, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "10004083142", "text": "import os\nimport torch\nimport logging\nfrom sagemaker_huggingface_inference_toolkit import content_types, decoder_encoder\n\nimport torch.nn.functional as F\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nlogger.addHandler(logging.StreamHandler())\n\n\ndef output_fn(prediction, accept):\n    \"\"\"Format prediction output\n\n    The default accept/content-type between containers for serial inference is JSON.\n    We also want to set the ContentType or mimetype as the same value as accept so the next\n    container can read the response payload correctly.\n    \"\"\"\n    if_knn_search = os.getenv(\"HF_KNN\", \"false\") == \"true\"\n    if if_knn_search:\n        from utils import get_es_client\n\n        es_client = get_es_client(\n            host=os.getenv(\"ES_HOST\", None),\n            region=os.getenv(\"ES_REGION\", None),\n        )\n        k = int(os.getenv(\"ES_K\", \"20\"))\n        for instance in prediction:\n            body = {\n                \"size\": k,\n                \"_source\": {\n                    \"exclude\": [\"embeddings\"],\n                },\n                \"query\": {\n                    \"knn\": {\n                        \"embeddings\": {\n                            \"vector\": 
instance[\"embedding\"],\n \"k\": k,\n }\n }\n },\n }\n res = es_client.search(index=os.getenv(\"ES_INDEX_NAME\", None), body=body)\n similar_items = res[\"hits\"][\"hits\"]\n instance[\"similar_items\"] = similar_items\n else:\n pass\n return decoder_encoder.encode(prediction, accept)\n\n\ndef predict_fn(data, hf_pipeline):\n # destruct model and tokenizer\n\n # pop inputs for pipeline\n inputs = data.pop(\"inputs\", data)\n parameters = data.pop(\"parameters\", {})\n\n if_extract_emb = os.getenv(\"HF_EMB\", \"false\") == \"true\"\n\n activation = {\"embeddings\": []}\n if if_extract_emb:\n\n def get_activation(name, embeddings):\n def hook(model, input, output):\n activation[name].append(output)\n\n return hook\n\n hf_pipeline.model.pre_classifier.register_forward_hook(\n get_activation(\"embeddings\", activation)\n )\n\n prediction = hf_pipeline(inputs, **parameters)\n\n if if_extract_emb:\n sentence_embeddings = torch.cat(activation[\"embeddings\"])\n # logger.info(f\"sentence_embeddings shape: {sentence_embeddings.shape}\")\n # Normalize embeddings\n sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1)\n\n # logger.info(f\"af sentence_embeddings shape: {sentence_embeddings.shape}\")\n sentence_embeddings = sentence_embeddings.numpy().tolist()\n\n # logger.info(f\"sentence_embeddings: {len(sentence_embeddings)}\")\n return [\n {**pred, \"embedding\": embed}\n for pred, embed in zip(prediction, sentence_embeddings)\n ]\n else:\n return prediction\n", "repo_name": "xiaochuan-du/similar-items", "sub_path": "sim_items_src/inference.py", "file_name": "inference.py", "file_ext": "py", "file_size_in_byte": 2913, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "12", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 9, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 10, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 20, "usage_type": "call"}, {"api_name": "utils.get_es_client", "line_number": 24, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 25, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 26, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 28, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 44, "usage_type": "call"}, {"api_name": "sagemaker_huggingface_inference_toolkit.decoder_encoder.encode", "line_number": 49, "usage_type": "call"}, {"api_name": "sagemaker_huggingface_inference_toolkit.decoder_encoder", "line_number": 49, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 80, "usage_type": "name"}]} +{"seq_id": "13143210918", "text": "from datetime import date, timedelta\nfrom odoo import fields, models\n\n\nclass ResPartner(models.Model):\n _inherit = \"res.partner\"\n\n invoice_list = fields.One2many('account.move', 'partner_id',\n string=\"Invoice Details\",\n readonly=True,\n domain=(\n [('invoice_payment_state', '=', 'not_paid'),\n ('type', '=', 'out_invoice')]))\n total_due = fields.Monetary(compute='_compute_for_followup', store=False,\n readonly=True)\n next_reminder_date = fields.Date(compute='_compute_for_followup',\n store=False, readonly=True)\n total_overdue = 
fields.Monetary(compute='_compute_for_followup',\n                                   store=False, readonly=True)\n    followup_status = fields.Selection(\n        [('in_need_of_action', 'In need of action'),\n         ('with_overdue_invoices', 'With overdue invoices'),\n         ('no_action_needed', 'No action needed')],\n        string='Followup status',\n    )\n\n    def _compute_for_followup(self):\n        \"\"\"\n        Compute the fields 'total_due', 'total_overdue', 'next_reminder_date' and 'followup_status'\n        \"\"\"\n        for record in self:\n            total_due = 0\n            total_overdue = 0\n            today = fields.Date.today()\n            for am in record.invoice_list:\n                if am.company_id == self.env.company:\n                    amount = am.amount_residual\n                    total_due += amount\n                    is_overdue = today > am.invoice_date_due if am.invoice_date_due else today > am.date\n                    if is_overdue:\n                        total_overdue += not am.invoice_sent and amount or 0\n            min_date = record.get_min_date()\n            action = record.action_after()\n            if min_date:\n                date_reminder = min_date + timedelta(days=action)\n                if date_reminder:\n                    record.next_reminder_date = date_reminder\n            else:\n                date_reminder = today\n                record.next_reminder_date = date_reminder\n            if total_overdue > 0 and date_reminder > today:\n                followup_status = \"with_overdue_invoices\"\n            elif total_due > 0 and date_reminder <= today:\n                followup_status = \"in_need_of_action\"\n            else:\n                followup_status = \"no_action_needed\"\n            record.total_due = total_due\n            record.total_overdue = total_overdue\n            record.followup_status = followup_status\n\n    def get_min_date(self):\n        today = date.today()\n        for this in self:\n            if this.invoice_list:\n                min_list = this.invoice_list.mapped('invoice_date_due')\n                while False in min_list:\n                    min_list.remove(False)\n                return min(min_list)\n            else:\n                return today\n\n    def get_delay(self):\n        delay = \"\"\"select id,delay from followup_line where followup_id =\n        (select id from account_followup where company_id = %s)\n        order by delay limit 1\"\"\"\n        self._cr.execute(delay, [self.env.company.id])\n        record = self.env.cr.dictfetchall()\n        return record\n\n    def action_after(self):\n        lines = self.env['followup.line'].search([(\n            'followup_id.company_id', '=', self.env.company.id)])\n        if lines:\n            record = self.get_delay()\n            for i in record:\n                return i['delay']\n", "repo_name": "jobaer123s/Odoo-custom-module", "sub_path": "base_accounting_kit/models/res_partner.py", "file_name": "res_partner.py", "file_ext": "py", "file_size_in_byte": 3611, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "12", "api": [{"api_name": "odoo.models.Model", "line_number": 5, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 5, "usage_type": "name"}, {"api_name": "odoo.fields.One2many", "line_number": 8, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 8, "usage_type": "name"}, {"api_name": "odoo.fields.Monetary", "line_number": 14, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 14, "usage_type": "name"}, {"api_name": "odoo.fields.Date", "line_number": 16, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 16, "usage_type": "name"}, {"api_name": "odoo.fields.Monetary", "line_number": 18, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 18, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 20, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 20, "usage_type": "name"}, {"api_name": "odoo.fields.Date.today", "line_number": 34, "usage_type": "call"}, {"api_name": "odoo.fields.Date", "line_number": 34, "usage_type": "attribute"}, {"api_name": 
"odoo.fields", "line_number": 34, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "22675333520", "text": "#!/usr/bin/python3\n\"\"\"Module for FileStorage class.\"\"\"\nimport datetime\nimport json\nimport os\n\n\nclass FileStorage:\n\n    \"\"\"Class for storing and retrieving data\"\"\"\n    __file_path = \"file.json\"\n    __objects = {}\n\n    def all(self):\n        \"\"\"returns the dictionary __objects\"\"\"\n        return FileStorage.__objects\n\n    def new(self, obj):\n        \"\"\"sets in __objects the obj with key <obj class name>.id\"\"\"\n        key = f\"{obj.__class__.__name__}.{obj.id}\"\n        FileStorage.__objects[key] = obj\n\n    def save(self):\n        \"\"\"serializes __objects to the JSON file (path: __file_path)\"\"\"\n        objects_dict = {\n            key: value.to_dict()\n            for key, value in FileStorage.__objects.items()\n        }\n        with open(FileStorage.__file_path, \"w\") as f:\n            json.dump(objects_dict, f, indent=4)\n\n    def classes(self):\n        \"\"\"Returns a dictionary of valid classes and their references\"\"\"\n        from models.base_model import BaseModel\n        from models.user import User\n        from models.state import State\n        from models.city import City\n        from models.amenity import Amenity\n        from models.place import Place\n        from models.review import Review\n\n        classes = {\"BaseModel\": BaseModel,\n                   \"User\": User,\n                   \"State\": State,\n                   \"City\": City,\n                   \"Amenity\": Amenity,\n                   \"Place\": Place,\n                   \"Review\": Review}\n        return classes\n\n    def reload(self):\n        \"\"\"Reloads the stored objects\"\"\"\n        from . import import_all_classes\n        classes = import_all_classes()\n\n        try:\n            with open(FileStorage.__file_path, \"r\") as f:\n                loaded_dict = json.load(f)\n            FileStorage.__objects = {\n                key: eval(value['__class__'], classes)(**value)\n                for key, value in loaded_dict.items()\n            }\n        except FileNotFoundError:\n            pass\n        except json.decoder.JSONDecodeError:\n            pass\n\n    def attributes(self):\n        \"\"\"Returns the valid attributes and their types for classname\"\"\"\n        attributes = {\n            \"BaseModel\":\n            {\"id\": str,\n             \"created_at\": datetime.datetime,\n             \"updated_at\": datetime.datetime},\n            \"User\":\n            {\"email\": str,\n             \"password\": str,\n             \"first_name\": str,\n             \"last_name\": str},\n            \"State\":\n            {\"name\": str},\n            \"City\":\n            {\"state_id\": str,\n             \"name\": str},\n            \"Amenity\":\n            {\"name\": str},\n            \"Place\":\n            {\"city_id\": str,\n             \"user_id\": str,\n             \"name\": str,\n             \"description\": str,\n             \"number_rooms\": int,\n             \"number_bathrooms\": int,\n             \"max_guest\": int,\n             \"price_by_night\": int,\n             \"latitude\": float,\n             \"longitude\": float,\n             \"amenity_ids\": list},\n            \"Review\":\n            {\"place_id\": str,\n             \"user_id\": str,\n             \"text\": str}\n        }\n        return attributes\n", "repo_name": "MosehMaguah/AirBnB_clone", "sub_path": "models/engine/file_storage.py", "file_name": "file_storage.py", "file_ext": "py", "file_size_in_byte": 3360, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "json.dump", "line_number": 30, "usage_type": "call"}, {"api_name": "models.base_model.BaseModel", "line_number": 42, "usage_type": "name"}, {"api_name": "models.user.User", "line_number": 43, "usage_type": "name"}, {"api_name": "models.state.State", "line_number": 44, "usage_type": "name"}, {"api_name": "models.city.City", "line_number": 45, "usage_type": "name"}, {"api_name": "models.amenity.Amenity", "line_number": 46, 
"usage_type": "name"}, {"api_name": "models.place.Place", "line_number": 47, "usage_type": "name"}, {"api_name": "models.review.Review", "line_number": 48, "usage_type": "name"}, {"api_name": "{'BaseModel': 'models.base_model.BaseModel', 'User': 'models.user.User', 'State': 'models.state.State', 'City': 'models.city.City', 'Amenity': 'models.amenity.Amenity', 'Place': 'models.place.Place', 'Review': 'models.review.Review'}.__file_path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 58, "usage_type": "call"}, {"api_name": "{'BaseModel': 'models.base_model.BaseModel', 'User': 'models.user.User', 'State': 'models.state.State', 'City': 'models.city.City', 'Amenity': 'models.amenity.Amenity', 'Place': 'models.place.Place', 'Review': 'models.review.Review'}.__objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "json.decoder", "line_number": 65, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 73, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 74, "usage_type": "attribute"}]} +{"seq_id": "36077502721", "text": "from torchvision import transforms\n\ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Resize((50, 200)), # Hint: this might not be the best way to resize images\n transforms.RandomAffine(30, (0.1,0.1), (0.9, 1.1)),\n transforms.ColorJitter(0.2, 0.2, 0.2, 0.2),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # Hint: this might not be the best normalization\n ]\n)\n", "repo_name": "Egor-Baryshnikov/ysda_toloka1", "sub_path": "src/transforms.py", "file_name": "transforms.py", "file_ext": "py", "file_size_in_byte": 432, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "torchvision.transforms.Compose", "line_number": 3, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 3, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 4, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 4, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 5, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 5, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomAffine", "line_number": 6, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 6, "usage_type": "name"}, {"api_name": "torchvision.transforms.ColorJitter", "line_number": 7, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 7, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 8, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "27549661940", "text": "import itertools\n\ndef dequantize(x, scale, offset):\n return (x - offset) * scale\n\ndef quantize(x, scale, offset):\n return max(0, min(255, int(round(x / scale)) + offset))\n\ndef create_test(input0_scale, input0_offset,\n input1_scale, input1_offset,\n output_scale, output_offset):\n def sub_quantized(a, b):\n a_dequantized = dequantize(a, input0_scale, input0_offset)\n b_dequantized = dequantize(b, input1_scale, input1_offset)\n return quantize(a_dequantized - b_dequantized, output_scale, output_offset)\n\n values = [0, 1, 2, 3, 4, 5, 250, 251, 252, 253, 254, 255]\n inputs = list(itertools.product(values, values))\n input0_values, input1_values = 
zip(*inputs)\n    output_values = [sub_quantized(a, b) for a, b in inputs]\n    size = len(output_values)\n    input0 = Input(\"input0\", \"TENSOR_QUANT8_ASYMM\",\n                   \"{%d}, %g, %d\" % (size, input0_scale, input0_offset))\n    input1 = Input(\"input1\", \"TENSOR_QUANT8_ASYMM\",\n                   \"{%d}, %g, %d\" % (size, input1_scale, input1_offset))\n    activation = 0\n    output0 = Output(\"output0\", \"TENSOR_QUANT8_ASYMM\",\n                     \"{%d}, %g, %d\" % (size, output_scale, output_offset))\n    model = Model().Operation(\"SUB\", input0, input1, activation).To(output0)\n    Example({\n        input0: input0_values,\n        input1: input1_values,\n        output0: output_values,\n    })\n\nscales_and_offsets = [(1.0, 0),\n                      (1.0, 1),\n                      (0.01, 120),\n                      (10.0, 120)]\nfor params in itertools.product(scales_and_offsets,\n                                scales_and_offsets,\n                                scales_and_offsets):\n    input0_params, input1_params, output_params = params\n    create_test(*input0_params, *input1_params, *output_params)\n", "repo_name": "Samsung/ONE", "sub_path": "tests/nnapi/specs/skip/V1_2/sub_quantized_different_scales.mod.py", "file_name": "sub_quantized_different_scales.mod.py", "file_ext": "py", "file_size_in_byte": 1762, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 363, "dataset": "github-code", "pt": "12", "api": [{"api_name": "itertools.product", "line_number": 18, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "25331240543", "text": "import os\nfrom pydub import AudioSegment\nfrom tqdm import tqdm\nimport sys\n\ndef main(dir_path, audio_file):\n    \"\"\"\n    Combines all music files in the provided directory \n    path with the provided audio file. Results are written\n    to disk in individual sound files.\n\n    :param dir_path: path to the directory with sound files\n    to be overlayed\n    :param audio_file: full path to the audio file with \n    which each file in the directory is combined\n    \"\"\"\n    file_names = os.listdir(dir_path)\n    file_names = [x for x in file_names if x.endswith(\".mp3\")]\n    sound2 = AudioSegment.from_file(audio_file)\n    for i, file_name in tqdm(enumerate(file_names)):\n        sound1 = AudioSegment.from_file(os.path.join(dir_path, file_name))\n        \n        combined = sound1.overlay(sound2)\n\n        combined.export(\"combined_{}.mp3\".format(file_name.replace(\" \", \"\").replace(\"-\", \"\")), format='mp3')\n\nif __name__ == \"__main__\":\n    if len(sys.argv) != 3:\n        print(\"Usage: python3 overlay.py <dir_path> <audio_file>\")\n        sys.exit(1)\n    main(sys.argv[1], sys.argv[2])\n", "repo_name": "Hernqvist/searchproj", "sub_path": "overlay.py", "file_name": "overlay.py", "file_ext": "py", "file_size_in_byte": 1120, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "os.listdir", "line_number": 17, "usage_type": "call"}, {"api_name": "pydub.AudioSegment.from_file", "line_number": 19, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 19, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 20, "usage_type": "call"}, {"api_name": "pydub.AudioSegment.from_file", "line_number": 21, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 21, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 31, "usage_type": "attribute"}]} +{"seq_id": 
"31513503543", "text": "import os\nimport ffmpy\nimport shutil\nimport subprocess\nimport json\n\ndef process_audio(audio_file, analyzed_data, output_directory):\n    target_lufs = -18\n    headroom = 18\n\n    gain_adjustment = target_lufs - analyzed_data['lufs']\n    output_file = os.path.join(output_directory, 'processed_' + os.path.basename(audio_file))\n\n    # Get the input file's bits per sample and sample rate\n    ffprobe = ffmpy.FFprobe(\n        inputs={audio_file: None},\n        global_options='-v error -show_entries stream=bits_per_sample,sample_rate -of json'\n    )\n    ffprobe_output = ffprobe.run(stdout=subprocess.PIPE)[0].decode()\n    ffprobe_json = json.loads(ffprobe_output)\n    bits_per_sample = int(ffprobe_json['streams'][0]['bits_per_sample'])\n    sample_rate = int(ffprobe_json['streams'][0]['sample_rate'])\n\n    # Set the output sample format based on the input bits per sample\n    sample_format = {16: 'pcm_s16le', 32: 'pcm_s32le'}.get(bits_per_sample, 'pcm_s16le')\n\n    if gain_adjustment != 0 or analyzed_data['max_dbfs'] > target_lufs + headroom:\n        # Apply gain adjustment and set the output sample format\n        ff = ffmpy.FFmpeg(\n            inputs={audio_file: None},\n            outputs={\n                output_file: f'-ar {sample_rate} -acodec {sample_format} -af volume={gain_adjustment}dB'}\n        )\n        ff.run()\n\n    else:\n        # No processing needed, just copy the input file to the output file\n        shutil.copyfile(audio_file, output_file)\n", "repo_name": "Gabeiscool420/SoundSage---LLM-Audio-Processing", "sub_path": "SoundSage-LLM Integration/SoundSage/WorkBench/AudioTools/AutoGain/audio_processor.py", "file_name": "audio_processor.py", "file_ext": "py", "file_size_in_byte": 1466, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "12", "api": [{"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 12, "usage_type": "call"}, {"api_name": "ffmpy.FFprobe", "line_number": 15, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 19, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 20, "usage_type": "call"}, {"api_name": "ffmpy.FFmpeg", "line_number": 29, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "29336892737", "text": "import uuid\n\nfrom django.http import HttpResponse, HttpResponseNotFound\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\nfrom sl_app.models import User_to_list, MallList, Item\nfrom django.template import loader\nfrom django.contrib.auth import logout, authenticate, login\n\n\ndef authorization(request):\n    if request.method == 'GET':\n        template = loader.get_template('authorization.html')\n        context = {}\n        return HttpResponse(template.render(context, request))\n    else:\n        username = request.POST.get(\"login\")\n        password = request.POST.get(\"psw\")\n        user = authenticate(request, username=username, password=password)\n        if user is not None:\n            login(request, user)\n            return redirect('/shop_list')\n\n        else:\n            return redirect('/user/authorization')\n\ndef logaut(request):\n    if request.user.is_authenticated:\n        logout(request)\n        return redirect('/user/authorization')\n    else:\n        return HttpResponse(\"You are not authorized\")\n\n\ndef register(request):\n    if request.method == 'GET':\n        template = loader.get_template('register.html')\n        context = {}\n        return HttpResponse(template.render(context, request))\n    else:\n        username = 
request.POST.get(\"login\")\n password = request.POST.get(\"psw\")\n email = request.POST.get(\"email\")\n user = User.objects.create_user(username, email, password)\n user.save()\n list_id = uuid.uuid4()\n user_to_list = User_to_list(user_id=user.id, list_id=list_id, or_list=list_id)\n user_to_list.save()\n return redirect('/user/authorization')\ndef invite(request):\n if request.method == 'GET':\n template = loader.get_template('invite.html')\n context = {}\n return HttpResponse(template.render(context, request))\n else:\n email = request.POST.get(\"email\")\n invate_user = User.objects.filter(email=email).first()\n if invate_user is None:\n return HttpResponseNotFound('Пользователь не найден')\n\n current_user_list = User_to_list.objects.get(user_id=request.user.id).list_id\n User_to_list.objects.filter(user_id=invate_user.id).update(list_id=current_user_list)\n template = loader.get_template('invite_ok.html')\n context = {'name': invate_user.username}\n return HttpResponse(template.render(context, request))\n\ndef remove_user(request):\n if request.method == 'GET':\n template = loader.get_template('remove.html')\n context = {}\n return HttpResponse(template.render(context, request))\n else:\n email = request.POST.get(\"email\")\n invate_user = User.objects.filter(email=email).first()\n if invate_user is None:\n return HttpResponse('Пользователь не найден')\n\n original_user_list = User_to_list.objects.get(user_id=invate_user.id).or_list\n User_to_list.objects.filter(user_id=invate_user.id).update(list_id=original_user_list)\n template = loader.get_template('remove_ok.html')\n context = {'name': invate_user.username}\n return HttpResponse(template.render(context, request))\n\ndef add_shop(request):\n if request.method == 'GET':\n template = loader.get_template('add_mall.html')\n context = {}\n return HttpResponse(template.render(context, request))\n else:\n user_id = request.user.id\n lst = User_to_list.objects.get(user_id=user_id).list_id\n mall = request.POST.get(\"name_mall\")\n save_mall = MallList(name_mall=mall, list_id=lst)\n save_mall.save()\n return redirect('/user/add_item')\n\ndef add_item(request):\n if request.method == 'GET':\n user_id = request.user.id\n lst = User_to_list.objects.get(user_id=user_id).list_id\n mall = MallList.objects.filter(list_id=lst).values()\n\n return render(request, 'add_item.html', {'mall': mall})\n else:\n name_item = request.POST.get(\"name_item\")\n mall_id_chk = int(request.POST.get(\"mall\"))\n item = Item(name_item=name_item, shop_id_id=mall_id_chk)\n item.save()\n\n return redirect('/shop_list/slist')\ndef analytics(request):\n return HttpResponse('Расходы за месяц')\n", "repo_name": "Galyshev/Django", "sub_path": "Shop_list/users/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4311, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "django.template.loader.get_template", "line_number": 13, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 13, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 15, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 19, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 21, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 25, "usage_type": "call"}, {"api_name": 
"django.contrib.auth.logout", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 30, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 32, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 37, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 37, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 39, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 44, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 44, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 46, "usage_type": "call"}, {"api_name": "sl_app.models.User_to_list", "line_number": 47, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 49, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 52, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 52, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 54, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 57, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 57, "usage_type": "name"}, {"api_name": "django.http.HttpResponseNotFound", "line_number": 59, "usage_type": "call"}, {"api_name": "sl_app.models.User_to_list.objects.get", "line_number": 61, "usage_type": "call"}, {"api_name": "sl_app.models.User_to_list.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "sl_app.models.User_to_list", "line_number": 61, "usage_type": "name"}, {"api_name": "sl_app.models.User_to_list.objects.filter", "line_number": 62, "usage_type": "call"}, {"api_name": "sl_app.models.User_to_list.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "sl_app.models.User_to_list", "line_number": 62, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 63, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 63, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 65, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 69, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 69, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 71, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 74, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 74, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 76, "usage_type": "call"}, {"api_name": "sl_app.models.User_to_list.objects.get", "line_number": 78, "usage_type": "call"}, {"api_name": "sl_app.models.User_to_list.objects", "line_number": 78, "usage_type": "attribute"}, {"api_name": "sl_app.models.User_to_list", "line_number": 78, "usage_type": "name"}, {"api_name": "sl_app.models.User_to_list.objects.filter", "line_number": 79, "usage_type": "call"}, {"api_name": 
"sl_app.models.User_to_list.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "sl_app.models.User_to_list", "line_number": 79, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 80, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 80, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 82, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 86, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 86, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 88, "usage_type": "call"}, {"api_name": "sl_app.models.User_to_list.objects.get", "line_number": 91, "usage_type": "call"}, {"api_name": "sl_app.models.User_to_list.objects", "line_number": 91, "usage_type": "attribute"}, {"api_name": "sl_app.models.User_to_list", "line_number": 91, "usage_type": "name"}, {"api_name": "sl_app.models.MallList", "line_number": 93, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 95, "usage_type": "call"}, {"api_name": "sl_app.models.User_to_list.objects.get", "line_number": 100, "usage_type": "call"}, {"api_name": "sl_app.models.User_to_list.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "sl_app.models.User_to_list", "line_number": 100, "usage_type": "name"}, {"api_name": "sl_app.models.MallList.objects.filter", "line_number": 101, "usage_type": "call"}, {"api_name": "sl_app.models.MallList.objects", "line_number": 101, "usage_type": "attribute"}, {"api_name": "sl_app.models.MallList", "line_number": 101, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 103, "usage_type": "call"}, {"api_name": "sl_app.models.Item", "line_number": 107, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 110, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "3177183427", "text": "from logging import PlaceHolder\nimport math\nfrom multiprocessing import Value\nfrom itertools import combinations\nimport pylab as pl\nfrom bs4 import BeautifulSoup\nfrom urllib.request import Request, urlopen\nimport re\nimport numpy as np\nimport numpy.random as random\nfrom numpy.core.fromnumeric import *\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math as m\nimport plotly.figure_factory as ff\nfrom collections import Counter\nimport streamlit as st\n\ndef app(Data):\n st.title(\"Assignment 8\")\n def printf(url):\n st.markdown(f'
<div>{url}</div>
', unsafe_allow_html=True)\n operation = st.selectbox(\"Operation\", [\"Crawler_bfs\",\"Crawler_dfs\",\"Page_Rank_Algo\"])\n if operation == \"Crawler_bfs\":\n vis = dict()\n sst = set()\n q = []\n def bfs():\n while(len(q)>0):\n nxt_url = q[0] \n q.pop(0)\n try:\n st.write(\"*************************level: \",vis[nxt_url],\"*******************************\")\n # if(vis[link.get('href')])>6:\n # return \n except KeyError:\n pass\n try:\n req = Request(nxt_url)\n html_page = urlopen(req)\n soup = BeautifulSoup(html_page, \"lxml\")\n except Exception:\n pass\n cnt = 0 ;\n for link in soup.findAll('a'):\n st.write(link.get('href'))\n # if(vis[link.get('href')]==0):\n if(link.get('href') not in sst):\n q.append(link.get('href'))\n sst.add(link.get('href'))\n try:\n vis[link.get('href')] = vis[nxt_url] + 1\n if(vis[link.get('href')])>5:\n return \n except KeyError:\n pass\n cnt = cnt + 1 ;\n if(cnt>20):\n break \n \n # for link in links:\n # print(link) \n if(len(sst)>10000):\n return \n\n seed_url = \"http://google.com\"\n # req = Request(\"http://google.com\")\n # html_page = urlopen(req)\n\n # soup = BeautifulSoup(html_page, \"lxml\")\n\n # links = []\n # for link in soup.findAll('a'):\n # links.append(link.get('href'))\n vis[seed_url] = 0 \n q.append(seed_url)\n bfs()\n\n # for link in st: \n # print(link)\n # for link in links:\n # print(link)\n if operation == \"Crawler_dfs\":\n q = []\n # vis = dict()\n sst = set()\n def dfs():\n while(len(q)>0):\n nxt_url = q[-1] \n q.pop(-1)\n try:\n req = Request(nxt_url)\n html_page = urlopen(req)\n\n soup = BeautifulSoup(html_page, \"lxml\")\n except Exception:\n pass\n cnt = 0 ;\n for link in soup.findAll('a'):\n if(link is not None):\n st.write(link.get('href'))\n # if(vis[link.get('href')]==0):\n if(link.get('href') not in sst):\n q.append(link.get('href'))\n sst.add(link.get('href'))\n try:\n pass\n # vis[link.get('href')] = vis[nxt_url] + 1\n # if(vis[link.get('href')])>5:\n # return \n except KeyError:\n pass\n cnt = cnt + 1 ;\n if(cnt>20):\n break \n \n # for link in links:\n # print(link) \n if(len(sst)>10000):\n return \n\n seed_url = \"http://google.com\"\n # req = Request(\"http://google.com\")\n # html_page = urlopen(req)\n\n # soup = BeautifulSoup(html_page, \"lxml\")\n\n # links = []\n # for link in soup.findAll('a'):\n # links.append(link.get('href'))\n # vis[seed_url] = 0 \n q.append(seed_url)\n dfs()\n\n # for link in st: \n # print(link)\n # for link in links:\n # print(link)\n\n if operation == \"Page_Rank_Algo\":\n file = open(\"C:\\\\Users\\\\Akash\\\\Downloads\\\\data mining\\\\Assignment8\\\\stnfordgraph.txt\", \"r\")\n flg = 0 ;\n content = file.readlines()\n\n adj_mat = {}\n for line in content:\n # print(line)\n if(flg==0):\n lin = line.split(' ')\n vertex = int(lin[0])\n edges = int(lin[1][:])\n # print(edges)\n flg = 1\n adj_mat = {new_list: [] for new_list in range(vertex+1)}\n in_deg = [0]*(vertex+1)\n out_deg = [0]*(vertex+1)\n else:\n lin = line.split(' ')\n tmp = lin[0].split('\\t')\n # print(tmp) \n adj_mat[int(tmp[1][:-1])].append(int(tmp[0]))\n in_deg[int(tmp[1][:-1])] += 1\n out_deg[int(tmp[0])] += 1\n file = open('geek.txt','w')\n # print(out_deg)\n def calclute_pagerank():\n cnt = 0\n itr = 1\n while(cnt<=vertex+1):\n file.write(str(\"******Iteration\" +str(itr)+\" ******\"))\n for i in range(1,vertex+1):\n tmp_prnk[i] = 0 ;\n for no in adj_mat[i]:\n tmp_prnk[i] += (page_Rank[no]/out_deg[no])\n if((abs(tmp_prnk[i]-page_Rank[i])/(page_Rank[i]))*100<=0.0001):\n cnt += 1\n if(tmp_prnk[i]):\n page_Rank[i] = 
tmp_prnk[i]\n file.write(str(page_Rank[i])+\" \")\n itr+=1 \n return itr ;\n page_Rank = [1/(vertex)]*(vertex+1)\n tmp_prnk = [0]*(vertex+1) \n page_Rank[0] = 0\n # out_deg = [0,2,0,3,2,2,1]\n # file.write(str(page_Rank))\n itr = calclute_pagerank()\n index = {}\n st.write(\"Number of iteration is : \", itr)\n for i in range(1,vertex+1):\n index[page_Rank[i]] = i ;\n page_Rank.sort()\n for i in range(1,11):\n st.write(\"Top \",i, \"web page number is \", index[page_Rank[-i]] , \"page rank is \",page_Rank[-i]);\n # st.write(\"Web and their Page rank\")\n # for i in range(1,vertex+1):\n # st.write(i,\" page their \",page_Rank[-i])\n if operation == \"HITS\":\n input_list = []\n \n st.subheader(\"Dataset\")\n st.dataframe(Data.head(1000), width=1000, height=500)\n vertex = set()\n for i in range(len(Data)):\n input_list.append([Data.loc[i, 'fromNode'],Data.loc[i, 'toNode']])\n vertex.add(Data.loc[i, 'fromNode'])\n vertex.add(Data.loc[i, 'toNode'])\n size = len(vertex)\n adj_matrix = np.zeros([size+1,size+1])\n\n for i in input_list:\n adj_matrix[i[0]][i[1]] = 1\n \n printf(\"No of Nodes: \"+str(size))\n printf(\"No of Edges: \"+str(len(Data)))\n st.subheader(\"Adjecency Matrix\")\n st.dataframe(adj_matrix, width=1000, height=500)\n A = adj_matrix\n # st.dataframe(A)\n At = adj_matrix.transpose()\n st.subheader(\"Transpose of Adj matrix\")\n st.dataframe(At)\n\n u = [1 for i in range(size+1)]\n printf(\"Hub weight matrix (U)\")\n st.dataframe(u)\n # printf(\"Hub weight vector (V)\")\n # printf(u)\n v = np.matrix([])\n for i in range(5):\n v = np.dot(At,u)\n u = np.dot(A,v)\n\n # u.sort(reverse=True)\n hubdict = dict()\n for i in range(len(u)):\n hubdict[i]= u[i]\n \n authdict = dict()\n for i in range(len(v)):\n authdict[i]=v[i]\n\n hubdict = dict( sorted(hubdict.items(), key=operator.itemgetter(1),reverse=True))\n authdict = dict( sorted(authdict.items(), key=operator.itemgetter(1),reverse=True))\n # printf(sorted_rank)\n printf(\"HubPages : \")\n i = 1\n printf(f\"Rank ___ Node ________ Hubs score\")\n for key, rank in hubdict.items():\n if i == 11:\n break\n printf(f\"{i} _____ {key} ________ {rank}\")\n i += 1\n\n printf(\"Authoritative Pages : \")\n i = 1\n printf(f\"Rank ___ Node ________ Auth score\")\n for key, rank in authdict.items():\n if i == 11:\n break\n printf(f\"{i} _____ {key} ________ {rank}\")\n i += 1\n\n\n\n\n\n ", "repo_name": "akashraut03/RES", "sub_path": "Apps/asg8.py", "file_name": "asg8.py", "file_ext": "py", "file_size_in_byte": 8980, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "streamlit.title", "line_number": 21, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 23, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 24, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 34, "usage_type": "call"}, {"api_name": "urllib.request.Request", "line_number": 40, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 41, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 42, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 47, "usage_type": "call"}, {"api_name": "urllib.request.Request", "line_number": 93, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 94, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 96, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 102, "usage_type": "call"}, 
{"api_name": "streamlit.write", "line_number": 190, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 195, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 202, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 210, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 217, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 218, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 222, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 223, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 233, "usage_type": "call"}]} +{"seq_id": "36652202300", "text": "import math\nimport numpy as np\n\nfrom cereal import log\nfrom common.filter_simple import FirstOrderFilter\nfrom common.numpy_fast import clip, interp\nfrom common.realtime import DT_CTRL\nfrom selfdrive.car import apply_toyota_steer_torque_limits\nfrom selfdrive.car.toyota.values import CarControllerParams\nfrom selfdrive.controls.lib.drive_helpers import get_steer_max\n\n\nclass LatControlINDI():\n def __init__(self, CP):\n self.angle_steers_des = 0.\n\n A = np.array([[1.0, DT_CTRL, 0.0],\n [0.0, 1.0, DT_CTRL],\n [0.0, 0.0, 1.0]])\n C = np.array([[1.0, 0.0, 0.0],\n [0.0, 1.0, 0.0]])\n\n # Q = np.matrix([[1e-2, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 10.0]])\n # R = np.matrix([[1e-2, 0.0], [0.0, 1e3]])\n\n # (x, l, K) = control.dare(np.transpose(A), np.transpose(C), Q, R)\n # K = np.transpose(K)\n K = np.array([[7.30262179e-01, 2.07003658e-04],\n [7.29394177e+00, 1.39159419e-02],\n [1.71022442e+01, 3.38495381e-02]])\n\n self.speed = 0.\n\n self.K = K\n self.A_K = A - np.dot(K, C)\n self.x = np.array([[0.], [0.], [0.]])\n\n self.enforce_rate_limit = CP.carName == \"toyota\"\n\n self._RC = (CP.lateralTuning.indi.timeConstantBP, CP.lateralTuning.indi.timeConstantV)\n self._G = (CP.lateralTuning.indi.actuatorEffectivenessBP, CP.lateralTuning.indi.actuatorEffectivenessV)\n self._outer_loop_gain = (CP.lateralTuning.indi.outerLoopGainBP, CP.lateralTuning.indi.outerLoopGainV)\n self._inner_loop_gain = (CP.lateralTuning.indi.innerLoopGainBP, CP.lateralTuning.indi.innerLoopGainV)\n\n self.sat_count_rate = 1.0 * DT_CTRL\n self.sat_limit = CP.steerLimitTimer\n self.steer_filter = FirstOrderFilter(0., self.RC, DT_CTRL)\n\n self.reset()\n\n @property\n def RC(self):\n return interp(self.speed, self._RC[0], self._RC[1])\n\n @property\n def G(self):\n return interp(self.speed, self._G[0], self._G[1])\n\n @property\n def outer_loop_gain(self):\n return interp(self.speed, self._outer_loop_gain[0], self._outer_loop_gain[1])\n\n @property\n def inner_loop_gain(self):\n return interp(self.speed, self._inner_loop_gain[0], self._inner_loop_gain[1])\n\n def reset(self):\n self.steer_filter.x = 0.\n self.output_steer = 0.\n self.sat_count = 0.\n self.speed = 0.\n\n def _check_saturation(self, control, check_saturation, limit):\n saturated = abs(control) == limit\n\n if saturated and check_saturation:\n self.sat_count += self.sat_count_rate\n else:\n self.sat_count -= self.sat_count_rate\n\n self.sat_count = clip(self.sat_count, 0.0, 1.0)\n\n return self.sat_count > self.sat_limit\n\n def update(self, active, 
CS, CP, VM, params, curvature, curvature_rate):\n self.speed = CS.vEgo\n # Update Kalman filter\n y = np.array([[math.radians(CS.steeringAngleDeg)], [math.radians(CS.steeringRateDeg)]])\n self.x = np.dot(self.A_K, self.x) + np.dot(self.K, y)\n\n indi_log = log.ControlsState.LateralINDIState.new_message()\n indi_log.steeringAngleDeg = math.degrees(self.x[0])\n indi_log.steeringRateDeg = math.degrees(self.x[1])\n indi_log.steeringAccelDeg = math.degrees(self.x[2])\n\n steers_des = VM.get_steer_from_curvature(-curvature, CS.vEgo)\n steers_des += math.radians(params.angleOffsetDeg)\n if CS.vEgo < 0.3 or not active:\n indi_log.active = False\n self.output_steer = 0.0\n self.steer_filter.x = 0.0\n else:\n\n rate_des = VM.get_steer_from_curvature(-curvature_rate, CS.vEgo)\n\n # Expected actuator value\n self.steer_filter.update_alpha(self.RC)\n self.steer_filter.update(self.output_steer)\n\n # Compute acceleration error\n rate_sp = self.outer_loop_gain * (steers_des - self.x[0]) + rate_des\n accel_sp = self.inner_loop_gain * (rate_sp - self.x[1])\n accel_error = accel_sp - self.x[2]\n\n # Compute change in actuator\n g_inv = 1. / self.G\n delta_u = g_inv * accel_error\n\n # If steering pressed, only allow wind down\n if CS.steeringPressed and (delta_u * self.output_steer > 0):\n delta_u = 0\n\n # Enforce rate limit\n if self.enforce_rate_limit:\n steer_max = float(CarControllerParams.STEER_MAX)\n new_output_steer_cmd = steer_max * (self.steer_filter.x + delta_u)\n prev_output_steer_cmd = steer_max * self.output_steer\n new_output_steer_cmd = apply_toyota_steer_torque_limits(new_output_steer_cmd, prev_output_steer_cmd, prev_output_steer_cmd, CarControllerParams)\n self.output_steer = new_output_steer_cmd / steer_max\n else:\n self.output_steer = self.steer_filter.x + delta_u\n\n steers_max = get_steer_max(CP, CS.vEgo)\n self.output_steer = clip(self.output_steer, -steers_max, steers_max)\n\n indi_log.active = True\n indi_log.rateSetPoint = float(rate_sp)\n indi_log.accelSetPoint = float(accel_sp)\n indi_log.accelError = float(accel_error)\n indi_log.delayedOutput = float(self.steer_filter.x)\n indi_log.delta = float(delta_u)\n indi_log.output = float(self.output_steer)\n\n check_saturation = (CS.vEgo > 10.) 
and not CS.steeringRateLimited and not CS.steeringPressed\n indi_log.saturated = self._check_saturation(self.output_steer, check_saturation, steers_max)\n\n return float(self.output_steer), float(steers_des), indi_log\n", "repo_name": "eFiniLan/xnxpilot", "sub_path": "selfdrive/controls/lib/latcontrol_indi.py", "file_name": "latcontrol_indi.py", "file_ext": "py", "file_size_in_byte": 5287, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 102, "dataset": "github-code", "pt": "12", "api": [{"api_name": "numpy.array", "line_number": 17, "usage_type": "call"}, {"api_name": "common.realtime.DT_CTRL", "line_number": 17, "usage_type": "name"}, {"api_name": "common.realtime.DT_CTRL", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "common.realtime.DT_CTRL", "line_number": 45, "usage_type": "name"}, {"api_name": "common.filter_simple.FirstOrderFilter", "line_number": 47, "usage_type": "call"}, {"api_name": "common.realtime.DT_CTRL", "line_number": 47, "usage_type": "argument"}, {"api_name": "common.numpy_fast.interp", "line_number": 53, "usage_type": "call"}, {"api_name": "common.numpy_fast.interp", "line_number": 57, "usage_type": "call"}, {"api_name": "common.numpy_fast.interp", "line_number": 61, "usage_type": "call"}, {"api_name": "common.numpy_fast.interp", "line_number": 65, "usage_type": "call"}, {"api_name": "common.numpy_fast.clip", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 88, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 89, "usage_type": "call"}, {"api_name": "cereal.log.ControlsState.LateralINDIState.new_message", "line_number": 91, "usage_type": "call"}, {"api_name": "cereal.log.ControlsState", "line_number": 91, "usage_type": "attribute"}, {"api_name": "cereal.log", "line_number": 91, "usage_type": "name"}, {"api_name": "math.degrees", "line_number": 92, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 93, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 94, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 97, "usage_type": "call"}, {"api_name": "selfdrive.car.toyota.values.CarControllerParams.STEER_MAX", "line_number": 125, "usage_type": "attribute"}, {"api_name": "selfdrive.car.toyota.values.CarControllerParams", "line_number": 125, "usage_type": "name"}, {"api_name": "selfdrive.car.apply_toyota_steer_torque_limits", "line_number": 128, "usage_type": "call"}, {"api_name": "selfdrive.car.toyota.values.CarControllerParams", "line_number": 128, "usage_type": "argument"}, {"api_name": "selfdrive.controls.lib.drive_helpers.get_steer_max", "line_number": 133, "usage_type": "call"}, {"api_name": "common.numpy_fast.clip", "line_number": 134, "usage_type": "call"}]} +{"seq_id": "38456474244", "text": "import tkinter as tk\nfrom tkinter import ttk\nfrom PIL import Image, ImageTk\n\nclass ConversorTemperatura:\n\n def __init__(self, master):\n self.master = master\n self.master.title('Conversor de Temperatura')\n self.master.geometry('400x250')\n self.master.resizable(False, False)\n self.master.iconbitmap('clear.png')\n\n # Cria o ícone e o título da janela\n self.imagem = Image.open('clear.png')\n self.imagem 
= self.imagem.resize((30, 30))\n self.icone = ImageTk.PhotoImage(self.imagem)\n self.master.iconphoto(False, self.icone)\n\n # Define o estilo de fonte\n self.fonte = ('Helvetica', 12)\n\n # Cria o rótulo de entrada\n self.label_entrada = ttk.Label(self.master, text='Digite a temperatura em Fahrenheit:', font=self.fonte)\n self.label_entrada.pack(pady=10)\n\n # Cria a entrada de temperatura\n self.entrada_temperatura = ttk.Entry(self.master, font=self.fonte)\n self.entrada_temperatura.pack()\n\n # Cria o botão de conversão\n self.botao_converter = ttk.Button(self.master, text='Converter', command=self.converter_temperatura)\n self.botao_converter.pack(pady=10)\n\n # Cria o botão de limpeza\n self.botao_limpar = ttk.Button(self.master, text='Limpar', command=self.limpar_temperatura)\n self.botao_limpar.pack(pady=5)\n\n # Cria a mensagem de erro\n self.mensagem_erro = ttk.Label(self.master, text='', foreground='red', font=self.fonte)\n self.mensagem_erro.pack(pady=10)\n\n # Cria o rótulo de saída\n self.label_saida = ttk.Label(self.master, text='Resultado:', font=self.fonte)\n self.label_saida.pack(pady=5)\n\n # Cria a saída de temperatura\n self.saida_temperatura = ttk.Label(self.master, text='', font=self.fonte)\n self.saida_temperatura.pack()\n\n def converter_temperatura(self):\n try:\n temperatura_fahrenheit = float(self.entrada_temperatura.get())\n temperatura_celsius = (temperatura_fahrenheit - 32) * 5/9\n self.saida_temperatura.config(text=f'{temperatura_celsius:.2f}')\n self.mensagem_erro.config(text='')\n except ValueError:\n self.mensagem_erro.config(text='Insira uma temperatura válida!')\n\n def limpar_temperatura(self):\n self.entrada_temperatura.delete(0, tk.END)\n self.saida_temperatura.config(text='')\n self.mensagem_erro.config(text='')\n\nroot = tk.Tk()\nconversor = ConversorTemperatura(root)\nroot.mainloop()\n", "repo_name": "llevisouza/Conversor_temperatura", "sub_path": "conversor_temperatura.py", "file_name": "conversor_temperatura.py", "file_ext": "py", "file_size_in_byte": 2547, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "PIL.Image.open", "line_number": 15, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 15, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 17, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 17, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 24, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 24, "usage_type": "name"}, {"api_name": "tkinter.ttk.Entry", "line_number": 28, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 28, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 32, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 32, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 36, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 36, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 40, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 40, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 44, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 44, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 48, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 48, "usage_type": "name"}, {"api_name": "tkinter.END", "line_number": 61, "usage_type": "attribute"}, 
{"api_name": "tkinter.Tk", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "36890283437", "text": "from fastapi import HTTPException, status\nfrom pony.orm import db_session\n\nfrom database.models.models import Robot, Match\nfrom view_entities.robot_view_entities import *\n\n\nROBOT_NAME_EXCEPTION = HTTPException(\n status_code=status.HTTP_409_CONFLICT,\n detail=\"User already has a robot with this name.\"\n)\n\nROBOT_DB_EXCEPTION = HTTPException(\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=\"Internal error when creating the new robot in the database.\"\n)\n\nERROR_INSERTING_ROBOTS = HTTPException(\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=\"Internal error when adding the default robots.\"\n)\n\n\n# Transforms the robots selected from the database to the format that will be\n# sent to the frontend.\n@db_session\ndef robot_db_to_view(robots: Robot):\n return [ShowRobot.from_orm(r) for r in robots]\n\n\ndef insert_filename_to_file(file: str, filename: str):\n if file == \"\":\n return \"\"\n return \"name:\" + filename + \";\" + file\n\n\n# @db_session\n# def get_player_in_match(match: AbandonMatch, abandoning_username: str):\n\n# query = left_join(\n# (r.owner)\n# for m in Match for r in m.robots_joined\n# if m.name == match.name and\n# m.creator_user == User.get(username=match.creator_user) and\n# r.owner == User.get(username=abandoning_username)\n# )\n# print(\"\\n\")\n# query.show()\n# return query\n\n\n@db_session\ndef get_robot_in_match_by_owner(match_id: int, owner_username: str):\n match = Match[match_id]\n for r in match.robots_joined:\n if r.owner.username == owner_username:\n return r\n\n return None\n", "repo_name": "whileTrue-FaMAFyC/backend", "sub_path": "app/utils/robot_utils.py", "file_name": "robot_utils.py", "file_ext": "py", "file_size_in_byte": 1629, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "12", "api": [{"api_name": "fastapi.HTTPException", "line_number": 8, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_409_CONFLICT", "line_number": 9, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 9, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 13, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 14, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 14, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 18, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 19, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 19, "usage_type": "name"}, {"api_name": "database.models.models.Robot", "line_number": 27, "usage_type": "name"}, {"api_name": "pony.orm.db_session", "line_number": 26, "usage_type": "name"}, {"api_name": "database.models.models.Match", "line_number": 54, "usage_type": "name"}, {"api_name": "pony.orm.db_session", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "25600431628", "text": "import logging\n\n# solver sanity check\nBOOL_HAS_COPT = True\nBOOL_HAS_GRB = True\n\nlogger = logging.getLogger(\"sfhub.util\")\ntry:\n import coptpy\nexcept ImportError as e:\n logger.warning(\"Cannot find COPT & coptpy\")\n # logger.exception(e)\n BOOL_HAS_COPT = False\ntry:\n import gurobipy\nexcept ImportError as e:\n logger.warning(\"Cannot find GUROBI & gurobipy\")\n BOOL_HAS_GRB = False\n\ntry:\n import mosek.fusion as mf\n expr = mf.Expr\n 
dom = mf.Domain\n mat = mf.Matrix\nexcept ImportError as e:\n logger.warning('no mosek detected')\n\n\nclass ModelWrapper(object):\n \"\"\"\n a wrapper class to work with different solvers\n \"\"\"\n\n def __init__(self, model=None, object_map=None, solver_name=\"\", name=\"model\", *args, **kwargs):\n _solver_name = solver_name.upper()\n if model:\n self.model = model\n else:\n if _solver_name == 'COPT':\n if BOOL_HAS_COPT:\n envr = coptpy.Envr()\n self.model = envr.createModel(name=name)\n else:\n raise ValueError(\"Cannot find COPT!\")\n elif _solver_name == 'GUROBI':\n if BOOL_HAS_GRB:\n self.model = gurobipy.Model(name)\n else:\n logger.warning('Cannot find GUROBI, pls install the API properly')\n else:\n logger.info('Unknown solver, fallback to COPT')\n try:\n envr = coptpy.Envr()\n self.model = envr.createModel(name=name)\n except Exception as e:\n logger.error(\"Cannot find COPT!\")\n raise e\n self.obj_map = object_map\n self._objective_value = None\n self.is_copt, self.is_grb = False, False\n if BOOL_HAS_COPT:\n self.is_copt = self.model.__class__ == coptpy.Model\n if BOOL_HAS_GRB:\n self.is_grb = self.model.__class__ == gurobipy.Model\n\n if not (self.is_grb or self.is_copt):\n raise ValueError(\"unsupported, neither COPT nor GUROBI\")\n\n # wrapper constants\n if self.is_copt:\n self.INTEGER = coptpy.COPT.INTEGER\n self.BINARY = coptpy.COPT.BINARY\n self.CONTINUOUS = coptpy.COPT.CONTINUOUS\n self.INF = coptpy.COPT.INFINITY\n self.MINIMIZE = coptpy.COPT.MINIMIZE\n self.xsum = self.quicksum = coptpy.quicksum\n elif self.is_grb:\n self.INTEGER = gurobipy.GRB.INTEGER\n self.BINARY = gurobipy.GRB.BINARY\n self.CONTINUOUS = gurobipy.GRB.CONTINUOUS\n self.INF = gurobipy.GRB.INFINITY\n self.MINIMIZE = gurobipy.GRB.MINIMIZE\n self.xsum = self.quicksum = gurobipy.quicksum\n else:\n raise ValueError(\"solver name unknown\")\n\n self.set_properties(**kwargs)\n\n def optimize(self, **kwargs):\n time_limit = kwargs.get(\"max_seconds\", 1000)\n max_solutions = kwargs.get(\"max_solutions\", 1000)\n if self.is_copt:\n return self.model.solve()\n elif self.is_grb:\n return self.model.optimize()\n raise TypeError(\"unknown model class, not in [copt, grb]\")\n\n @property\n def objective_value(self):\n if self.is_copt:\n value = self.model.objval\n elif self.is_grb:\n value = self.model.objVal\n else:\n value = 0.0\n self._objective_value = value\n return self._objective_value\n\n def isfeasible(self):\n \"\"\"\n Not safe, only after optimize call!\n :return:\n \"\"\"\n if self.is_copt:\n return self.model.status != coptpy.COPT.INFEASIBLE\n if self.is_grb:\n return self.model.status != gurobipy.GRB.INFEASIBLE\n return False\n\n def set_properties(self, **kwargs):\n verbose, max_mip_gap = kwargs.get('verbose', 1), kwargs.get('maxMipGap', 0.00)\n if self.is_copt:\n self.model.setParam('Logging', verbose)\n self.model.setParam('RelGap', max_mip_gap)\n elif self.is_grb:\n self.model.setParam('OutputFlag', verbose)\n self.model.setParam('MIPGap', max_mip_gap)\n else:\n raise ValueError(\"solver name unknown\")\n", "repo_name": "brentian/artoopt", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 4276, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "12", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "mosek.fusion.Expr", "line_number": 22, "usage_type": "attribute"}, {"api_name": "mosek.fusion", "line_number": 22, "usage_type": "name"}, {"api_name": 
"mosek.fusion.Domain", "line_number": 23, "usage_type": "attribute"}, {"api_name": "mosek.fusion", "line_number": 23, "usage_type": "name"}, {"api_name": "mosek.fusion.Matrix", "line_number": 24, "usage_type": "attribute"}, {"api_name": "mosek.fusion", "line_number": 24, "usage_type": "name"}, {"api_name": "coptpy.Envr", "line_number": 41, "usage_type": "call"}, {"api_name": "gurobipy.Model", "line_number": 47, "usage_type": "call"}, {"api_name": "coptpy.Envr", "line_number": 53, "usage_type": "call"}, {"api_name": "coptpy.Model", "line_number": 62, "usage_type": "attribute"}, {"api_name": "gurobipy.Model", "line_number": 64, "usage_type": "attribute"}, {"api_name": "coptpy.COPT", "line_number": 71, "usage_type": "attribute"}, {"api_name": "coptpy.COPT", "line_number": 72, "usage_type": "attribute"}, {"api_name": "coptpy.COPT", "line_number": 73, "usage_type": "attribute"}, {"api_name": "coptpy.COPT", "line_number": 74, "usage_type": "attribute"}, {"api_name": "coptpy.COPT", "line_number": 75, "usage_type": "attribute"}, {"api_name": "coptpy.quicksum", "line_number": 76, "usage_type": "attribute"}, {"api_name": "gurobipy.GRB", "line_number": 78, "usage_type": "attribute"}, {"api_name": "gurobipy.GRB", "line_number": 79, "usage_type": "attribute"}, {"api_name": "gurobipy.GRB", "line_number": 80, "usage_type": "attribute"}, {"api_name": "gurobipy.GRB", "line_number": 81, "usage_type": "attribute"}, {"api_name": "gurobipy.GRB", "line_number": 82, "usage_type": "attribute"}, {"api_name": "gurobipy.quicksum", "line_number": 83, "usage_type": "attribute"}, {"api_name": "coptpy.COPT", "line_number": 115, "usage_type": "attribute"}, {"api_name": "gurobipy.GRB", "line_number": 117, "usage_type": "attribute"}]} +{"seq_id": "15467293088", "text": "import warnings\nfrom collections.abc import Callable\nfrom numbers import Integral\n\nimport numpy as np\nfrom numba import TypingError, njit\nfrom numpy.random import Generator\n\nfrom tsbootstrap.utils.types import RngTypes\nfrom tsbootstrap.utils.validate import (\n validate_block_indices,\n validate_rng,\n validate_weights,\n)\n\n\nclass BlockResampler:\n \"\"\"\n A class to perform block resampling.\n\n Methods\n -------\n resample_blocks()\n Resamples blocks and their corresponding tapered_weights with replacement to create a new list of blocks and tapered_weights with total length equal to n.\n resample_block_indices_and_data()\n Generate block indices and corresponding data for the input data array X.\n \"\"\"\n\n def __init__(\n self,\n blocks: list[np.ndarray],\n X: np.ndarray,\n block_weights: np.ndarray | Callable | None = None,\n tapered_weights: Callable | None = None,\n rng: RngTypes = None,\n ):\n \"\"\"\n Initialize the BlockResampler with the selected distribution and average block length.\n\n Parameters\n ----------\n blocks : List[np.ndarray]\n A list of numpy arrays where each array represents the indices of a block in the time series.\n X : np.ndarray\n The input data array.\n block_weights : Union[np.ndarray, Callable], optional\n An array of weights or a callable function to generate weights. If None, then the default uniform weights are used.\n tapered_weights : Union[np.ndarray, Callable], optional\n An array of weights to apply to the data within the blocks. If None, then the default uniform weights are used.\n rng : np.random.Generator, optional\n Generator for reproducibility. 
If None, the global random state is used.\n \"\"\"\n self.X = X\n self.blocks = blocks\n self.rng = rng\n self.block_weights = block_weights\n self.tapered_weights = tapered_weights\n\n @property\n def X(self) -> np.ndarray:\n \"\"\"The input data array.\"\"\"\n return self._X\n\n @X.setter\n def X(self, value: np.ndarray) -> None:\n \"\"\"\n Set the input data array.\n\n Parameters\n ----------\n value : np.ndarray\n The input data array.\n\n\n Raises\n ------\n TypeError\n If the input data array is not a numpy array.\n ValueError\n If the input data array has less than two elements or if it is not a 1D or 2D array.\n\n\n Notes\n -----\n If the input data array is a 1D array, then it is reshaped to a 2D array.\n\n Examples\n --------\n >>> import numpy as np\n >>> from block_resampler import BlockResampler\n >>> X = np.array([1, 2, 3, 4, 5])\n >>> block_resampler = BlockResampler(blocks=[[0, 1, 2], [3, 4]], X=X)\n >>> block_resampler.X\n array([[1],\n [2],\n [3],\n [4],\n [5]])\n \"\"\"\n if not isinstance(value, np.ndarray):\n raise TypeError(\"'X' must be a numpy array.\")\n else:\n if value.size < 2:\n raise ValueError(\"'X' must have at least two elements.\")\n elif value.ndim == 1:\n warnings.warn(\n \"Input 'X' is a 1D array. It will be reshaped to a 2D array.\",\n stacklevel=2,\n )\n value = value.reshape(-1, 1)\n elif value.ndim > 2:\n raise ValueError(\"'X' must be a 1D or 2D numpy array.\")\n self._X = value\n\n @property\n def blocks(self) -> list[np.ndarray]:\n \"\"\"A list of numpy arrays where each array represents the indices of a block in the time series.\"\"\"\n return self._blocks\n\n @blocks.setter\n def blocks(self, value: list[np.ndarray]) -> None:\n \"\"\"\n Set the list of blocks.\n\n Parameters\n ----------\n value : List[np.ndarray]\n A list of numpy arrays where each array represents the indices of a block in the time series.\n\n\n Raises\n ------\n TypeError\n If the list of blocks is not a list.\n ValueError\n If the list of blocks is empty or if it contains non-integer arrays.\n\n\n Notes\n -----\n The list of blocks is sorted in ascending order.\n \"\"\"\n validate_block_indices(value, self.X.shape[0]) # type: ignore\n self._blocks = value\n\n @property\n def rng(self) -> Generator:\n \"\"\"Generator for reproducibility.\"\"\"\n return self._rng\n\n @rng.setter\n def rng(self, value: RngTypes) -> None:\n \"\"\"\n Set the random number generator.\n\n Parameters\n ----------\n value : RngTypes\n Generator for reproducibility.\n\n\n Raises\n ------\n TypeError\n If the random number generator is not a numpy random Generator or an integer.\n ValueError\n If the random number generator is an integer but it is not a non-negative integer.\n \"\"\"\n self._rng = validate_rng(value, allow_seed=True)\n\n @property\n def block_weights(self) -> np.ndarray:\n \"\"\"An array of normalized block_weights.\"\"\"\n return self._block_weights\n\n @block_weights.setter\n def block_weights(self, value: np.ndarray | Callable | None) -> None:\n \"\"\"\n Set the block_weights array.\n\n Parameters\n ----------\n value : Union[np.ndarray, Callable]\n An array of weights or a callable function to generate weights.\n If None, then the default uniform weights are used.\n\n\n Raises\n ------\n TypeError\n If the block_weights array is not a numpy array or a callable function.\n ValueError\n If the block_weights array is a numpy array but it is empty or if it contains non-integer arrays.\n If the block_weights array is a callable function but the output is not a 1D array of length 
'size'.\n \"\"\"\n self._block_weights = self._prepare_block_weights(value)\n\n @property\n def tapered_weights(self) -> list[np.ndarray]:\n \"\"\"A list of normalized weights.\"\"\"\n return self._tapered_weights\n\n @tapered_weights.setter\n def tapered_weights(self, value: Callable | None) -> None:\n \"\"\"\n Set the tapered_weights array.\n\n Parameters\n ----------\n value : Optional[Callable]\n A callable function to generate weights.\n If None, then the default uniform weights are used.\n\n Raises\n ------\n TypeError\n If the tapered_weights array is not a callable function.\n ValueError\n If the tapered_weights array is a callable function but the output is not a 1D array of length 'size'.\n \"\"\"\n self._tapered_weights = self._prepare_tapered_weights(value)\n\n @staticmethod\n def _normalize_array(array: np.ndarray) -> np.ndarray:\n \"\"\"\n Normalize the weights array.\n\n Parameters\n ----------\n array : np.ndarray\n n-dimensional array.\n\n Returns\n -------\n np.ndarray\n An array of normalized values, with the same shape as the input array.\n \"\"\"\n sum_array = np.sum(array, axis=0, keepdims=True)\n zero_mask = sum_array != 0\n normalized_array = np.where(\n zero_mask, array / sum_array, 1.0 / array.shape[0]\n )\n return normalized_array\n\n def _prepare_tapered_weights(\n self, tapered_weights: Callable | None = None\n ) -> list[np.ndarray]:\n \"\"\"\n Prepare the tapered weights array by normalizing it or generating it.\n\n Parameters\n ----------\n tapered_weights : Union[np.ndarray, Callable]\n An array of weights or a callable function to generate weights.\n size : int, optional\n The size of the weights array (required for \"tapered_weights\").\n If None, then the size is the same as the block length.\n\n Returns\n -------\n np.ndarray or List[np.ndarray]\n An array or list of normalized weights.\n \"\"\"\n block_lengths = np.array([len(block) for block in self.blocks])\n size = block_lengths\n\n if callable(tapered_weights):\n tapered_weights_arr = self._handle_callable_weights(\n tapered_weights, size\n )\n # Ensure that the edges are not exactly 0, while ensure that the max weight stays the same.\n tapered_weights_arr = [\n np.maximum(weights, 0.1) for weights in tapered_weights_arr\n ]\n # Ensure that the maximum weight is 1.\n tapered_weights_arr = [\n weights / np.max(weights) for weights in tapered_weights_arr\n ]\n elif tapered_weights is None:\n tapered_weights_arr = [np.full(size_iter, 1) for size_iter in size]\n else:\n raise TypeError(\n f\"{tapered_weights} must be a callable function or None.\"\n )\n\n for weights in tapered_weights_arr:\n validate_weights(weights)\n\n return tapered_weights_arr\n\n def _prepare_block_weights(\n self, block_weights: np.ndarray | Callable | None = None\n ) -> np.ndarray:\n \"\"\"\n Prepare the block_weights array by normalizing it or generating it based on the callable function provided.\n\n Parameters\n ----------\n block_weights : Union[np.ndarray, Callable], optional\n An array of weights or a callable function to generate weights. 
Defaults to None.\n\n Returns\n -------\n np.ndarray\n An array of normalized block_weights.\n \"\"\"\n size = self.X.shape[0]\n\n if callable(block_weights):\n block_weights_arr = self._handle_callable_weights(\n block_weights, size\n )\n elif isinstance(block_weights, np.ndarray):\n block_weights_arr = self._handle_array_block_weights(\n block_weights, size\n )\n elif block_weights is None:\n block_weights_arr = np.full(size, 1 / size)\n else:\n raise TypeError(\n \"'block_weights' must be a numpy array or a callable function or None.\"\n )\n\n # Validate the block_weights array\n validate_weights(block_weights_arr)\n # Normalize the block_weights array\n block_weights_arr = self._normalize_array(block_weights_arr)\n\n return block_weights_arr\n\n def _handle_callable_weights(\n self,\n weights_func: Callable,\n size: Integral | list[Integral] | np.ndarray,\n ) -> np.ndarray:\n \"\"\"\n Handle callable block_weights by executing the function and validating the output.\n\n Parameters\n ----------\n block_weights : Callable\n A callable function to generate block weights.\n size : int\n The size of the block_weights array.\n\n Returns\n -------\n np.ndarray\n An array of block_weights.\n \"\"\"\n try:\n weights_jitted = njit(weights_func)\n weights_arr = self._generate_weights_from_callable(\n weights_jitted, size\n )\n except TypingError:\n weights_arr = self._generate_weights_from_callable(\n weights_func, size\n )\n\n self._validate_callable_generated_weights(\n weights_arr, size, weights_func.__name__\n )\n\n return weights_arr\n\n def _generate_weights_from_callable(\n self,\n weights_func: Callable,\n size: Integral | list[Integral] | np.ndarray,\n ):\n \"\"\"\n Generate weights from a callable function.\n\n Parameters\n ----------\n weights_func : Callable\n A callable function to generate weights.\n size : Union[Integral, List[Integral], np.ndarray]\n The size of the weights array.\n\n Returns\n -------\n np.ndarray\n An array of weights.\n \"\"\"\n if isinstance(size, Integral):\n return weights_func(size)\n elif isinstance(size, np.ndarray | list):\n return [weights_func(size_iter) for size_iter in size]\n else:\n raise TypeError(\n \"size must be an integer or a list/array of integers\"\n )\n\n def _validate_callable_generated_weights(\n self,\n weights_arr: np.ndarray | list[np.ndarray],\n size: Integral | list[Integral],\n callable_name: str,\n ):\n \"\"\"\n Validate the output of a callable function that generates either block_weights or tapered_weights.\n\n Parameters\n ----------\n weights_arr : Union[np.ndarray, List[np.ndarray]]\n An array or list of arrays of weights.\n size : Union[Integral, List[Integral]]\n The size of the weights array.\n callable_name : str\n The name of the callable function.\n\n Raises\n ------\n TypeError\n If the output of the callable function is not a numpy array.\n ValueError\n If the output of the callable function is not a 1d array of length 'size'.\n\n Returns\n -------\n None\n \"\"\"\n if isinstance(weights_arr, list):\n print(\"dealing with tapered_weights\")\n weights_arr = weights_arr[0]\n size = size[0]\n if not isinstance(weights_arr, np.ndarray):\n raise TypeError(\n f\"Output of '{callable_name}(size)' must be a numpy array.\"\n )\n if len(weights_arr) != size or weights_arr.ndim != 1:\n raise ValueError(\n f\"Output of '{callable_name}(size)' must be a 1d array of length 'size'.\"\n )\n\n def _handle_array_block_weights(\n self, block_weights: np.ndarray, size: int\n ) -> np.ndarray:\n \"\"\"\n Handle array block_weights by 
validating the array and returning it.\n\n Parameters\n ----------\n block_weights : np.ndarray\n An array of block_weights.\n size : int\n The size of the block_weights array.\n\n Returns\n -------\n np.ndarray\n An array of block_weights.\n \"\"\"\n if block_weights.shape[0] == 0:\n return np.full(size, 1 / size)\n elif block_weights.shape[0] != size:\n raise ValueError(\n \"block_weights array must have the same size as X\"\n )\n return block_weights\n\n def resample_blocks(self) -> tuple[list[np.ndarray], list[np.ndarray]]:\n \"\"\"\n Resample blocks and corresponding tapered weights with replacement to create a new list of blocks and tapered weights with total length equal to n.\n\n Returns\n -------\n Tuple[list of ndarray, list of ndarray]\n The newly generated list of blocks and their corresponding tapered_weights\n with total length equal to n.\n\n Example\n -------\n >>> block_resampler = BlockResampler(blocks=blocks, X=data)\n >>> new_blocks, new_tapered_weights = block_resampler.resample_blocks()\n >>> len(new_blocks) == len(data)\n True\n \"\"\"\n n = self.X.shape[0]\n block_dict = {block[0]: block for block in self.blocks}\n tapered_weights_dict = {\n block[0]: weight\n for block, weight in zip(self.blocks, self.tapered_weights)\n }\n first_indices = np.array(list(block_dict.keys()))\n block_lengths = np.array([len(block) for block in self.blocks])\n block_weights = np.array(\n [self.block_weights[idx] for idx in first_indices]\n )\n\n new_blocks, new_tapered_weights, total_samples = [], [], 0\n while total_samples < n:\n eligible_mask = (block_lengths <= n - total_samples) & (\n block_weights > 0 # type: ignore\n )\n if not np.any(eligible_mask):\n incomplete_eligible_mask = (block_lengths > 0) & (\n block_weights > 0 # type: ignore\n )\n incomplete_eligible_weights = block_weights[\n incomplete_eligible_mask\n ]\n\n index = self.rng.choice(\n first_indices[incomplete_eligible_mask],\n p=incomplete_eligible_weights\n / incomplete_eligible_weights.sum(),\n )\n selected_block = block_dict[index]\n selected_tapered_weights = tapered_weights_dict[index]\n new_blocks.append(selected_block[: n - total_samples])\n new_tapered_weights.append(\n selected_tapered_weights[: n - total_samples]\n )\n break\n\n eligible_weights = block_weights[eligible_mask]\n index = self.rng.choice(\n first_indices[eligible_mask],\n p=eligible_weights / eligible_weights.sum(),\n )\n selected_block = block_dict[index]\n selected_tapered_weights = tapered_weights_dict[index]\n new_blocks.append(selected_block)\n new_tapered_weights.append(selected_tapered_weights)\n total_samples += len(selected_block)\n\n return new_blocks, new_tapered_weights\n\n def resample_block_indices_and_data(\n self,\n ) -> tuple[list[np.ndarray], list[np.ndarray]]:\n \"\"\"\n Generate block indices and corresponding data for the input data array X.\n\n Returns\n -------\n Tuple[List[np.ndarray], List[np.ndarray]]\n A tuple containing a list of block indices and a list of corresponding modified data blocks.\n\n Example\n -------\n >>> block_resampler = BlockResampler(blocks=blocks, X=data)\n >>> block_indices, block_data = block_resampler.resample_block_indices_and_data()\n >>> len(block_indices) == len(data)\n True\n\n Notes\n -----\n The block indices are generated using the following steps:\n 1. Generate block weights using the block_weights argument.\n 2. Resample blocks with replacement to create a new list of blocks with total length equal to n.\n 3. 
Apply tapered_weights to the data within the blocks if provided.\n \"\"\"\n (\n resampled_block_indices,\n resampled_tapered_weights,\n ) = self.resample_blocks()\n block_data = []\n\n for i, block in enumerate(resampled_block_indices):\n taper = resampled_tapered_weights[i]\n data_block = self.X[block]\n block_data.append(data_block * taper.reshape(-1, 1))\n\n return resampled_block_indices, block_data\n\n def __repr__(self) -> str:\n return f\"BlockResampler(blocks={self.blocks}, X={self.X}, block_weights={self.block_weights}, tapered_weights={self.tapered_weights}, rng={self.rng})\"\n\n def __str__(self) -> str:\n return f\"BlockResampler with blocks of length {len(self.blocks)}, input data of shape {self.X.shape}, block weights {self.block_weights}, tapered weights {self.tapered_weights}, and random number generator {self.rng}\"\n\n def __eq__(self, other: object) -> bool:\n if isinstance(other, BlockResampler):\n return (\n self.blocks == other.blocks\n and np.array_equal(self.X, other.X)\n and self.block_weights == other.block_weights\n and self.tapered_weights == other.tapered_weights\n and self.rng == other.rng\n )\n return False\n", "repo_name": "astrogilda/tsbootstrap", "sub_path": "src/tsbootstrap/block_resampler.py", "file_name": "block_resampler.py", "file_ext": "py", "file_size_in_byte": 19821, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 31, "dataset": "github-code", "pt": "12", "api": [{"api_name": "numpy.ndarray", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 33, "usage_type": "attribute"}, {"api_name": "collections.abc.Callable", "line_number": 33, "usage_type": "name"}, {"api_name": "collections.abc.Callable", "line_number": 34, "usage_type": "name"}, {"api_name": "tsbootstrap.utils.types.RngTypes", "line_number": 35, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 100, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 116, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 121, "usage_type": "attribute"}, {"api_name": "tsbootstrap.utils.validate.validate_block_indices", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.random.Generator", "line_number": 147, "usage_type": "name"}, {"api_name": "tsbootstrap.utils.types.RngTypes", "line_number": 152, "usage_type": "name"}, {"api_name": "tsbootstrap.utils.validate.validate_rng", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 172, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 177, "usage_type": "attribute"}, {"api_name": "collections.abc.Callable", "line_number": 177, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 199, "usage_type": "attribute"}, {"api_name": "collections.abc.Callable", "line_number": 204, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 224, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 240, "usage_type": "call"}, {"api_name": "collections.abc.Callable", "line_number": 246, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 264, "usage_type": "call"}, 
{"api_name": "numpy.maximum", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 280, "usage_type": "call"}, {"api_name": "tsbootstrap.utils.validate.validate_weights", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 247, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 292, "usage_type": "attribute"}, {"api_name": "collections.abc.Callable", "line_number": 292, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 313, "usage_type": "attribute"}, {"api_name": "numpy.full", "line_number": 318, "usage_type": "call"}, {"api_name": "tsbootstrap.utils.validate.validate_weights", "line_number": 325, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 293, "usage_type": "attribute"}, {"api_name": "collections.abc.Callable", "line_number": 333, "usage_type": "name"}, {"api_name": "numbers.Integral", "line_number": 334, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 334, "usage_type": "attribute"}, {"api_name": "numba.njit", "line_number": 352, "usage_type": "call"}, {"api_name": "numba.TypingError", "line_number": 356, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 335, "usage_type": "attribute"}, {"api_name": "collections.abc.Callable", "line_number": 369, "usage_type": "name"}, {"api_name": "numbers.Integral", "line_number": 370, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 370, "usage_type": "attribute"}, {"api_name": "numbers.Integral", "line_number": 387, "usage_type": "argument"}, {"api_name": "numpy.ndarray", "line_number": 389, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 398, "usage_type": "attribute"}, {"api_name": "numbers.Integral", "line_number": 399, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 429, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 439, "usage_type": "attribute"}, {"api_name": "numpy.full", "line_number": 457, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 440, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 487, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 488, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 489, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 498, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 464, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 534, "usage_type": "attribute"}, {"api_name": "numpy.array_equal", "line_number": 580, "usage_type": "call"}]} +{"seq_id": "26422148614", "text": "from typing import Dict, List, Union\nfrom datetime import datetime\nfrom pathlib import Path\nimport logging\nimport yaml\n\n\n# Globals\nPKD_CONFIG_DIR = \".peekingduck\"\nPKD_PLAYLIST_FILE = \"playlist.yml\"\n\n\nclass PipelineStats:\n \"\"\"Implements immutable PipelineStats class to store pipeline-related information.\"\"\"\n\n def __init__(self, pipeline: str) -> None:\n self._hash = hash(pipeline)\n self._pipeline = pipeline\n pipeline_path = Path(pipeline)\n self._name = pipeline_path.name\n self._datetime = (\n pipeline_path.stat().st_mtime if pipeline_path.exists() else None\n )\n\n def __eq__(self, obj: \"PipelineStats\") -> bool:\n return self._pipeline == obj._pipeline\n\n def __hash__(self) -> int:\n return self._hash\n\n def __lt__(self, obj: \"PipelineStats\") -> bool:\n 
return (self._name, self._pipeline) < (obj._name, obj._pipeline)\n\n def __repr__(self) -> str:\n return self._pipeline\n\n @property\n def datetime(self) -> str:\n \"\"\"Get last modified date/time of pipeline file.\n\n Returns:\n str: Last modified date/time string.\n \"\"\"\n if self._datetime:\n return datetime.fromtimestamp(self._datetime).strftime(\"%Y-%m-%d-%H:%M:%S\")\n return \"\"\n\n @property\n def name(self) -> str:\n \"\"\"Get pipeline name.\n\n Returns:\n str: Name of pipeline.\n \"\"\"\n return self._name\n\n @property\n def pipeline(self) -> str:\n \"\"\"Get pipeline full path name.\n\n Returns:\n str: Full path name of pipeline.\n \"\"\"\n return self._pipeline\n\n\nclass PlayList:\n \"\"\"Implements the PlayList class to store pipelines in playlist and to handle\n the internal data structures for managing pipelines\"\"\"\n\n def __init__(self, home_path: Path) -> None:\n self.logger = logging.getLogger(__name__)\n # Construct path to ~user_home/.peekingduck/playlist.yaml\n self._playlist_dir = home_path / PKD_CONFIG_DIR\n self._playlist_dir.mkdir(exist_ok=True)\n self._playlist_path = self._playlist_dir / PKD_PLAYLIST_FILE\n self.logger.debug(f\"playlist_path={self._playlist_path}\")\n self.load_playlist_file()\n\n def __iter__(self) -> \"PlayList\":\n self._iter_idx = -1\n return self\n\n def __next__(self) -> PipelineStats:\n self._iter_idx += 1\n if self._iter_idx < len(self.pipeline_stats):\n return self.pipeline_stats[self._iter_idx]\n raise StopIteration\n\n def __contains__(self, item: str) -> bool:\n if not isinstance(item, str):\n item = str(item)\n res = False\n for pipeline_stats in self.pipeline_stats:\n if pipeline_stats.pipeline == item:\n res = True\n break\n return res\n\n def __getitem__(self, key: str) -> PipelineStats:\n return self._pipelines_dict[key]\n\n def __len__(self) -> int:\n return len(self.pipeline_stats)\n\n #\n # Internal methods\n #\n def _read_playlist_file(self) -> List[str]:\n \"\"\"Read contents of playlist file, if any\n\n Returns:\n List[str]: contents of playlist file, a list of pipelines\n \"\"\"\n if not Path.exists(self._playlist_path):\n self.logger.debug(f\"{self._playlist_path} not found\")\n return []\n\n with open(self._playlist_path, \"r\", encoding=\"utf-8\") as file:\n playlist = yaml.safe_load(file)\n\n return playlist[\"playlist\"]\n\n #\n # External methods\n #\n def add_pipeline(self, pipeline_path: Union[Path, str]) -> None:\n \"\"\"Add pipeline yaml file to playlist.\n Do nothing if pipeline is already in playlist.\n\n Args:\n pipeline_path (Union[Path, str]): path of yaml file to add\n \"\"\"\n pipeline_str = str(pipeline_path)\n if pipeline_str in self:\n self.logger.info(f\"{pipeline_str} already in playlist\")\n return\n pipeline_stats = PipelineStats(pipeline_str)\n self.pipeline_stats.append(pipeline_stats)\n self._pipelines_dict[pipeline_str] = pipeline_stats\n\n def delete_pipeline(self, pipeline_path: Union[Path, str]) -> None:\n \"\"\"Delete pipeline yaml file from playlist.\n Do nothing if pipeline is not in playlist.\n\n Args:\n pipeline_path (Union[Path, str]): path of yaml file to delete\n \"\"\"\n pipeline_str = str(pipeline_path)\n if pipeline_str in self:\n pipeline_stats = PipelineStats(pipeline_str)\n self.pipeline_stats.remove(pipeline_stats)\n self._pipelines_dict.pop(pipeline_str)\n\n def load_playlist_file(self) -> None:\n \"\"\"Load playlist file\"\"\"\n pipelines = self._read_playlist_file()\n self.pipeline_stats: List[PipelineStats] = []\n self._pipelines_dict: Dict[str, PipelineStats] = 
{}\n for pipeline in pipelines:\n self.add_pipeline(pipeline)\n\n def save_playlist_file(self) -> None:\n \"\"\"Save playlist file\"\"\"\n # construct playlist contents with full pathnames\n playlist = [str(stats) for stats in self.pipeline_stats]\n playlist_dict = {\"playlist\": playlist}\n self.logger.debug(f\"playlist_dict={playlist_dict}\")\n\n with open(self._playlist_path, \"w\", encoding=\"utf8\") as file:\n yaml.dump(playlist_dict, file)\n", "repo_name": "aisingapore/PeekingDuck", "sub_path": "peekingduck/viewer/playlist.py", "file_name": "playlist.py", "file_ext": "py", "file_size_in_byte": 5407, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 154, "dataset": "github-code", "pt": "12", "api": [{"api_name": "pathlib.Path", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 71, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 72, "usage_type": "call"}, {"api_name": "pathlib.Path.exists", "line_number": 115, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 115, "usage_type": "name"}, {"api_name": "yaml.safe_load", "line_number": 120, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 109, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 127, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 127, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 142, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 142, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 158, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 159, "usage_type": "name"}, {"api_name": "yaml.dump", "line_number": 171, "usage_type": "call"}]} +{"seq_id": "5966500745", "text": "# ===============================================================================\r\n# Copyright 2020-2021 Intel Corporation\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ===============================================================================\r\n\r\nimport argparse\r\n\r\nimport bench\r\nimport numpy as np\r\n\r\n\r\ndef main():\r\n from sklearn.neighbors import KNeighborsRegressor\r\n\r\n # Load generated data\r\n X_train, X_test, y_train, y_test = bench.load_data(params)\r\n params.n_classes = len(np.unique(y_train))\r\n\r\n # Create a regression object\r\n knn_regr = KNeighborsRegressor(n_neighbors=params.n_neighbors,\r\n weights=params.weights,\r\n algorithm=params.method,\r\n metric=params.metric,\r\n n_jobs=params.n_jobs)\r\n\r\n # Measure time and accuracy on fitting\r\n train_time, _ = bench.measure_function_time(\r\n knn_regr.fit, X_train, y_train, params=params)\r\n if params.task == 'regression':\r\n y_pred = knn_regr.predict(X_train)\r\n train_rmse = bench.rmse_score(y_train, y_pred)\r\n train_r2 = bench.r2_score(y_train, 
y_pred)\r\n\r\n # Measure time and accuracy on prediction\r\n if params.task == 'regression':\r\n predict_time, yp = bench.measure_function_time(knn_regr.predict, X_test,\r\n params=params)\r\n test_rmse = bench.rmse_score(y_test, yp)\r\n test_r2 = bench.r2_score(y_test, yp)\r\n else:\r\n predict_time, _ = bench.measure_function_time(knn_regr.kneighbors, X_test,\r\n params=params)\r\n\r\n if params.task == 'regression':\r\n bench.print_output(\r\n library='sklearn',\r\n algorithm=knn_regr._fit_method + '_knn_regr',\r\n stages=['training', 'prediction'],\r\n params=params,\r\n functions=['knn_regr.fit', 'knn_regr.predict'],\r\n times=[train_time, predict_time],\r\n metric_type=['rmse', 'r2_score'],\r\n metrics=[[train_rmse, test_rmse], [train_r2, test_r2]],\r\n data=[X_train, X_test],\r\n alg_instance=knn_regr,\r\n )\r\n else:\r\n bench.print_output(\r\n library='sklearn',\r\n algorithm=knn_regr._fit_method + '_knn_search',\r\n stages=['training', 'search'],\r\n params=params,\r\n functions=['knn_regr.fit', 'knn_regr.kneighbors'],\r\n times=[train_time, predict_time],\r\n metric_type=None,\r\n metrics=[],\r\n data=[X_train, X_test],\r\n alg_instance=knn_regr,\r\n )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser(\r\n description='scikit-learn kNN classifier benchmark')\r\n\r\n parser.add_argument('--task', default='regression', type=str,\r\n choices=('search', 'regression'),\r\n help='The type of kNN task: search or regression')\r\n parser.add_argument('--n-neighbors', default=5, type=int,\r\n help='The number of neighbors to use')\r\n parser.add_argument('--weights', type=str, default='uniform',\r\n help='The weight function to be used in prediction')\r\n parser.add_argument('--method', type=str, default='brute',\r\n choices=('brute', 'kd_tree', 'ball_tree', 'auto'),\r\n help='The method to find the nearest neighbors')\r\n parser.add_argument('--metric', type=str, default='euclidean',\r\n help='The metric to calculate distances')\r\n params = bench.parse_args(parser)\r\n bench.run_with_context(params, main)\r\n", "repo_name": "IntelPython/scikit-learn_bench", "sub_path": "sklearn_bench/knn_regr.py", "file_name": "knn_regr.py", "file_ext": "py", "file_size_in_byte": 4253, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 102, "dataset": "github-code", "pt": "12", "api": [{"api_name": "bench.load_data", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsRegressor", "line_number": 31, "usage_type": "call"}, {"api_name": "bench.measure_function_time", "line_number": 38, "usage_type": "call"}, {"api_name": "bench.rmse_score", "line_number": 42, "usage_type": "call"}, {"api_name": "bench.r2_score", "line_number": 43, "usage_type": "call"}, {"api_name": "bench.measure_function_time", "line_number": 47, "usage_type": "call"}, {"api_name": "bench.rmse_score", "line_number": 49, "usage_type": "call"}, {"api_name": "bench.r2_score", "line_number": 50, "usage_type": "call"}, {"api_name": "bench.measure_function_time", "line_number": 52, "usage_type": "call"}, {"api_name": "bench.print_output", "line_number": 56, "usage_type": "call"}, {"api_name": "bench.print_output", "line_number": 69, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 84, "usage_type": "call"}, {"api_name": "bench.parse_args", "line_number": 99, "usage_type": "call"}, {"api_name": "bench.run_with_context", "line_number": 100, "usage_type": "call"}]} 
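The knn_regr.py record above drives scikit-learn's KNeighborsRegressor through the repo's bench harness (bench.load_data, bench.measure_function_time, bench.print_output). As a reading aid, here is a minimal standalone sketch of the same fit/predict/score pattern using only scikit-learn; the synthetic data and the train/test split are illustrative assumptions, while the hyperparameter values mirror the argparse defaults shown in the record.

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import mean_squared_error, r2_score

# Assumption: synthetic data stands in for whatever bench.load_data would return
rng = np.random.default_rng(0)
X = rng.standard_normal((1000, 10))
y = X @ rng.standard_normal(10) + 0.1 * rng.standard_normal(1000)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Defaults from the record's argparse setup: 5 neighbors, uniform weights, brute-force search
knn = KNeighborsRegressor(n_neighbors=5, weights='uniform',
                          algorithm='brute', metric='euclidean')
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print('rmse:', np.sqrt(mean_squared_error(y_test, y_pred)))
print('r2:', r2_score(y_test, y_pred))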
+{"seq_id": "30208016786", "text": "\"\"\"\ndownload raw subtitle files from opensubtitles.org and subhd\n\"\"\"\n# !/usr/bin/python2\n# -*- coding: utf-8 -*-\nimport logging\nimport sys\nfrom os.path import splitext, basename, exists\n\nfrom datachar_armory.os_utils.logging_utils import configure_stream_logger\n\nfrom downloader.opensubtitle.opensubtitle import get_opensubtitle_sub\nfrom downloader.shooter.shooter import get_shooter_sub\nfrom merger.merge_subs import srt_merge\n\nlogger = logging.getLogger()\n\nLOGGING_FORMAT = '%(asctime)-15s %(levelname)s %(message)s'\nDATE_FORMAT = '[%Y-%m-%d %H:%M:%S]'\n\n\ndef download_subtitles():\n reload(sys)\n sys.setdefaultencoding('utf-8')\n configure_stream_logger()\n files_paths = sys.argv[1:]\n\n for file_path in files_paths:\n try:\n logger.info('PROCESSING FILE:' + basename(file_path))\n check_error_messages = files_check_error_messages(file_path)\n if check_error_messages:\n logger.info(check_error_messages)\n else:\n get_subtitles_for_the_file(file_path)\n except Exception:\n logger.exception('error occurred on processing file %s', file_path)\n\n\ndef files_check_error_messages(file_path):\n if not exists(file_path):\n return '%s not found', file_path\n if exists(splitext(file_path)[0] + \".lrc\"):\n return 'lrc file existed,quit now!'\n\n\ndef get_subtitles_for_the_file(file_path):\n result_subtitle_file = splitext(file_path)[0] + \".combined.srt\"\n sub_shooter, lang_ = get_shooter_sub(file_path)\n if lang_ == 'chs_eng':\n srt_merge([sub_shooter], result_subtitle_file, 0, 1)\n if lang_ == 'chs':\n get_eng_from_open_subtitile(sub_shooter, file_path, result_subtitle_file)\n if lang_ == 'eng':\n get_chs_from_open_subtitle(sub_shooter, file_path, result_subtitle_file)\n if lang_ == 'none':\n get_both_subtitles_from_open_subtitle(file_path, result_subtitle_file)\n\n\ndef get_eng_from_open_subtitile(sub_shooter, file_path, result_subtitle_file):\n sub_eng = get_opensubtitle_sub(file_path)\n if not sub_eng:\n logger.info('english sub file not found')\n srt_merge([sub_shooter], result_subtitle_file, 0, 2)\n else:\n srt_merge([sub_eng, sub_shooter], result_subtitle_file, 0)\n\n\ndef get_chs_from_open_subtitle(sub_shooter, file_path, result_subtitle_file):\n open_subtitle = get_opensubtitle_sub(file_path, 'chs')\n if not open_subtitle:\n logger.info('chinese sub file not found')\n srt_merge([sub_shooter], result_subtitle_file, 0, 2)\n else:\n srt_merge([sub_shooter, open_subtitle], result_subtitle_file, 0)\n\n\ndef get_both_subtitles_from_open_subtitle(file_path, result_subtitle_file):\n open_subtitle_chs = get_opensubtitle_sub(file_path, 'chs')\n open_subtitle_eng = get_opensubtitle_sub(file_path)\n if open_subtitle_eng and open_subtitle_chs:\n srt_merge([open_subtitle_chs, open_subtitle_eng], result_subtitle_file, 0)\n elif open_subtitle_eng:\n srt_merge([open_subtitle_eng], result_subtitle_file, 0, 2)\n logger.info('chinese sub file not found')\n elif open_subtitle_chs:\n srt_merge([open_subtitle_chs], result_subtitle_file, 0, 2)\n logger.info('english sub file not found')\n else:\n logger.warning('NO SUB FILE FOUND for file: %s', file_path)\n\n\nif __name__ == '__main__':\n download_subtitles()\n", "repo_name": "delbao/subtitleAutomation", "sub_path": "subProcessor/downloader/download_subs.py", "file_name": "download_subs.py", "file_ext": "py", "file_size_in_byte": 3365, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "12", "api": [{"api_name": "logging.getLogger", "line_number": 16, 
"usage_type": "call"}, {"api_name": "sys.setdefaultencoding", "line_number": 24, "usage_type": "call"}, {"api_name": "datachar_armory.os_utils.logging_utils.configure_stream_logger", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 48, "usage_type": "call"}, {"api_name": "downloader.shooter.shooter.get_shooter_sub", "line_number": 49, "usage_type": "call"}, {"api_name": "merger.merge_subs.srt_merge", "line_number": 51, "usage_type": "call"}, {"api_name": "downloader.opensubtitle.opensubtitle.get_opensubtitle_sub", "line_number": 61, "usage_type": "call"}, {"api_name": "merger.merge_subs.srt_merge", "line_number": 64, "usage_type": "call"}, {"api_name": "merger.merge_subs.srt_merge", "line_number": 66, "usage_type": "call"}, {"api_name": "downloader.opensubtitle.opensubtitle.get_opensubtitle_sub", "line_number": 70, "usage_type": "call"}, {"api_name": "merger.merge_subs.srt_merge", "line_number": 73, "usage_type": "call"}, {"api_name": "merger.merge_subs.srt_merge", "line_number": 75, "usage_type": "call"}, {"api_name": "downloader.opensubtitle.opensubtitle.get_opensubtitle_sub", "line_number": 79, "usage_type": "call"}, {"api_name": "downloader.opensubtitle.opensubtitle.get_opensubtitle_sub", "line_number": 80, "usage_type": "call"}, {"api_name": "merger.merge_subs.srt_merge", "line_number": 82, "usage_type": "call"}, {"api_name": "merger.merge_subs.srt_merge", "line_number": 84, "usage_type": "call"}, {"api_name": "merger.merge_subs.srt_merge", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "28141456643", "text": "# -*- coding: utf-8 -*-\n\nimport datetime \nimport os\nimport db_connect as db\ncs = db.Conn().conn()\ncur = cs.cursor()\n\n#Pobranie z BD numerów projektów z statusem APPROVED\ncur.execute(\"SELECT nr_ki, rev FROM all_ki WHERE rev_status = 'APPROVED'\")\nprint(\"Projekty oczekujące na uwolnienie do produkcji: \")\nfor app in cur:\n print(app[0]+app[1])\n \nnr_ki = input('Który numer KI jest zwolniony do produkcji?: ')\naccept = input('Czy projekt jest ok? 
[Y/N]: ')\n\nif accept == 'y' or accept == 'Y':\n dat = datetime.datetime.now().date()\n tim = datetime.datetime.now().time()\n nr = nr_ki[:7]\n rev = nr_ki[-1]\n\n # if the project revision is higher than A, the previous one gets ARCHIVED status\n if ord(rev) > 65 and ord(rev) < 91:\n prev_rev = chr(ord(rev)-1) # the letter preceding the given one\n\n # fetch the ARCHIVED status from the DB\n cur.execute(\"SELECT stat_name FROM status WHERE stat_id = 4\")\n for arch in cur:\n arch = arch[0]\n # update the project number to ARCHIVED\n cur.execute(\"UPDATE all_ki SET rev_status = %s WHERE nr_ki = %s AND rev = %s\",\\\n (arch, nr, prev_rev))\n cs.commit()\n cur.execute(\"INSERT INTO history (nr_ki, rev, stat, dat, tim) \\\n VALUES (%s, %s, %s, %s, %s)\", (nr, prev_rev, arch, dat, tim))\n cur.execute(\"DELETE FROM all_ki WHERE nr_ki = %s AND rev = %s AND rev_status = %s\",\\\n (nr, prev_rev, arch))\n\n cur.execute(\"SELECT stat_name FROM status WHERE stat_id = 3\")\n for rel in cur:\n rel = rel[0]\n cur.execute(\"UPDATE all_ki SET rev_status = %s WHERE nr_ki = %s AND rev = %s\",\\\n (rel, nr, rev))\n cur.execute(\"INSERT INTO history (nr_ki, rev, stat, dat, tim) \\\n VALUES (%s, %s, %s, %s, %s)\", (nr, rev, rel, dat, tim))\n\n # select the project directory path\n cur.execute(\"SELECT ki_path FROM all_ki WHERE nr_ki = %s and rev = %s\"\\\n % (\"'\" + nr + \"'\", \"'\" + rev + \"'\"))\n for path in cur: path\n # get the path of the latest product revision\n all_rev = os.listdir(path[0])\n for i in all_rev:\n if i[-1] == rev:\n rev_path = path[0] + '\\\\' + i\n # create a new BOM file for the revision\n bom_file = rev_path + '\\\\' + 'BOM.txt'\n file = open(bom_file, 'w')\n files = os.listdir(rev_path)\n for f in files:\n if f != 'BOM.txt' and f[0] != 'F' and f[-4:] == '.pdf' and f[-7:] != 'doc.pdf':\n file.write(f[:-4] + '\\n')\n file.close()\n \nelif accept.lower() != 'y':\n print(\"Not accepted\")\n \ncs.commit()\ncs.close()\n", "repo_name": "arcik888/Aplikacja_Wyszukiwarka_Projektow", "sub_path": "update_release.py", "file_name": "update_release.py", "file_ext": "py", "file_size_in_byte": 2550, "program_lang": "python", "lang": "pl", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "db_connect.Conn", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 54, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 61, "usage_type": "call"}]}
+{"seq_id": "27398438210", "text": "import discord\nimport json\n\nfrom email import message\nfrom discord.utils import get\n\nintents=discord.Intents().all() # get all Intents objects\nintents.message_content = True # allow reading message content\nintents.members = True # allow reading member data\n\ntry:\n # read the config.json settings file\n with open(\"config.json\") as f:\n config=json.load(f)\n token=config['discord_bot_token']\n channelID=int(config['discord_channel_id'])\nexcept FileNotFoundError:\n print('config.json file not found')\nexcept KeyError:\n print('config.json file format is incorrect')\n \n# Discord bot setup\nbot = discord.Client(intents=intents)\n \n# Discord bot status setup\n@bot.event\nasync def on_ready():\n print('Currently logged in as:',bot.user)\n game = discord.Game('coderyo.com')\n # discord.Status.<status> can be online, offline, idle, dnd, invisible\n await 
bot.change_presence(status=discord.Status.online, activity=game)\n\n# Discord AutoRole\n@bot.event\n# when a member joins the Discord server\nasync def on_member_join(member):\n # give the joining member the role with the configured role id\n role = discord.utils.get(member.guild.roles, id=962360300563230800)\n await member.add_roles(role)\n # send a welcome message\n channel = discord.utils.get(member.guild.channels, id=962354694997540935)\n await channel.send(f\"{member.mention} just joined 【CodeRyoᶠʳⁱᵉˢ🍟碼凌薯】!\")\n\n# Discord bot TOKEN\ntry:\n bot.run(token)\nexcept discord.LoginFailure:\n print(\"Discord Bot Token error!\")", "repo_name": "CodeRyoDeveloper/FrenchFries", "sub_path": "example/mainexample.py", "file_name": "mainexample.py", "file_ext": "py", "file_size_in_byte": 1577, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "12", "api": [{"api_name": "discord.Intents", "line_number": 7, "usage_type": "call"}, {"api_name": "json.load", "line_number": 14, "usage_type": "call"}, {"api_name": "discord.Client", "line_number": 23, "usage_type": "call"}, {"api_name": "discord.Game", "line_number": 29, "usage_type": "call"}, {"api_name": "discord.Status", "line_number": 31, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 38, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 38, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 41, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 41, "usage_type": "attribute"}, {"api_name": "discord.LoginFailure", "line_number": 47, "usage_type": "attribute"}]}
+{"seq_id": "29475806936", "text": "\"\"\" Script for preparing directories and files for WRF simulation. This creates a separate directory \r\nfor each WRF simulation e.g. each initialisation time. These simulation directories are self-contained, \r\nwith all inputs, outputs, and executables either copied or linked into them. \r\nThis enables job-level parallelisation when running large reanalysis or reforecasting cases.\r\n\r\nConfig comes from a file, specified via the --config argument. Some configuration options (listed below) \r\ncan also be given at the command line, where they will override the configuration file. \r\nSee example/forecast.yaml for a full list of configuration options. \r\n\r\nUsage:\r\n prepare.py [--config=<file>] [options]\r\n \r\nOptions:\r\n --config=<file> yaml/json file specifying any of the options below\r\n --start=